// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF System heap exporter
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019, 2020 Linaro Ltd.
 *
 * Portions based off of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static struct dma_heap *sys_heap;

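/*
 * Per-buffer bookkeeping: one system_heap_buffer is allocated for each
 * exported dma-buf. The backing pages live in sg_table, 'attachments'
 * lists the currently attached devices, and vmap_cnt/vaddr refcount a
 * shared kernel mapping of the buffer.
 */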
struct system_heap_buffer {
	struct dma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct sg_table sg_table;
	int vmap_cnt;
	void *vaddr;
};

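/*
 * Per-attachment state: each attached device gets its own copy of the
 * buffer's scatterlist so it can carry device-specific DMA addresses,
 * plus a 'mapped' flag so the CPU-access callbacks can skip tables that
 * are not currently DMA-mapped.
 */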
struct dma_heap_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
	bool mapped;
};

#define LOW_ORDER_GFP  (GFP_HIGHUSER | __GFP_ZERO)
#define HIGH_ORDER_GFP  (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
				| __GFP_COMP)
static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
/*
 * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
 * to match with the sizes often found in IOMMUs. Using order 4 pages instead
 * of order 0 pages can significantly improve the performance of many IOMMUs
 * by reducing TLB pressure and time spent updating page tables.
 */
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)

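/*
 * Duplicate the buffer's scatterlist for a new attachment. Only the
 * page/length/offset data is copied; DMA addresses are filled in later,
 * when the attachment is actually mapped.
 */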
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sgtable_sg(table, sg, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

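/*
 * dma-buf attach: give the attaching device a private copy of the
 * scatterlist and link it into the buffer's attachment list under the
 * buffer lock. detach undoes both steps and frees the copy.
 */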
static int system_heap_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	struct sg_table *table;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(&buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void system_heap_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(a->table);
	kfree(a->table);
	kfree(a);
}

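/*
 * Map the attachment's scatterlist for DMA. dma_map_sgtable() fills in
 * the DMA addresses of the per-attachment table; the 'mapped' flag gates
 * the cache maintenance done in the begin/end_cpu_access callbacks.
 */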
static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(ret);

	a->mapped = true;
	return table;
}

static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *table,
				      enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

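/*
 * CPU access bracketing: sync every currently mapped attachment (and any
 * kernel vmap of the buffer) so that the CPU sees device writes before
 * access begins, and devices see CPU writes after access ends.
 */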
static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
						enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					      enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

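/*
 * Map the buffer into userspace. remap_pfn_range() is called once per
 * page even for higher-order allocations, walking the scatterlist pages
 * starting from the page offset given in vm_pgoff.
 */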
static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table = &buffer->sg_table;
	unsigned long addr = vma->vm_start;
	struct sg_page_iter piter;
	int ret;

	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
		struct page *page = sg_page_iter_page(&piter);

		ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += PAGE_SIZE;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

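/*
 * Build a contiguous kernel mapping of the buffer: gather every backing
 * page into a temporary array and vmap() it. The array itself is
 * allocated with vmalloc() since it can get large for big buffers.
 */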
static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
{
	struct sg_table *table = &buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;
	struct sg_page_iter piter;
	void *vaddr;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	for_each_sgtable_page(table, &piter, 0) {
		WARN_ON(tmp - pages >= npages);
		*tmp++ = sg_page_iter_page(&piter);
	}

	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static int system_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		iosys_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = system_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	iosys_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

static void system_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	iosys_map_clear(map);
}

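/*
 * Final release, called when the last reference to the dma-buf is
 * dropped: free every backing page at its compound order, then the
 * scatterlist and the buffer metadata itself.
 */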
static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table;
	struct scatterlist *sg;
	int i;

	table = &buffer->sg_table;
	for_each_sgtable_sg(table, sg, i) {
		struct page *page = sg_page(sg);

		__free_pages(page, compound_order(page));
	}
	sg_free_table(table);
	kfree(buffer);
}

static const struct dma_buf_ops system_heap_buf_ops = {
	.attach = system_heap_attach,
	.detach = system_heap_detach,
	.map_dma_buf = system_heap_map_dma_buf,
	.unmap_dma_buf = system_heap_unmap_dma_buf,
	.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = system_heap_dma_buf_end_cpu_access,
	.mmap = system_heap_mmap,
	.vmap = system_heap_vmap,
	.vunmap = system_heap_vunmap,
	.release = system_heap_dma_buf_release,
};

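/*
 * Allocate the largest order <= max_order that still fits within 'size',
 * falling back through the orders[] list on failure. Returns NULL only
 * if even an order-0 page cannot be allocated.
 */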
static struct page *alloc_largest_available(unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		if (size < (PAGE_SIZE << orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_pages(order_flags[i], orders[i]);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

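/*
 * Heap allocate op: satisfy 'len' with the largest pages available,
 * capping later attempts at the order that last succeeded (anything
 * bigger has already failed), then assemble the pages into the buffer's
 * sg_table and export the result as a dma-buf.
 */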
static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
					    unsigned long len,
					    unsigned long fd_flags,
					    unsigned long heap_flags)
{
	struct system_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	unsigned long size_remaining = len;
	unsigned int max_order = orders[0];
	struct dma_buf *dmabuf;
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i, ret = -ENOMEM;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->heap = heap;
	buffer->len = len;

	INIT_LIST_HEAD(&pages);
	i = 0;
	while (size_remaining > 0) {
		/*
		 * Avoid trying to allocate memory if the process
		 * has been killed by SIGKILL.
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto free_buffer;
		}

		page = alloc_largest_available(size_remaining, max_order);
		if (!page)
			goto free_buffer;

		list_add_tail(&page->lru, &pages);
		size_remaining -= page_size(page);
		max_order = compound_order(page);
		i++;
	}

	table = &buffer->sg_table;
	if (sg_alloc_table(table, i, GFP_KERNEL))
		goto free_buffer;

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, page_size(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &system_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}
	return dmabuf;

free_pages:
	for_each_sgtable_sg(table, sg, i) {
		struct page *p = sg_page(sg);

		__free_pages(p, compound_order(p));
	}
	sg_free_table(table);
free_buffer:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		__free_pages(page, compound_order(page));
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops system_heap_ops = {
	.allocate = system_heap_allocate,
};

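/*
 * Minimal userspace usage sketch (illustrative only, not part of this
 * driver; assumes the DMA_HEAP_IOCTL_ALLOC uapi from <linux/dma-heap.h>,
 * and 'buffer_size' is a placeholder):
 *
 *	int heap_fd = open("/dev/dma_heap/system", O_RDWR);
 *	struct dma_heap_allocation_data data = {
 *		.len = buffer_size,
 *		.fd_flags = O_RDWR | O_CLOEXEC,
 *	};
 *
 *	ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
 *
 * On success, data.fd holds the exported dma-buf file descriptor.
 */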
static int system_heap_create(void)
{
	struct dma_heap_export_info exp_info;

	exp_info.name = "system";
	exp_info.ops = &system_heap_ops;
	exp_info.priv = NULL;

	sys_heap = dma_heap_add(&exp_info);
	if (IS_ERR(sys_heap))
		return PTR_ERR(sys_heap);

	return 0;
}
module_init(system_heap_create);