// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (c) 2014 The Linux Foundation
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

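/*
 * Return the page array that dma_common_pages_remap() attached to a
 * VM_DMA_COHERENT mapping, or NULL if @cpu_addr is not the base of
 * such a mapping.
 */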
struct page **dma_common_find_pages(void *cpu_addr)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || area->flags != VM_DMA_COHERENT)
		return NULL;
	return area->pages;
}

static struct vm_struct *__dma_common_pages_remap(struct page **pages,
		size_t size, pgprot_t prot, const void *caller)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_DMA_COHERENT, caller);
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area;
}

/*
 * Remaps an array of PAGE_SIZE pages into another vm_area.
 * Cannot be used in non-sleeping contexts
 */
void *dma_common_pages_remap(struct page **pages, size_t size,
		pgprot_t prot, const void *caller)
{
	struct vm_struct *area;

	area = __dma_common_pages_remap(pages, size, prot, caller);
	if (!area)
		return NULL;

	area->pages = pages;

	return area->addr;
}
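/*
 * Illustrative sketch (not part of this file): a caller with a
 * long-lived page array can remap scattered pages into one virtually
 * contiguous region.  The page source and the prot value are only
 * example choices:
 *
 *	struct page **pages = kcalloc(4, sizeof(*pages), GFP_KERNEL);
 *	void *vaddr;
 *	int i;
 *
 *	for (i = 0; i < 4; i++)
 *		pages[i] = alloc_page(GFP_KERNEL);
 *	vaddr = dma_common_pages_remap(pages, 4 * PAGE_SIZE,
 *			pgprot_noncached(PAGE_KERNEL),
 *			__builtin_return_address(0));
 *
 * Since area->pages keeps pointing at the caller's array (see
 * dma_common_find_pages()), the array must stay allocated for the
 * lifetime of the mapping.
 */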

/*
 * Remaps an allocated contiguous region into another vm_area.
 * Cannot be used in non-sleeping contexts
 */
void *dma_common_contiguous_remap(struct page *page, size_t size,
		pgprot_t prot, const void *caller)
{
	int i;
	struct page **pages;
	struct vm_struct *area;

	pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		pages[i] = nth_page(page, i);

	area = __dma_common_pages_remap(pages, size, prot, caller);

	kfree(pages);

	if (!area)
		return NULL;
	return area->addr;
}
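/*
 * Note the asymmetry with dma_common_pages_remap(): the page array
 * built here is only a temporary, freed before returning, and
 * area->pages is never set, so dma_common_find_pages() returns NULL
 * for these mappings.  dma_atomic_pool_init() and arch_dma_alloc()
 * below are the in-file users.
 */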

/*
 * Unmaps a range previously mapped by dma_common_*_remap.
 *
 * The vm_struct flags are checked directly rather than via
 * dma_common_find_pages(): mappings made by
 * dma_common_contiguous_remap() carry a NULL area->pages and would
 * otherwise be rejected (and leaked) as invalid coherent areas.
 */
void dma_common_free_remap(void *cpu_addr, size_t size)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || area->flags != VM_DMA_COHERENT) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
	vunmap(cpu_addr);
}

#ifdef CONFIG_DMA_DIRECT_REMAP
static struct gen_pool *atomic_pool __ro_after_init;

#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
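/*
 * e.g. "coherent_pool=2M" on the kernel command line; memparse()
 * accepts the usual K/M/G size suffixes.
 */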

static gfp_t dma_atomic_pool_gfp(void)
{
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		return GFP_DMA;
	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		return GFP_DMA32;
	return GFP_KERNEL;
}

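/*
 * Carve the pool out at boot: take a physically contiguous chunk from
 * the default CMA area if one exists, otherwise from the most
 * constrained page allocator zone available so the memory remains
 * addressable by devices with narrow DMA masks.  The chunk's cached
 * kernel alias is flushed, the range is remapped with
 * pgprot_dmacoherent(), and the new mapping is handed to a genpool
 * that backs the atomic allocation helpers below.
 */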
static int __init dma_atomic_pool_init(void)
{
	unsigned int pool_size_order = get_order(atomic_pool_size);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	int ret;

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order, false);
	else
		page = alloc_pages(dma_atomic_pool_gfp(), pool_size_order);
	if (!page)
		goto out;

	arch_dma_prep_coherent(page, atomic_pool_size);

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto free_page;

	addr = dma_common_contiguous_remap(page, atomic_pool_size,
					   pgprot_dmacoherent(PAGE_KERNEL),
					   __builtin_return_address(0));
	if (!addr)
		goto destroy_genpool;

	ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
				page_to_phys(page), atomic_pool_size, -1);
	if (ret)
		goto remove_mapping;
	gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);

	pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
		atomic_pool_size / 1024);
	return 0;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
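/*
 * Run at postcore_initcall() time so the pool exists before normal
 * driver initcalls can reach the atomic allocation path.
 */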
postcore_initcall(dma_atomic_pool_init);

bool dma_in_atomic_pool(void *start, size_t size)
{
	if (unlikely(!atomic_pool))
		return false;

	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = pfn_to_page(__phys_to_pfn(phys));
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

bool dma_free_from_pool(void *start, size_t size)
{
	if (!dma_in_atomic_pool(start, size))
		return false;
	gen_pool_free(atomic_pool, (unsigned long)start, size);
	return true;
}
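/*
 * Illustrative sketch (not part of this file): the expected pairing
 * from a context that cannot sleep:
 *
 *	struct page *page;
 *	void *vaddr;
 *
 *	vaddr = dma_alloc_from_pool(SZ_4K, &page, GFP_ATOMIC);
 *	if (vaddr) {
 *		...
 *		dma_free_from_pool(vaddr, SZ_4K);
 *	}
 *
 * arch_dma_alloc()/arch_dma_free() below use exactly this pairing for
 * non-blocking gfp flags.
 */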

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flags, unsigned long attrs)
{
	struct page *page = NULL;
	void *ret;

	size = PAGE_ALIGN(size);

	if (!gfpflags_allow_blocking(flags)) {
		ret = dma_alloc_from_pool(size, &page, flags);
		if (!ret)
			return NULL;
		goto done;
	}

	page = __dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
	if (!page)
		return NULL;

	/* remove any dirty cache lines on the kernel alias */
	arch_dma_prep_coherent(page, size);

	/* create a coherent mapping */
	ret = dma_common_contiguous_remap(page, size,
			dma_pgprot(dev, PAGE_KERNEL, attrs),
			__builtin_return_address(0));
	if (!ret) {
		__dma_direct_free_pages(dev, size, page);
		return ret;
	}

	memset(ret, 0, size);
done:
	*dma_handle = phys_to_dma(dev, page_to_phys(page));
	return ret;
}
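/*
 * Note the atomic-pool path above serves every device from the one
 * global pool placed by dma_atomic_pool_gfp(); unlike the blocking
 * path it does not consult the device's DMA mask when picking memory.
 */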

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	if (!dma_free_from_pool(vaddr, PAGE_ALIGN(size))) {
		phys_addr_t phys = dma_to_phys(dev, dma_handle);
		struct page *page = pfn_to_page(__phys_to_pfn(phys));

		vunmap(vaddr);
		__dma_direct_free_pages(dev, size, page);
	}
}

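/*
 * For dma-direct the PFN backing a coherent allocation follows
 * directly from the DMA address; used by the common mmap and
 * get_sgtable helpers.
 */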
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
		dma_addr_t dma_addr)
{
	return __phys_to_pfn(dma_to_phys(dev, dma_addr));
}
#endif /* CONFIG_DMA_DIRECT_REMAP */