/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

#define IOMMU_MAPPING_ERROR	0

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
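
/*
 * Illustrative sketch (not part of this file): an IOMMU driver would
 * typically pair the cookie helpers in its domain_alloc/domain_free
 * callbacks. The "my_*" names below are hypothetical; only the
 * iommu_get_dma_cookie()/iommu_put_dma_cookie() calls are real.
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned int type)
 *	{
 *		struct my_domain *dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 *
 *		if (!dom)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&dom->domain)) {
 *			kfree(dom);
 *			return NULL;
 *		}
 *		return &dom->domain;
 *	}
 *
 *	static void my_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		kfree(to_my_domain(domain));
 *	}
 */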

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
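
/*
 * Illustrative sketch (not part of this file): a caller managing its own
 * unmanaged domain, but which still wants MSI doorbells remapped, reserves
 * an IOVA window itself and passes its base here. The window base below is
 * an arbitrary example value.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(bus);
 *	dma_addr_t msi_base = SZ_1G;
 *
 *	if (domain && iommu_get_msi_cookie(domain, msi_base))
 *		pr_warn("MSI remapping unavailable\n");
 */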

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev->iommu_fwspec->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static void iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev))
		iova_reserve_pci_windows(to_pci_dev(dev), iovad);

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn, end_pfn;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);
	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
EXPORT_SYMBOL(iommu_dma_init_domain);
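
/*
 * Illustrative sketch (not part of this file): arch code would normally
 * initialise the domain from its DMA ops setup path, sizing the IOVA space
 * from the firmware-described DMA window. The dma_base/size variables here
 * are placeholders for whatever the arch has derived:
 *
 *	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 *
 *	if (iommu_dma_init_domain(domain, dma_base, size, dev))
 *		pr_warn("Failed to initialise DMA domain for %s\n",
 *			dev_name(dev));
 */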

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
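
/*
 * Worked example of the translation above: a non-coherent DMA_TO_DEVICE
 * mapping with no special attributes yields IOMMU_READ only, while a
 * coherent DMA_BIDIRECTIONAL mapping with DMA_ATTR_PRIVILEGED yields
 * IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE | IOMMU_PRIV.
 */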

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count,
		unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					   gfp | __GFP_NORETRY : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = IOMMU_MAPPING_ERROR;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = IOMMU_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = iova;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
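
/*
 * Illustrative sketch (not part of this file): a non-coherent arch would
 * call this from its dma_map_ops .alloc hook, passing a cache-maintenance
 * callback. The __dma_flush_area() helper named below is an assumption
 * standing in for whatever the arch actually provides:
 *
 *	static void flush_page(struct device *dev, const void *virt,
 *			       phys_addr_t phys)
 *	{
 *		__dma_flush_area(virt, PAGE_SIZE);
 *	}
 *
 *	pages = iommu_dma_alloc(dev, size, gfp, attrs,
 *				dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs),
 *				handle, flush_page);
 */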

/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}
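
/*
 * Illustrative sketch (not part of this file): an arch .mmap hook that can
 * recover the page array from the buffer's vmalloc area would simply
 * forward it, roughly:
 *
 *	struct vm_struct *area = find_vm_area(cpu_addr);
 *
 *	if (area && area->pages)
 *		return iommu_dma_mmap(area->pages, size, vma);
 */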

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	size_t iova_off = 0;
	dma_addr_t iova;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
		iova_off = iova_offset(&cookie->iovad, phys);
		size = iova_align(&cookie->iovad, size + iova_off);
	}

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return IOMMU_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return IOMMU_MAPPING_ERROR;
	}
	return iova + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = IOMMU_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != IOMMU_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = IOMMU_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}
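
/*
 * Illustrative sketch (not part of this file): an arch dma_map_ops .map_sg
 * implementation would combine this with dma_info_to_prot(). The my_map_sg
 * name and the coherency query are placeholders for whatever the arch
 * actually provides:
 *
 *	static int my_map_sg(struct device *dev, struct scatterlist *sgl,
 *			     int nelems, enum dma_data_direction dir,
 *			     unsigned long attrs)
 *	{
 *		bool coherent = my_device_is_coherent(dev);
 *
 *		return iommu_dma_map_sg(dev, sgl, nelems,
 *					dma_info_to_prot(dir, coherent, attrs));
 *	}
 */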

void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
}

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == IOMMU_MAPPING_ERROR;
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = __iommu_dma_map(dev, msi_addr, size, prot);
	if (iommu_dma_mapping_error(dev, iova))
		goto out_free_page;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_page:
	kfree(msi_page);
	return NULL;
}

void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
	unsigned long flags;

	if (!domain || !domain->iova_cookie)
		return;

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	if (WARN_ON(!msi_page)) {
		/*
		 * We're called from a void callback, so the best we can do is
		 * 'fail' by filling the message with obviously bogus values.
		 * Since we got this far due to an IOMMU being present, it's
		 * not like the existing address would have worked anyway...
		 */
		msg->address_hi = ~0U;
		msg->address_lo = ~0U;
		msg->data = ~0U;
	} else {
		msg->address_hi = upper_32_bits(msi_page->iova);
		msg->address_lo &= cookie_msi_granule(cookie) - 1;
		msg->address_lo += lower_32_bits(msi_page->iova);
	}
}