// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for DMA ops implementations. These generally rely on the fact that
 * the allocated memory contains normal pages in the direct kernel mapping.
 */
#include <linux/dma-map-ops.h>

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = virt_to_page(cpu_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}
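
/*
 * Usage sketch (illustrative, not part of the upstream file): backends
 * normally publish this helper as the ->get_sgtable method of their
 * struct dma_map_ops rather than calling it directly, e.g.:
 *
 *	static const struct dma_map_ops example_dma_ops = {
 *		.get_sgtable	= dma_common_get_sgtable,
 *	};
 *
 * "example_dma_ops" is a made-up name; core code then reaches the
 * helper via dma_get_sgtable_attrs().
 */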

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= count || user_count > count - off)
		return -ENXIO;

	return remap_pfn_range(vma, vma->vm_start,
			page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
	return -ENXIO;
#endif /* CONFIG_MMU */
}
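
/*
 * Usage sketch (illustrative): drivers normally reach this helper
 * through dma_mmap_coherent()/dma_mmap_attrs() from their mmap file
 * operation. Assuming a hypothetical driver that earlier obtained
 * "buf", "buf_dma" and "buf_size" from dma_alloc_coherent():
 *
 *	static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(dev, vma, buf, buf_dma, buf_size);
 *	}
 *
 * The off/count check above rejects any requested window that does not
 * lie entirely within the PAGE_ALIGN(size)-sized buffer.
 */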

struct page *dma_common_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct page *page;

	page = dma_alloc_contiguous(dev, size, gfp);
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size));
	if (!page)
		return NULL;

	*dma_handle = ops->map_page(dev, page, 0, size, dir,
			DMA_ATTR_SKIP_CPU_SYNC);
	if (*dma_handle == DMA_MAPPING_ERROR) {
		dma_free_contiguous(dev, page, size);
		return NULL;
	}

	memset(page_address(page), 0, size);
	return page;
}
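
/*
 * Usage sketch (illustrative): this helper typically backs the
 * ->alloc_pages method of a dma_map_ops instance, and consumers then
 * allocate through dma_alloc_pages():
 *
 *	dma_addr_t dma;
 *	struct page *page;
 *
 *	page = dma_alloc_pages(dev, SZ_64K, &dma, DMA_TO_DEVICE, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *
 * The SZ_64K size and the error path are assumptions for the example;
 * the returned memory is already zeroed by the memset() above.
 */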

void dma_common_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->unmap_page)
		ops->unmap_page(dev, dma_handle, size, dir,
				DMA_ATTR_SKIP_CPU_SYNC);
	dma_free_contiguous(dev, page, size);
}
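
/*
 * Usage sketch (illustrative): the matching release path is
 * dma_free_pages(), called with the same size, handle and direction
 * as the allocation:
 *
 *	dma_free_pages(dev, SZ_64K, page, dma, DMA_TO_DEVICE);
 *
 * The values mirror the hypothetical dma_alloc_pages() example above.
 */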
85}