// SPDX-License-Identifier: GPL-2.0
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/iosys-map.h>

static int list_limit = 1024;
module_param(list_limit, int, 0644);
MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024.");

static int size_limit_mb = 64;
module_param(size_limit_mb, int, 0644);
MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is 64.");

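/*
 * Per-buffer driver state: the pinned backing pages, an optional cached
 * scatter-gather table, and the misc device that acts as the DMA device
 * for CPU-access synchronization.
 */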
struct udmabuf {
	pgoff_t pagecount;
	struct page **pages;
	struct sg_table *sg;
	struct miscdevice *device;
};

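/*
 * mmap fault handler: resolve the faulting page offset to the pinned
 * backing page and take an extra reference on it.
 */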
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct udmabuf *ubuf = vma->vm_private_data;
	pgoff_t pgoff = vmf->pgoff;

	if (pgoff >= ubuf->pagecount)
		return VM_FAULT_SIGBUS;
	vmf->page = ubuf->pages[pgoff];
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct udmabuf_vm_ops = {
	.fault = udmabuf_vm_fault,
};

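/* dma-buf mmap: only shared mappings make sense for a shared buffer. */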
static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct udmabuf *ubuf = buf->priv;

	dma_resv_assert_held(buf->resv);

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &udmabuf_vm_ops;
	vma->vm_private_data = ubuf;
	return 0;
}

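/*
 * dma-buf vmap/vunmap: map all backing pages into one contiguous kernel
 * virtual range with vm_map_ram() and hand the address back via iosys_map.
 */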
static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
	struct udmabuf *ubuf = buf->priv;
	void *vaddr;

	dma_resv_assert_held(buf->resv);

	vaddr = vm_map_ram(ubuf->pages, ubuf->pagecount, -1);
	if (!vaddr)
		return -EINVAL;

	iosys_map_set_vaddr(map, vaddr);
	return 0;
}

static void vunmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
	struct udmabuf *ubuf = buf->priv;

	dma_resv_assert_held(buf->resv);

	vm_unmap_ram(map->vaddr, ubuf->pagecount);
}

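/*
 * Build a scatter-gather table covering all backing pages and DMA-map it
 * for the given device; put_sg_table() undoes both steps.
 */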
static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
				     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct sg_table *sg;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
					0, ubuf->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret < 0)
		goto err;
	ret = dma_map_sgtable(dev, sg, direction, 0);
	if (ret < 0)
		goto err;
	return sg;

err:
	sg_free_table(sg);
	kfree(sg);
	return ERR_PTR(ret);
}

static void put_sg_table(struct device *dev, struct sg_table *sg,
			 enum dma_data_direction direction)
{
	dma_unmap_sgtable(dev, sg, direction, 0);
	sg_free_table(sg);
	kfree(sg);
}

static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
				    enum dma_data_direction direction)
{
	return get_sg_table(at->dev, at->dmabuf, direction);
}

static void unmap_udmabuf(struct dma_buf_attachment *at,
			  struct sg_table *sg,
			  enum dma_data_direction direction)
{
	return put_sg_table(at->dev, sg, direction);
}

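/*
 * dma-buf release: drop the cached sg table (if any), release every pinned
 * page and free the bookkeeping.
 */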
static void release_udmabuf(struct dma_buf *buf)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	pgoff_t pg;

	if (ubuf->sg)
		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);

	for (pg = 0; pg < ubuf->pagecount; pg++)
		put_page(ubuf->pages[pg]);
	kfree(ubuf->pages);
	kfree(ubuf);
}

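/*
 * CPU access bracketing: on begin, create (or re-sync) the cached sg table
 * for the misc device; on end, flush the pages back toward the device.
 */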
static int begin_cpu_udmabuf(struct dma_buf *buf,
			     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	int ret = 0;

	if (!ubuf->sg) {
		ubuf->sg = get_sg_table(dev, buf, direction);
		if (IS_ERR(ubuf->sg)) {
			ret = PTR_ERR(ubuf->sg);
			ubuf->sg = NULL;
		}
	} else {
		dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
				    direction);
	}

	return ret;
}

static int end_cpu_udmabuf(struct dma_buf *buf,
			   enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;

	if (!ubuf->sg)
		return -EINVAL;

	dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
	return 0;
}

static const struct dma_buf_ops udmabuf_ops = {
	.cache_sgt_mapping = true,
	.map_dma_buf	   = map_udmabuf,
	.unmap_dma_buf	   = unmap_udmabuf,
	.release	   = release_udmabuf,
	.mmap		   = mmap_udmabuf,
	.vmap		   = vmap_udmabuf,
	.vunmap		   = vunmap_udmabuf,
	.begin_cpu_access  = begin_cpu_udmabuf,
	.end_cpu_access    = end_cpu_udmabuf,
};

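/*
 * The backing memfd must be sealed against shrinking, so the pages cannot
 * be truncated away underneath us, and must not carry a write seal.
 */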
#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)

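/*
 * Core create path: validate every (memfd, offset, size) range, pin the
 * backing pages from shmem or hugetlbfs, and export them as one dma-buf.
 * Returns a new dma-buf file descriptor on success.
 */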
static long udmabuf_create(struct miscdevice *device,
			   struct udmabuf_create_list *head,
			   struct udmabuf_create_item *list)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct file *memfd = NULL;
	struct address_space *mapping = NULL;
	struct udmabuf *ubuf;
	struct dma_buf *buf;
	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
	struct page *page, *hpage = NULL;
	pgoff_t subpgoff, maxsubpgs;
	struct hstate *hpstate;
	int seals, ret = -EINVAL;
	u32 i, flags;

	ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
	if (!ubuf)
		return -ENOMEM;

	pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	for (i = 0; i < head->count; i++) {
		if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
			goto err;
		if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
			goto err;
		ubuf->pagecount += list[i].size >> PAGE_SHIFT;
		if (ubuf->pagecount > pglimit)
			goto err;
	}

	if (!ubuf->pagecount)
		goto err;

	ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
				    GFP_KERNEL);
	if (!ubuf->pages) {
		ret = -ENOMEM;
		goto err;
	}

	pgbuf = 0;
	for (i = 0; i < head->count; i++) {
		ret = -EBADFD;
		memfd = fget(list[i].memfd);
		if (!memfd)
			goto err;
		mapping = memfd->f_mapping;
		if (!shmem_mapping(mapping) && !is_file_hugepages(memfd))
			goto err;
		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
		if (seals == -EINVAL)
			goto err;
		ret = -EINVAL;
		if ((seals & SEALS_WANTED) != SEALS_WANTED ||
		    (seals & SEALS_DENIED) != 0)
			goto err;
		pgoff = list[i].offset >> PAGE_SHIFT;
		pgcnt = list[i].size >> PAGE_SHIFT;
		if (is_file_hugepages(memfd)) {
			hpstate = hstate_file(memfd);
			pgoff = list[i].offset >> huge_page_shift(hpstate);
			subpgoff = (list[i].offset &
				    ~huge_page_mask(hpstate)) >> PAGE_SHIFT;
			maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT;
		}
		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
			if (is_file_hugepages(memfd)) {
				if (!hpage) {
					hpage = find_get_page_flags(mapping, pgoff,
								    FGP_ACCESSED);
					if (!hpage) {
						ret = -EINVAL;
						goto err;
					}
				}
				page = hpage + subpgoff;
				get_page(page);
				subpgoff++;
				if (subpgoff == maxsubpgs) {
					put_page(hpage);
					hpage = NULL;
					subpgoff = 0;
					pgoff++;
				}
			} else {
				page = shmem_read_mapping_page(mapping,
							       pgoff + pgidx);
				if (IS_ERR(page)) {
					ret = PTR_ERR(page);
					goto err;
				}
			}
			ubuf->pages[pgbuf++] = page;
		}
		fput(memfd);
		memfd = NULL;
		if (hpage) {
			put_page(hpage);
			hpage = NULL;
		}
	}

	exp_info.ops  = &udmabuf_ops;
	exp_info.size = ubuf->pagecount << PAGE_SHIFT;
	exp_info.priv = ubuf;
	exp_info.flags = O_RDWR;

	ubuf->device = device;
	buf = dma_buf_export(&exp_info);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto err;
	}

	flags = 0;
	if (head->flags & UDMABUF_FLAGS_CLOEXEC)
		flags |= O_CLOEXEC;
	return dma_buf_fd(buf, flags);

err:
	while (pgbuf > 0)
		put_page(ubuf->pages[--pgbuf]);
	if (memfd)
		fput(memfd);
	kfree(ubuf->pages);
	kfree(ubuf);
	return ret;
}

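/*
 * UDMABUF_CREATE: single-range convenience wrapper around udmabuf_create().
 *
 * A rough sketch of the expected userspace flow (illustrative only, error
 * handling omitted; structure layout and flags come from <linux/udmabuf.h>):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <linux/udmabuf.h>
 *
 *	int make_udmabuf(size_t size)		// size must be page aligned
 *	{
 *		struct udmabuf_create create = { 0 };
 *		int memfd, devfd, dmabuf;
 *
 *		memfd = memfd_create("buffer", MFD_ALLOW_SEALING);
 *		ftruncate(memfd, size);
 *		fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);
 *
 *		devfd = open("/dev/udmabuf", O_RDWR);
 *		create.memfd  = memfd;
 *		create.flags  = UDMABUF_FLAGS_CLOEXEC;
 *		create.offset = 0;
 *		create.size   = size;
 *		dmabuf = ioctl(devfd, UDMABUF_CREATE, &create);
 *
 *		close(devfd);
 *		close(memfd);
 *		return dmabuf;			// dma-buf fd, or negative on failure
 *	}
 */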
static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
	struct udmabuf_create create;
	struct udmabuf_create_list head;
	struct udmabuf_create_item list;

	if (copy_from_user(&create, (void __user *)arg,
			   sizeof(create)))
		return -EFAULT;

	head.flags  = create.flags;
	head.count  = 1;
	list.memfd  = create.memfd;
	list.offset = create.offset;
	list.size   = create.size;

	return udmabuf_create(filp->private_data, &head, &list);
}

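/*
 * UDMABUF_CREATE_LIST: copy the variable-length item array from userspace
 * and build a single dma-buf covering all of the listed ranges.
 */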
static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
	struct udmabuf_create_list head;
	struct udmabuf_create_item *list;
	int ret = -EINVAL;
	u32 lsize;

	if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
		return -EFAULT;
	if (head.count > list_limit)
		return -EINVAL;
	lsize = sizeof(struct udmabuf_create_item) * head.count;
	list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
	if (IS_ERR(list))
		return PTR_ERR(list);

	ret = udmabuf_create(filp->private_data, &head, list);
	kfree(list);
	return ret;
}

static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
			  unsigned long arg)
{
	long ret;

	switch (ioctl) {
	case UDMABUF_CREATE:
		ret = udmabuf_ioctl_create(filp, arg);
		break;
	case UDMABUF_CREATE_LIST:
		ret = udmabuf_ioctl_create_list(filp, arg);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	return ret;
}

static const struct file_operations udmabuf_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl = udmabuf_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = udmabuf_ioctl,
#endif
};

static struct miscdevice udmabuf_misc = {
	.minor          = MISC_DYNAMIC_MINOR,
	.name           = "udmabuf",
	.fops           = &udmabuf_fops,
};

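/*
 * Register the /dev/udmabuf misc device and give it a 64-bit DMA mask so
 * it can be used as the mapping device for the cached sg table.
 */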
static int __init udmabuf_dev_init(void)
{
	int ret;

	ret = misc_register(&udmabuf_misc);
	if (ret < 0) {
		pr_err("Could not initialize udmabuf device\n");
		return ret;
	}

	ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
					   DMA_BIT_MASK(64));
	if (ret < 0) {
		pr_err("Could not setup DMA mask for udmabuf device\n");
		misc_deregister(&udmabuf_misc);
		return ret;
	}

	return 0;
}

static void __exit udmabuf_dev_exit(void)
{
	misc_deregister(&udmabuf_misc);
}

module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");