// SPDX-License-Identifier: GPL-2.0
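/*
 * udmabuf: create dma-bufs from memfd (shmem) pages, so buffers
 * allocated by userspace can be handed to dma-buf importers.
 */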
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>

static const u32 list_limit = 1024;  /* udmabuf_create_list->count limit */
static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes */

struct udmabuf {
	pgoff_t pagecount;
	struct page **pages;
	struct sg_table *sg;
	struct miscdevice *device;
};

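/* Page faults on the mmap()ed dma-buf are served from the pinned shmem pages. */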
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct udmabuf *ubuf = vma->vm_private_data;

	vmf->page = ubuf->pages[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct udmabuf_vm_ops = {
	.fault = udmabuf_vm_fault,
};

static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct udmabuf *ubuf = buf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &udmabuf_vm_ops;
	vma->vm_private_data = ubuf;
	return 0;
}

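/*
 * Build a scatter-gather table covering all pinned pages and map it
 * for DMA to @dev; the caller owns the returned table.
 */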
static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
				     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct sg_table *sg;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
					0, ubuf->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret < 0)
		goto err;
	if (!dma_map_sg(dev, sg->sgl, sg->nents, direction)) {
		ret = -EINVAL;
		goto err;
	}
	return sg;

err:
	sg_free_table(sg);
	kfree(sg);
	return ERR_PTR(ret);
}

static void put_sg_table(struct device *dev, struct sg_table *sg,
			 enum dma_data_direction direction)
{
	dma_unmap_sg(dev, sg->sgl, sg->nents, direction);
	sg_free_table(sg);
	kfree(sg);
}

static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
				    enum dma_data_direction direction)
{
	return get_sg_table(at->dev, at->dmabuf, direction);
}

static void unmap_udmabuf(struct dma_buf_attachment *at,
			  struct sg_table *sg,
			  enum dma_data_direction direction)
{
	put_sg_table(at->dev, sg, direction);
}

static void release_udmabuf(struct dma_buf *buf)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	pgoff_t pg;

	if (ubuf->sg)
		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);

	for (pg = 0; pg < ubuf->pagecount; pg++)
		put_page(ubuf->pages[pg]);
	kfree(ubuf->pages);
	kfree(ubuf);
}

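/*
 * CPU access: create and cache the device mapping on first use,
 * afterwards just sync caches around the access.
 */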
static int begin_cpu_udmabuf(struct dma_buf *buf,
			     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	int ret = 0;

	if (!ubuf->sg) {
		ubuf->sg = get_sg_table(dev, buf, direction);
		if (IS_ERR(ubuf->sg)) {
			ret = PTR_ERR(ubuf->sg);
			/* don't leave an ERR_PTR behind for release_udmabuf() */
			ubuf->sg = NULL;
		}
	} else {
		dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
				    direction);
	}

	return ret;
}

static int end_cpu_udmabuf(struct dma_buf *buf,
			   enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;

	if (!ubuf->sg)
		return -EINVAL;

	dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
	return 0;
}

static const struct dma_buf_ops udmabuf_ops = {
	.cache_sgt_mapping = true,
	.map_dma_buf	   = map_udmabuf,
	.unmap_dma_buf	   = unmap_udmabuf,
	.release	   = release_udmabuf,
	.mmap		   = mmap_udmabuf,
	.begin_cpu_access  = begin_cpu_udmabuf,
	.end_cpu_access	   = end_cpu_udmabuf,
};

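/*
 * A backing memfd is accepted only if it is already sealed against
 * shrinking and has no write seal set.
 */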
#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)

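/*
 * Validate each (memfd, offset, size) range, pin the shmem pages and
 * export them as a new dma-buf, returning a dma-buf fd on success.
 */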
static long udmabuf_create(struct miscdevice *device,
			   struct udmabuf_create_list *head,
			   struct udmabuf_create_item *list)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct file *memfd = NULL;
	struct udmabuf *ubuf;
	struct dma_buf *buf;
	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
	struct page *page;
	int seals, ret = -EINVAL;
	u32 i, flags;

	ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
	if (!ubuf)
		return -ENOMEM;

	/* first pass: validate alignment and enforce the total size limit */
	pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	for (i = 0; i < head->count; i++) {
		if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
			goto err;
		if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
			goto err;
		ubuf->pagecount += list[i].size >> PAGE_SHIFT;
		if (ubuf->pagecount > pglimit)
			goto err;
	}
	ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
				    GFP_KERNEL);
	if (!ubuf->pages) {
		ret = -ENOMEM;
		goto err;
	}

	/* second pass: check the seals on each memfd and pin its pages */
	pgbuf = 0;
	for (i = 0; i < head->count; i++) {
		ret = -EBADFD;
		memfd = fget(list[i].memfd);
		if (!memfd)
			goto err;
		if (!shmem_mapping(file_inode(memfd)->i_mapping))
			goto err;
		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
		if (seals == -EINVAL)
			goto err;
		ret = -EINVAL;
		if ((seals & SEALS_WANTED) != SEALS_WANTED ||
		    (seals & SEALS_DENIED) != 0)
			goto err;
		pgoff = list[i].offset >> PAGE_SHIFT;
		pgcnt = list[i].size >> PAGE_SHIFT;
		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
			page = shmem_read_mapping_page(
				file_inode(memfd)->i_mapping, pgoff + pgidx);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto err;
			}
			ubuf->pages[pgbuf++] = page;
		}
		fput(memfd);
		memfd = NULL;
	}

	exp_info.ops  = &udmabuf_ops;
	exp_info.size = ubuf->pagecount << PAGE_SHIFT;
	exp_info.priv = ubuf;
	exp_info.flags = O_RDWR;

	ubuf->device = device;
	buf = dma_buf_export(&exp_info);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto err;
	}

	flags = 0;
	if (head->flags & UDMABUF_FLAGS_CLOEXEC)
		flags |= O_CLOEXEC;
	return dma_buf_fd(buf, flags);

err:
	while (pgbuf > 0)
		put_page(ubuf->pages[--pgbuf]);
	if (memfd)
		fput(memfd);
	kfree(ubuf->pages);
	kfree(ubuf);
	return ret;
}

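/*
 * Minimal userspace sketch of the single-buffer path (illustrative
 * only; assumes the <linux/udmabuf.h> uapi header and a page-aligned
 * size):
 *
 *	int devfd = open("/dev/udmabuf", O_RDWR);
 *	int memfd = memfd_create("buf", MFD_ALLOW_SEALING);
 *	ftruncate(memfd, size);
 *	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);
 *
 *	struct udmabuf_create create = {
 *		.memfd  = memfd,
 *		.flags  = UDMABUF_FLAGS_CLOEXEC,
 *		.offset = 0,
 *		.size   = size,
 *	};
 *	int buffd = ioctl(devfd, UDMABUF_CREATE, &create);
 */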
static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
	struct udmabuf_create create;
	struct udmabuf_create_list head;
	struct udmabuf_create_item list;

	if (copy_from_user(&create, (void __user *)arg,
			   sizeof(create)))
		return -EFAULT;

	head.flags  = create.flags;
	head.count  = 1;
	list.memfd  = create.memfd;
	list.offset = create.offset;
	list.size   = create.size;

	return udmabuf_create(filp->private_data, &head, &list);
}

static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
	struct udmabuf_create_list head;
	struct udmabuf_create_item *list;
	int ret = -EINVAL;
	u32 lsize;

	if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
		return -EFAULT;
	if (head.count > list_limit)
		return -EINVAL;
	lsize = sizeof(struct udmabuf_create_item) * head.count;
	list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
	if (IS_ERR(list))
		return PTR_ERR(list);

	ret = udmabuf_create(filp->private_data, &head, list);
	kfree(list);
	return ret;
}

static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
			  unsigned long arg)
{
	long ret;

	switch (ioctl) {
	case UDMABUF_CREATE:
		ret = udmabuf_ioctl_create(filp, arg);
		break;
	case UDMABUF_CREATE_LIST:
		ret = udmabuf_ioctl_create_list(filp, arg);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	return ret;
}

static const struct file_operations udmabuf_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl = udmabuf_ioctl,
};

static struct miscdevice udmabuf_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "udmabuf",
	.fops  = &udmabuf_fops,
};

static int __init udmabuf_dev_init(void)
{
	return misc_register(&udmabuf_misc);
}

static void __exit udmabuf_dev_exit(void)
{
	misc_deregister(&udmabuf_misc);
}

module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_LICENSE("GPL v2");