/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-resv.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
	} while (0)

struct vb2_dma_sg_buf {
	struct device *dev;
	void *vaddr;
	struct page **pages;
	struct frame_vector *vec;
	int offset;
	enum dma_data_direction dma_dir;
	struct sg_table sg_table;
	/*
	 * This will point to sg_table when used with the MMAP or USERPTR
	 * memory model, and to the dma_buf sglist when used with the
	 * DMABUF memory model.
	 */
	struct sg_table *dma_sgt;
	size_t size;
	unsigned int num_pages;
	refcount_t refcount;
	struct vb2_vmarea_handler handler;

	struct dma_buf_attachment *db_attach;

	struct vb2_buffer *vb;
};

static void vb2_dma_sg_put(void *buf_priv);

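/*
 * Fill buf->pages[] with buf->num_pages pages. To keep the resulting
 * scatterlist short, try the highest allocation order that still fits in the
 * remaining size, fall back to smaller orders under memory pressure, and
 * split each higher-order allocation so buf->pages[] always holds order-0
 * pages. Everything allocated so far is freed if an allocation fails.
 */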
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	unsigned long size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}

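/*
 * MMAP allocator: allocate the pages, build an sg_table covering them and
 * map it for DMA on the queue's device. The CPU sync is intentionally
 * skipped here; it happens later when the prepare() memop is called.
 */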
static void *vb2_dma_sg_alloc(struct vb2_buffer *vb, struct device *dev,
			      unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	int ret;
	int num_pages;

	if (WARN_ON(!dev) || WARN_ON(!size))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;
	buf->dma_sgt = &buf->sg_table;

	/*
	 * NOTE: dma-sg allocates memory using the page allocator directly, so
	 * there is no memory consistency guarantee, hence dma-sg ignores DMA
	 * attributes passed from the upper layer.
	 */
	buf->pages = kvcalloc(buf->num_pages, sizeof(struct page *), GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, vb->vb2_queue->gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC))
		goto fail_map;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;
	buf->vb = vb;

	refcount_set(&buf->refcount, 1);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_map:
	put_device(buf->dev);
	sg_free_table(buf->dma_sgt);
fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kvfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	if (refcount_dec_and_test(&buf->refcount)) {
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(buf->dma_sgt);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kvfree(buf->pages);
		put_device(buf->dev);
		kfree(buf);
	}
}

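/*
 * Cache maintenance hooks: prepare() syncs the buffer for the device before
 * hardware access, finish() syncs it back for the CPU afterwards. Both are
 * no-ops when the queue asks to skip the cache sync.
 */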
static void vb2_dma_sg_prepare(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (buf->vb->skip_cache_sync_on_prepare)
		return;

	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (buf->vb->skip_cache_sync_on_finish)
		return;

	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}

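/*
 * USERPTR path: pin the userspace pages with a frame vector, wrap them in an
 * sg_table and map it for DMA. A writable pin is only requested when the
 * device may write to the buffer (DMA_FROM_DEVICE or DMA_BIDIRECTIONAL).
 */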
static void *vb2_dma_sg_get_userptr(struct vb2_buffer *vb, struct device *dev,
				    unsigned long vaddr, unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	struct frame_vector *vec;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dev = dev;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;
	buf->vb = vb;
	vec = vb2_create_framevec(vaddr, size,
				  buf->dma_dir == DMA_FROM_DEVICE ||
				  buf->dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec))
		goto userptr_fail_pfnvec;
	buf->vec = vec;

	buf->pages = frame_vector_pages(vec);
	if (IS_ERR(buf->pages))
		goto userptr_fail_sgtable;
	buf->num_pages = frame_vector_count(vec);

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_sgtable;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC))
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_sgtable:
	vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	if (buf->dma_dir == DMA_FROM_DEVICE ||
	    buf->dma_dir == DMA_BIDIRECTIONAL)
		while (--i >= 0)
			set_page_dirty_lock(buf->pages[i]);
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_dma_sg_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct iosys_map map;
	int ret;

	BUG_ON(!buf);

	if (!buf->vaddr) {
		if (buf->db_attach) {
			ret = dma_buf_vmap_unlocked(buf->db_attach->dmabuf, &map);
			buf->vaddr = ret ? NULL : map.vaddr;
		} else {
			buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
		}
	}

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int err;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	err = vm_map_pages(vma, buf->pages, buf->num_pages);
	if (err) {
		printk(KERN_ERR "Remapping memory, error: %d\n", err);
		return err;
	}

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dma_sg_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dma_sg_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->dma_sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir)
		return sgt;

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
		pr_err("failed to map scatterlist\n");
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	return sgt;
}

static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
	vb2_dma_sg_put(dbuf->priv);
}

static int
vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
				       enum dma_data_direction direction)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
	return 0;
}

static int
vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
				     enum dma_data_direction direction)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
	return 0;
}

static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf,
				      struct iosys_map *map)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	iosys_map_set_vaddr(map, buf->vaddr);

	return 0;
}

static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	dma_resv_assert_held(dbuf->resv);

	return vb2_dma_sg_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
	.attach = vb2_dma_sg_dmabuf_ops_attach,
	.detach = vb2_dma_sg_dmabuf_ops_detach,
	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
	.begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
	.end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
	.release = vb2_dma_sg_dmabuf_ops_release,
};

static struct dma_buf *vb2_dma_sg_get_dmabuf(struct vb2_buffer *vb,
					     void *buf_priv,
					     unsigned long flags)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dma_sg_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->dma_sgt))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

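/*
 * Importer side: attach_dmabuf() attaches the dma-buf to the queue's device,
 * map_dmabuf() pins it and obtains the exporter's sg_table before hardware
 * access, unmap_dmabuf() drops the mapping (and any kernel vmap), and
 * detach_dmabuf() releases the attachment again.
 */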
static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment_unlocked(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap_unlocked(buf->db_attach->dmabuf, &map);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_sgt = NULL;
}

static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_sgt))
		vb2_dma_sg_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dma_sg_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
				      struct dma_buf *dbuf, unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct dma_buf_attachment *dba;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->size = size;
	buf->db_attach = dba;
	buf->vb = vb;

	return buf;
}

static void *vb2_dma_sg_cookie(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return buf->dma_sgt;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc = vb2_dma_sg_alloc,
	.put = vb2_dma_sg_put,
	.get_userptr = vb2_dma_sg_get_userptr,
	.put_userptr = vb2_dma_sg_put_userptr,
	.prepare = vb2_dma_sg_prepare,
	.finish = vb2_dma_sg_finish,
	.vaddr = vb2_dma_sg_vaddr,
	.mmap = vb2_dma_sg_mmap,
	.num_users = vb2_dma_sg_num_users,
	.get_dmabuf = vb2_dma_sg_get_dmabuf,
	.map_dmabuf = vb2_dma_sg_map_dmabuf,
	.unmap_dmabuf = vb2_dma_sg_unmap_dmabuf,
	.attach_dmabuf = vb2_dma_sg_attach_dmabuf,
	.detach_dmabuf = vb2_dma_sg_detach_dmabuf,
	.cookie = vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

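/*
 * Typical usage (a minimal sketch, not part of this file): a V4L2 driver
 * that wants scatter/gather buffers points its vb2_queue at these memops
 * before calling vb2_queue_init():
 *
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *	q->dev = dev;			// device the buffers are mapped for
 *	q->mem_ops = &vb2_dma_sg_memops;
 *	ret = vb2_queue_init(q);
 *
 * The queue's dev, dma_dir and gfp_flags are then handed back to the
 * alloc/get_userptr/attach_dmabuf callbacks above.
 */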
MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);