blob: 29ddb76187e57d4a85b5a6e980edaf3b515f882a [file] [log] [blame]
Thomas Gleixner1a59d1b82019-05-27 08:55:05 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * Scatter-Gather buffer
4 *
5 * Copyright (c) by Takashi Iwai <tiwai@suse.de>
Linus Torvalds1da177e2005-04-16 15:20:36 -07006 */
7
Linus Torvalds1da177e2005-04-16 15:20:36 -07008#include <linux/slab.h>
9#include <linux/mm.h>
10#include <linux/vmalloc.h>
Takashi Iwai9d069dc2012-09-20 20:29:12 -070011#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070012#include <sound/memalloc.h>
13
14
/* table entries are aligned to 32 */
16#define SGBUF_TBL_ALIGN 32
Clemens Ladisch7ab39922006-10-09 08:13:32 +020017#define sgbuf_align_table(tbl) ALIGN((tbl), SGBUF_TBL_ALIGN)
Linus Torvalds1da177e2005-04-16 15:20:36 -070018
19int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
20{
21 struct snd_sg_buf *sgbuf = dmab->private_data;
22 struct snd_dma_buffer tmpb;
23 int i;
24
25 if (! sgbuf)
26 return -EINVAL;
27
Markus Elfringd712eaf2014-11-21 18:34:48 +010028 vunmap(dmab->area);
Takashi Iwai6af845e2009-03-17 14:00:06 +010029 dmab->area = NULL;
30
Linus Torvalds1da177e2005-04-16 15:20:36 -070031 tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
Takashi Iwai42e748a2018-08-08 17:01:00 +020032 if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG)
33 tmpb.dev.type = SNDRV_DMA_TYPE_DEV_UC;
Linus Torvalds1da177e2005-04-16 15:20:36 -070034 tmpb.dev.dev = sgbuf->dev;
35 for (i = 0; i < sgbuf->pages; i++) {
Takashi Iwai51e9f2e2008-07-30 15:13:33 +020036 if (!(sgbuf->table[i].addr & ~PAGE_MASK))
37 continue; /* continuous pages */
Linus Torvalds1da177e2005-04-16 15:20:36 -070038 tmpb.area = sgbuf->table[i].buf;
Takashi Iwai51e9f2e2008-07-30 15:13:33 +020039 tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
40 tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -070041 snd_dma_free_pages(&tmpb);
42 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070043
44 kfree(sgbuf->table);
45 kfree(sgbuf->page_table);
46 kfree(sgbuf);
47 dmab->private_data = NULL;
48
49 return 0;
50}
51
Takashi Iwai51e9f2e2008-07-30 15:13:33 +020052#define MAX_ALLOC_PAGES 32
53
Linus Torvalds1da177e2005-04-16 15:20:36 -070054void *snd_malloc_sgbuf_pages(struct device *device,
55 size_t size, struct snd_dma_buffer *dmab,
56 size_t *res_size)
57{
58 struct snd_sg_buf *sgbuf;
Takashi Iwai51e9f2e2008-07-30 15:13:33 +020059 unsigned int i, pages, chunk, maxpages;
Linus Torvalds1da177e2005-04-16 15:20:36 -070060 struct snd_dma_buffer tmpb;
Takashi Iwai51e9f2e2008-07-30 15:13:33 +020061 struct snd_sg_page *table;
62 struct page **pgtable;
Takashi Iwai42e748a2018-08-08 17:01:00 +020063 int type = SNDRV_DMA_TYPE_DEV;
64 pgprot_t prot = PAGE_KERNEL;
Linus Torvalds1da177e2005-04-16 15:20:36 -070065
66 dmab->area = NULL;
67 dmab->addr = 0;
Panagiotis Issaris59feddb2006-07-25 15:28:03 +020068 dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -070069 if (! sgbuf)
70 return NULL;
Takashi Iwai42e748a2018-08-08 17:01:00 +020071 if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG) {
72 type = SNDRV_DMA_TYPE_DEV_UC;
73#ifdef pgprot_noncached
74 prot = pgprot_noncached(PAGE_KERNEL);
75#endif
76 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070077 sgbuf->dev = device;
78 pages = snd_sgbuf_aligned_pages(size);
79 sgbuf->tblsize = sgbuf_align_table(pages);
Takashi Iwai51e9f2e2008-07-30 15:13:33 +020080 table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
81 if (!table)
Linus Torvalds1da177e2005-04-16 15:20:36 -070082 goto _failed;
Takashi Iwai51e9f2e2008-07-30 15:13:33 +020083 sgbuf->table = table;
84 pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
85 if (!pgtable)
Linus Torvalds1da177e2005-04-16 15:20:36 -070086 goto _failed;
Takashi Iwai51e9f2e2008-07-30 15:13:33 +020087 sgbuf->page_table = pgtable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070088
Takashi Iwai51e9f2e2008-07-30 15:13:33 +020089 /* allocate pages */
90 maxpages = MAX_ALLOC_PAGES;
91 while (pages > 0) {
92 chunk = pages;
93 /* don't be too eager to take a huge chunk */
94 if (chunk > maxpages)
95 chunk = maxpages;
96 chunk <<= PAGE_SHIFT;
Takashi Iwai42e748a2018-08-08 17:01:00 +020097 if (snd_dma_alloc_pages_fallback(type, device,
Takashi Iwai51e9f2e2008-07-30 15:13:33 +020098 chunk, &tmpb) < 0) {
99 if (!sgbuf->pages)
Takashi Iwaic810f902012-08-03 12:48:32 +0200100 goto _failed;
Takashi Iwai51e9f2e2008-07-30 15:13:33 +0200101 if (!res_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700102 goto _failed;
Takashi Iwai51e9f2e2008-07-30 15:13:33 +0200103 size = sgbuf->pages * PAGE_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104 break;
105 }
Takashi Iwai51e9f2e2008-07-30 15:13:33 +0200106 chunk = tmpb.bytes >> PAGE_SHIFT;
107 for (i = 0; i < chunk; i++) {
108 table->buf = tmpb.area;
109 table->addr = tmpb.addr;
110 if (!i)
111 table->addr |= chunk; /* mark head */
112 table++;
113 *pgtable++ = virt_to_page(tmpb.area);
114 tmpb.area += PAGE_SIZE;
115 tmpb.addr += PAGE_SIZE;
116 }
117 sgbuf->pages += chunk;
118 pages -= chunk;
119 if (chunk < maxpages)
120 maxpages = chunk;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700121 }
122
123 sgbuf->size = size;
Takashi Iwai42e748a2018-08-08 17:01:00 +0200124 dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700125 if (! dmab->area)
126 goto _failed;
Takashi Iwai51e9f2e2008-07-30 15:13:33 +0200127 if (res_size)
128 *res_size = sgbuf->size;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700129 return dmab->area;
130
131 _failed:
132 snd_free_sgbuf_pages(dmab); /* free the table */
133 return NULL;
134}
Takashi Iwai9d069dc2012-09-20 20:29:12 -0700135
136/*
137 * compute the max chunk size with continuous pages on sg-buffer
138 */
139unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
140 unsigned int ofs, unsigned int size)
141{
142 struct snd_sg_buf *sg = dmab->private_data;
143 unsigned int start, end, pg;
144
Takashi Iwai2a1f3362020-06-15 18:00:44 +0200145 if (!sg)
146 return size;
147
Takashi Iwai9d069dc2012-09-20 20:29:12 -0700148 start = ofs >> PAGE_SHIFT;
149 end = (ofs + size - 1) >> PAGE_SHIFT;
150 /* check page continuity */
151 pg = sg->table[start].addr >> PAGE_SHIFT;
152 for (;;) {
153 start++;
154 if (start > end)
155 break;
156 pg++;
157 if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
158 return (start << PAGE_SHIFT) - ofs;
159 }
160 /* ok, all on continuous pages */
161 return size;
162}
163EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);