#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ktime.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include "gup_test.h"

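/*
 * Release the references taken on @pages, using the release call that
 * matches how @cmd originally acquired them: put_page() for the plain
 * GUP commands, unpin_user_pages() for the FOLL_PIN-based commands.
 */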
static void put_back_pages(unsigned int cmd, struct page **pages,
			   unsigned long nr_pages, unsigned int gup_test_flags)
{
	unsigned long i;

	switch (cmd) {
	case GUP_FAST_BENCHMARK:
	case GUP_BASIC_TEST:
		for (i = 0; i < nr_pages; i++)
			put_page(pages[i]);
		break;

	case PIN_FAST_BENCHMARK:
	case PIN_BASIC_TEST:
	case PIN_LONGTERM_BENCHMARK:
		unpin_user_pages(pages, nr_pages);
		break;
	case DUMP_USER_PAGES_TEST:
		if (gup_test_flags & GUP_TEST_FLAG_DUMP_PAGES_USE_PIN) {
			unpin_user_pages(pages, nr_pages);
		} else {
			for (i = 0; i < nr_pages; i++)
				put_page(pages[i]);
		}
		break;
	}
}

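/*
 * For the pinning commands, check that every page actually reports as
 * dma-pinned (and, for PIN_LONGTERM_BENCHMARK, as long-term pinnable);
 * dump the first offending page, if any.
 */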
static void verify_dma_pinned(unsigned int cmd, struct page **pages,
			      unsigned long nr_pages)
{
	unsigned long i;
	struct folio *folio;

	switch (cmd) {
	case PIN_FAST_BENCHMARK:
	case PIN_BASIC_TEST:
	case PIN_LONGTERM_BENCHMARK:
		for (i = 0; i < nr_pages; i++) {
			folio = page_folio(pages[i]);

			if (WARN(!folio_maybe_dma_pinned(folio),
				 "pages[%lu] is NOT dma-pinned\n", i)) {
				dump_page(&folio->page, "gup_test failure");
				break;
			} else if (cmd == PIN_LONGTERM_BENCHMARK &&
				   WARN(!folio_is_longterm_pinnable(folio),
					"pages[%lu] is NOT pinnable but pinned\n",
					i)) {
				dump_page(&folio->page, "gup_test failure");
				break;
			}
		}
		break;
	}
}

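/*
 * Run dump_page() against the pages selected by the user via
 * .which_pages[]: a 1-based array of page indices into the pinned
 * range (zero entries are ignored).
 */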
static void dump_pages_test(struct gup_test *gup, struct page **pages,
			    unsigned long nr_pages)
{
	unsigned int index_to_dump;
	unsigned int i;

	/*
	 * Zero out any user-supplied page index that is out of range. Remember:
	 * .which_pages[] contains a 1-based set of page indices.
	 */
	for (i = 0; i < GUP_TEST_MAX_PAGES_TO_DUMP; i++) {
		if (gup->which_pages[i] > nr_pages) {
			pr_warn("ZEROING due to out of range: .which_pages[%u]: %u\n",
				i, gup->which_pages[i]);
			gup->which_pages[i] = 0;
		}
	}

	for (i = 0; i < GUP_TEST_MAX_PAGES_TO_DUMP; i++) {
		index_to_dump = gup->which_pages[i];

		if (index_to_dump) {
			index_to_dump--; /* decode from 1-based to 0-based */
			pr_info("---- page #%u, starting from user virt addr: 0x%llx\n",
				index_to_dump, gup->addr);
			dump_page(pages[index_to_dump],
				  "gup_test: dump_pages() test");
		}
	}
}

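/*
 * Core of the benchmark/test commands: pin or get the user address range
 * described by @gup, record how long the get and put phases took, and
 * optionally verify and/or dump the resulting pages.
 */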
static int __gup_test_ioctl(unsigned int cmd,
			    struct gup_test *gup)
{
	ktime_t start_time, end_time;
	unsigned long i, nr_pages, addr, next;
	long nr;
	struct page **pages;
	int ret = 0;
	bool needs_mmap_lock =
		cmd != GUP_FAST_BENCHMARK && cmd != PIN_FAST_BENCHMARK;

	if (gup->size > ULONG_MAX)
		return -EINVAL;

	nr_pages = gup->size / PAGE_SIZE;
	pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	if (needs_mmap_lock && mmap_read_lock_killable(current->mm)) {
		ret = -EINTR;
		goto free_pages;
	}

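	/*
	 * Walk the requested range in chunks of at most nr_pages_per_call
	 * pages, timing the whole sequence of GUP calls. The final chunk is
	 * trimmed so the walk never runs past gup->addr + gup->size, and a
	 * short or failed GUP call terminates the walk early.
	 */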
	i = 0;
	nr = gup->nr_pages_per_call;
	start_time = ktime_get();
	for (addr = gup->addr; addr < gup->addr + gup->size; addr = next) {
		if (nr != gup->nr_pages_per_call)
			break;

		next = addr + nr * PAGE_SIZE;
		if (next > gup->addr + gup->size) {
			next = gup->addr + gup->size;
			nr = (next - addr) / PAGE_SIZE;
		}

		switch (cmd) {
		case GUP_FAST_BENCHMARK:
			nr = get_user_pages_fast(addr, nr, gup->gup_flags,
						 pages + i);
			break;
		case GUP_BASIC_TEST:
			nr = get_user_pages(addr, nr, gup->gup_flags, pages + i);
			break;
		case PIN_FAST_BENCHMARK:
			nr = pin_user_pages_fast(addr, nr, gup->gup_flags,
						 pages + i);
			break;
		case PIN_BASIC_TEST:
			nr = pin_user_pages(addr, nr, gup->gup_flags, pages + i);
			break;
		case PIN_LONGTERM_BENCHMARK:
			nr = pin_user_pages(addr, nr,
					    gup->gup_flags | FOLL_LONGTERM,
					    pages + i);
			break;
		case DUMP_USER_PAGES_TEST:
			if (gup->test_flags & GUP_TEST_FLAG_DUMP_PAGES_USE_PIN)
				nr = pin_user_pages(addr, nr, gup->gup_flags,
						    pages + i);
			else
				nr = get_user_pages(addr, nr, gup->gup_flags,
						    pages + i);
			break;
		default:
			ret = -EINVAL;
			goto unlock;
		}

		if (nr <= 0)
			break;
		i += nr;
	}
	end_time = ktime_get();

	/* Shifting the meaning of nr_pages: it is now the actual number of pages pinned: */
	nr_pages = i;

	gup->get_delta_usec = ktime_us_delta(end_time, start_time);
	gup->size = addr - gup->addr;

	/*
	 * Take a moment, excluded from the benchmark timings, to verify the
	 * DMA pinned state: print a warning if any non-dma-pinned pages are
	 * found.
	 */
	verify_dma_pinned(cmd, pages, nr_pages);

	if (cmd == DUMP_USER_PAGES_TEST)
		dump_pages_test(gup, pages, nr_pages);

	start_time = ktime_get();

	put_back_pages(cmd, pages, nr_pages, gup->test_flags);

	end_time = ktime_get();
	gup->put_delta_usec = ktime_us_delta(end_time, start_time);

unlock:
	if (needs_mmap_lock)
		mmap_read_unlock(current->mm);
free_pages:
	kvfree(pages);
	return ret;
}

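/*
 * Global state for the PIN_LONGTERM_TEST_* commands: pages pinned by
 * PIN_LONGTERM_TEST_START stay pinned until PIN_LONGTERM_TEST_STOP, or
 * until the file is closed. pin_longterm_test_mutex serializes the
 * ioctl paths that touch this state.
 */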
static DEFINE_MUTEX(pin_longterm_test_mutex);
static struct page **pin_longterm_test_pages;
static unsigned long pin_longterm_test_nr_pages;

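/*
 * Unpin whatever is still pinned, free the pages array and reset the
 * global state so that a new PIN_LONGTERM_TEST_START can run.
 */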
static inline void pin_longterm_test_stop(void)
{
	if (pin_longterm_test_pages) {
		if (pin_longterm_test_nr_pages)
			unpin_user_pages(pin_longterm_test_pages,
					 pin_longterm_test_nr_pages);
		kvfree(pin_longterm_test_pages);
		pin_longterm_test_pages = NULL;
		pin_longterm_test_nr_pages = 0;
	}
}

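/*
 * Long-term pin the user range described by struct pin_longterm_test,
 * honoring the USE_WRITE/USE_FAST flags. The pins are left in place for
 * later PIN_LONGTERM_TEST_READ/STOP commands; a failure partway through
 * unwinds everything via pin_longterm_test_stop().
 */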
static inline int pin_longterm_test_start(unsigned long arg)
{
	long nr_pages, cur_pages, addr, remaining_pages;
	int gup_flags = FOLL_LONGTERM;
	struct pin_longterm_test args;
	struct page **pages;
	int ret = 0;
	bool fast;

	if (pin_longterm_test_pages)
		return -EINVAL;

	if (copy_from_user(&args, (void __user *)arg, sizeof(args)))
		return -EFAULT;

	if (args.flags &
	    ~(PIN_LONGTERM_TEST_FLAG_USE_WRITE|PIN_LONGTERM_TEST_FLAG_USE_FAST))
		return -EINVAL;
	if (!IS_ALIGNED(args.addr | args.size, PAGE_SIZE))
		return -EINVAL;
	if (args.size > LONG_MAX)
		return -EINVAL;
	nr_pages = args.size / PAGE_SIZE;
	if (!nr_pages)
		return -EINVAL;

	pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	if (args.flags & PIN_LONGTERM_TEST_FLAG_USE_WRITE)
		gup_flags |= FOLL_WRITE;
	fast = !!(args.flags & PIN_LONGTERM_TEST_FLAG_USE_FAST);

	if (!fast && mmap_read_lock_killable(current->mm)) {
		kvfree(pages);
		return -EINTR;
	}

	pin_longterm_test_pages = pages;
	pin_longterm_test_nr_pages = 0;

	while (nr_pages - pin_longterm_test_nr_pages) {
		remaining_pages = nr_pages - pin_longterm_test_nr_pages;
		addr = args.addr + pin_longterm_test_nr_pages * PAGE_SIZE;

		if (fast)
			cur_pages = pin_user_pages_fast(addr, remaining_pages,
							gup_flags, pages);
		else
			cur_pages = pin_user_pages(addr, remaining_pages,
						   gup_flags, pages);
		if (cur_pages < 0) {
			pin_longterm_test_stop();
			ret = cur_pages;
			break;
		}
		pin_longterm_test_nr_pages += cur_pages;
		pages += cur_pages;
	}

	if (!fast)
		mmap_read_unlock(current->mm);
	return ret;
}

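/*
 * Copy the content of each pinned page, in order, to the user buffer
 * whose address is passed via @arg, so that userspace can check what
 * the pinned pages actually contain.
 */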
static inline int pin_longterm_test_read(unsigned long arg)
{
	__u64 user_addr;
	unsigned long i;

	if (!pin_longterm_test_pages)
		return -EINVAL;

	if (copy_from_user(&user_addr, (void __user *)arg, sizeof(user_addr)))
		return -EFAULT;

	for (i = 0; i < pin_longterm_test_nr_pages; i++) {
		void *addr = kmap_local_page(pin_longterm_test_pages[i]);
		unsigned long ret;

		ret = copy_to_user((void __user *)(unsigned long)user_addr, addr,
				   PAGE_SIZE);
		kunmap_local(addr);
		if (ret)
			return -EFAULT;
		user_addr += PAGE_SIZE;
	}
	return 0;
}

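/* Dispatcher for the PIN_LONGTERM_TEST_* commands, serialized by the mutex. */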
static long pin_longterm_test_ioctl(struct file *filep, unsigned int cmd,
				    unsigned long arg)
{
	int ret = -EINVAL;

	if (mutex_lock_killable(&pin_longterm_test_mutex))
		return -EINTR;

	switch (cmd) {
	case PIN_LONGTERM_TEST_START:
		ret = pin_longterm_test_start(arg);
		break;
	case PIN_LONGTERM_TEST_STOP:
		pin_longterm_test_stop();
		ret = 0;
		break;
	case PIN_LONGTERM_TEST_READ:
		ret = pin_longterm_test_read(arg);
		break;
	}

	mutex_unlock(&pin_longterm_test_mutex);
	return ret;
}

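/*
 * Top-level ioctl handler: the PIN_LONGTERM_TEST_* commands carry their
 * own argument formats and are handed off to pin_longterm_test_ioctl();
 * every other command passes a struct gup_test by address, which is
 * copied in, processed by __gup_test_ioctl() and copied back out.
 */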
static long gup_test_ioctl(struct file *filep, unsigned int cmd,
			   unsigned long arg)
{
	struct gup_test gup;
	int ret;

	switch (cmd) {
	case GUP_FAST_BENCHMARK:
	case PIN_FAST_BENCHMARK:
	case PIN_LONGTERM_BENCHMARK:
	case GUP_BASIC_TEST:
	case PIN_BASIC_TEST:
	case DUMP_USER_PAGES_TEST:
		break;
	case PIN_LONGTERM_TEST_START:
	case PIN_LONGTERM_TEST_STOP:
	case PIN_LONGTERM_TEST_READ:
		return pin_longterm_test_ioctl(filep, cmd, arg);
	default:
		return -EINVAL;
	}

	if (copy_from_user(&gup, (void __user *)arg, sizeof(gup)))
		return -EFAULT;

	ret = __gup_test_ioctl(cmd, &gup);
	if (ret)
		return ret;

	if (copy_to_user((void __user *)arg, &gup, sizeof(gup)))
		return -EFAULT;

	return 0;
}

static int gup_test_release(struct inode *inode, struct file *file)
{
	pin_longterm_test_stop();

	return 0;
}

static const struct file_operations gup_test_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = gup_test_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.release = gup_test_release,
};

static int __init gup_test_init(void)
{
	debugfs_create_file_unsafe("gup_test", 0600, NULL, NULL,
				   &gup_test_fops);

	return 0;
}

late_initcall(gup_test_init);
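
/*
 * Example userspace usage, as a minimal sketch (not compiled into this
 * file). It assumes debugfs is mounted at /sys/kernel/debug, that the
 * test program includes this directory's gup_test.h for the ioctl and
 * struct gup_test definitions, and that the 128 MiB buffer size is an
 * arbitrary choice for illustration; error handling is omitted for
 * brevity:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include "gup_test.h"
 *
 *	int main(void)
 *	{
 *		struct gup_test gup = { 0 };
 *		size_t size = 128UL << 20;	// 128 MiB of anonymous memory
 *		int fd = open("/sys/kernel/debug/gup_test", O_RDWR);
 *
 *		gup.addr = (unsigned long)mmap(NULL, size,
 *					       PROT_READ | PROT_WRITE,
 *					       MAP_PRIVATE | MAP_ANONYMOUS,
 *					       -1, 0);
 *		gup.size = size;
 *		gup.nr_pages_per_call = 1024;
 *
 *		// Times get_user_pages_fast() over the whole range:
 *		ioctl(fd, GUP_FAST_BENCHMARK, &gup);
 *		printf("get: %llu us, put: %llu us\n",
 *		       (unsigned long long)gup.get_delta_usec,
 *		       (unsigned long long)gup.put_delta_usec);
 *		return 0;
 *	}
 */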