blob: 1505a52f23a01945ef30bb675d647a524e01680d [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Aleksa Saraif5a1a532019-10-01 11:10:52 +10002#include <linux/bitops.h>
Albert van der Linde4d0e9df2020-10-15 20:13:50 -07003#include <linux/fault-inject-usercopy.h>
Marco Elver76d6f062020-01-21 17:05:12 +01004#include <linux/instrumented.h>
5#include <linux/uaccess.h>
Al Virod5975802017-03-20 21:56:06 -04006
7/* out-of-line parts */
8
9#ifndef INLINE_COPY_FROM_USER
10unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
11{
12 unsigned long res = n;
Al Viro9c5f6902017-06-29 21:39:54 -040013 might_fault();
Albert van der Linde4d0e9df2020-10-15 20:13:50 -070014 if (!should_fail_usercopy() && likely(access_ok(from, n))) {
Alexander Potapenko33b75c12022-09-15 17:03:37 +020015 instrument_copy_from_user_before(to, from, n);
Al Virod5975802017-03-20 21:56:06 -040016 res = raw_copy_from_user(to, from, n);
Alexander Potapenko33b75c12022-09-15 17:03:37 +020017 instrument_copy_from_user_after(to, from, n, res);
Al Viro9c5f6902017-06-29 21:39:54 -040018 }
Al Virod5975802017-03-20 21:56:06 -040019 if (unlikely(res))
20 memset(to + (n - res), 0, res);
21 return res;
22}
23EXPORT_SYMBOL(_copy_from_user);
24#endif
25
26#ifndef INLINE_COPY_TO_USER
Christophe Leroya0e94592017-12-09 17:24:24 +010027unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
Al Virod5975802017-03-20 21:56:06 -040028{
Al Viro9c5f6902017-06-29 21:39:54 -040029 might_fault();
Albert van der Linde4d0e9df2020-10-15 20:13:50 -070030 if (should_fail_usercopy())
31 return n;
Linus Torvalds96d4f262019-01-03 18:57:57 -080032 if (likely(access_ok(to, n))) {
Marco Elver76d6f062020-01-21 17:05:12 +010033 instrument_copy_to_user(to, from, n);
Al Virod5975802017-03-20 21:56:06 -040034 n = raw_copy_to_user(to, from, n);
Al Viro9c5f6902017-06-29 21:39:54 -040035 }
Al Virod5975802017-03-20 21:56:06 -040036 return n;
37}
38EXPORT_SYMBOL(_copy_to_user);
39#endif
Aleksa Saraif5a1a532019-10-01 11:10:52 +100040
/**
 * check_zeroed_user: check if a userspace buffer only contains zero bytes
 * @from: Source address, in userspace.
 * @size: Size of buffer.
 *
 * This is effectively shorthand for "memchr_inv(from, 0, size) == NULL" for
 * userspace addresses (and is more efficient because we don't care where the
 * first non-zero byte is).
 *
 * Returns:
 * * 0: There were non-zero bytes present in the buffer.
 * * 1: The buffer was full of zero bytes.
 * * -EFAULT: access to userspace failed.
 */
int check_zeroed_user(const void __user *from, size_t size)
{
	unsigned long val;
	/* Byte offset of @from within an unsigned-long-sized word. */
	uintptr_t align = (uintptr_t) from % sizeof(unsigned long);

	/* An empty buffer trivially contains only zero bytes. */
	if (unlikely(size == 0))
		return 1;

	/*
	 * Round the start down to a word boundary so every read below is
	 * word-sized and aligned; the extra bytes pulled in at the head
	 * (and any past the tail) are masked off before being tested.
	 */
	from -= align;
	size += align;

	if (!user_read_access_begin(from, size))
		return -EFAULT;

	/* First word: discard the bytes before the original start. */
	unsafe_get_user(val, (unsigned long __user *) from, err_fault);
	if (align)
		val &= ~aligned_byte_mask(align);

	/* Walk one word at a time; stop at the first non-zero word. */
	while (size > sizeof(unsigned long)) {
		if (unlikely(val))
			goto done;

		from += sizeof(unsigned long);
		size -= sizeof(unsigned long);

		unsafe_get_user(val, (unsigned long __user *) from, err_fault);
	}

	/* Last word: ignore any bytes past the end of the buffer. */
	if (size < sizeof(unsigned long))
		val &= aligned_byte_mask(size);

done:
	user_read_access_end();
	return (val == 0);
err_fault:
	/* unsafe_get_user() branches here on a userspace fault. */
	user_read_access_end();
	return -EFAULT;
}
EXPORT_SYMBOL(check_zeroed_user);