blob: 5f4d240f67ecc0cae5c0900ae6e9545185847e0b [file] [log] [blame]
Thomas Gleixner457c8992019-05-19 13:08:55 +01001// SPDX-License-Identifier: GPL-2.0-only
Ingo Molnarc33fa9f2008-04-17 20:05:36 +02002/*
Christoph Hellwig3f0acb12020-06-08 21:34:11 -07003 * Access kernel or user memory without faulting.
Ingo Molnarc33fa9f2008-04-17 20:05:36 +02004 */
Paul Gortmakerb95f1b312011-10-16 02:01:52 -04005#include <linux/export.h>
Ingo Molnarc33fa9f2008-04-17 20:05:36 +02006#include <linux/mm.h>
David Howells7c7fcf72010-10-27 17:29:01 +01007#include <linux/uaccess.h>
Ingo Molnarc33fa9f2008-04-17 20:05:36 +02008
/*
 * Default policy hook for copy_from_kernel_nofault(): permit probing any
 * kernel address.  Declared __weak so architectures can override it to
 * reject ranges that must not be probed (returning false makes the
 * caller fail with -ERANGE).
 */
bool __weak copy_from_kernel_nofault_allowed(const void *unsafe_src,
		size_t size)
{
	return true;
}
14
/*
 * Copy as many whole chunks of @type as remain in @len from @src to @dst,
 * branching to @err_label if an access faults.  @dst, @src and @len are
 * updated in place, so callers can chain invocations from the widest
 * chunk size down to bytes over the same remaining range.
 */
#define copy_from_kernel_nofault_loop(dst, src, len, type, err_label)	\
	while (len >= sizeof(type)) {					\
		__get_kernel_nofault(dst, src, type, err_label);	\
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}
22
/**
 * copy_from_kernel_nofault(): safely attempt to read from kernel memory
 * @dst: pointer to the buffer that shall take the data
 * @src: kernel address to read from
 * @size: size of the data chunk
 *
 * Safely read from kernel address @src to the buffer at @dst.  If a fault
 * happens, handle that and return -EFAULT.  Returns -ERANGE if the
 * architecture's copy_from_kernel_nofault_allowed() hook rejects @src.
 */
long copy_from_kernel_nofault(void *dst, const void *src, size_t size)
{
	unsigned long align = 0;

	/*
	 * On architectures without efficient unaligned access, record the
	 * combined misalignment of both pointers so only suitably aligned
	 * chunk sizes are used below; otherwise leave align at 0 and allow
	 * the widest loads regardless of alignment.
	 */
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		align = (unsigned long)dst | (unsigned long)src;

	if (!copy_from_kernel_nofault_allowed(src, size))
		return -ERANGE;

	pagefault_disable();
	/* Widest chunks first; each loop consumes what it can of 'size'. */
	if (!(align & 7))
		copy_from_kernel_nofault_loop(dst, src, size, u64, Efault);
	if (!(align & 3))
		copy_from_kernel_nofault_loop(dst, src, size, u32, Efault);
	if (!(align & 1))
		copy_from_kernel_nofault_loop(dst, src, size, u16, Efault);
	copy_from_kernel_nofault_loop(dst, src, size, u8, Efault);
	pagefault_enable();
	return 0;
Efault:
	pagefault_enable();
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(copy_from_kernel_nofault);
Christoph Hellwigb58294e2020-06-08 21:34:58 -070048
/*
 * Write as many whole chunks of @type as remain in @len from @src to
 * @dst, branching to @err_label if an access faults.  @dst, @src and
 * @len are updated in place so calls can be chained from the widest
 * chunk size down to bytes.
 */
#define copy_to_kernel_nofault_loop(dst, src, len, type, err_label)	\
	while (len >= sizeof(type)) {					\
		__put_kernel_nofault(dst, src, type, err_label);	\
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}
56
/**
 * copy_to_kernel_nofault(): safely attempt to write to kernel memory
 * @dst: kernel address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to kernel address @dst from the buffer at @src.  If a
 * fault happens, handle that and return -EFAULT.  Note that, unlike the
 * read side, no copy_from_kernel_nofault_allowed() filter is applied
 * here.
 */
long copy_to_kernel_nofault(void *dst, const void *src, size_t size)
{
	unsigned long align = 0;

	/* Only restrict chunk sizes where unaligned access is costly. */
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		align = (unsigned long)dst | (unsigned long)src;

	pagefault_disable();
	/* Widest chunks first; each loop consumes what it can of 'size'. */
	if (!(align & 7))
		copy_to_kernel_nofault_loop(dst, src, size, u64, Efault);
	if (!(align & 3))
		copy_to_kernel_nofault_loop(dst, src, size, u32, Efault);
	if (!(align & 1))
		copy_to_kernel_nofault_loop(dst, src, size, u16, Efault);
	copy_to_kernel_nofault_loop(dst, src, size, u8, Efault);
	pagefault_enable();
	return 0;
Efault:
	pagefault_enable();
	return -EFAULT;
}
78
/*
 * strncpy_from_kernel_nofault: - Copy a NUL terminated string from an
 *				  unsafe kernel address.
 * @dst: Destination buffer, at least @count bytes long.
 * @unsafe_addr: Kernel address the string is read from.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Returns the number of bytes copied (including the trailing NUL slot)
 * on success, -ERANGE if the address is rejected by
 * copy_from_kernel_nofault_allowed(), or -EFAULT if an access faults.
 * In both error cases the last byte written to @dst is forced to NUL.
 */
long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)
{
	const void *src = unsafe_addr;

	if (unlikely(count <= 0))
		return 0;
	if (!copy_from_kernel_nofault_allowed(unsafe_addr, count))
		return -ERANGE;

	pagefault_disable();
	do {
		__get_kernel_nofault(dst, src, u8, Efault);
		dst++;
		src++;
	} while (dst[-1] && src - unsafe_addr < count);
	pagefault_enable();

	/* dst has advanced past the last byte written; terminate there. */
	dst[-1] = '\0';
	return src - unsafe_addr;
Efault:
	pagefault_enable();
	/* Partial copy: make sure the buffer is still NUL terminated. */
	dst[-1] = '\0';
	return -EFAULT;
}
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900103
104/**
Christoph Hellwigc0ee37e2020-06-17 09:37:54 +0200105 * copy_from_user_nofault(): safely attempt to read from a user-space location
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700106 * @dst: pointer to the buffer that shall take the data
107 * @src: address to read from. This must be a user address.
108 * @size: size of the data chunk
109 *
110 * Safely read from user address @src to the buffer at @dst. If a kernel fault
111 * happens, handle that and return -EFAULT.
112 */
Christoph Hellwigc0ee37e2020-06-17 09:37:54 +0200113long copy_from_user_nofault(void *dst, const void __user *src, size_t size)
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700114{
115 long ret = -EFAULT;
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700116 if (access_ok(src, size)) {
117 pagefault_disable();
118 ret = __copy_from_user_inatomic(dst, src, size);
119 pagefault_enable();
120 }
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700121
122 if (ret)
123 return -EFAULT;
124 return 0;
125}
Christoph Hellwigc0ee37e2020-06-17 09:37:54 +0200126EXPORT_SYMBOL_GPL(copy_from_user_nofault);
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700127
128/**
Christoph Hellwigc0ee37e2020-06-17 09:37:54 +0200129 * copy_to_user_nofault(): safely attempt to write to a user-space location
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700130 * @dst: address to write to
131 * @src: pointer to the data that shall be written
132 * @size: size of the data chunk
133 *
134 * Safely write to address @dst from the buffer at @src. If a kernel fault
135 * happens, handle that and return -EFAULT.
136 */
Christoph Hellwigc0ee37e2020-06-17 09:37:54 +0200137long copy_to_user_nofault(void __user *dst, const void *src, size_t size)
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700138{
139 long ret = -EFAULT;
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700140
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700141 if (access_ok(dst, size)) {
142 pagefault_disable();
143 ret = __copy_to_user_inatomic(dst, src, size);
144 pagefault_enable();
145 }
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700146
147 if (ret)
148 return -EFAULT;
149 return 0;
150}
Christoph Hellwigc0ee37e2020-06-17 09:37:54 +0200151EXPORT_SYMBOL_GPL(copy_to_user_nofault);
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700152
153/**
Christoph Hellwigbd88bb52020-06-08 21:34:14 -0700154 * strncpy_from_user_nofault: - Copy a NUL terminated string from unsafe user
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900155 * address.
156 * @dst: Destination address, in kernel space. This buffer must be at
157 * least @count bytes long.
158 * @unsafe_addr: Unsafe user address.
159 * @count: Maximum number of bytes to copy, including the trailing NUL.
160 *
161 * Copies a NUL-terminated string from unsafe user address to kernel buffer.
162 *
163 * On success, returns the length of the string INCLUDING the trailing NUL.
164 *
165 * If access fails, returns -EFAULT (some data may have been copied
166 * and the trailing NUL added).
167 *
168 * If @count is smaller than the length of the string, copies @count-1 bytes,
169 * sets the last byte of @dst buffer to NUL and returns @count.
170 */
Christoph Hellwigbd88bb52020-06-08 21:34:14 -0700171long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900172 long count)
173{
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900174 long ret;
175
176 if (unlikely(count <= 0))
177 return 0;
178
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900179 pagefault_disable();
180 ret = strncpy_from_user(dst, unsafe_addr, count);
181 pagefault_enable();
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900182
183 if (ret >= count) {
184 ret = count;
185 dst[ret - 1] = '\0';
186 } else if (ret > 0) {
187 ret++;
188 }
189
190 return ret;
191}
192
193/**
Christoph Hellwig02dddb12020-06-08 21:34:20 -0700194 * strnlen_user_nofault: - Get the size of a user string INCLUDING final NUL.
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900195 * @unsafe_addr: The string to measure.
196 * @count: Maximum count (including NUL)
197 *
198 * Get the size of a NUL-terminated string in user space without pagefault.
199 *
200 * Returns the size of the string INCLUDING the terminating NUL.
201 *
202 * If the string is too long, returns a number larger than @count. User
203 * has to check the return value against "> count".
204 * On exception (or invalid count), returns 0.
205 *
206 * Unlike strnlen_user, this can be used from IRQ handler etc. because
207 * it disables pagefaults.
208 */
Christoph Hellwig02dddb12020-06-08 21:34:20 -0700209long strnlen_user_nofault(const void __user *unsafe_addr, long count)
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900210{
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900211 int ret;
212
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900213 pagefault_disable();
214 ret = strnlen_user(unsafe_addr, count);
215 pagefault_enable();
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900216
217 return ret;
218}
Christophe Leroyad7489d2022-03-22 14:47:49 -0700219
/*
 * Out-of-line diagnostic for detected copy-size overflows: emits a WARN
 * naming the destination object size (@size) and the attempted copy
 * length (@count).  Presumably invoked by the fortified copy helpers
 * when a bounds check fails -- callers are outside this file; confirm
 * against include/linux/thread_info.h.
 */
void __copy_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
EXPORT_SYMBOL(__copy_overflow);