blob: 518a25667323ee4edd1b54577c5f0598d05bd51c [file] [log] [blame]
Thomas Gleixner457c8992019-05-19 13:08:55 +01001// SPDX-License-Identifier: GPL-2.0-only
Ingo Molnarc33fa9f2008-04-17 20:05:36 +02002/*
Christoph Hellwig3f0acb12020-06-08 21:34:11 -07003 * Access kernel or user memory without faulting.
Ingo Molnarc33fa9f2008-04-17 20:05:36 +02004 */
Paul Gortmakerb95f1b312011-10-16 02:01:52 -04005#include <linux/export.h>
Ingo Molnarc33fa9f2008-04-17 20:05:36 +02006#include <linux/mm.h>
David Howells7c7fcf72010-10-27 17:29:01 +01007#include <linux/uaccess.h>
Alexei Starovoitovd319f342023-04-10 19:43:44 +02008#include <asm/tlb.h>
Ingo Molnarc33fa9f2008-04-17 20:05:36 +02009
/*
 * Weak default policy hook: architectures may override this to forbid
 * nofault reads from specific kernel address ranges.  The default
 * permits every (@unsafe_src, @size) range.
 */
bool __weak copy_from_kernel_nofault_allowed(const void *unsafe_src,
		size_t size)
{
	return true;
}
15
/*
 * Drain @len from @src to @dst in sizeof(@type) chunks, jumping to
 * @err_label if __get_kernel_nofault() faults.  Advances @dst/@src and
 * decrements @len in place, so successive invocations with smaller
 * types mop up the remainder.  Arguments are evaluated multiple times.
 */
#define copy_from_kernel_nofault_loop(dst, src, len, type, err_label)	\
	while (len >= sizeof(type)) {					\
		__get_kernel_nofault(dst, src, type, err_label);	\
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}
23
/**
 * copy_from_kernel_nofault(): safely attempt to read from a kernel location
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from.  This must be a kernel address.
 * @size: size of the data chunk
 *
 * Safely read from kernel address @src to the buffer at @dst.  If a
 * kernel fault happens, handle that and return -EFAULT.  Returns
 * -ERANGE if the architecture rejects the source range via
 * copy_from_kernel_nofault_allowed().
 */
long copy_from_kernel_nofault(void *dst, const void *src, size_t size)
{
	unsigned long align = 0;

	/*
	 * OR-ing both addresses together yields a value whose low bits
	 * are clear only if *both* pointers carry that alignment; on
	 * arches with efficient unaligned access it stays 0 so the
	 * widest chunk size is always used.
	 */
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		align = (unsigned long)dst | (unsigned long)src;

	if (!copy_from_kernel_nofault_allowed(src, size))
		return -ERANGE;

	pagefault_disable();
	/* Each loop consumes what it can; the u8 loop handles the tail. */
	if (!(align & 7))
		copy_from_kernel_nofault_loop(dst, src, size, u64, Efault);
	if (!(align & 3))
		copy_from_kernel_nofault_loop(dst, src, size, u32, Efault);
	if (!(align & 1))
		copy_from_kernel_nofault_loop(dst, src, size, u16, Efault);
	copy_from_kernel_nofault_loop(dst, src, size, u8, Efault);
	pagefault_enable();
	return 0;
Efault:
	pagefault_enable();
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(copy_from_kernel_nofault);
Christoph Hellwigb58294e2020-06-08 21:34:58 -070049
/*
 * Write @len bytes from @src to @dst in sizeof(@type) chunks, jumping
 * to @err_label if __put_kernel_nofault() faults.  Mirrors
 * copy_from_kernel_nofault_loop(): pointers and @len are updated in
 * place and arguments are evaluated multiple times.
 */
#define copy_to_kernel_nofault_loop(dst, src, len, type, err_label)	\
	while (len >= sizeof(type)) {					\
		__put_kernel_nofault(dst, src, type, err_label);	\
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}
57
/**
 * copy_to_kernel_nofault(): safely attempt to write to a kernel location
 * @dst: address to write to.  This must be a kernel address.
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to kernel address @dst from the buffer at @src.  If a
 * kernel fault happens, handle that and return -EFAULT.  Note there is
 * no *_allowed() policy hook on the write path, unlike the read side.
 */
long copy_to_kernel_nofault(void *dst, const void *src, size_t size)
{
	unsigned long align = 0;

	/* See copy_from_kernel_nofault(): 0 means "use widest chunks". */
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		align = (unsigned long)dst | (unsigned long)src;

	pagefault_disable();
	if (!(align & 7))
		copy_to_kernel_nofault_loop(dst, src, size, u64, Efault);
	if (!(align & 3))
		copy_to_kernel_nofault_loop(dst, src, size, u32, Efault);
	if (!(align & 1))
		copy_to_kernel_nofault_loop(dst, src, size, u16, Efault);
	copy_to_kernel_nofault_loop(dst, src, size, u8, Efault);
	pagefault_enable();
	return 0;
Efault:
	pagefault_enable();
	return -EFAULT;
}
79
/**
 * strncpy_from_kernel_nofault: - Copy a NUL terminated string from unsafe
 *				 kernel address.
 * @dst: Destination address, in kernel space.  This buffer must be at
 *	 least @count bytes long.
 * @unsafe_addr: Unsafe kernel address to read the string from.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * On success, returns the length of the string INCLUDING the trailing
 * NUL.  If the source string is longer than @count - 1 bytes, @count - 1
 * bytes are copied, dst[@count - 1] is set to '\0' and @count is
 * returned.
 *
 * Returns -ERANGE if the range is rejected by
 * copy_from_kernel_nofault_allowed().  On a faulting access, returns
 * -EFAULT and sets dst[0] = '\0' (some bytes may have been copied).
 */
long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)
{
	const void *src = unsafe_addr;

	if (unlikely(count <= 0))
		return 0;
	if (!copy_from_kernel_nofault_allowed(unsafe_addr, count))
		return -ERANGE;

	pagefault_disable();
	do {
		__get_kernel_nofault(dst, src, u8, Efault);
		dst++;
		src++;
	} while (dst[-1] && src - unsafe_addr < count);
	pagefault_enable();

	/* Either re-writes the NUL just copied, or truncates at @count. */
	dst[-1] = '\0';
	return src - unsafe_addr;
Efault:
	pagefault_enable();
	dst[0] = '\0';
	return -EFAULT;
}
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900104
105/**
Christoph Hellwigc0ee37e2020-06-17 09:37:54 +0200106 * copy_from_user_nofault(): safely attempt to read from a user-space location
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700107 * @dst: pointer to the buffer that shall take the data
108 * @src: address to read from. This must be a user address.
109 * @size: size of the data chunk
110 *
111 * Safely read from user address @src to the buffer at @dst. If a kernel fault
112 * happens, handle that and return -EFAULT.
113 */
Christoph Hellwigc0ee37e2020-06-17 09:37:54 +0200114long copy_from_user_nofault(void *dst, const void __user *src, size_t size)
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700115{
116 long ret = -EFAULT;
Alexei Starovoitovd319f342023-04-10 19:43:44 +0200117
118 if (!__access_ok(src, size))
119 return ret;
120
121 if (!nmi_uaccess_okay())
122 return ret;
123
124 pagefault_disable();
125 ret = __copy_from_user_inatomic(dst, src, size);
126 pagefault_enable();
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700127
128 if (ret)
129 return -EFAULT;
130 return 0;
131}
Christoph Hellwigc0ee37e2020-06-17 09:37:54 +0200132EXPORT_SYMBOL_GPL(copy_from_user_nofault);
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700133
134/**
Christoph Hellwigc0ee37e2020-06-17 09:37:54 +0200135 * copy_to_user_nofault(): safely attempt to write to a user-space location
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700136 * @dst: address to write to
137 * @src: pointer to the data that shall be written
138 * @size: size of the data chunk
139 *
140 * Safely write to address @dst from the buffer at @src. If a kernel fault
141 * happens, handle that and return -EFAULT.
142 */
Christoph Hellwigc0ee37e2020-06-17 09:37:54 +0200143long copy_to_user_nofault(void __user *dst, const void *src, size_t size)
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700144{
145 long ret = -EFAULT;
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700146
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700147 if (access_ok(dst, size)) {
148 pagefault_disable();
149 ret = __copy_to_user_inatomic(dst, src, size);
150 pagefault_enable();
151 }
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700152
153 if (ret)
154 return -EFAULT;
155 return 0;
156}
Christoph Hellwigc0ee37e2020-06-17 09:37:54 +0200157EXPORT_SYMBOL_GPL(copy_to_user_nofault);
Christoph Hellwigfc3562d72020-06-08 21:34:55 -0700158
159/**
Christoph Hellwigbd88bb52020-06-08 21:34:14 -0700160 * strncpy_from_user_nofault: - Copy a NUL terminated string from unsafe user
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900161 * address.
162 * @dst: Destination address, in kernel space. This buffer must be at
163 * least @count bytes long.
164 * @unsafe_addr: Unsafe user address.
165 * @count: Maximum number of bytes to copy, including the trailing NUL.
166 *
167 * Copies a NUL-terminated string from unsafe user address to kernel buffer.
168 *
169 * On success, returns the length of the string INCLUDING the trailing NUL.
170 *
171 * If access fails, returns -EFAULT (some data may have been copied
172 * and the trailing NUL added).
173 *
174 * If @count is smaller than the length of the string, copies @count-1 bytes,
175 * sets the last byte of @dst buffer to NUL and returns @count.
176 */
Christoph Hellwigbd88bb52020-06-08 21:34:14 -0700177long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900178 long count)
179{
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900180 long ret;
181
182 if (unlikely(count <= 0))
183 return 0;
184
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900185 pagefault_disable();
186 ret = strncpy_from_user(dst, unsafe_addr, count);
187 pagefault_enable();
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900188
189 if (ret >= count) {
190 ret = count;
191 dst[ret - 1] = '\0';
192 } else if (ret > 0) {
193 ret++;
194 }
195
196 return ret;
197}
198
199/**
Christoph Hellwig02dddb12020-06-08 21:34:20 -0700200 * strnlen_user_nofault: - Get the size of a user string INCLUDING final NUL.
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900201 * @unsafe_addr: The string to measure.
202 * @count: Maximum count (including NUL)
203 *
204 * Get the size of a NUL-terminated string in user space without pagefault.
205 *
206 * Returns the size of the string INCLUDING the terminating NUL.
207 *
208 * If the string is too long, returns a number larger than @count. User
209 * has to check the return value against "> count".
210 * On exception (or invalid count), returns 0.
211 *
212 * Unlike strnlen_user, this can be used from IRQ handler etc. because
213 * it disables pagefaults.
214 */
Christoph Hellwig02dddb12020-06-08 21:34:20 -0700215long strnlen_user_nofault(const void __user *unsafe_addr, long count)
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900216{
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900217 int ret;
218
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900219 pagefault_disable();
220 ret = strnlen_user(unsafe_addr, count);
221 pagefault_enable();
Masami Hiramatsu3d708182019-05-15 14:38:18 +0900222
223 return ret;
224}
Christophe Leroyad7489d2022-03-22 14:47:49 -0700225
/*
 * Out-of-line WARN for a detected copy overflow: the requested @count
 * bytes exceed the @size of the destination object.  Warns once per
 * call site with both values; does not abort the copy itself.
 */
void __copy_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
EXPORT_SYMBOL(__copy_overflow);