/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright 2002 Andi Kleen */

#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/cpufeatures.h>
#include <asm/mcsafe_test.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>

/*
 * We build a jump to memcpy_orig by default, which gets NOPped out on
 * the majority of x86 CPUs, which set REP_GOOD. In addition, on CPUs
 * which have the enhanced REP MOVSB/STOSB feature (ERMS), those NOPs
 * are changed to a jmp to memcpy_erms, which does the REP; MOVSB
 * memory copy.
 */

.weak memcpy

/*
 * memcpy - Copy a memory block.
 *
 * Input:
 *  rdi destination
 *  rsi source
 *  rdx count
 *
 * Output:
 * rax original destination
 */
ENTRY(__memcpy)
ENTRY(memcpy)
	ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
		      "jmp memcpy_erms", X86_FEATURE_ERMS

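	/*
	 * Set the return value, then copy the whole 8-byte words with
	 * REP MOVSQ and the remaining 0-7 bytes with REP MOVSB.
	 */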
	movq %rdi, %rax
	movq %rdx, %rcx
	shrq $3, %rcx
	andl $7, %edx
	rep movsq
	movl %edx, %ecx
	rep movsb
	ret
ENDPROC(memcpy)
ENDPROC(__memcpy)
EXPORT_SYMBOL(memcpy)
EXPORT_SYMBOL(__memcpy)

/*
 * memcpy_erms() - enhanced fast string memcpy. This is faster and
 * simpler than memcpy. Use memcpy_erms when possible.
 */
ENTRY(memcpy_erms)
	movq %rdi, %rax
	movq %rdx, %rcx
	rep movsb
	ret
ENDPROC(memcpy_erms)

ENTRY(memcpy_orig)
	movq %rdi, %rax

	cmpq $0x20, %rdx
	jb .Lhandle_tail

	/*
	 * Check whether a memory false dependence could occur between
	 * source and destination, then jump to the corresponding copy
	 * mode.
	 */
	cmp %dil, %sil
	jl .Lcopy_backward
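	/*
	 * The count is biased down by 0x20 before the loop; together
	 * with the subq at the top of each iteration, the jae falls
	 * through once fewer than 0x20 bytes remain, and the addl
	 * after the loop restores the true tail count in %edx.
	 */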
	subq $0x20, %rdx
.Lcopy_forward_loop:
	subq $0x20, %rdx

	/*
	 * Move in blocks of 4x8 bytes:
	 */
	movq 0*8(%rsi), %r8
	movq 1*8(%rsi), %r9
	movq 2*8(%rsi), %r10
	movq 3*8(%rsi), %r11
	leaq 4*8(%rsi), %rsi

	movq %r8, 0*8(%rdi)
	movq %r9, 1*8(%rdi)
	movq %r10, 2*8(%rdi)
	movq %r11, 3*8(%rdi)
	leaq 4*8(%rdi), %rdi
	jae .Lcopy_forward_loop
	addl $0x20, %edx
	jmp .Lhandle_tail

.Lcopy_backward:
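	/*
	 * Copy from the tail toward the head to avoid the false
	 * dependence detected above; the count is biased by 0x20
	 * just as in the forward loop.
	 */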
	/*
	 * Calculate copy position to tail.
	 */
	addq %rdx, %rsi
	addq %rdx, %rdi
	subq $0x20, %rdx
	/*
	 * At most 3 ALU operations can issue in one cycle, so append
	 * NOPs within the same 16-byte chunk.
	 */
	.p2align 4
.Lcopy_backward_loop:
	subq $0x20, %rdx
	movq -1*8(%rsi), %r8
	movq -2*8(%rsi), %r9
	movq -3*8(%rsi), %r10
	movq -4*8(%rsi), %r11
	leaq -4*8(%rsi), %rsi
	movq %r8, -1*8(%rdi)
	movq %r9, -2*8(%rdi)
	movq %r10, -3*8(%rdi)
	movq %r11, -4*8(%rdi)
	leaq -4*8(%rdi), %rdi
	jae .Lcopy_backward_loop

	/*
	 * Calculate copy position to head.
	 */
	addl $0x20, %edx
	subq %rdx, %rsi
	subq %rdx, %rdi
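	/*
	 * Tail handling below: each size class loads from both ends of
	 * the remaining region before storing, so overlapping accesses
	 * handle any count in the class without a loop.
	 */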
.Lhandle_tail:
	cmpl $16, %edx
	jb .Lless_16bytes

	/*
	 * Move data from 16 bytes to 31 bytes.
	 */
	movq 0*8(%rsi), %r8
	movq 1*8(%rsi), %r9
	movq -2*8(%rsi, %rdx), %r10
	movq -1*8(%rsi, %rdx), %r11
	movq %r8, 0*8(%rdi)
	movq %r9, 1*8(%rdi)
	movq %r10, -2*8(%rdi, %rdx)
	movq %r11, -1*8(%rdi, %rdx)
	retq
	.p2align 4
.Lless_16bytes:
	cmpl $8, %edx
	jb .Lless_8bytes
	/*
	 * Move data from 8 bytes to 15 bytes.
	 */
	movq 0*8(%rsi), %r8
	movq -1*8(%rsi, %rdx), %r9
	movq %r8, 0*8(%rdi)
	movq %r9, -1*8(%rdi, %rdx)
	retq
	.p2align 4
.Lless_8bytes:
	cmpl $4, %edx
	jb .Lless_3bytes

	/*
	 * Move data from 4 bytes to 7 bytes.
	 */
	movl (%rsi), %ecx
	movl -4(%rsi, %rdx), %r8d
	movl %ecx, (%rdi)
	movl %r8d, -4(%rdi, %rdx)
	retq
	.p2align 4
.Lless_3bytes:
	subl $1, %edx
	jb .Lend
	/*
	 * Move data from 1 byte to 3 bytes.
	 */
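	/*
	 * The flags from the subl above are still live here: movzbl
	 * does not modify them, so the jz below takes the single-byte
	 * path when the original count was exactly 1.
	 */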
	movzbl (%rsi), %ecx
	jz .Lstore_1byte
	movzbq 1(%rsi), %r8
	movzbq (%rsi, %rdx), %r9
	movb %r8b, 1(%rdi)
	movb %r9b, (%rdi, %rdx)
.Lstore_1byte:
	movb %cl, (%rdi)

.Lend:
	retq
ENDPROC(memcpy_orig)

#ifndef CONFIG_UML

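/*
 * MCSAFE_TEST_CTL and the MCSAFE_TEST_SRC/DST hooks used below come
 * from <asm/mcsafe_test.h> and exist only for fault-injection testing
 * of the exception paths.
 */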
MCSAFE_TEST_CTL

/*
 * __memcpy_mcsafe - memory copy with machine check exception handling
 * Note that we only catch machine checks when reading the source addresses.
 * Writes to target are posted and don't generate machine checks.
 * Return 0 for success, or the number of bytes not copied on failure.
 */
ENTRY(__memcpy_mcsafe)
	cmpl $8, %edx
	/* Less than 8 bytes? Go to byte copy loop */
	jb .L_no_whole_words

	/* Check for bad alignment of source */
	testl $7, %esi
	/* Already aligned */
	jz .L_8byte_aligned

	/* Copy one byte at a time until source is 8-byte aligned */
	movl %esi, %ecx
	andl $7, %ecx
	subl $8, %ecx
	negl %ecx
	subl %ecx, %edx
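	/* %ecx = 8 - (src & 7) leading bytes; already deducted from %edx */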
.L_read_leading_bytes:
	movb (%rsi), %al
	MCSAFE_TEST_SRC %rsi 1 .E_leading_bytes
	MCSAFE_TEST_DST %rdi 1 .E_leading_bytes
.L_write_leading_bytes:
	movb %al, (%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz .L_read_leading_bytes

.L_8byte_aligned:
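	/* Split the remaining count: %ecx = whole 8-byte words, %edx = trailing bytes */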
	movl %edx, %ecx
	andl $7, %edx
	shrl $3, %ecx
	jz .L_no_whole_words

.L_read_words:
	movq (%rsi), %r8
	MCSAFE_TEST_SRC %rsi 8 .E_read_words
	MCSAFE_TEST_DST %rdi 8 .E_write_words
.L_write_words:
	movq %r8, (%rdi)
	addq $8, %rsi
	addq $8, %rdi
	decl %ecx
	jnz .L_read_words

	/* Any trailing bytes? */
.L_no_whole_words:
	andl %edx, %edx
	jz .L_done_memcpy_trap

	/* Copy trailing bytes */
	movl %edx, %ecx
.L_read_trailing_bytes:
	movb (%rsi), %al
	MCSAFE_TEST_SRC %rsi 1 .E_trailing_bytes
	MCSAFE_TEST_DST %rdi 1 .E_trailing_bytes
.L_write_trailing_bytes:
	movb %al, (%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz .L_read_trailing_bytes

	/* Copy successful. Return zero */
.L_done_memcpy_trap:
	xorl %eax, %eax
.L_done:
	ret
ENDPROC(__memcpy_mcsafe)
EXPORT_SYMBOL_GPL(__memcpy_mcsafe)

	.section .fixup, "ax"
	/*
	 * Return number of bytes not copied for any failure. Note that
	 * there is no "tail" handling since the source buffer is 8-byte
	 * aligned and poison is cacheline aligned.
	 */
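	/*
	 * The fault labels below fall through: .E_read_words converts
	 * the remaining word count to bytes, .E_leading_bytes adds the
	 * byte count still tracked in %edx, and .E_trailing_bytes
	 * copies the total into %eax for the return.
	 */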
.E_read_words:
	shll $3, %ecx
.E_leading_bytes:
	addl %edx, %ecx
.E_trailing_bytes:
	mov %ecx, %eax
	jmp .L_done

	/*
	 * For write fault handling, given the destination is unaligned,
	 * we handle faults on multi-byte writes with a byte-by-byte
	 * copy up to the write-protected page.
	 */
.E_write_words:
	shll $3, %ecx
	addl %edx, %ecx
	movl %ecx, %edx
	jmp mcsafe_handle_tail

	.previous

	_ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
	_ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
	_ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
	_ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
	_ASM_EXTABLE(.L_write_words, .E_write_words)
	_ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
#endif