x86: some lock annotations for user copy paths, v2
- introduce might_fault(), folding the open-coded might_sleep() +
  might_lock_read(&current->mm->mmap_sem) annotation into one helper
- handle the atomic user copy paths correctly
[ mingo@elte.hu: move might_sleep() outside of in_atomic(). ]
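
A minimal sketch of the helper this series introduces, assuming a
single definition in common code (e.g. mm/memory.c); the in_atomic()
guard reflects the atomic-path handling and the fixup note above, and
the body shown is illustrative rather than the verbatim implementation:

    /* Sketch only: one shared definition replacing the open-coded
     * annotations removed at each call site in the diff below. */
    void might_fault(void)
    {
            /* Callers must always be able to sleep; per the fixup
             * note, this stays outside any in_atomic() check. */
            might_sleep();

            /*
             * Atomic user copies run with pagefaults disabled and
             * never take mmap_sem, so only annotate the lock on the
             * non-atomic paths that can actually fault.
             */
            if (!in_atomic() && current->mm)
                    might_lock_read(&current->mm->mmap_sem);
    }

Each call site below then collapses to a single might_fault() call.
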
Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 8eedde2..7393152 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -32,9 +32,7 @@
#define __do_strncpy_from_user(dst, src, count, res) \
do { \
int __d0, __d1, __d2; \
- might_sleep(); \
- if (current->mm) \
- might_lock_read(&current->mm->mmap_sem); \
+ might_fault(); \
__asm__ __volatile__( \
" testl %1,%1\n" \
" jz 2f\n" \
@@ -121,9 +119,7 @@
#define __do_clear_user(addr,size) \
do { \
int __d0; \
- might_sleep(); \
- if (current->mm) \
- might_lock_read(&current->mm->mmap_sem); \
+ might_fault(); \
__asm__ __volatile__( \
"0: rep; stosl\n" \
" movl %2,%0\n" \
@@ -193,9 +189,7 @@
unsigned long mask = -__addr_ok(s);
unsigned long res, tmp;
- might_sleep();
- if (current->mm)
- might_lock_read(&current->mm->mmap_sem);
+ might_fault();
__asm__ __volatile__(
" testl %0, %0\n"