Merge branch 'mips-next-3.10' of git://git.linux-mips.org/pub/scm/john/linux-john into mips-for-linux-next
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index 44d6a5b..e3ee92d 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -10,34 +10,13 @@
 #ifndef _ASM_HAZARDS_H
 #define _ASM_HAZARDS_H
 
-#ifdef __ASSEMBLY__
-#define ASMMACRO(name, code...) .macro name; code; .endm
-#else
+#include <linux/stringify.h>
 
-#include <asm/cpu-features.h>
+#define ___ssnop							\
+	sll	$0, $0, 1
 
-#define ASMMACRO(name, code...)						\
-__asm__(".macro " #name "; " #code "; .endm");				\
-									\
-static inline void name(void)						\
-{									\
-	__asm__ __volatile__ (#name);					\
-}
-
-/*
- * MIPS R2 instruction hazard barrier.	 Needs to be called as a subroutine.
- */
-extern void mips_ihb(void);
-
-#endif
-
-ASMMACRO(_ssnop,
-	 sll	$0, $0, 1
-	)
-
-ASMMACRO(_ehb,
-	 sll	$0, $0, 3
-	)
+#define ___ehb								\
+	sll	$0, $0, 3
 
 /*
  * TLB hazards
@@ -48,24 +27,24 @@
  * MIPSR2 defines ehb for hazard avoidance
  */
 
-ASMMACRO(mtc0_tlbw_hazard,
-	 _ehb
-	)
-ASMMACRO(tlbw_use_hazard,
-	 _ehb
-	)
-ASMMACRO(tlb_probe_hazard,
-	 _ehb
-	)
-ASMMACRO(irq_enable_hazard,
-	 _ehb
-	)
-ASMMACRO(irq_disable_hazard,
-	_ehb
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	 _ehb
-	)
+#define __mtc0_tlbw_hazard						\
+	___ehb
+
+#define __tlbw_use_hazard						\
+	___ehb
+
+#define __tlb_probe_hazard						\
+	___ehb
+
+#define __irq_enable_hazard						\
+	___ehb
+
+#define __irq_disable_hazard						\
+	___ehb
+
+#define __back_to_back_c0_hazard					\
+	___ehb
+
 /*
  * gcc has a tradition of miscompiling the previous construct using the
  * address of a label as an argument to inline assembler.  Gas, however, has the
@@ -94,24 +73,42 @@
  * These are slightly complicated by the fact that we guarantee R1 kernels to
  * run fine on R2 processors.
  */
-ASMMACRO(mtc0_tlbw_hazard,
-	_ssnop; _ssnop; _ehb
-	)
-ASMMACRO(tlbw_use_hazard,
-	_ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(tlb_probe_hazard,
-	 _ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(irq_enable_hazard,
-	 _ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(irq_disable_hazard,
-	_ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	 _ssnop; _ssnop; _ssnop; _ehb
-	)
+
+#define __mtc0_tlbw_hazard						\
+	___ssnop;							\
+	___ssnop;							\
+	___ehb
+
+#define __tlbw_use_hazard						\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop;							\
+	___ehb
+
+#define __tlb_probe_hazard						\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop;							\
+	___ehb
+
+#define __irq_enable_hazard						\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop;							\
+	___ehb
+
+#define __irq_disable_hazard						\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop;							\
+	___ehb
+
+#define __back_to_back_c0_hazard					\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop;							\
+	___ehb
+
 /*
  * gcc has a tradition of miscompiling the previous construct using the
  * address of a label as an argument to inline assembler.  Gas, however, has the
@@ -147,18 +144,18 @@
  * R10000 rocks - all hazards handled in hardware, so this becomes a no-brainer.
  */
 
-ASMMACRO(mtc0_tlbw_hazard,
-	)
-ASMMACRO(tlbw_use_hazard,
-	)
-ASMMACRO(tlb_probe_hazard,
-	)
-ASMMACRO(irq_enable_hazard,
-	)
-ASMMACRO(irq_disable_hazard,
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	)
+#define __mtc0_tlbw_hazard
+
+#define __tlbw_use_hazard
+
+#define __tlb_probe_hazard
+
+#define __irq_enable_hazard
+
+#define __irq_disable_hazard
+
+#define __back_to_back_c0_hazard
+
 #define instruction_hazard() do { } while (0)
 
 #elif defined(CONFIG_CPU_SB1)
@@ -166,19 +163,21 @@
 /*
  * Mostly like R4000 for historic reasons
  */
-ASMMACRO(mtc0_tlbw_hazard,
-	)
-ASMMACRO(tlbw_use_hazard,
-	)
-ASMMACRO(tlb_probe_hazard,
-	)
-ASMMACRO(irq_enable_hazard,
-	)
-ASMMACRO(irq_disable_hazard,
-	 _ssnop; _ssnop; _ssnop
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	)
+#define __mtc0_tlbw_hazard
+
+#define __tlbw_use_hazard
+
+#define __tlb_probe_hazard
+
+#define __irq_enable_hazard
+
+#define __irq_disable_hazard						\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop
+
+#define __back_to_back_c0_hazard
+
 #define instruction_hazard() do { } while (0)
 
 #else
@@ -192,24 +191,35 @@
  * hazard, so this is a nice trick to have optimal code for a range of
  * processors.
  */
-ASMMACRO(mtc0_tlbw_hazard,
-	nop; nop
-	)
-ASMMACRO(tlbw_use_hazard,
-	nop; nop; nop
-	)
-ASMMACRO(tlb_probe_hazard,
-	 nop; nop; nop
-	)
-ASMMACRO(irq_enable_hazard,
-	 _ssnop; _ssnop; _ssnop;
-	)
-ASMMACRO(irq_disable_hazard,
-	nop; nop; nop
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	 _ssnop; _ssnop; _ssnop;
-	)
+#define __mtc0_tlbw_hazard						\
+	nop;								\
+	nop
+
+#define __tlbw_use_hazard						\
+	nop;								\
+	nop;								\
+	nop
+
+#define __tlb_probe_hazard						\
+	nop;								\
+	nop;								\
+	nop
+
+#define __irq_enable_hazard						\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop
+
+#define __irq_disable_hazard						\
+	nop;								\
+	nop;								\
+	nop
+
+#define __back_to_back_c0_hazard					\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop
+
 #define instruction_hazard() do { } while (0)
 
 #endif
@@ -218,32 +228,137 @@
 /* FPU hazards */
 
 #if defined(CONFIG_CPU_SB1)
-ASMMACRO(enable_fpu_hazard,
-	 .set	push;
-	 .set	mips64;
-	 .set	noreorder;
-	 _ssnop;
-	 bnezl	$0, .+4;
-	 _ssnop;
-	 .set	pop
-)
-ASMMACRO(disable_fpu_hazard,
-)
+
+#define __enable_fpu_hazard						\
+	.set	push;							\
+	.set	mips64;							\
+	.set	noreorder;						\
+	___ssnop;							\
+	bnezl	$0, .+4;						\
+	___ssnop;							\
+	.set	pop
+
+#define __disable_fpu_hazard
 
 #elif defined(CONFIG_CPU_MIPSR2)
-ASMMACRO(enable_fpu_hazard,
-	 _ehb
-)
-ASMMACRO(disable_fpu_hazard,
-	 _ehb
-)
+
+#define __enable_fpu_hazard						\
+	___ehb
+
+#define __disable_fpu_hazard						\
+	___ehb
+
 #else
-ASMMACRO(enable_fpu_hazard,
-	 nop; nop; nop; nop
-)
-ASMMACRO(disable_fpu_hazard,
-	 _ehb
-)
+
+#define __enable_fpu_hazard						\
+	nop;								\
+	nop;								\
+	nop;								\
+	nop
+
+#define __disable_fpu_hazard						\
+	___ehb
+
 #endif
 
+#ifdef __ASSEMBLY__
+
+#define _ssnop ___ssnop
+#define _ehb ___ehb
+#define mtc0_tlbw_hazard __mtc0_tlbw_hazard
+#define tlbw_use_hazard __tlbw_use_hazard
+#define tlb_probe_hazard __tlb_probe_hazard
+#define irq_enable_hazard __irq_enable_hazard
+#define irq_disable_hazard __irq_disable_hazard
+#define back_to_back_c0_hazard __back_to_back_c0_hazard
+#define enable_fpu_hazard __enable_fpu_hazard
+#define disable_fpu_hazard __disable_fpu_hazard
+
+#else
+
+#define _ssnop()							\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(___ssnop)						\
+	);								\
+} while (0)
+
+#define _ehb()								\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(___ehb)						\
+	);								\
+} while (0)
+
+
+#define mtc0_tlbw_hazard()						\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__mtc0_tlbw_hazard)					\
+	);								\
+} while (0)
+
+
+#define tlbw_use_hazard()						\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__tlbw_use_hazard)					\
+	);								\
+} while (0)
+
+
+#define tlb_probe_hazard()						\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__tlb_probe_hazard)					\
+	);								\
+} while (0)
+
+
+#define irq_enable_hazard()						\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__irq_enable_hazard)				\
+	);								\
+} while (0)
+
+
+#define irq_disable_hazard()						\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__irq_disable_hazard)				\
+	);								\
+} while (0)
+
+
+#define back_to_back_c0_hazard()					\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__back_to_back_c0_hazard)				\
+	);								\
+} while (0)
+
+
+#define enable_fpu_hazard()						\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__enable_fpu_hazard)				\
+	);								\
+} while (0)
+
+
+#define disable_fpu_hazard()						\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__disable_fpu_hazard)				\
+	);								\
+} while (0)
+
+/*
+ * MIPS R2 instruction hazard barrier.  Needs to be called as a subroutine.
+ */
+extern void mips_ihb(void);
+
+#endif /* __ASSEMBLY__  */
+
 #endif /* _ASM_HAZARDS_H */
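
A note on the pattern the hazards.h rewrite uses throughout: each hazard
sequence is now defined exactly once as a bare preprocessor macro; assembly
sources use that macro directly, while C callers turn the same body into an
inline-asm string with __stringify(). This is also why ___ssnop/___ehb grew a
third underscore: the single-underscore names become the assembler aliases and
the C do/while wrappers. A minimal sketch of the technique, assuming a
GCC-style toolchain; the names and the barrier body are illustrative, not from
the tree:

    #include <linux/stringify.h>

    /* One definition serves both .S files and C. */
    #define ___example_hazard					\
    	sll	$0, $0, 3

    #ifdef __ASSEMBLY__
    #define example_hazard ___example_hazard
    #else
    /*
     * __stringify() expands the macro and yields the string
     * "sll $0, $0, 3"; in multi-instruction bodies the separating
     * semicolons double as gas statement separators.
     */
    #define example_hazard()					\
    do {							\
    	__asm__ __volatile__(					\
    	__stringify(___example_hazard)				\
    	);							\
    } while (0)
    #endif
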
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 9f3384c..45c0095 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -14,53 +14,48 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
+#include <linux/stringify.h>
 #include <asm/hazards.h>
 
 #if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
 
-__asm__(
-	"	.macro	arch_local_irq_disable\n"
-	"	.set	push						\n"
-	"	.set	noat						\n"
-	"	di							\n"
-	"	irq_disable_hazard					\n"
-	"	.set	pop						\n"
-	"	.endm							\n");
-
 static inline void arch_local_irq_disable(void)
 {
 	__asm__ __volatile__(
-		"arch_local_irq_disable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
-}
-
-
-__asm__(
-	"	.macro	arch_local_irq_save result			\n"
 	"	.set	push						\n"
-	"	.set	reorder						\n"
 	"	.set	noat						\n"
-	"	di	\\result					\n"
-	"	andi	\\result, 1					\n"
-	"	irq_disable_hazard					\n"
+	"	di							\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
 	"	.set	pop						\n"
-	"	.endm							\n");
+	: /* no outputs */
+	: /* no inputs */
+	: "memory");
+}
 
 static inline unsigned long arch_local_irq_save(void)
 {
 	unsigned long flags;
-	asm volatile("arch_local_irq_save\t%0"
-		     : "=r" (flags)
-		     : /* no inputs */
-		     : "memory");
+
+	__asm__ __volatile__(
+	"	.set	push						\n"
+	"	.set	reorder						\n"
+	"	.set	noat						\n"
+	"	di	%[flags]					\n"
+	"	andi	%[flags], 1					\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
+	"	.set	pop						\n"
+	: [flags] "=r" (flags)
+	: /* no inputs */
+	: "memory");
+
 	return flags;
 }
 
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
 
-__asm__(
-	"	.macro	arch_local_irq_restore flags			\n"
+	__asm__ __volatile__(
 	"	.set	push						\n"
 	"	.set	noreorder					\n"
 	"	.set	noat						\n"
@@ -69,7 +64,7 @@
 	 * Slow, but doesn't suffer from a relatively unlikely race
 	 * condition we've had since day 1.
 	 */
-	"	beqz	\\flags, 1f					\n"
+	"	beqz	%[flags], 1f					\n"
 	"	di							\n"
 	"	ei							\n"
 	"1:								\n"
@@ -78,33 +73,44 @@
 	 * Fast, dangerous.  Life is fun, life is good.
 	 */
 	"	mfc0	$1, $12						\n"
-	"	ins	$1, \\flags, 0, 1				\n"
+	"	ins	$1, %[flags], 0, 1				\n"
 	"	mtc0	$1, $12						\n"
 #endif
-	"	irq_disable_hazard					\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
 	"	.set	pop						\n"
-	"	.endm							\n");
-
-static inline void arch_local_irq_restore(unsigned long flags)
-{
-	unsigned long __tmp1;
-
-	__asm__ __volatile__(
-		"arch_local_irq_restore\t%0"
-		: "=r" (__tmp1)
-		: "0" (flags)
-		: "memory");
+	: [flags] "=r" (__tmp1)
+	: "0" (flags)
+	: "memory");
 }
 
 static inline void __arch_local_irq_restore(unsigned long flags)
 {
-	unsigned long __tmp1;
-
 	__asm__ __volatile__(
-		"arch_local_irq_restore\t%0"
-		: "=r" (__tmp1)
-		: "0" (flags)
-		: "memory");
+	"	.set	push						\n"
+	"	.set	noreorder					\n"
+	"	.set	noat						\n"
+#if defined(CONFIG_IRQ_CPU)
+	/*
+	 * Slow, but doesn't suffer from a relatively unlikely race
+	 * condition we've had since day 1.
+	 */
+	"	beqz	%[flags], 1f					\n"
+	"	di							\n"
+	"	ei							\n"
+	"1:								\n"
+#else
+	/*
+	 * Fast, dangerous.  Life is fun, life is good.
+	 */
+	"	mfc0	$1, $12						\n"
+	"	ins	$1, %[flags], 0, 1				\n"
+	"	mtc0	$1, $12						\n"
+#endif
+	"	" __stringify(__irq_disable_hazard) "			\n"
+	"	.set	pop						\n"
+	: [flags] "=r" (flags)
+	: "0" (flags)
+	: "memory");
 }
 #else
 /* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
@@ -115,8 +121,18 @@
 #endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
 
 
-__asm__(
-	"	.macro	arch_local_irq_enable				\n"
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of call overhead on each local_irq_enable()
+	 */
+	smtc_ipi_replay();
+#endif
+	__asm__ __volatile__(
 	"	.set	push						\n"
 	"	.set	reorder						\n"
 	"	.set	noat						\n"
@@ -133,45 +149,28 @@
 	"	xori	$1,0x1e						\n"
 	"	mtc0	$1,$12						\n"
 #endif
-	"	irq_enable_hazard					\n"
+	"	" __stringify(__irq_enable_hazard) "			\n"
 	"	.set	pop						\n"
-	"	.endm");
-
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of call overhead on each local_irq_enable()
-	 */
-	smtc_ipi_replay();
-#endif
-	__asm__ __volatile__(
-		"arch_local_irq_enable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
+	: /* no outputs */
+	: /* no inputs */
+	: "memory");
 }
 
-
-__asm__(
-	"	.macro	arch_local_save_flags flags			\n"
-	"	.set	push						\n"
-	"	.set	reorder						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\flags, $2, 1					\n"
-#else
-	"	mfc0	\\flags, $12					\n"
-#endif
-	"	.set	pop						\n"
-	"	.endm							\n");
-
 static inline unsigned long arch_local_save_flags(void)
 {
 	unsigned long flags;
-	asm volatile("arch_local_save_flags %0" : "=r" (flags));
+
+	__asm__ __volatile__(
+	"	.set	push						\n"
+	"	.set	reorder						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	%[flags], $2, 1					\n"
+#else
+	"	mfc0	%[flags], $12					\n"
+#endif
+	"	.set	pop						\n"
+	: [flags] "=r" (flags));
+
 	return flags;
 }
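
Two extended-asm features do the heavy lifting in the irqflags.h conversion:
named operands ([flags] in the constraint list lets the template say %[flags]
instead of a positional %0) and the "0" matching constraint (the input must
occupy the same register as output 0, which is how arch_local_irq_restore
consumes flags and writes its scratch result through one register). A compact
sketch of both, with hypothetical helper names:

    static inline unsigned long read_c0_status(void)
    {
    	unsigned long flags;

    	/* %[flags] in the template binds to the operand named below. */
    	asm volatile(
    	"	mfc0	%[flags], $12\n"
    	: [flags] "=r" (flags));

    	return flags;
    }

    static inline unsigned long set_low_bit(unsigned long flags)
    {
    	unsigned long tmp;

    	/* "0" ties the input to output 0's register: an in-place update. */
    	asm volatile(
    	"	ori	%[flags], 1\n"
    	: [flags] "=r" (tmp)
    	: "0" (flags));

    	return tmp;
    }
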
 
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 49d220c..952701c 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -26,10 +26,15 @@
 
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
 
-#define TLBMISS_HANDLER_SETUP_PGD(pgd)				\
-	tlbmiss_handler_setup_pgd((unsigned long)(pgd))
-
-extern void tlbmiss_handler_setup_pgd(unsigned long pgd);
+#define TLBMISS_HANDLER_SETUP_PGD(pgd)					\
+do {									\
+	void (*tlbmiss_handler_setup_pgd)(unsigned long);		\
+	extern u32 tlbmiss_handler_setup_pgd_array[16];			\
+									\
+	tlbmiss_handler_setup_pgd =					\
+		(__typeof__(tlbmiss_handler_setup_pgd)) tlbmiss_handler_setup_pgd_array; \
+	tlbmiss_handler_setup_pgd((unsigned long)(pgd));		\
+} while (0)
 
 #define TLBMISS_HANDLER_SETUP()						\
 	do {								\
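
The new TLBMISS_HANDLER_SETUP_PGD() makes the call into generated code
explicit: tlbmiss_handler_setup_pgd_array is an array of instruction words
that tlbex.c assembles at boot (see the tlbex.c hunks below), and the caller
reaches it by casting the array's address to a function pointer. The idiom in
isolation, with hypothetical names, assuming the code has already been
generated and the icache flushed:

    #include <linux/types.h>	/* u32 */

    extern u32 generated_stub[16];	/* filled with instructions at boot */

    static void call_generated_stub(unsigned long arg)
    {
    	void (*fn)(unsigned long);

    	/* The array's address doubles as the code's entry point. */
    	fn = (void (*)(unsigned long))generated_stub;
    	fn(arg);
    }
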
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index fdc62fb..8b8f6b3 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -8,6 +8,7 @@
 #ifndef _ASM_PGTABLE_H
 #define _ASM_PGTABLE_H
 
+#include <linux/mm_types.h>
 #include <linux/mmzone.h>
 #ifdef CONFIG_32BIT
 #include <asm/pgtable-32.h>
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 5130c88..78d201f 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -71,7 +71,6 @@
 		"	 nop						\n"
 		"	srl	%[my_ticket], %[ticket], 16		\n"
 		"	andi	%[ticket], %[ticket], 0xffff		\n"
-		"	andi	%[my_ticket], %[my_ticket], 0xffff	\n"
 		"	bne	%[ticket], %[my_ticket], 4f		\n"
 		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
 		"2:							\n"
@@ -105,7 +104,6 @@
 		"	beqz	%[my_ticket], 1b			\n"
 		"	 srl	%[my_ticket], %[ticket], 16		\n"
 		"	andi	%[ticket], %[ticket], 0xffff		\n"
-		"	andi	%[my_ticket], %[my_ticket], 0xffff	\n"
 		"	bne	%[ticket], %[my_ticket], 4f		\n"
 		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
 		"2:							\n"
@@ -153,7 +151,6 @@
 		"							\n"
 		"1:	ll	%[ticket], %[ticket_ptr]		\n"
 		"	srl	%[my_ticket], %[ticket], 16		\n"
-		"	andi	%[my_ticket], %[my_ticket], 0xffff	\n"
 		"	andi	%[now_serving], %[ticket], 0xffff	\n"
 		"	bne	%[my_ticket], %[now_serving], 3f	\n"
 		"	 addu	%[ticket], %[ticket], %[inc]		\n"
@@ -178,7 +175,6 @@
 		"							\n"
 		"1:	ll	%[ticket], %[ticket_ptr]		\n"
 		"	srl	%[my_ticket], %[ticket], 16		\n"
-		"	andi	%[my_ticket], %[my_ticket], 0xffff	\n"
 		"	andi	%[now_serving], %[ticket], 0xffff	\n"
 		"	bne	%[my_ticket], %[now_serving], 3f	\n"
 		"	 addu	%[ticket], %[ticket], %[inc]		\n"
@@ -242,25 +238,16 @@
 		: "m" (rw->lock)
 		: "memory");
 	} else {
-		__asm__ __volatile__(
-		"	.set	noreorder	# arch_read_lock	\n"
-		"1:	ll	%1, %2					\n"
-		"	bltz	%1, 3f					\n"
-		"	 addu	%1, 1					\n"
-		"2:	sc	%1, %0					\n"
-		"	beqz	%1, 1b					\n"
-		"	 nop						\n"
-		"	.subsection 2					\n"
-		"3:	ll	%1, %2					\n"
-		"	bltz	%1, 3b					\n"
-		"	 addu	%1, 1					\n"
-		"	b	2b					\n"
-		"	 nop						\n"
-		"	.previous					\n"
-		"	.set	reorder					\n"
-		: "=m" (rw->lock), "=&r" (tmp)
-		: "m" (rw->lock)
-		: "memory");
+		do {
+			__asm__ __volatile__(
+			"1:	ll	%1, %2	# arch_read_lock	\n"
+			"	bltz	%1, 1b				\n"
+			"	 addu	%1, 1				\n"
+			"2:	sc	%1, %0				\n"
+			: "=m" (rw->lock), "=&r" (tmp)
+			: "m" (rw->lock)
+			: "memory");
+		} while (unlikely(!tmp));
 	}
 
 	smp_llsc_mb();
@@ -285,21 +272,15 @@
 		: "m" (rw->lock)
 		: "memory");
 	} else {
-		__asm__ __volatile__(
-		"	.set	noreorder	# arch_read_unlock	\n"
-		"1:	ll	%1, %2					\n"
-		"	sub	%1, 1					\n"
-		"	sc	%1, %0					\n"
-		"	beqz	%1, 2f					\n"
-		"	 nop						\n"
-		"	.subsection 2					\n"
-		"2:	b	1b					\n"
-		"	 nop						\n"
-		"	.previous					\n"
-		"	.set	reorder					\n"
-		: "=m" (rw->lock), "=&r" (tmp)
-		: "m" (rw->lock)
-		: "memory");
+		do {
+			__asm__ __volatile__(
+			"1:	ll	%1, %2	# arch_read_unlock	\n"
+			"	sub	%1, 1				\n"
+			"	sc	%1, %0				\n"
+			: "=m" (rw->lock), "=&r" (tmp)
+			: "m" (rw->lock)
+			: "memory");
+		} while (unlikely(!tmp));
 	}
 }
 
@@ -321,25 +302,16 @@
 		: "m" (rw->lock)
 		: "memory");
 	} else {
-		__asm__ __volatile__(
-		"	.set	noreorder	# arch_write_lock	\n"
-		"1:	ll	%1, %2					\n"
-		"	bnez	%1, 3f					\n"
-		"	 lui	%1, 0x8000				\n"
-		"2:	sc	%1, %0					\n"
-		"	beqz	%1, 3f					\n"
-		"	 nop						\n"
-		"	.subsection 2					\n"
-		"3:	ll	%1, %2					\n"
-		"	bnez	%1, 3b					\n"
-		"	 lui	%1, 0x8000				\n"
-		"	b	2b					\n"
-		"	 nop						\n"
-		"	.previous					\n"
-		"	.set	reorder					\n"
-		: "=m" (rw->lock), "=&r" (tmp)
-		: "m" (rw->lock)
-		: "memory");
+		do {
+			__asm__ __volatile__(
+			"1:	ll	%1, %2	# arch_write_lock	\n"
+			"	bnez	%1, 1b				\n"
+			"	 lui	%1, 0x8000			\n"
+			"2:	sc	%1, %0				\n"
+			: "=m" (rw->lock), "=&r" (tmp)
+			: "m" (rw->lock)
+			: "memory");
+		} while (unlikely(!tmp));
 	}
 
 	smp_llsc_mb();
@@ -424,25 +396,21 @@
 		: "m" (rw->lock)
 		: "memory");
 	} else {
-		__asm__ __volatile__(
-		"	.set	noreorder	# arch_write_trylock	\n"
-		"	li	%2, 0					\n"
-		"1:	ll	%1, %3					\n"
-		"	bnez	%1, 2f					\n"
-		"	lui	%1, 0x8000				\n"
-		"	sc	%1, %0					\n"
-		"	beqz	%1, 3f					\n"
-		"	 li	%2, 1					\n"
-		"2:							\n"
-		__WEAK_LLSC_MB
-		"	.subsection 2					\n"
-		"3:	b	1b					\n"
-		"	 li	%2, 0					\n"
-		"	.previous					\n"
-		"	.set	reorder					\n"
-		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
-		: "m" (rw->lock)
-		: "memory");
+		do {
+			__asm__ __volatile__(
+			"	ll	%1, %3	# arch_write_trylock	\n"
+			"	li	%2, 0				\n"
+			"	bnez	%1, 2f				\n"
+			"	lui	%1, 0x8000			\n"
+			"	sc	%1, %0				\n"
+			"	li	%2, 1				\n"
+			"2:						\n"
+			: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
+			: "m" (rw->lock)
+			: "memory");
+		} while (unlikely(!tmp));
+
+		smp_llsc_mb();
 	}
 
 	return ret;
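
Two kinds of cleanup run through the spinlock.h hunks. The dropped andi
instructions were redundant masks: after srl %[my_ticket], %[ticket], 16 the
upper halfword is already zero, so the andi with 0xffff was a no-op. The
larger rewrites all share one shape: the asm block makes a single ll/sc
attempt, sc deposits its success flag in tmp (0 when the reservation was
lost), and a plain C do/while retries, replacing the hand-written .subsection
retry stubs. The pattern reduced to a hypothetical atomic increment, assuming
<linux/compiler.h> for unlikely():

    static inline void atomic_inc_sketch(volatile int *p)
    {
    	int tmp;

    	do {
    		__asm__ __volatile__(
    		"1:	ll	%1, %2	# atomic_inc_sketch	\n"
    		"	addu	%1, 1				\n"
    		"	sc	%1, %0				\n"
    		: "=m" (*p), "=&r" (tmp)
    		: "m" (*p)
    		: "memory");
    	} while (unlikely(!tmp));
    }
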
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 178f792..895320e 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -58,8 +58,12 @@
 #define init_stack		(init_thread_union.stack)
 
 /* How to get the thread information struct from C.  */
-register struct thread_info *__current_thread_info __asm__("$28");
-#define current_thread_info()  __current_thread_info
+static inline struct thread_info *current_thread_info(void)
+{
+	register struct thread_info *__current_thread_info __asm__("$28");
+
+	return __current_thread_info;
+}
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index c3abb881..3b98b7b 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -60,9 +60,9 @@
 extern asmlinkage void r4k_wait(void);
 extern asmlinkage void rollback_handle_int(void);
 extern asmlinkage void handle_int(void);
-extern asmlinkage void handle_tlbm(void);
-extern asmlinkage void handle_tlbl(void);
-extern asmlinkage void handle_tlbs(void);
+extern u32 handle_tlbl[];
+extern u32 handle_tlbs[];
+extern u32 handle_tlbm[];
 extern asmlinkage void handle_adel(void);
 extern asmlinkage void handle_ades(void);
 extern asmlinkage void handle_ibe(void);
@@ -1390,9 +1390,8 @@
 void __init *set_except_vector(int n, void *addr)
 {
 	unsigned long handler = (unsigned long) addr;
-	unsigned long old_handler = exception_handlers[n];
+	unsigned long old_handler = xchg(&exception_handlers[n], handler);
 
-	exception_handlers[n] = handler;
 	if (n == 0 && cpu_has_divec) {
 		unsigned long jump_mask = ~((1 << 28) - 1);
 		u32 *buf = (u32 *)(ebase + 0x200);
@@ -1410,7 +1409,7 @@
 	return (void *)old_handler;
 }
 
-static asmlinkage void do_default_vi(void)
+static void do_default_vi(void)
 {
 	show_regs(get_irq_regs());
 	panic("Caught unexpected vectored interrupt.");
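
The set_except_vector() hunk closes a read-modify-write race: reading
exception_handlers[n] and then storing the new handler as two separate steps
lets two racing installers observe the same old handler. xchg() publishes the
new value and returns the previous one in a single atomic operation. Reduced
to a hypothetical helper (xchg() is available through <linux/atomic.h>):

    #include <linux/atomic.h>	/* xchg() */

    static unsigned long handler_table[8];

    /* Atomically install a new entry and hand back the old one. */
    static unsigned long install_entry(int n, unsigned long new)
    {
    	return xchg(&handler_table[n], new);
    }
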
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
index cd160be..6807f71 100644
--- a/arch/mips/lib/mips-atomic.c
+++ b/arch/mips/lib/mips-atomic.c
@@ -13,6 +13,7 @@
 #include <linux/compiler.h>
 #include <linux/preempt.h>
 #include <linux/export.h>
+#include <linux/stringify.h>
 
 #if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
 
@@ -34,8 +35,11 @@
  *
  * Workaround: mask EXL bit of the result or place a nop before mfc0.
  */
-__asm__(
-	"	.macro	arch_local_irq_disable\n"
+notrace void arch_local_irq_disable(void)
+{
+	preempt_disable();
+
+	__asm__ __volatile__(
 	"	.set	push						\n"
 	"	.set	noat						\n"
 #ifdef CONFIG_MIPS_MT_SMTC
@@ -52,89 +56,54 @@
 	"	.set	noreorder					\n"
 	"	mtc0	$1,$12						\n"
 #endif
-	"	irq_disable_hazard					\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
 	"	.set	pop						\n"
-	"	.endm							\n");
+	: /* no outputs */
+	: /* no inputs */
+	: "memory");
 
-notrace void arch_local_irq_disable(void)
-{
-	preempt_disable();
-	__asm__ __volatile__(
-		"arch_local_irq_disable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
 	preempt_enable();
 }
 EXPORT_SYMBOL(arch_local_irq_disable);
 
 
-__asm__(
-	"	.macro	arch_local_irq_save result			\n"
+notrace unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
+
+	preempt_disable();
+
+	__asm__ __volatile__(
 	"	.set	push						\n"
 	"	.set	reorder						\n"
 	"	.set	noat						\n"
 #ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\result, $2, 1					\n"
-	"	ori	$1, \\result, 0x400				\n"
+	"	mfc0	%[flags], $2, 1					\n"
+	"	ori	$1, %[flags], 0x400				\n"
 	"	.set	noreorder					\n"
 	"	mtc0	$1, $2, 1					\n"
-	"	andi	\\result, \\result, 0x400			\n"
+	"	andi	%[flags], %[flags], 0x400			\n"
 #elif defined(CONFIG_CPU_MIPSR2)
 	/* see irqflags.h for inline function */
 #else
-	"	mfc0	\\result, $12					\n"
-	"	ori	$1, \\result, 0x1f				\n"
+	"	mfc0	%[flags], $12					\n"
+	"	ori	$1, %[flags], 0x1f				\n"
 	"	xori	$1, 0x1f					\n"
 	"	.set	noreorder					\n"
 	"	mtc0	$1, $12						\n"
 #endif
-	"	irq_disable_hazard					\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
 	"	.set	pop						\n"
-	"	.endm							\n");
+	: [flags] "=r" (flags)
+	: /* no inputs */
+	: "memory");
 
-notrace unsigned long arch_local_irq_save(void)
-{
-	unsigned long flags;
-	preempt_disable();
-	asm volatile("arch_local_irq_save\t%0"
-		     : "=r" (flags)
-		     : /* no inputs */
-		     : "memory");
 	preempt_enable();
+
 	return flags;
 }
 EXPORT_SYMBOL(arch_local_irq_save);
 
-
-__asm__(
-	"	.macro	arch_local_irq_restore flags			\n"
-	"	.set	push						\n"
-	"	.set	noreorder					\n"
-	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"mfc0	$1, $2, 1						\n"
-	"andi	\\flags, 0x400						\n"
-	"ori	$1, 0x400						\n"
-	"xori	$1, 0x400						\n"
-	"or	\\flags, $1						\n"
-	"mtc0	\\flags, $2, 1						\n"
-#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
-	/* see irqflags.h for inline function */
-#elif defined(CONFIG_CPU_MIPSR2)
-	/* see irqflags.h for inline function */
-#else
-	"	mfc0	$1, $12						\n"
-	"	andi	\\flags, 1					\n"
-	"	ori	$1, 0x1f					\n"
-	"	xori	$1, 0x1f					\n"
-	"	or	\\flags, $1					\n"
-	"	mtc0	\\flags, $12					\n"
-#endif
-	"	irq_disable_hazard					\n"
-	"	.set	pop						\n"
-	"	.endm							\n");
-
 notrace void arch_local_irq_restore(unsigned long flags)
 {
 	unsigned long __tmp1;
@@ -149,11 +118,36 @@
 		smtc_ipi_replay();
 #endif
 	preempt_disable();
+
 	__asm__ __volatile__(
-		"arch_local_irq_restore\t%0"
-		: "=r" (__tmp1)
-		: "0" (flags)
-		: "memory");
+	"	.set	push						\n"
+	"	.set	noreorder					\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	$1, $2, 1					\n"
+	"	andi	%[flags], 0x400					\n"
+	"	ori	$1, 0x400					\n"
+	"	xori	$1, 0x400					\n"
+	"	or	%[flags], $1					\n"
+	"	mtc0	%[flags], $2, 1					\n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+	/* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	$1, $12						\n"
+	"	andi	%[flags], 1					\n"
+	"	ori	$1, 0x1f					\n"
+	"	xori	$1, 0x1f					\n"
+	"	or	%[flags], $1					\n"
+	"	mtc0	%[flags], $12					\n"
+#endif
+	"	" __stringify(__irq_disable_hazard) "			\n"
+	"	.set	pop						\n"
+	: [flags] "=r" (__tmp1)
+	: "0" (flags)
+	: "memory");
+
 	preempt_enable();
 }
 EXPORT_SYMBOL(arch_local_irq_restore);
@@ -164,11 +158,36 @@
 	unsigned long __tmp1;
 
 	preempt_disable();
+
 	__asm__ __volatile__(
-		"arch_local_irq_restore\t%0"
-		: "=r" (__tmp1)
-		: "0" (flags)
-		: "memory");
+	"	.set	push						\n"
+	"	.set	noreorder					\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	$1, $2, 1					\n"
+	"	andi	%[flags], 0x400					\n"
+	"	ori	$1, 0x400					\n"
+	"	xori	$1, 0x400					\n"
+	"	or	%[flags], $1					\n"
+	"	mtc0	%[flags], $2, 1					\n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+	/* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	$1, $12						\n"
+	"	andi	%[flags], 1					\n"
+	"	ori	$1, 0x1f					\n"
+	"	xori	$1, 0x1f					\n"
+	"	or	%[flags], $1					\n"
+	"	mtc0	%[flags], $12					\n"
+#endif
+	"	" __stringify(__irq_disable_hazard) "			\n"
+	"	.set	pop						\n"
+	: [flags] "=r" (__tmp1)
+	: "0" (flags)
+	: "memory");
+
 	preempt_enable();
 }
 EXPORT_SYMBOL(__arch_local_irq_restore);
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 6bc28b4d..3b3822a 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1458,17 +1458,17 @@
 u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
 u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned;
+u32 tlbmiss_handler_setup_pgd_array[16] __cacheline_aligned;
 
 static void __cpuinit build_r4000_setup_pgd(void)
 {
 	const int a0 = 4;
 	const int a1 = 5;
-	u32 *p = tlbmiss_handler_setup_pgd;
+	u32 *p = tlbmiss_handler_setup_pgd_array;
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
 
-	memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd));
+	memset(tlbmiss_handler_setup_pgd_array, 0, sizeof(tlbmiss_handler_setup_pgd_array));
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
 
@@ -1496,15 +1496,15 @@
 		uasm_i_jr(&p, 31);
 		UASM_i_MTC0(&p, a0, 31, pgd_reg);
 	}
-	if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd))
-		panic("tlbmiss_handler_setup_pgd space exceeded");
+	if (p - tlbmiss_handler_setup_pgd_array > ARRAY_SIZE(tlbmiss_handler_setup_pgd_array))
+		panic("tlbmiss_handler_setup_pgd_array space exceeded");
 	uasm_resolve_relocs(relocs, labels);
-	pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
-		 (unsigned int)(p - tlbmiss_handler_setup_pgd));
+	pr_debug("Wrote tlbmiss_handler_setup_pgd_array (%u instructions).\n",
+		 (unsigned int)(p - tlbmiss_handler_setup_pgd_array));
 
 	dump_handler("tlbmiss_handler",
-		     tlbmiss_handler_setup_pgd,
-		     ARRAY_SIZE(tlbmiss_handler_setup_pgd));
+		     tlbmiss_handler_setup_pgd_array,
+		     ARRAY_SIZE(tlbmiss_handler_setup_pgd_array));
 }
 #endif
 
@@ -2213,7 +2213,7 @@
 	local_flush_icache_range((unsigned long)handle_tlbm,
 			   (unsigned long)handle_tlbm + sizeof(handle_tlbm));
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-	local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
-			   (unsigned long)tlbmiss_handler_setup_pgd + sizeof(handle_tlbm));
+	local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd_array,
+			   (unsigned long)tlbmiss_handler_setup_pgd_array + sizeof(tlbmiss_handler_setup_pgd_array));
 #endif
 }
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index fff58ac1..2e21b76 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -69,7 +69,7 @@
 	/* Nothing to do ...  */
 }
 
-int rt_timer_irq;
+unsigned int rt_timer_irq;
 
 static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent);
 static DEFINE_PER_CPU(char [11], hub_rt_name);