powerpc: Fix usage of register macros getting ready for %r0 change

Anything that uses a constructed instruction (i.e. from ppc-opcode.h)
needs to use the new R0 macro, as %r0 is not going to work.
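
The constructed-instruction macros fold the register number into the
instruction encoding, so they need a plain integer. Roughly (illustrative
sketch; see ppc-opcode.h for the real definitions):

	#define ___PPC_RA(a)	(((a) & 0x1f) << 16)	/* shifts a number into a field */
	#define R0		0			/* new macro: just the number */

A register name like %r0 is not a plain number and can't be used in that
arithmetic, which is why the R0..R31 forms are needed there.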

Also convert usages of macros where we are just determining an offset
(usually for a load/store), like:
	std	r14,STK_REG(r14)(r1)
We can't use STK_REG(r14) here, as %r14 doesn't work in the STK_REG
macro since it's just calculating an offset.
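
For example, with STK_REG defined roughly along these lines (illustrative
only; the real definition lives in ppc_asm.h):

	#define R14		14			/* plain number, safe in expressions */
	#define STK_REG(i)	(112 + ((i) - 14) * 8)	/* just computes a stack offset */

the store above becomes:

	std	r14,STK_REG(R14)(r1)	/* register operand stays r14,
					   the offset macro takes R14 */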

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 6048a00..a623b1d 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -67,15 +67,15 @@
  */
 .macro kvm_handler_common intno, srr0, flags
 	/* Restore host stack pointer */
-	PPC_STL	r1, VCPU_GPR(r1)(r4)
-	PPC_STL	r2, VCPU_GPR(r2)(r4)
+	PPC_STL	r1, VCPU_GPR(R1)(r4)
+	PPC_STL	r2, VCPU_GPR(R2)(r4)
 	PPC_LL	r1, VCPU_HOST_STACK(r4)
 	PPC_LL	r2, HOST_R2(r1)
 
 	mfspr	r10, SPRN_PID
 	lwz	r8, VCPU_HOST_PID(r4)
 	PPC_LL	r11, VCPU_SHARED(r4)
-	PPC_STL	r14, VCPU_GPR(r14)(r4) /* We need a non-volatile GPR. */
+	PPC_STL	r14, VCPU_GPR(R14)(r4) /* We need a non-volatile GPR. */
 	li	r14, \intno
 
 	stw	r10, VCPU_GUEST_PID(r4)
@@ -137,27 +137,27 @@
 	 */
 
 	mfspr	r3, SPRN_EPLC	/* will already have correct ELPID and EGS */
-	PPC_STL	r15, VCPU_GPR(r15)(r4)
-	PPC_STL	r16, VCPU_GPR(r16)(r4)
-	PPC_STL	r17, VCPU_GPR(r17)(r4)
-	PPC_STL	r18, VCPU_GPR(r18)(r4)
-	PPC_STL	r19, VCPU_GPR(r19)(r4)
+	PPC_STL	r15, VCPU_GPR(R15)(r4)
+	PPC_STL	r16, VCPU_GPR(R16)(r4)
+	PPC_STL	r17, VCPU_GPR(R17)(r4)
+	PPC_STL	r18, VCPU_GPR(R18)(r4)
+	PPC_STL	r19, VCPU_GPR(R19)(r4)
 	mr	r8, r3
-	PPC_STL	r20, VCPU_GPR(r20)(r4)
+	PPC_STL	r20, VCPU_GPR(R20)(r4)
 	rlwimi	r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS
-	PPC_STL	r21, VCPU_GPR(r21)(r4)
+	PPC_STL	r21, VCPU_GPR(R21)(r4)
 	rlwimi	r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR
-	PPC_STL	r22, VCPU_GPR(r22)(r4)
+	PPC_STL	r22, VCPU_GPR(R22)(r4)
 	rlwimi	r8, r10, EPC_EPID_SHIFT, EPC_EPID
-	PPC_STL	r23, VCPU_GPR(r23)(r4)
-	PPC_STL	r24, VCPU_GPR(r24)(r4)
-	PPC_STL	r25, VCPU_GPR(r25)(r4)
-	PPC_STL	r26, VCPU_GPR(r26)(r4)
-	PPC_STL	r27, VCPU_GPR(r27)(r4)
-	PPC_STL	r28, VCPU_GPR(r28)(r4)
-	PPC_STL	r29, VCPU_GPR(r29)(r4)
-	PPC_STL	r30, VCPU_GPR(r30)(r4)
-	PPC_STL	r31, VCPU_GPR(r31)(r4)
+	PPC_STL	r23, VCPU_GPR(R23)(r4)
+	PPC_STL	r24, VCPU_GPR(R24)(r4)
+	PPC_STL	r25, VCPU_GPR(R25)(r4)
+	PPC_STL	r26, VCPU_GPR(R26)(r4)
+	PPC_STL	r27, VCPU_GPR(R27)(r4)
+	PPC_STL	r28, VCPU_GPR(R28)(r4)
+	PPC_STL	r29, VCPU_GPR(R29)(r4)
+	PPC_STL	r30, VCPU_GPR(R30)(r4)
+	PPC_STL	r31, VCPU_GPR(R31)(r4)
 	mtspr	SPRN_EPLC, r8
 
 	/* disable preemption, so we are sure we hit the fixup handler */
@@ -211,24 +211,24 @@
 .macro kvm_handler intno srr0, srr1, flags
 _GLOBAL(kvmppc_handler_\intno\()_\srr1)
 	GET_VCPU(r11, r10)
-	PPC_STL r3, VCPU_GPR(r3)(r11)
+	PPC_STL r3, VCPU_GPR(R3)(r11)
 	mfspr	r3, SPRN_SPRG_RSCRATCH0
-	PPC_STL	r4, VCPU_GPR(r4)(r11)
+	PPC_STL	r4, VCPU_GPR(R4)(r11)
 	PPC_LL	r4, THREAD_NORMSAVE(0)(r10)
-	PPC_STL	r5, VCPU_GPR(r5)(r11)
+	PPC_STL	r5, VCPU_GPR(R5)(r11)
 	stw	r13, VCPU_CR(r11)
 	mfspr	r5, \srr0
-	PPC_STL	r3, VCPU_GPR(r10)(r11)
+	PPC_STL	r3, VCPU_GPR(R10)(r11)
 	PPC_LL	r3, THREAD_NORMSAVE(2)(r10)
-	PPC_STL	r6, VCPU_GPR(r6)(r11)
-	PPC_STL	r4, VCPU_GPR(r11)(r11)
+	PPC_STL	r6, VCPU_GPR(R6)(r11)
+	PPC_STL	r4, VCPU_GPR(R11)(r11)
 	mfspr	r6, \srr1
-	PPC_STL	r7, VCPU_GPR(r7)(r11)
-	PPC_STL	r8, VCPU_GPR(r8)(r11)
-	PPC_STL	r9, VCPU_GPR(r9)(r11)
-	PPC_STL r3, VCPU_GPR(r13)(r11)
+	PPC_STL	r7, VCPU_GPR(R7)(r11)
+	PPC_STL	r8, VCPU_GPR(R8)(r11)
+	PPC_STL	r9, VCPU_GPR(R9)(r11)
+	PPC_STL r3, VCPU_GPR(R13)(r11)
 	mfctr	r7
-	PPC_STL	r12, VCPU_GPR(r12)(r11)
+	PPC_STL	r12, VCPU_GPR(R12)(r11)
 	PPC_STL	r7, VCPU_CTR(r11)
 	mr	r4, r11
 	kvm_handler_common \intno, \srr0, \flags
@@ -238,25 +238,25 @@
 _GLOBAL(kvmppc_handler_\intno\()_\srr1)
 	mfspr	r10, SPRN_SPRG_THREAD
 	GET_VCPU(r11, r10)
-	PPC_STL r3, VCPU_GPR(r3)(r11)
+	PPC_STL r3, VCPU_GPR(R3)(r11)
 	mfspr	r3, \scratch
-	PPC_STL	r4, VCPU_GPR(r4)(r11)
+	PPC_STL	r4, VCPU_GPR(R4)(r11)
 	PPC_LL	r4, GPR9(r8)
-	PPC_STL	r5, VCPU_GPR(r5)(r11)
+	PPC_STL	r5, VCPU_GPR(R5)(r11)
 	stw	r9, VCPU_CR(r11)
 	mfspr	r5, \srr0
-	PPC_STL	r3, VCPU_GPR(r8)(r11)
+	PPC_STL	r3, VCPU_GPR(R8)(r11)
 	PPC_LL	r3, GPR10(r8)
-	PPC_STL	r6, VCPU_GPR(r6)(r11)
-	PPC_STL	r4, VCPU_GPR(r9)(r11)
+	PPC_STL	r6, VCPU_GPR(R6)(r11)
+	PPC_STL	r4, VCPU_GPR(R9)(r11)
 	mfspr	r6, \srr1
 	PPC_LL	r4, GPR11(r8)
-	PPC_STL	r7, VCPU_GPR(r7)(r11)
-	PPC_STL r3, VCPU_GPR(r10)(r11)
+	PPC_STL	r7, VCPU_GPR(R7)(r11)
+	PPC_STL r3, VCPU_GPR(R10)(r11)
 	mfctr	r7
-	PPC_STL	r12, VCPU_GPR(r12)(r11)
-	PPC_STL r13, VCPU_GPR(r13)(r11)
-	PPC_STL	r4, VCPU_GPR(r11)(r11)
+	PPC_STL	r12, VCPU_GPR(R12)(r11)
+	PPC_STL r13, VCPU_GPR(R13)(r11)
+	PPC_STL	r4, VCPU_GPR(R11)(r11)
 	PPC_STL	r7, VCPU_CTR(r11)
 	mr	r4, r11
 	kvm_handler_common \intno, \srr0, \flags
@@ -310,7 +310,7 @@
 _GLOBAL(kvmppc_resume_host)
 	/* Save remaining volatile guest register state to vcpu. */
 	mfspr	r3, SPRN_VRSAVE
-	PPC_STL	r0, VCPU_GPR(r0)(r4)
+	PPC_STL	r0, VCPU_GPR(R0)(r4)
 	mflr	r5
 	mfspr	r6, SPRN_SPRG4
 	PPC_STL	r5, VCPU_LR(r4)
@@ -358,27 +358,27 @@
 
 	/* Restore vcpu pointer and the nonvolatiles we used. */
 	mr	r4, r14
-	PPC_LL	r14, VCPU_GPR(r14)(r4)
+	PPC_LL	r14, VCPU_GPR(R14)(r4)
 
 	andi.	r5, r3, RESUME_FLAG_NV
 	beq	skip_nv_load
-	PPC_LL	r15, VCPU_GPR(r15)(r4)
-	PPC_LL	r16, VCPU_GPR(r16)(r4)
-	PPC_LL	r17, VCPU_GPR(r17)(r4)
-	PPC_LL	r18, VCPU_GPR(r18)(r4)
-	PPC_LL	r19, VCPU_GPR(r19)(r4)
-	PPC_LL	r20, VCPU_GPR(r20)(r4)
-	PPC_LL	r21, VCPU_GPR(r21)(r4)
-	PPC_LL	r22, VCPU_GPR(r22)(r4)
-	PPC_LL	r23, VCPU_GPR(r23)(r4)
-	PPC_LL	r24, VCPU_GPR(r24)(r4)
-	PPC_LL	r25, VCPU_GPR(r25)(r4)
-	PPC_LL	r26, VCPU_GPR(r26)(r4)
-	PPC_LL	r27, VCPU_GPR(r27)(r4)
-	PPC_LL	r28, VCPU_GPR(r28)(r4)
-	PPC_LL	r29, VCPU_GPR(r29)(r4)
-	PPC_LL	r30, VCPU_GPR(r30)(r4)
-	PPC_LL	r31, VCPU_GPR(r31)(r4)
+	PPC_LL	r15, VCPU_GPR(R15)(r4)
+	PPC_LL	r16, VCPU_GPR(R16)(r4)
+	PPC_LL	r17, VCPU_GPR(R17)(r4)
+	PPC_LL	r18, VCPU_GPR(R18)(r4)
+	PPC_LL	r19, VCPU_GPR(R19)(r4)
+	PPC_LL	r20, VCPU_GPR(R20)(r4)
+	PPC_LL	r21, VCPU_GPR(R21)(r4)
+	PPC_LL	r22, VCPU_GPR(R22)(r4)
+	PPC_LL	r23, VCPU_GPR(R23)(r4)
+	PPC_LL	r24, VCPU_GPR(R24)(r4)
+	PPC_LL	r25, VCPU_GPR(R25)(r4)
+	PPC_LL	r26, VCPU_GPR(R26)(r4)
+	PPC_LL	r27, VCPU_GPR(R27)(r4)
+	PPC_LL	r28, VCPU_GPR(R28)(r4)
+	PPC_LL	r29, VCPU_GPR(R29)(r4)
+	PPC_LL	r30, VCPU_GPR(R30)(r4)
+	PPC_LL	r31, VCPU_GPR(R31)(r4)
 skip_nv_load:
 	/* Should we return to the guest? */
 	andi.	r5, r3, RESUME_FLAG_HOST
@@ -396,23 +396,23 @@
 	 * non-volatiles.
 	 */
 
-	PPC_STL	r15, VCPU_GPR(r15)(r4)
-	PPC_STL	r16, VCPU_GPR(r16)(r4)
-	PPC_STL	r17, VCPU_GPR(r17)(r4)
-	PPC_STL	r18, VCPU_GPR(r18)(r4)
-	PPC_STL	r19, VCPU_GPR(r19)(r4)
-	PPC_STL	r20, VCPU_GPR(r20)(r4)
-	PPC_STL	r21, VCPU_GPR(r21)(r4)
-	PPC_STL	r22, VCPU_GPR(r22)(r4)
-	PPC_STL	r23, VCPU_GPR(r23)(r4)
-	PPC_STL	r24, VCPU_GPR(r24)(r4)
-	PPC_STL	r25, VCPU_GPR(r25)(r4)
-	PPC_STL	r26, VCPU_GPR(r26)(r4)
-	PPC_STL	r27, VCPU_GPR(r27)(r4)
-	PPC_STL	r28, VCPU_GPR(r28)(r4)
-	PPC_STL	r29, VCPU_GPR(r29)(r4)
-	PPC_STL	r30, VCPU_GPR(r30)(r4)
-	PPC_STL	r31, VCPU_GPR(r31)(r4)
+	PPC_STL	r15, VCPU_GPR(R15)(r4)
+	PPC_STL	r16, VCPU_GPR(R16)(r4)
+	PPC_STL	r17, VCPU_GPR(R17)(r4)
+	PPC_STL	r18, VCPU_GPR(R18)(r4)
+	PPC_STL	r19, VCPU_GPR(R19)(r4)
+	PPC_STL	r20, VCPU_GPR(R20)(r4)
+	PPC_STL	r21, VCPU_GPR(R21)(r4)
+	PPC_STL	r22, VCPU_GPR(R22)(r4)
+	PPC_STL	r23, VCPU_GPR(R23)(r4)
+	PPC_STL	r24, VCPU_GPR(R24)(r4)
+	PPC_STL	r25, VCPU_GPR(R25)(r4)
+	PPC_STL	r26, VCPU_GPR(R26)(r4)
+	PPC_STL	r27, VCPU_GPR(R27)(r4)
+	PPC_STL	r28, VCPU_GPR(R28)(r4)
+	PPC_STL	r29, VCPU_GPR(R29)(r4)
+	PPC_STL	r30, VCPU_GPR(R30)(r4)
+	PPC_STL	r31, VCPU_GPR(R31)(r4)
 
 	/* Load host non-volatile register state from host stack. */
 	PPC_LL	r14, HOST_NV_GPR(r14)(r1)
@@ -478,24 +478,24 @@
 	PPC_STL	r31, HOST_NV_GPR(r31)(r1)
 
 	/* Load guest non-volatiles. */
-	PPC_LL	r14, VCPU_GPR(r14)(r4)
-	PPC_LL	r15, VCPU_GPR(r15)(r4)
-	PPC_LL	r16, VCPU_GPR(r16)(r4)
-	PPC_LL	r17, VCPU_GPR(r17)(r4)
-	PPC_LL	r18, VCPU_GPR(r18)(r4)
-	PPC_LL	r19, VCPU_GPR(r19)(r4)
-	PPC_LL	r20, VCPU_GPR(r20)(r4)
-	PPC_LL	r21, VCPU_GPR(r21)(r4)
-	PPC_LL	r22, VCPU_GPR(r22)(r4)
-	PPC_LL	r23, VCPU_GPR(r23)(r4)
-	PPC_LL	r24, VCPU_GPR(r24)(r4)
-	PPC_LL	r25, VCPU_GPR(r25)(r4)
-	PPC_LL	r26, VCPU_GPR(r26)(r4)
-	PPC_LL	r27, VCPU_GPR(r27)(r4)
-	PPC_LL	r28, VCPU_GPR(r28)(r4)
-	PPC_LL	r29, VCPU_GPR(r29)(r4)
-	PPC_LL	r30, VCPU_GPR(r30)(r4)
-	PPC_LL	r31, VCPU_GPR(r31)(r4)
+	PPC_LL	r14, VCPU_GPR(R14)(r4)
+	PPC_LL	r15, VCPU_GPR(R15)(r4)
+	PPC_LL	r16, VCPU_GPR(R16)(r4)
+	PPC_LL	r17, VCPU_GPR(R17)(r4)
+	PPC_LL	r18, VCPU_GPR(R18)(r4)
+	PPC_LL	r19, VCPU_GPR(R19)(r4)
+	PPC_LL	r20, VCPU_GPR(R20)(r4)
+	PPC_LL	r21, VCPU_GPR(R21)(r4)
+	PPC_LL	r22, VCPU_GPR(R22)(r4)
+	PPC_LL	r23, VCPU_GPR(R23)(r4)
+	PPC_LL	r24, VCPU_GPR(R24)(r4)
+	PPC_LL	r25, VCPU_GPR(R25)(r4)
+	PPC_LL	r26, VCPU_GPR(R26)(r4)
+	PPC_LL	r27, VCPU_GPR(R27)(r4)
+	PPC_LL	r28, VCPU_GPR(R28)(r4)
+	PPC_LL	r29, VCPU_GPR(R29)(r4)
+	PPC_LL	r30, VCPU_GPR(R30)(r4)
+	PPC_LL	r31, VCPU_GPR(R31)(r4)
 
 
 lightweight_exit:
@@ -554,13 +554,13 @@
 	lwz	r7, VCPU_CR(r4)
 	PPC_LL	r8, VCPU_PC(r4)
 	PPC_LD(r9, VCPU_SHARED_MSR, r11)
-	PPC_LL	r0, VCPU_GPR(r0)(r4)
-	PPC_LL	r1, VCPU_GPR(r1)(r4)
-	PPC_LL	r2, VCPU_GPR(r2)(r4)
-	PPC_LL	r10, VCPU_GPR(r10)(r4)
-	PPC_LL	r11, VCPU_GPR(r11)(r4)
-	PPC_LL	r12, VCPU_GPR(r12)(r4)
-	PPC_LL	r13, VCPU_GPR(r13)(r4)
+	PPC_LL	r0, VCPU_GPR(R0)(r4)
+	PPC_LL	r1, VCPU_GPR(R1)(r4)
+	PPC_LL	r2, VCPU_GPR(R2)(r4)
+	PPC_LL	r10, VCPU_GPR(R10)(r4)
+	PPC_LL	r11, VCPU_GPR(R11)(r4)
+	PPC_LL	r12, VCPU_GPR(R12)(r4)
+	PPC_LL	r13, VCPU_GPR(R13)(r4)
 	mtlr	r3
 	mtxer	r5
 	mtctr	r6
@@ -586,12 +586,12 @@
 	mtcr	r7
 
 	/* Finish loading guest volatiles and jump to guest. */
-	PPC_LL	r5, VCPU_GPR(r5)(r4)
-	PPC_LL	r6, VCPU_GPR(r6)(r4)
-	PPC_LL	r7, VCPU_GPR(r7)(r4)
-	PPC_LL	r8, VCPU_GPR(r8)(r4)
-	PPC_LL	r9, VCPU_GPR(r9)(r4)
+	PPC_LL	r5, VCPU_GPR(R5)(r4)
+	PPC_LL	r6, VCPU_GPR(R6)(r4)
+	PPC_LL	r7, VCPU_GPR(R7)(r4)
+	PPC_LL	r8, VCPU_GPR(R8)(r4)
+	PPC_LL	r9, VCPU_GPR(R9)(r4)
 
-	PPC_LL	r3, VCPU_GPR(r3)(r4)
-	PPC_LL	r4, VCPU_GPR(r4)(r4)
+	PPC_LL	r3, VCPU_GPR(R3)(r4)
+	PPC_LL	r4, VCPU_GPR(R4)(r4)
 	rfi