s390: convert ".insn" encoding to instruction names

With z10 as the minimum supported machine generation, many ".insn"
encodings can now be converted to instruction names. There are a couple
of exceptions:
- stfle is used from the als code built for z900 and cannot be converted
- a few ".insn" directives encode unsupported instruction formats

The generated code is identical before/after this change.
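For example, the conversion looks like this (excerpt taken verbatim from
the chacha-s390.S hunks below):

	-	.insn	rilu,0xc20e00000000,LEN,0x40	# clgfi LEN,0x40
	+	clgfi	LEN,0x40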

Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
diff --git a/arch/s390/crypto/chacha-s390.S b/arch/s390/crypto/chacha-s390.S
index badf5c4..9b03362 100644
--- a/arch/s390/crypto/chacha-s390.S
+++ b/arch/s390/crypto/chacha-s390.S
@@ -312,7 +312,7 @@
 	VPERM	XC0,XC0,XC0,BEPERM
 	VPERM	XD0,XD0,XD0,BEPERM
 
-	.insn	rilu,0xc20e00000000,LEN,0x40	# clgfi LEN,0x40
+	clgfi	LEN,0x40
 	jl	.Ltail_4x
 
 	VLM	XT0,XT3,0,INP,0
@@ -339,7 +339,7 @@
 	VPERM	XC0,XC0,XC0,BEPERM
 	VPERM	XD0,XD0,XD0,BEPERM
 
-	.insn	rilu,0xc20e00000000,LEN,0x40	# clgfi LEN,0x40
+	clgfi	LEN,0x40
 	jl	.Ltail_4x
 
 	VLM	XT0,XT3,0,INP,0
@@ -366,7 +366,7 @@
 	VPERM	XC0,XC0,XC0,BEPERM
 	VPERM	XD0,XD0,XD0,BEPERM
 
-	.insn	rilu,0xc20e00000000,LEN,0x40	# clgfi LEN,0x40
+	clgfi	LEN,0x40
 	jl	.Ltail_4x
 
 	VLM	XT0,XT3,0,INP,0
@@ -472,7 +472,7 @@
 #define T3		%v30
 
 ENTRY(chacha20_vx)
-	.insn	rilu,0xc20e00000000,LEN,256	# clgfi LEN,256
+	clgfi	LEN,256
 	jle	chacha20_vx_4x
 	stmg	%r6,%r7,6*8(SP)
 
@@ -725,7 +725,7 @@
 	VPERM	C0,C0,C0,BEPERM
 	VPERM	D0,D0,D0,BEPERM
 
-	.insn	rilu,0xc20e00000000,LEN,0x40	# clgfi LEN,0x40
+	clgfi	LEN,0x40
 	jl	.Ltail_vx
 
 	VAF	D2,D2,T2		# +K[3]+2
@@ -754,7 +754,7 @@
 	VPERM	C0,C1,C1,BEPERM
 	VPERM	D0,D1,D1,BEPERM
 
-	.insn	rilu,0xc20e00000000,LEN,0x40	# clgfi LEN,0x40
+	clgfi	LEN,0x40
 	jl	.Ltail_vx
 
 	VLM	A1,D1,0,INP,0
@@ -780,7 +780,7 @@
 	VPERM	C0,C2,C2,BEPERM
 	VPERM	D0,D2,D2,BEPERM
 
-	.insn	rilu,0xc20e00000000,LEN,0x40	# clgfi LEN,0x40
+	clgfi	LEN,0x40
 	jl	.Ltail_vx
 
 	VLM	A1,D1,0,INP,0
@@ -807,7 +807,7 @@
 	VPERM	C0,C3,C3,BEPERM
 	VPERM	D0,D3,D3,BEPERM
 
-	.insn	rilu,0xc20e00000000,LEN,0x40	# clgfi LEN,0x40
+	clgfi	LEN,0x40
 	jl	.Ltail_vx
 
 	VAF	D3,D2,T1		# K[3]+4
@@ -837,7 +837,7 @@
 	VPERM	C0,C4,C4,BEPERM
 	VPERM	D0,D4,D4,BEPERM
 
-	.insn	rilu,0xc20e00000000,LEN,0x40	# clgfi LEN,0x40
+	clgfi	LEN,0x40
 	jl	.Ltail_vx
 
 	VLM	A1,D1,0,INP,0
@@ -864,7 +864,7 @@
 	VPERM	C0,C5,C5,BEPERM
 	VPERM	D0,D5,D5,BEPERM
 
-	.insn	rilu,0xc20e00000000,LEN,0x40	# clgfi LEN,0x40
+	clgfi	LEN,0x40
 	jl	.Ltail_vx
 
 	VLM	A1,D1,0,INP,0
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index 78bb336..feaba12 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -160,7 +160,7 @@ struct hws_trailer_entry {
 /* Load program parameter */
 static inline void lpp(void *pp)
 {
-	asm volatile(".insn s,0xb2800000,0(%0)\n":: "a" (pp) : "memory");
+	asm volatile("lpp 0(%0)\n" :: "a" (pp) : "memory");
 }
 
 /* Query counter information */
@@ -169,7 +169,7 @@ static inline int qctri(struct cpumf_ctr_info *info)
 	int rc = -EINVAL;
 
 	asm volatile (
-		"0:	.insn	s,0xb28e0000,%1\n"
+		"0:	qctri	%1\n"
 		"1:	lhi	%0,0\n"
 		"2:\n"
 		EX_TABLE(1b, 2b)
@@ -183,7 +183,7 @@ static inline int lcctl(u64 ctl)
 	int cc;
 
 	asm volatile (
-		"	.insn	s,0xb2840000,%1\n"
+		"	lcctl	%1\n"
 		"	ipm	%0\n"
 		"	srl	%0,28\n"
 		: "=d" (cc) : "Q" (ctl) : "cc");
@@ -197,7 +197,7 @@ static inline int __ecctr(u64 ctr, u64 *content)
 	int cc;
 
 	asm volatile (
-		"	.insn	rre,0xb2e40000,%0,%2\n"
+		"	ecctr	%0,%2\n"
 		"	ipm	%1\n"
 		"	srl	%1,28\n"
 		: "=d" (_content), "=d" (cc) : "d" (ctr) : "cc");
@@ -247,7 +247,7 @@ static inline int qsi(struct hws_qsi_info_block *info)
 	int cc = 1;
 
 	asm volatile(
-		"0:	.insn	s,0xb2860000,%1\n"
+		"0:	qsi	%1\n"
 		"1:	lhi	%0,0\n"
 		"2:\n"
 		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
@@ -262,7 +262,7 @@ static inline int lsctl(struct hws_lsctl_request_block *req)
 
 	cc = 1;
 	asm volatile(
-		"0:	.insn	s,0xb2870000,0(%1)\n"
+		"0:	lsctl	0(%1)\n"
 		"1:	ipm	%0\n"
 		"	srl	%0,28\n"
 		"2:\n"
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index a3f26e3..9df6791 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -600,7 +600,7 @@ static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new
 	unsigned long address = (unsigned long)ptr | 1;
 
 	asm volatile(
-		"	.insn	rre,0xb98a0000,%[r1],%[address]"
+		"	cspg	%[r1],%[address]"
 		: [r1] "+&d" (r1.pair), "+m" (*ptr)
 		: [address] "d" (address)
 		: "cc");
@@ -1052,7 +1052,7 @@ static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
 	if (__builtin_constant_p(opt) && opt == 0) {
 		/* Invalidation + TLB flush for the pte */
 		asm volatile(
-			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
+			"	ipte	%[r1],%[r2],0,%[m4]"
 			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
 			  [m4] "i" (local));
 		return;
@@ -1061,7 +1061,7 @@ static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
 	/* Invalidate ptes with options + TLB flush of the ptes */
 	opt = opt | (asce & _ASCE_ORIGIN);
 	asm volatile(
-		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
+		"	ipte	%[r1],%[r2],%[r3],%[m4]"
 		: [r2] "+a" (address), [r3] "+a" (opt)
 		: [r1] "a" (pto), [m4] "i" (local) : "memory");
 }
@@ -1074,7 +1074,7 @@ static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
 	/* Invalidate a range of ptes + TLB flush of the ptes */
 	do {
 		asm volatile(
-			"       .insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
+			"	ipte %[r1],%[r2],%[r3],%[m4]"
 			: [r2] "+a" (address), [r3] "+a" (nr)
 			: [r1] "a" (pto), [m4] "i" (local) : "memory");
 	} while (nr != 255);
@@ -1535,7 +1535,7 @@ static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
 	if (__builtin_constant_p(opt) && opt == 0) {
 		/* flush without guest asce */
 		asm volatile(
-			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
+			"	idte	%[r1],0,%[r2],%[m4]"
 			: "+m" (*pmdp)
 			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
 			  [m4] "i" (local)
@@ -1543,7 +1543,7 @@ static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
 	} else {
 		/* flush with guest asce */
 		asm volatile(
-			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
+			"	idte	%[r1],%[r3],%[r2],%[m4]"
 			: "+m" (*pmdp)
 			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
 			  [r3] "a" (asce), [m4] "i" (local)
@@ -1562,7 +1562,7 @@ static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
 	if (__builtin_constant_p(opt) && opt == 0) {
 		/* flush without guest asce */
 		asm volatile(
-			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
+			"	idte	%[r1],0,%[r2],%[m4]"
 			: "+m" (*pudp)
 			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
 			  [m4] "i" (local)
@@ -1570,7 +1570,7 @@ static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
 	} else {
 		/* flush with guest asce */
 		asm volatile(
-			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
+			"	idte	%[r1],%[r3],%[r2],%[m4]"
 			: "+m" (*pudp)
 			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
 			  [r3] "a" (asce), [m4] "i" (local)
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 022cf09..84ec631 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -225,8 +225,7 @@ static inline unsigned long __ecag(unsigned int asi, unsigned char parm)
 {
 	unsigned long val;
 
-	asm volatile(".insn	rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
-		     : "=d" (val) : "a" (asi << 8 | parm));
+	asm volatile("ecag %0,0,0(%1)" : "=d" (val) : "a" (asi << 8 | parm));
 	return val;
 }
 
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index ca9a8ab..2cfce42 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -148,7 +148,7 @@ struct ptff_qui {
 	asm volatile(							\
 		"	lgr	0,%[reg0]\n"				\
 		"	lgr	1,%[reg1]\n"				\
-		"	.insn	e,0x0104\n"				\
+		"	ptff\n"						\
 		"	ipm	%[rc]\n"				\
 		"	srl	%[rc],28\n"				\
 		: [rc] "=&d" (rc), "+m" (*(struct addrtype *)reg1)	\
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 6448bb5..a6e2cd8 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -25,9 +25,7 @@ static inline void __tlb_flush_idte(unsigned long asce)
 	if (MACHINE_HAS_TLB_GUEST)
 		opt |= IDTE_GUEST_ASCE;
 	/* Global TLB flush for the mm */
-	asm volatile(
-		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
-		: : "a" (opt), "a" (asce) : "cc");
+	asm volatile("idte 0,%1,%0" : : "a" (opt), "a" (asce) : "cc");
 }
 
 /*
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 8f15e41..a601a51 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -226,7 +226,7 @@
 	aghi	%r3,__TASK_pid
 	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
 	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
-	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
+	ALTERNATIVE "", "lpp _LPP_OFFSET", 40
 	BR_EX	%r14
 ENDPROC(__switch_to)
 
@@ -648,7 +648,7 @@
 ENDPROC(mcck_int_handler)
 
 ENTRY(restart_int_handler)
-	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
+	ALTERNATIVE "", "lpp _LPP_OFFSET", 40
 	stg	%r15,__LC_SAVE_AREA_RESTART
 	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
 	jz	0f
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 5e1f7bc..1852d46 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -62,7 +62,7 @@ asm(
 	"	.align 16\n"
 	"ftrace_shared_hotpatch_trampoline_exrl:\n"
 	"	lmg	%r0,%r1,2(%r1)\n"
-	"	.insn	ril,0xc60000000000,%r0,0f\n" /* exrl */
+	"	exrl	%r0,0f\n"
 	"	j	.\n"
 	"0:	br	%r1\n"
 	"ftrace_shared_hotpatch_trampoline_exrl_end:\n"
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index a37f6fd..d7b3b19 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -45,7 +45,7 @@ static unsigned long raw_copy_from_user_key(void *to, const void __user *from,
 	tmp1 = -4096UL;
 	asm volatile(
 		"   lr	  0,%[spec]\n"
-		"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
+		"0: mvcos 0(%2),0(%1),%0\n"
 		"6: jz    4f\n"
 		"1: algr  %0,%3\n"
 		"   slgr  %1,%3\n"
@@ -56,7 +56,7 @@ static unsigned long raw_copy_from_user_key(void *to, const void __user *from,
 		"   slgr  %4,%1\n"
 		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
 		"   jnh   5f\n"
-		"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
+		"3: mvcos 0(%2),0(%1),%4\n"
 		"7: slgr  %0,%4\n"
 		"   j     5f\n"
 		"4: slgr  %0,%0\n"
@@ -104,7 +104,7 @@ static unsigned long raw_copy_to_user_key(void __user *to, const void *from,
 	tmp1 = -4096UL;
 	asm volatile(
 		"   lr	  0,%[spec]\n"
-		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
+		"0: mvcos 0(%1),0(%2),%0\n"
 		"6: jz    4f\n"
 		"1: algr  %0,%3\n"
 		"   slgr  %1,%3\n"
@@ -115,7 +115,7 @@ static unsigned long raw_copy_to_user_key(void __user *to, const void *from,
 		"   slgr  %4,%1\n"
 		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
 		"   jnh   5f\n"
-		"3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
+		"3: mvcos 0(%1),0(%2),%4\n"
 		"7: slgr  %0,%4\n"
 		"   j     5f\n"
 		"4: slgr  %0,%0\n"
@@ -155,7 +155,7 @@ unsigned long __clear_user(void __user *to, unsigned long size)
 	tmp1 = -4096UL;
 	asm volatile(
 		"   lr	  0,%[spec]\n"
-		"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
+		"0: mvcos 0(%1),0(%4),%0\n"
 		"   jz	  4f\n"
 		"1: algr  %0,%2\n"
 		"   slgr  %1,%2\n"
@@ -165,7 +165,7 @@ unsigned long __clear_user(void __user *to, unsigned long size)
 		"   slgr  %3,%1\n"
 		"   clgr  %0,%3\n"	/* copy crosses next page boundary? */
 		"   jnh	  5f\n"
-		"3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
+		"3: mvcos 0(%1),0(%4),%3\n"
 		"   slgr  %0,%3\n"
 		"   j	  5f\n"
 		"4: slgr  %0,%0\n"
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index a57224a..af03cac 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -1278,7 +1278,7 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
 static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
 {
 	asm volatile(
-		"	.insn	rrf,0xb98e0000,%0,%1,0,0"
+		"	idte	%0,0,%1"
 		: : "a" (asce), "a" (vaddr) : "cc", "memory");
 }