Merge branch 's390x-next-20220217' into 'master'

s390x: smp lib improvements and more

See merge request kvm-unit-tests/kvm-unit-tests!25
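
The bulk of this merge converts lib/s390x/smp from SIGP CPU addresses to
logical CPU indexes (the boot CPU is guaranteed to end up at index 0) and
adds the smp_sigp()/smp_sigp_retry() wrappers. As orientation, here is a
minimal sketch of how a test drives a secondary CPU through the new
interface; the function names and the abbreviated include list are
illustrative, while the smp_* calls, the struct psw fields and the index-0
restriction are taken from the changes below:

	#include <libcflat.h>
	#include <smp.h>
	#include <asm/arch_def.h>

	static void secondary_entry(void)
	{
		/* runs on the CPU with index 1; r14 is preset so returning loops back here */
	}

	static void drive_secondary_cpu(void)
	{
		struct psw psw;

		psw.mask = extract_psw_mask();	/* reuse the caller's PSW mask */
		psw.addr = (unsigned long)secondary_entry;

		smp_cpu_setup(1, psw);		/* CPU index 1, not a SIGP address */
		report(!smp_cpu_stopped(1), "secondary cpu is running");
		smp_cpu_stop(1);		/* stopping index 0 (the boot CPU) would be refused */
		smp_cpu_destroy(1);		/* frees the lowcore and stack again */
	}
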
diff --git a/lib/s390x/asm/uv.h b/lib/s390x/asm/uv.h
index 97c90e8..70bf65c 100644
--- a/lib/s390x/asm/uv.h
+++ b/lib/s390x/asm/uv.h
@@ -39,7 +39,7 @@
 #define UVC_CMD_VERIFY_IMG		0x0302
 #define UVC_CMD_CPU_RESET		0x0310
 #define UVC_CMD_CPU_RESET_INITIAL	0x0311
-#define UVC_CMD_PERF_CONF_CLEAR_RESET	0x0320
+#define UVC_CMD_PREPARE_RESET		0x0320
 #define UVC_CMD_CPU_RESET_CLEAR		0x0321
 #define UVC_CMD_CPU_SET_STATE		0x0330
 #define UVC_CMD_SET_UNSHARED_ALL	0x0340
@@ -66,7 +66,7 @@
 	BIT_UVC_CMD_CPU_RESET = 15,
 	BIT_UVC_CMD_CPU_RESET_INITIAL = 16,
 	BIT_UVC_CMD_CPU_SET_STATE = 17,
-	BIT_UVC_CMD_PREPARE_CLEAR_RESET = 18,
+	BIT_UVC_CMD_PREPARE_RESET = 18,
 	BIT_UVC_CMD_CPU_PERFORM_CLEAR_RESET = 19,
 	BIT_UVC_CMD_UNSHARE_ALL = 20,
 	BIT_UVC_CMD_PIN_PAGE_SHARED = 21,
diff --git a/lib/s390x/smp.c b/lib/s390x/smp.c
index b753eab..46e1b02 100644
--- a/lib/s390x/smp.c
+++ b/lib/s390x/smp.c
@@ -25,16 +25,32 @@
 #include "sclp.h"
 
 static struct cpu *cpus;
-static struct cpu *cpu0;
 static struct spinlock lock;
 
 extern void smp_cpu_setup_state(void);
 
+static void check_idx(uint16_t idx)
+{
+	assert(idx < smp_query_num_cpus());
+}
+
 int smp_query_num_cpus(void)
 {
 	return sclp_get_cpu_num();
 }
 
+int smp_sigp(uint16_t idx, uint8_t order, unsigned long parm, uint32_t *status)
+{
+	check_idx(idx);
+	return sigp(cpus[idx].addr, order, parm, status);
+}
+
+int smp_sigp_retry(uint16_t idx, uint8_t order, unsigned long parm, uint32_t *status)
+{
+	check_idx(idx);
+	return sigp_retry(cpus[idx].addr, order, parm, status);
+}
+
 struct cpu *smp_cpu_from_addr(uint16_t addr)
 {
 	int i, num = smp_query_num_cpus();
@@ -46,174 +62,183 @@
 	return NULL;
 }
 
-bool smp_cpu_stopped(uint16_t addr)
+struct cpu *smp_cpu_from_idx(uint16_t idx)
+{
+	check_idx(idx);
+	return &cpus[idx];
+}
+
+uint16_t smp_cpu_addr(uint16_t idx)
+{
+	check_idx(idx);
+	return cpus[idx].addr;
+}
+
+bool smp_cpu_stopped(uint16_t idx)
 {
 	uint32_t status;
 
-	if (sigp(addr, SIGP_SENSE, 0, &status) != SIGP_CC_STATUS_STORED)
+	if (smp_sigp(idx, SIGP_SENSE, 0, &status) != SIGP_CC_STATUS_STORED)
 		return false;
 	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
 }
 
-bool smp_sense_running_status(uint16_t addr)
+bool smp_sense_running_status(uint16_t idx)
 {
-	if (sigp(addr, SIGP_SENSE_RUNNING, 0, NULL) != SIGP_CC_STATUS_STORED)
+	if (smp_sigp(idx, SIGP_SENSE_RUNNING, 0, NULL) != SIGP_CC_STATUS_STORED)
 		return true;
 	/* Status stored condition code is equivalent to cpu not running. */
 	return false;
 }
 
-static int smp_cpu_stop_nolock(uint16_t addr, bool store)
+static int smp_cpu_stop_nolock(uint16_t idx, bool store)
 {
-	struct cpu *cpu;
 	uint8_t order = store ? SIGP_STOP_AND_STORE_STATUS : SIGP_STOP;
 
-	cpu = smp_cpu_from_addr(addr);
-	if (!cpu || cpu == cpu0)
+	/* refuse to work on the boot CPU */
+	if (idx == 0)
 		return -1;
 
-	if (sigp_retry(addr, order, 0, NULL))
+	if (smp_sigp_retry(idx, order, 0, NULL))
 		return -1;
 
-	while (!smp_cpu_stopped(addr))
+	while (!smp_cpu_stopped(idx))
 		mb();
-	cpu->active = false;
+	/* idx has already been checked by the smp_* functions called above */
+	cpus[idx].active = false;
 	return 0;
 }
 
-int smp_cpu_stop(uint16_t addr)
+int smp_cpu_stop(uint16_t idx)
 {
 	int rc;
 
 	spin_lock(&lock);
-	rc = smp_cpu_stop_nolock(addr, false);
+	rc = smp_cpu_stop_nolock(idx, false);
 	spin_unlock(&lock);
 	return rc;
 }
 
-int smp_cpu_stop_store_status(uint16_t addr)
+int smp_cpu_stop_store_status(uint16_t idx)
 {
 	int rc;
 
 	spin_lock(&lock);
-	rc = smp_cpu_stop_nolock(addr, true);
+	rc = smp_cpu_stop_nolock(idx, true);
 	spin_unlock(&lock);
 	return rc;
 }
 
-static int smp_cpu_restart_nolock(uint16_t addr, struct psw *psw)
+static int smp_cpu_restart_nolock(uint16_t idx, struct psw *psw)
 {
 	int rc;
-	struct cpu *cpu = smp_cpu_from_addr(addr);
 
-	if (!cpu)
-		return -1;
+	check_idx(idx);
 	if (psw) {
-		cpu->lowcore->restart_new_psw.mask = psw->mask;
-		cpu->lowcore->restart_new_psw.addr = psw->addr;
+		cpus[idx].lowcore->restart_new_psw.mask = psw->mask;
+		cpus[idx].lowcore->restart_new_psw.addr = psw->addr;
 	}
 	/*
 	 * Stop the cpu, so we don't have a race between a running cpu
 	 * and the restart in the test that checks if the cpu is
 	 * running after the restart.
 	 */
-	smp_cpu_stop_nolock(addr, false);
-	rc = sigp(addr, SIGP_RESTART, 0, NULL);
+	smp_cpu_stop_nolock(idx, false);
+	rc = smp_sigp(idx, SIGP_RESTART, 0, NULL);
 	if (rc)
 		return rc;
 	/*
 	 * The order has been accepted, but the actual restart may not
 	 * have been performed yet, so wait until the cpu is running.
 	 */
-	while (smp_cpu_stopped(addr))
+	while (smp_cpu_stopped(idx))
 		mb();
-	cpu->active = true;
+	cpus[idx].active = true;
 	return 0;
 }
 
-int smp_cpu_restart(uint16_t addr)
+int smp_cpu_restart(uint16_t idx)
 {
 	int rc;
 
 	spin_lock(&lock);
-	rc = smp_cpu_restart_nolock(addr, NULL);
+	rc = smp_cpu_restart_nolock(idx, NULL);
 	spin_unlock(&lock);
 	return rc;
 }
 
-int smp_cpu_start(uint16_t addr, struct psw psw)
+int smp_cpu_start(uint16_t idx, struct psw psw)
 {
 	int rc;
 
 	spin_lock(&lock);
-	rc = smp_cpu_restart_nolock(addr, &psw);
+	rc = smp_cpu_restart_nolock(idx, &psw);
 	spin_unlock(&lock);
 	return rc;
 }
 
-int smp_cpu_destroy(uint16_t addr)
+int smp_cpu_destroy(uint16_t idx)
 {
-	struct cpu *cpu;
 	int rc;
 
 	spin_lock(&lock);
-	rc = smp_cpu_stop_nolock(addr, false);
+	rc = smp_cpu_stop_nolock(idx, false);
 	if (!rc) {
-		cpu = smp_cpu_from_addr(addr);
-		free_pages(cpu->lowcore);
-		free_pages(cpu->stack);
-		cpu->lowcore = (void *)-1UL;
-		cpu->stack = (void *)-1UL;
+		free_pages(cpus[idx].lowcore);
+		free_pages(cpus[idx].stack);
+		cpus[idx].lowcore = (void *)-1UL;
+		cpus[idx].stack = (void *)-1UL;
 	}
 	spin_unlock(&lock);
 	return rc;
 }
 
-int smp_cpu_setup(uint16_t addr, struct psw psw)
+static int smp_cpu_setup_nolock(uint16_t idx, struct psw psw)
 {
 	struct lowcore *lc;
-	struct cpu *cpu;
-	int rc = -1;
 
-	spin_lock(&lock);
+	if (cpus[idx].active)
+		return -1;
 
-	if (!cpus)
-		goto out;
-
-	cpu = smp_cpu_from_addr(addr);
-
-	if (!cpu || cpu->active)
-		goto out;
-
-	sigp_retry(cpu->addr, SIGP_INITIAL_CPU_RESET, 0, NULL);
+	smp_sigp_retry(idx, SIGP_INITIAL_CPU_RESET, 0, NULL);
 
 	lc = alloc_pages_flags(1, AREA_DMA31);
-	cpu->lowcore = lc;
-	memset(lc, 0, PAGE_SIZE * 2);
-	sigp_retry(cpu->addr, SIGP_SET_PREFIX, (unsigned long )lc, NULL);
+	cpus[idx].lowcore = lc;
+	smp_sigp_retry(idx, SIGP_SET_PREFIX, (unsigned long)lc, NULL);
 
 	/* Copy all exception psws. */
-	memcpy(lc, cpu0->lowcore, 512);
+	memcpy(lc, cpus[0].lowcore, 512);
 
 	/* Setup stack */
-	cpu->stack = (uint64_t *)alloc_pages(2);
+	cpus[idx].stack = (uint64_t *)alloc_pages(2);
 
 	/* Start without DAT and any other mask bits. */
-	cpu->lowcore->sw_int_psw.mask = psw.mask;
-	cpu->lowcore->sw_int_psw.addr = psw.addr;
-	cpu->lowcore->sw_int_grs[14] = psw.addr;
-	cpu->lowcore->sw_int_grs[15] = (uint64_t)cpu->stack + (PAGE_SIZE * 4);
+	lc->sw_int_psw.mask = psw.mask;
+	lc->sw_int_psw.addr = psw.addr;
+	lc->sw_int_grs[14] = psw.addr;
+	lc->sw_int_grs[15] = (uint64_t)cpus[idx].stack + (PAGE_SIZE * 4);
 	lc->restart_new_psw.mask = PSW_MASK_64;
 	lc->restart_new_psw.addr = (uint64_t)smp_cpu_setup_state;
 	lc->sw_int_crs[0] = BIT_ULL(CTL0_AFP);
 
 	/* Start processing */
-	smp_cpu_restart_nolock(addr, NULL);
+	smp_cpu_restart_nolock(idx, NULL);
 	/* Wait until the cpu has finished setup and started the provided psw */
 	while (lc->restart_new_psw.addr != psw.addr)
 		mb();
-	rc = 0;
-out:
+
+	return 0;
+}
+
+int smp_cpu_setup(uint16_t idx, struct psw psw)
+{
+	int rc = -1;
+
+	spin_lock(&lock);
+	if (cpus) {
+		check_idx(idx);
+		rc = smp_cpu_setup_nolock(idx, psw);
+	}
 	spin_unlock(&lock);
 	return rc;
 }
@@ -251,15 +276,27 @@
 	if (num > 1)
 		printf("SMP: Initializing, found %d cpus\n", num);
 
-	cpus = calloc(num, sizeof(cpus));
+	cpus = calloc(num, sizeof(*cpus));
 	for (i = 0; i < num; i++) {
 		cpus[i].addr = entry[i].address;
 		cpus[i].active = false;
+		/*
+		 * Fill in the boot CPU. If the boot CPU is not at index 0,
+		 * swap it with the one at index 0, so that the boot CPU
+		 * always has index 0. If the boot CPU was already at
+		 * index 0, a few redundant assignments are performed, but
+		 * everything still works correctly.
+		 * Note that there is no guarantee that the list of CPUs
+		 * returned by the Read SCP Info command is in any
+		 * particular order, or that its order stays consistent
+		 * across multiple invocations.
+		 */
 		if (entry[i].address == cpu0_addr) {
-			cpu0 = &cpus[i];
-			cpu0->stack = stackptr;
-			cpu0->lowcore = (void *)0;
-			cpu0->active = true;
+			cpus[i].addr = cpus[0].addr;
+			cpus[0].addr = cpu0_addr;
+			cpus[0].stack = stackptr;
+			cpus[0].lowcore = (void *)0;
+			cpus[0].active = true;
 		}
 	}
 	spin_unlock(&lock);
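
To illustrate the boot-CPU swap in smp_setup() above with invented numbers:
if Read SCP Info reports the CPU addresses {5, 7, 0} and the boot CPU's
address (cpu0_addr) is 7, then iteration i == 1 matches, assigns the old
cpus[0].addr (5) to cpus[1].addr, and turns cpus[0] into the boot CPU entry
(address 7, stackptr as stack, lowcore 0, active). The result is {7, 5, 0},
with the boot CPU at index 0 and the remaining entries in whatever order
Read SCP Info produced them.
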
diff --git a/lib/s390x/smp.h b/lib/s390x/smp.h
index a2609f1..1e69a7d 100644
--- a/lib/s390x/smp.h
+++ b/lib/s390x/smp.h
@@ -37,15 +37,19 @@
 
 int smp_query_num_cpus(void);
 struct cpu *smp_cpu_from_addr(uint16_t addr);
-bool smp_cpu_stopped(uint16_t addr);
-bool smp_sense_running_status(uint16_t addr);
-int smp_cpu_restart(uint16_t addr);
-int smp_cpu_start(uint16_t addr, struct psw psw);
-int smp_cpu_stop(uint16_t addr);
-int smp_cpu_stop_store_status(uint16_t addr);
-int smp_cpu_destroy(uint16_t addr);
-int smp_cpu_setup(uint16_t addr, struct psw psw);
+struct cpu *smp_cpu_from_idx(uint16_t idx);
+uint16_t smp_cpu_addr(uint16_t idx);
+bool smp_cpu_stopped(uint16_t idx);
+bool smp_sense_running_status(uint16_t idx);
+int smp_cpu_restart(uint16_t idx);
+int smp_cpu_start(uint16_t idx, struct psw psw);
+int smp_cpu_stop(uint16_t idx);
+int smp_cpu_stop_store_status(uint16_t idx);
+int smp_cpu_destroy(uint16_t idx);
+int smp_cpu_setup(uint16_t idx, struct psw psw);
 void smp_teardown(void);
 void smp_setup(void);
+int smp_sigp(uint16_t idx, uint8_t order, unsigned long parm, uint32_t *status);
+int smp_sigp_retry(uint16_t idx, uint8_t order, unsigned long parm, uint32_t *status);
 
 #endif
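
A small sketch of how the retained address-based lookup relates to the new
index-based helpers, assuming the guest has at least two CPUs; the wrapping
function and include list are illustrative, the three smp_cpu_* helpers are
the ones declared above:

	#include <libcflat.h>
	#include <smp.h>

	static void lookup_example(void)
	{
		/*
		 * Index 1 is simply the second entry in the library's CPU
		 * list; its SIGP address is whatever Read SCP Info reported
		 * for that entry.
		 */
		uint16_t addr = smp_cpu_addr(1);

		/* both lookups resolve to the same struct cpu */
		assert(smp_cpu_from_addr(addr) == smp_cpu_from_idx(1));
	}
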
diff --git a/lib/s390x/stsi.h b/lib/s390x/stsi.h
new file mode 100644
index 0000000..bebc492
--- /dev/null
+++ b/lib/s390x/stsi.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Structures used to Store System Information
+ *
+ * Copyright IBM Corp. 2022
+ */
+
+#ifndef _S390X_STSI_H_
+#define _S390X_STSI_H_
+
+struct sysinfo_3_2_2 {
+	uint8_t reserved[31];
+	uint8_t count;
+	struct {
+		uint8_t reserved2[4];
+		uint16_t total_cpus;
+		uint16_t conf_cpus;
+		uint16_t standby_cpus;
+		uint16_t reserved_cpus;
+		uint8_t name[8];
+		uint32_t caf;
+		uint8_t cpi[16];
+		uint8_t reserved5[3];
+		uint8_t ext_name_encoding;
+		uint32_t reserved3;
+		uint8_t uuid[16];
+	} vm[8];
+	uint8_t reserved4[1504];
+	uint8_t ext_names[8][256];
+};
+
+#endif  /* _S390X_STSI_H_ */
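
The new header only carries the structure layout. A minimal sketch of
filling it in, assuming the caller already runs at function code 3 (i.e.
under a hypervisor), as vm_is_kvm() below checks, and using the stsi(),
alloc_page(), printf() and free_page() helpers that appear elsewhere in this
merge; the function name and include list are illustrative:

	#include <libcflat.h>
	#include <alloc_page.h>
	#include <asm/arch_def.h>
	#include <stsi.h>

	static void show_first_vm_block(void)
	{
		struct sysinfo_3_2_2 *data = alloc_page();

		assert(data);
		/* STSI 3.2.2 describes the virtual machines at this level */
		if (!stsi(data, 3, 2, 2))
			printf("VM blocks: %u, CPUs configured for the first one: %u\n",
			       data->count, data->vm[0].conf_cpus);
		free_page(data);
	}
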
diff --git a/lib/s390x/vm.c b/lib/s390x/vm.c
index a5b9286..33fb1c4 100644
--- a/lib/s390x/vm.c
+++ b/lib/s390x/vm.c
@@ -12,6 +12,7 @@
 #include <alloc_page.h>
 #include <asm/arch_def.h>
 #include "vm.h"
+#include "stsi.h"
 
 /**
  * Detect whether we are running with TCG (instead of KVM)
@@ -26,9 +27,13 @@
 	if (initialized)
 		return is_tcg;
 
+	if (stsi_get_fc() != 3) {
+		initialized = true;
+		return is_tcg;
+	}
+
 	buf = alloc_page();
-	if (!buf)
-		return false;
+	assert(buf);
 
 	if (stsi(buf, 1, 1, 1))
 		goto out;
@@ -43,3 +48,45 @@
 	free_page(buf);
 	return is_tcg;
 }
+
+/**
+ * Detect whether we are running with KVM
+ */
+bool vm_is_kvm(void)
+{
+	/* EBCDIC for "KVM/" */
+	const uint8_t kvm_ebcdic[] = { 0xd2, 0xe5, 0xd4, 0x61 };
+	static bool initialized;
+	static bool is_kvm;
+	struct sysinfo_3_2_2 *stsi_322;
+
+	if (initialized)
+		return is_kvm;
+
+	if (stsi_get_fc() != 3 || vm_is_tcg()) {
+		initialized = true;
+		return is_kvm;
+	}
+
+	stsi_322 = alloc_page();
+	assert(stsi_322);
+
+	if (stsi(stsi_322, 3, 2, 2))
+		goto out;
+
+	/*
+	 * If the control program identifier starts with "KVM/" in
+	 * EBCDIC, we are running under KVM.
+	 */
+	is_kvm = !memcmp(&stsi_322->vm[0].cpi, kvm_ebcdic, sizeof(kvm_ebcdic));
+	initialized = true;
+out:
+	free_page(stsi_322);
+	return is_kvm;
+}
+
+bool vm_is_lpar(void)
+{
+	return stsi_get_fc() == 2;
+}
+
diff --git a/lib/s390x/vm.h b/lib/s390x/vm.h
index 7abba0c..4456b48 100644
--- a/lib/s390x/vm.h
+++ b/lib/s390x/vm.h
@@ -9,5 +9,7 @@
 #define _S390X_VM_H_
 
 bool vm_is_tcg(void);
+bool vm_is_kvm(void);
+bool vm_is_lpar(void);
 
 #endif  /* _S390X_VM_H_ */
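
How a test might use the new predicates to gate hypervisor-specific checks;
the function, the skip message and the include list are illustrative,
vm_is_kvm() and report_skip() are helpers added by or already used in this
merge:

	#include <libcflat.h>
	#include "vm.h"

	static void test_kvm_only_behaviour(void)
	{
		if (!vm_is_kvm()) {
			report_skip("not running under KVM");
			return;
		}
		/* checks that only make sense when KVM is the hypervisor */
	}
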
diff --git a/s390x/cpumodel.c b/s390x/cpumodel.c
index 67bb654..23ccf84 100644
--- a/s390x/cpumodel.c
+++ b/s390x/cpumodel.c
@@ -116,14 +116,15 @@
 
 	report_prefix_push("dependency");
 	for (i = 0; i < ARRAY_SIZE(dep); i++) {
+		report_prefix_pushf("%d implies %d", dep[i].facility, dep[i].implied);
 		if (test_facility(dep[i].facility)) {
 			report_xfail(dep[i].expected_tcg_fail && vm_is_tcg(),
 				     test_facility(dep[i].implied),
-				     "%d implies %d",
-				     dep[i].facility, dep[i].implied);
+				     "implication not correct");
 		} else {
 			report_skip("facility %d not present", dep[i].facility);
 		}
+		report_prefix_pop();
 	}
 	report_prefix_pop();
 
diff --git a/s390x/firq.c b/s390x/firq.c
index fb9a290..b4b3542 100644
--- a/s390x/firq.c
+++ b/s390x/firq.c
@@ -44,24 +44,13 @@
 		goto out;
 	}
 
-	if (stap()) {
-		report_skip("need to start on CPU #0");
-		goto out;
-	}
-
-	/*
-	 * We want CPU #2 to be stopped. This should be the case at this
-	 * point, however, we want to sense if it even exists as well.
-	 */
+	/* Stop CPU #2. It must succeed because we have at least 3 CPUs */
 	ret = smp_cpu_stop(2);
-	if (ret) {
-		report_skip("CPU #2 not found");
-		goto out;
-	}
+	assert(!ret);
 
 	/*
-	 * We're going to perform an SCLP service call but expect
-	 * the interrupt on CPU #1 while it is in the wait state.
+	 * We're going to perform an SCLP service call but expect the
+	 * interrupt on CPU #1 while it is in the wait state.
 	 */
 	sclp_mark_busy();
 
@@ -69,11 +58,8 @@
 	psw.mask = extract_psw_mask();
 	psw.addr = (unsigned long)wait_for_sclp_int;
 	ret = smp_cpu_setup(1, psw);
-	if (ret) {
-		sclp_clear_busy();
-		report_skip("cpu #1 not found");
-		goto out;
-	}
+	/* This must not fail because we have at least 3 CPUs */
+	assert(!ret);
 
 	/*
 	 * We'd have to jump through some hoops to sense e.g., via SIGP
diff --git a/s390x/skrf.c b/s390x/skrf.c
index ca4efbf..b9a2e90 100644
--- a/s390x/skrf.c
+++ b/s390x/skrf.c
@@ -176,7 +176,7 @@
 	wait_for_flag();
 	set_flag(0);
 
-	sigp(1, SIGP_EXTERNAL_CALL, 0, NULL);
+	smp_sigp(1, SIGP_EXTERNAL_CALL, 0, NULL);
 	wait_for_flag();
 	smp_cpu_stop(1);
 	report_prefix_pop();
diff --git a/s390x/smp.c b/s390x/smp.c
index 1bbe4c3..068ac74 100644
--- a/s390x/smp.c
+++ b/s390x/smp.c
@@ -56,7 +56,7 @@
  */
 static void test_restart(void)
 {
-	struct cpu *cpu = smp_cpu_from_addr(1);
+	struct cpu *cpu = smp_cpu_from_idx(1);
 	struct lowcore *lc = cpu->lowcore;
 
 	lc->restart_new_psw.mask = extract_psw_mask();
@@ -92,7 +92,7 @@
 
 static void test_stop_store_status(void)
 {
-	struct cpu *cpu = smp_cpu_from_addr(1);
+	struct cpu *cpu = smp_cpu_from_idx(1);
 	struct lowcore *lc = (void *)0x0;
 
 	report_prefix_push("stop store status");
@@ -129,7 +129,7 @@
 
 	report_prefix_push("running");
 	smp_cpu_restart(1);
-	sigp(1, SIGP_STORE_STATUS_AT_ADDRESS, (uintptr_t)status, &r);
+	smp_sigp(1, SIGP_STORE_STATUS_AT_ADDRESS, (uintptr_t)status, &r);
 	report(r == SIGP_STATUS_INCORRECT_STATE, "incorrect state");
 	report(!memcmp(status, (void *)status + PAGE_SIZE, PAGE_SIZE),
 	       "status not written");
@@ -138,7 +138,7 @@
 	memset(status, 0, PAGE_SIZE);
 	report_prefix_push("stopped");
 	smp_cpu_stop(1);
-	sigp(1, SIGP_STORE_STATUS_AT_ADDRESS, (uintptr_t)status, NULL);
+	smp_sigp(1, SIGP_STORE_STATUS_AT_ADDRESS, (uintptr_t)status, NULL);
 	while (!status->prefix) { mb(); }
 	report_pass("status written");
 	free_pages(status);
@@ -176,7 +176,7 @@
 	smp_cpu_start(1, psw);
 	wait_for_flag();
 	set_flag(0);
-	sigp(1, SIGP_EXTERNAL_CALL, 0, NULL);
+	smp_sigp(1, SIGP_EXTERNAL_CALL, 0, NULL);
 	wait_for_flag();
 	smp_cpu_stop(1);
 	report_prefix_pop();
@@ -210,7 +210,7 @@
 	smp_cpu_start(1, psw);
 	wait_for_flag();
 	set_flag(0);
-	sigp(1, SIGP_EMERGENCY_SIGNAL, 0, NULL);
+	smp_sigp(1, SIGP_EMERGENCY_SIGNAL, 0, NULL);
 	wait_for_flag();
 	smp_cpu_stop(1);
 	report_prefix_pop();
@@ -253,8 +253,8 @@
 	smp_cpu_start(1, psw);
 	wait_for_flag();
 
-	sigp_retry(1, SIGP_INITIAL_CPU_RESET, 0, NULL);
-	sigp(1, SIGP_STORE_STATUS_AT_ADDRESS, (uintptr_t)status, NULL);
+	smp_sigp_retry(1, SIGP_INITIAL_CPU_RESET, 0, NULL);
+	smp_sigp(1, SIGP_STORE_STATUS_AT_ADDRESS, (uintptr_t)status, NULL);
 
 	report_prefix_push("clear");
 	report(!status->psw.mask && !status->psw.addr, "psw");
@@ -299,11 +299,11 @@
 	psw.addr = (unsigned long)test_func;
 
 	report_prefix_push("cpu reset");
-	sigp(1, SIGP_EMERGENCY_SIGNAL, 0, NULL);
-	sigp(1, SIGP_EXTERNAL_CALL, 0, NULL);
+	smp_sigp(1, SIGP_EMERGENCY_SIGNAL, 0, NULL);
+	smp_sigp(1, SIGP_EXTERNAL_CALL, 0, NULL);
 	smp_cpu_start(1, psw);
 
-	sigp_retry(1, SIGP_CPU_RESET, 0, NULL);
+	smp_sigp_retry(1, SIGP_CPU_RESET, 0, NULL);
 	report(smp_cpu_stopped(1), "cpu stopped");
 
 	set_flag(0);
diff --git a/s390x/stsi.c b/s390x/stsi.c
index 391f884..dccc53e 100644
--- a/s390x/stsi.c
+++ b/s390x/stsi.c
@@ -13,27 +13,8 @@
 #include <asm/asm-offsets.h>
 #include <asm/interrupt.h>
 #include <smp.h>
+#include <stsi.h>
 
-struct stsi_322 {
-	uint8_t reserved[31];
-	uint8_t count;
-	struct {
-		uint8_t reserved2[4];
-		uint16_t total_cpus;
-		uint16_t conf_cpus;
-		uint16_t standby_cpus;
-		uint16_t reserved_cpus;
-		uint8_t name[8];
-		uint32_t caf;
-		uint8_t cpi[16];
-		uint8_t reserved5[3];
-		uint8_t ext_name_encoding;
-		uint32_t reserved3;
-		uint8_t uuid[16];
-	} vm[8];
-	uint8_t reserved4[1504];
-	uint8_t ext_names[8][256];
-};
 static uint8_t pagebuf[PAGE_SIZE * 2] __attribute__((aligned(PAGE_SIZE * 2)));
 
 static void test_specs(void)
@@ -91,7 +72,7 @@
 	/* EBCDIC for "KVM/" */
 	const uint8_t cpi_kvm[] = { 0xd2, 0xe5, 0xd4, 0x61 };
 	const char vm_name_ext[] = "kvm-unit-test";
-	struct stsi_322 *data = (void *)pagebuf;
+	struct sysinfo_3_2_2 *data = (void *)pagebuf;
 
 	report_prefix_push("3.2.2");
 
diff --git a/s390x/unittests.cfg b/s390x/unittests.cfg
index 054560c..1600e71 100644
--- a/s390x/unittests.cfg
+++ b/s390x/unittests.cfg
@@ -113,12 +113,26 @@
 [spec_ex-sie]
 file = spec_ex-sie.elf
 
-[firq-linear-cpu-ids]
+[firq-linear-cpu-ids-kvm]
+file = firq.elf
+timeout = 20
+extra_params = -smp 1,maxcpus=3 -device host-s390x-cpu,core-id=1 -device host-s390x-cpu,core-id=2
+accel = kvm
+
+[firq-nonlinear-cpu-ids-kvm]
+file = firq.elf
+timeout = 20
+extra_params = -smp 1,maxcpus=3 -device host-s390x-cpu,core-id=2 -device host-s390x-cpu,core-id=1
+accel = kvm
+
+[firq-linear-cpu-ids-tcg]
 file = firq.elf
 timeout = 20
 extra_params = -smp 1,maxcpus=3 -cpu qemu -device qemu-s390x-cpu,core-id=1 -device qemu-s390x-cpu,core-id=2
+accel = tcg
 
-[firq-nonlinear-cpu-ids]
+[firq-nonlinear-cpu-ids-tcg]
 file = firq.elf
 timeout = 20
 extra_params = -smp 1,maxcpus=3 -cpu qemu -device qemu-s390x-cpu,core-id=2 -device qemu-s390x-cpu,core-id=1
+accel = tcg
diff --git a/s390x/uv-guest.c b/s390x/uv-guest.c
index 44ad215..99120ca 100644
--- a/s390x/uv-guest.c
+++ b/s390x/uv-guest.c
@@ -142,7 +142,7 @@
 	{ "verify", UVC_CMD_VERIFY_IMG, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_VERIFY_IMG },
 	{ "cpu reset", UVC_CMD_CPU_RESET, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_CPU_RESET },
 	{ "cpu initial reset", UVC_CMD_CPU_RESET_INITIAL, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_CPU_RESET_INITIAL },
-	{ "conf clear reset", UVC_CMD_PERF_CONF_CLEAR_RESET, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_PREPARE_CLEAR_RESET },
+	{ "prepare clear reset", UVC_CMD_PREPARE_RESET, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_PREPARE_RESET },
 	{ "cpu clear reset", UVC_CMD_CPU_RESET_CLEAR, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_CPU_PERFORM_CLEAR_RESET },
 	{ "cpu set state", UVC_CMD_CPU_SET_STATE, sizeof(struct uv_cb_cpu_set_state), BIT_UVC_CMD_CPU_SET_STATE },
 	{ "pin shared", UVC_CMD_PIN_PAGE_SHARED, sizeof(struct uv_cb_cfs), BIT_UVC_CMD_PIN_PAGE_SHARED },
diff --git a/s390x/uv-host.c b/s390x/uv-host.c
index 92a4106..de2e485 100644
--- a/s390x/uv-host.c
+++ b/s390x/uv-host.c
@@ -55,7 +55,7 @@
 	{ "verify", UVC_CMD_VERIFY_IMG, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_VERIFY_IMG },
 	{ "cpu reset", UVC_CMD_CPU_RESET, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_CPU_RESET },
 	{ "cpu initial reset", UVC_CMD_CPU_RESET_INITIAL, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_CPU_RESET_INITIAL },
-	{ "conf clear reset", UVC_CMD_PERF_CONF_CLEAR_RESET, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_PREPARE_CLEAR_RESET },
+	{ "conf clear reset", UVC_CMD_PREPARE_RESET, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_PREPARE_RESET },
 	{ "cpu clear reset", UVC_CMD_CPU_RESET_CLEAR, sizeof(struct uv_cb_nodata), BIT_UVC_CMD_CPU_PERFORM_CLEAR_RESET },
 	{ "cpu set state", UVC_CMD_CPU_SET_STATE, sizeof(struct uv_cb_cpu_set_state), BIT_UVC_CMD_CPU_SET_STATE },
 	{ "pin shared", UVC_CMD_PIN_PAGE_SHARED, sizeof(struct uv_cb_cfs), BIT_UVC_CMD_PIN_PAGE_SHARED },