riscv: sbi: Add tests for HSM extension

Add tests for all of the HSM extension functions. The tests ensure
that each function behaves as described in the SBI specification,
covering hart start, hart stop, retentive and non-retentive suspend,
and hart status queries. The number of harts exercised can be capped
with the SBI_MAX_CPUS environment variable, and the state-transition
timeout can be tuned with SBI_HSM_TIMER_DURATION.

Signed-off-by: James Raphael Tiovalen <jamestiotio@gmail.com>
[Made the changes requested in the last review.]
Signed-off-by: Andrew Jones <andrew.jones@linux.dev>
diff --git a/riscv/sbi-asm.S b/riscv/sbi-asm.S
index aa44e1a..923c2ce 100644
--- a/riscv/sbi-asm.S
+++ b/riscv/sbi-asm.S
@@ -12,6 +12,70 @@
 
 .section .text
 
+/*
+ * sbi_hsm_check
+ *   a0 and a1 are set by SBI HSM start/suspend
+ *   s1 is the address of the results array
+ * Doesn't return.
+ *
+ * This function is only called from HSM start and on resumption
+ * from HSM suspend, which means we can do whatever we like with
+ * all registers. So, to avoid complicated register agreements with
+ * the other assembly functions we call, we always use the saved
+ * registers for anything that must be maintained across calls.
+ */
+#define HSM_RESULTS_ARRAY	s1
+#define HSM_RESULTS_MAP		s2
+#define HSM_CPU_INDEX		s3
+.balign 4
+sbi_hsm_check:
+	li	HSM_RESULTS_MAP, 0
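+	/* a1 holds the opaque parameter, a {magic, hartid} array; check the magic */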
+	REG_L	t0, ASMARR(a1, SBI_HSM_MAGIC_IDX)
+	li	t1, SBI_HSM_MAGIC
+	bne	t0, t1, 1f
+	ori	HSM_RESULTS_MAP, HSM_RESULTS_MAP, SBI_HSM_TEST_MAGIC_A1
+1:	REG_L	t0, ASMARR(a1, SBI_HSM_HARTID_IDX)
+	bne	a0, t0, 2f
+	ori	HSM_RESULTS_MAP, HSM_RESULTS_MAP, SBI_HSM_TEST_HARTID_A0
+2:	csrr	t0, CSR_SATP
+	bnez	t0, 3f
+	ori	HSM_RESULTS_MAP, HSM_RESULTS_MAP, SBI_HSM_TEST_SATP
+3:	csrr	t0, CSR_SSTATUS
+	andi	t0, t0, SR_SIE
+	bnez	t0, 4f
+	ori	HSM_RESULTS_MAP, HSM_RESULTS_MAP, SBI_HSM_TEST_SIE
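+	/* Map our hartid (a0) to a cpu index */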
+4:	call	hartid_to_cpu
+	mv	HSM_CPU_INDEX, a0
+	li	t0, -1
+	bne	HSM_CPU_INDEX, t0, 6f
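+	/* Unknown hartid: there's nowhere to report results, so just spin */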
+5:	pause
+	j	5b
+6:	ori	HSM_RESULTS_MAP, HSM_RESULTS_MAP, SBI_HSM_TEST_DONE
+	add	t0, HSM_RESULTS_ARRAY, HSM_CPU_INDEX
+	sb	HSM_RESULTS_MAP, 0(t0)
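+	/* Results are published; wait for our stop flag, then stop this hart */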
+	la	t1, sbi_hsm_stop_hart
+	add	t1, t1, HSM_CPU_INDEX
+7:	lb	t0, 0(t1)
+	pause
+	beqz	t0, 7b
+	li	a7, 0x48534d	/* SBI_EXT_HSM */
+	li	a6, 1		/* SBI_EXT_HSM_HART_STOP */
+	ecall
+8:	pause
+	j	8b
+
+.balign 4
+.global sbi_hsm_check_hart_start
+sbi_hsm_check_hart_start:
+	la	HSM_RESULTS_ARRAY, sbi_hsm_hart_start_checks
+	j	sbi_hsm_check
+
+.balign 4
+.global sbi_hsm_check_non_retentive_suspend
+sbi_hsm_check_non_retentive_suspend:
+	la	HSM_RESULTS_ARRAY, sbi_hsm_non_retentive_hart_suspend_checks
+	j	sbi_hsm_check
+
 .balign 4
 restore_csrs:
 	REG_L	a1, ASMARR(a0, SBI_CSR_SSTATUS_IDX)
diff --git a/riscv/sbi-tests.h b/riscv/sbi-tests.h
index 211e0cb..ce12996 100644
--- a/riscv/sbi-tests.h
+++ b/riscv/sbi-tests.h
@@ -2,6 +2,18 @@
 #ifndef _RISCV_SBI_TESTS_H_
 #define _RISCV_SBI_TESTS_H_
 
+#define SBI_HSM_TEST_DONE	(1 << 0)
+#define SBI_HSM_TEST_MAGIC_A1	(1 << 1)
+#define SBI_HSM_TEST_HARTID_A0	(1 << 2)
+#define SBI_HSM_TEST_SATP	(1 << 3)
+#define SBI_HSM_TEST_SIE	(1 << 4)
+
+#define SBI_HSM_MAGIC		0x453
+
+#define SBI_HSM_MAGIC_IDX	0
+#define SBI_HSM_HARTID_IDX	1
+#define SBI_HSM_NUM_OF_PARAMS	2
+
 #define SBI_SUSP_MAGIC_IDX	0
 #define SBI_SUSP_CSRS_IDX	1
 #define SBI_SUSP_HARTID_IDX	2
diff --git a/riscv/sbi.c b/riscv/sbi.c
index 97a5c45..6f4ddaf 100644
--- a/riscv/sbi.c
+++ b/riscv/sbi.c
@@ -21,8 +21,10 @@
 #include <asm/delay.h>
 #include <asm/io.h>
 #include <asm/mmu.h>
+#include <asm/page.h>
 #include <asm/processor.h>
 #include <asm/sbi.h>
+#include <asm/setup.h>
 #include <asm/smp.h>
 #include <asm/timer.h>
 
@@ -58,6 +60,11 @@
 	return sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE_BYTE, byte, 0, 0, 0, 0, 0);
 }
 
+static struct sbiret sbi_hart_suspend(uint32_t suspend_type, unsigned long resume_addr, unsigned long opaque)
+{
+	return sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, suspend_type, resume_addr, opaque, 0, 0, 0);
+}
+
 static struct sbiret sbi_system_suspend(uint32_t sleep_type, unsigned long resume_addr, unsigned long opaque)
 {
 	return sbi_ecall(SBI_EXT_SUSP, 0, sleep_type, resume_addr, opaque, 0, 0, 0);
@@ -71,7 +78,8 @@
 static void stop_cpu(void *data)
 {
 	struct sbiret ret = sbi_hart_stop();
-	assert_msg(0, "cpu%d failed to stop with sbiret.error %ld", smp_processor_id(), ret.error);
+	assert_msg(0, "cpu%d (hartid = %lx) failed to stop with sbiret.error %ld",
+		   smp_processor_id(), current_thread_info()->hartid, ret.error);
 }
 
 static int rand_online_cpu(prng_state *ps)
@@ -512,6 +520,574 @@
 	report_prefix_pop();
 }
 
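+/* Flags and result bitmaps shared with the check code in riscv/sbi-asm.S */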
+unsigned char sbi_hsm_stop_hart[NR_CPUS];
+unsigned char sbi_hsm_hart_start_checks[NR_CPUS];
+unsigned char sbi_hsm_non_retentive_hart_suspend_checks[NR_CPUS];
+
+static const char * const hart_state_str[] = {
+	[SBI_EXT_HSM_STARTED] = "started",
+	[SBI_EXT_HSM_STOPPED] = "stopped",
+	[SBI_EXT_HSM_SUSPENDED] = "suspended",
+};
+
+struct hart_state_transition_info {
+	enum sbi_ext_hsm_sid initial_state;
+	enum sbi_ext_hsm_sid intermediate_state;
+	enum sbi_ext_hsm_sid final_state;
+};
+static cpumask_t sbi_hsm_started_hart_checks;
+static bool sbi_hsm_invalid_hartid_check;
+static bool sbi_hsm_timer_fired;
+extern void sbi_hsm_check_hart_start(void);
+extern void sbi_hsm_check_non_retentive_suspend(void);
+
+static void hsm_timer_irq_handler(struct pt_regs *regs)
+{
+	timer_stop();
+	sbi_hsm_timer_fired = true;
+}
+
+static void hsm_timer_setup(void)
+{
+	install_irq_handler(IRQ_S_TIMER, hsm_timer_irq_handler);
+	timer_irq_enable();
+}
+
+static void hsm_timer_teardown(void)
+{
+	timer_irq_disable();
+	install_irq_handler(IRQ_S_TIMER, NULL);
+}
+
+static void hart_check_already_started(void *data)
+{
+	struct sbiret ret;
+	unsigned long hartid = current_thread_info()->hartid;
+	int me = smp_processor_id();
+
+	ret = sbi_hart_start(hartid, virt_to_phys(&start_cpu), 0);
+
+	if (ret.error == SBI_ERR_ALREADY_AVAILABLE)
+		cpumask_set_cpu(me, &sbi_hsm_started_hart_checks);
+}
+
+static void hart_start_invalid_hartid(void *data)
+{
+	struct sbiret ret;
+
+	ret = sbi_hart_start(-1UL, virt_to_phys(&start_cpu), 0);
+
+	if (ret.error == SBI_ERR_INVALID_PARAM)
+		sbi_hsm_invalid_hartid_check = true;
+}
+
+static void hart_retentive_suspend(void *data)
+{
+	unsigned long hartid = current_thread_info()->hartid;
+	struct sbiret ret = sbi_hart_suspend(SBI_EXT_HSM_HART_SUSPEND_RETENTIVE, 0, 0);
+
+	if (ret.error)
+		report_fail("failed to retentive suspend cpu%d (hartid = %lx) (error=%ld)",
+			    smp_processor_id(), hartid, ret.error);
+}
+
+static void hart_non_retentive_suspend(void *data)
+{
+	unsigned long hartid = current_thread_info()->hartid;
+	unsigned long params[] = {
+		[SBI_HSM_MAGIC_IDX] = SBI_HSM_MAGIC,
+		[SBI_HSM_HARTID_IDX] = hartid,
+	};
+	struct sbiret ret = sbi_hart_suspend(SBI_EXT_HSM_HART_SUSPEND_NON_RETENTIVE,
+					     virt_to_phys(&sbi_hsm_check_non_retentive_suspend),
+					     virt_to_phys(params));
+
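+	/* Non-retentive suspend resumes at resume_addr on success, so returning here means the ecall failed */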
+	report_fail("failed to non-retentive suspend cpu%d (hartid = %lx) (error=%ld)",
+		    smp_processor_id(), hartid, ret.error);
+}
+
+/* This test function is only run on RV64 to verify that the upper bits of suspend_type are ignored */
+static void hart_retentive_suspend_with_msb_set(void *data)
+{
+	unsigned long hartid = current_thread_info()->hartid;
+	unsigned long suspend_type = SBI_EXT_HSM_HART_SUSPEND_RETENTIVE | (_AC(1, UL) << (__riscv_xlen - 1));
+	struct sbiret ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, suspend_type, 0, 0, 0, 0, 0);
+
+	if (ret.error)
+		report_fail("failed to retentive suspend cpu%d (hartid = %lx) with MSB set (error=%ld)",
+			    smp_processor_id(), hartid, ret.error);
+}
+
+/* This test function is only run on RV64 to verify that the upper bits of suspend_type are ignored */
+static void hart_non_retentive_suspend_with_msb_set(void *data)
+{
+	unsigned long hartid = current_thread_info()->hartid;
+	unsigned long suspend_type = SBI_EXT_HSM_HART_SUSPEND_NON_RETENTIVE | (_AC(1, UL) << (__riscv_xlen - 1));
+	unsigned long params[] = {
+		[SBI_HSM_MAGIC_IDX] = SBI_HSM_MAGIC,
+		[SBI_HSM_HARTID_IDX] = hartid,
+	};
+
+	struct sbiret ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, suspend_type,
+				      virt_to_phys(&sbi_hsm_check_non_retentive_suspend), virt_to_phys(params),
+				      0, 0, 0);
+
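+	/* As above, returning here means the suspend call failed */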
+	report_fail("failed to non-retentive suspend cpu%d (hartid = %lx) with MSB set (error=%ld)",
+		    smp_processor_id(), hartid, ret.error);
+}
+
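+/* Wait for the hart to leave @status, or for the timer to fire; returns true if the state was left in time */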
+static bool hart_wait_on_status(unsigned long hartid, enum sbi_ext_hsm_sid status, unsigned long duration)
+{
+	struct sbiret ret;
+
+	sbi_hsm_timer_fired = false;
+	timer_start(duration);
+
+	ret = sbi_hart_get_status(hartid);
+
+	while (!ret.error && ret.value == status && !sbi_hsm_timer_fired) {
+		cpu_relax();
+		ret = sbi_hart_get_status(hartid);
+	}
+
+	timer_stop();
+
+	if (sbi_hsm_timer_fired)
+		report_info("timer fired while waiting on status %u for hartid %lx", status, hartid);
+	else if (ret.error)
+		report_fail("got %ld while waiting on status %u for hartid %lx", ret.error, status, hartid);
+
+	return !sbi_hsm_timer_fired && !ret.error;
+}
+
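+/*
+ * Walk @mask waiting for each hart to progress through @states, i.e. to
+ * leave the initial and intermediate states and land in the final state.
+ * Returns the number of harts observed in the final state.
+ */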
+static int hart_wait_state_transition(cpumask_t *mask, unsigned long duration,
+				      struct hart_state_transition_info *states)
+{
+	struct sbiret ret;
+	unsigned long hartid;
+	int cpu, count = 0;
+
+	for_each_cpu(cpu, mask) {
+		hartid = cpus[cpu].hartid;
+		if (!hart_wait_on_status(hartid, states->initial_state, duration))
+			continue;
+		if (!hart_wait_on_status(hartid, states->intermediate_state, duration))
+			continue;
+
+		ret = sbi_hart_get_status(hartid);
+		if (ret.error)
+			report_info("hartid %lx get status failed (error=%ld)", hartid, ret.error);
+		else if (ret.value != states->final_state)
+			report_info("hartid %lx status is not '%s' (ret.value=%ld)", hartid,
+				    hart_state_str[states->final_state], ret.value);
+		else
+			count++;
+	}
+
+	return count;
+}
+
+static void hart_wait_until_idle(cpumask_t *mask, unsigned long duration)
+{
+	sbi_hsm_timer_fired = false;
+	timer_start(duration);
+
+	while (!cpumask_subset(mask, &cpu_idle_mask) && !sbi_hsm_timer_fired)
+		cpu_relax();
+
+	timer_stop();
+
+	if (sbi_hsm_timer_fired)
+		report_info("hsm timer fired before all cpus became idle");
+}
+
+static void check_hsm(void)
+{
+	struct sbiret ret;
+	unsigned long hartid;
+	cpumask_t secondary_cpus_mask, mask;
+	struct hart_state_transition_info transition_states;
+	bool ipi_unavailable = false;
+	int cpu, me = smp_processor_id();
+	int max_cpus = getenv("SBI_MAX_CPUS") ? strtol(getenv("SBI_MAX_CPUS"), NULL, 0) : nr_cpus;
+	unsigned long hsm_timer_duration = getenv("SBI_HSM_TIMER_DURATION")
+					 ? strtol(getenv("SBI_HSM_TIMER_DURATION"), NULL, 0) : 200000;
+	unsigned long sbi_hsm_hart_start_params[NR_CPUS * SBI_HSM_NUM_OF_PARAMS];
+	int count, check;
+
+	max_cpus = MIN(MIN(max_cpus, nr_cpus), cpumask_weight(&cpu_present_mask));
+
+	report_prefix_push("hsm");
+
+	if (!sbi_probe(SBI_EXT_HSM)) {
+		report_skip("hsm extension not available");
+		report_prefix_pop();
+		return;
+	}
+
+	report_prefix_push("hart_get_status");
+
+	hartid = current_thread_info()->hartid;
+	ret = sbi_hart_get_status(hartid);
+
+	if (ret.error) {
+		report_fail("failed to get status of current hart (error=%ld)", ret.error);
+		report_prefix_popn(2);
+		return;
+	} else if (ret.value != SBI_EXT_HSM_STARTED) {
+		report_fail("current hart is not started (ret.value=%ld)", ret.value);
+		report_prefix_popn(2);
+		return;
+	}
+
+	report_pass("status of current hart is started");
+
+	report_prefix_pop();
+
+	if (max_cpus < 2) {
+		report_skip("no other cpus to run the remaining hsm tests on");
+		report_prefix_pop();
+		return;
+	}
+
+	report_prefix_push("hart_stop");
+
+	cpumask_copy(&secondary_cpus_mask, &cpu_present_mask);
+	cpumask_clear_cpu(me, &secondary_cpus_mask);
+	hsm_timer_setup();
+	local_irq_enable();
+
+	/* Don't assume that previous tests have cleaned up and stopped the secondary harts */
+	on_cpumask_async(&secondary_cpus_mask, stop_cpu, NULL);
+
+	transition_states = (struct hart_state_transition_info) {
+		.initial_state = SBI_EXT_HSM_STARTED,
+		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
+		.final_state = SBI_EXT_HSM_STOPPED,
+	};
+	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);
+
+	report(count == max_cpus - 1, "all secondary harts stopped");
+
+	report_prefix_pop();
+
+	report_prefix_push("hart_start");
+
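+	/* Start each secondary hart at the assembly check code, passing {magic, hartid} via opaque */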
+	for_each_cpu(cpu, &secondary_cpus_mask) {
+		hartid = cpus[cpu].hartid;
+		sbi_hsm_hart_start_params[cpu * SBI_HSM_NUM_OF_PARAMS + SBI_HSM_MAGIC_IDX] = SBI_HSM_MAGIC;
+		sbi_hsm_hart_start_params[cpu * SBI_HSM_NUM_OF_PARAMS + SBI_HSM_HARTID_IDX] = hartid;
+
+		ret = sbi_hart_start(hartid, virt_to_phys(&sbi_hsm_check_hart_start),
+				     virt_to_phys(&sbi_hsm_hart_start_params[cpu * SBI_HSM_NUM_OF_PARAMS]));
+		if (ret.error) {
+			report_fail("failed to start test on cpu%d (hartid = %lx) (error=%ld)", cpu, hartid, ret.error);
+			continue;
+		}
+	}
+
+	transition_states = (struct hart_state_transition_info) {
+		.initial_state = SBI_EXT_HSM_STOPPED,
+		.intermediate_state = SBI_EXT_HSM_START_PENDING,
+		.final_state = SBI_EXT_HSM_STARTED,
+	};
+	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);
+	check = 0;
+
+	for_each_cpu(cpu, &secondary_cpus_mask) {
+		hartid = cpus[cpu].hartid;
+		sbi_hsm_timer_fired = false;
+		timer_start(hsm_timer_duration);
+
+		while (!(READ_ONCE(sbi_hsm_hart_start_checks[cpu]) & SBI_HSM_TEST_DONE) && !sbi_hsm_timer_fired)
+			cpu_relax();
+
+		timer_stop();
+
+		if (sbi_hsm_timer_fired) {
+			report_info("hsm timer fired before cpu%d (hartid = %lx) is done with start checks", cpu, hartid);
+			continue;
+		}
+
+		if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_SATP))
+			report_info("satp is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
+		else if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_SIE))
+			report_info("sstatus.SIE is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
+		else if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_MAGIC_A1))
+			report_info("a1 does not start with magic for test on cpu%d (hartid = %lx)", cpu, hartid);
+		else if (!(sbi_hsm_hart_start_checks[cpu] & SBI_HSM_TEST_HARTID_A0))
+			report_info("a0 is not hartid for test on cpu%d (hartid = %lx)", cpu, hartid);
+		else
+			check++;
+	}
+
+	report(count == max_cpus - 1, "all secondary harts started");
+	report(check == max_cpus - 1, "all secondary harts have expected register values after hart start");
+
+	report_prefix_pop();
+
+	report_prefix_push("hart_stop");
+
+	memset(sbi_hsm_stop_hart, 1, sizeof(sbi_hsm_stop_hart));
+
+	transition_states = (struct hart_state_transition_info) {
+		.initial_state = SBI_EXT_HSM_STARTED,
+		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
+		.final_state = SBI_EXT_HSM_STOPPED,
+	};
+	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);
+
+	report(count == max_cpus - 1, "all secondary harts stopped");
+
+	/* Reset the stop flags so that we can reuse them after suspension tests */
+	memset(sbi_hsm_stop_hart, 0, sizeof(sbi_hsm_stop_hart));
+
+	report_prefix_pop();
+
+	report_prefix_push("hart_start");
+
+	/* Select just one secondary cpu to run the invalid hartid test */
+	on_cpu(cpumask_next(-1, &secondary_cpus_mask), hart_start_invalid_hartid, NULL);
+
+	report(sbi_hsm_invalid_hartid_check, "secondary hart refuses to start with invalid hartid");
+
+	on_cpumask_async(&secondary_cpus_mask, hart_check_already_started, NULL);
+
+	transition_states = (struct hart_state_transition_info) {
+		.initial_state = SBI_EXT_HSM_STOPPED,
+		.intermediate_state = SBI_EXT_HSM_START_PENDING,
+		.final_state = SBI_EXT_HSM_STARTED,
+	};
+	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);
+
+	report(count == max_cpus - 1, "all secondary harts started");
+
+	hart_wait_until_idle(&secondary_cpus_mask, hsm_timer_duration);
+
+	report(cpumask_weight(&sbi_hsm_started_hart_checks) == max_cpus - 1,
+	       "all secondary harts are already started");
+
+	report_prefix_pop();
+
+	report_prefix_push("hart_suspend");
+
+	if (!sbi_probe(SBI_EXT_IPI)) {
+		report_skip("skipping suspension tests since ipi extension is unavailable");
+		report_prefix_pop();
+		ipi_unavailable = true;
+		goto sbi_hsm_hart_stop_tests;
+	}
+
+	on_cpumask_async(&secondary_cpus_mask, hart_retentive_suspend, NULL);
+
+	transition_states = (struct hart_state_transition_info) {
+		.initial_state = SBI_EXT_HSM_STARTED,
+		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
+		.final_state = SBI_EXT_HSM_SUSPENDED,
+	};
+	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);
+
+	report(count == max_cpus - 1, "all secondary harts retentive suspended");
+
+	/* Ignore the return value since we check the status of each hart anyway */
+	sbi_send_ipi_cpumask(&secondary_cpus_mask);
+
+	transition_states = (struct hart_state_transition_info) {
+		.initial_state = SBI_EXT_HSM_SUSPENDED,
+		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
+		.final_state = SBI_EXT_HSM_STARTED,
+	};
+	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);
+
+	report(count == max_cpus - 1, "all secondary harts retentive resumed");
+
+	hart_wait_until_idle(&secondary_cpus_mask, hsm_timer_duration);
+
+	on_cpumask_async(&secondary_cpus_mask, hart_non_retentive_suspend, NULL);
+
+	transition_states = (struct hart_state_transition_info) {
+		.initial_state = SBI_EXT_HSM_STARTED,
+		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
+		.final_state = SBI_EXT_HSM_SUSPENDED,
+	};
+	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);
+
+	report(count == max_cpus - 1, "all secondary harts non-retentive suspended");
+
+	/* Ignore the return value since we check the status of each hart anyway */
+	sbi_send_ipi_cpumask(&secondary_cpus_mask);
+
+	transition_states = (struct hart_state_transition_info) {
+		.initial_state = SBI_EXT_HSM_SUSPENDED,
+		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
+		.final_state = SBI_EXT_HSM_STARTED,
+	};
+	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);
+	check = 0;
+
+	for_each_cpu(cpu, &secondary_cpus_mask) {
+		hartid = cpus[cpu].hartid;
+		sbi_hsm_timer_fired = false;
+		timer_start(hsm_timer_duration);
+
+		while (!(READ_ONCE(sbi_hsm_non_retentive_hart_suspend_checks[cpu]) & SBI_HSM_TEST_DONE) && !sbi_hsm_timer_fired)
+			cpu_relax();
+
+		timer_stop();
+
+		if (sbi_hsm_timer_fired) {
+			report_info("hsm timer fired before cpu%d (hartid = %lx) is done with non-retentive resume checks", cpu, hartid);
+			continue;
+		}
+
+		if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SATP))
+			report_info("satp is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
+		else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SIE))
+			report_info("sstatus.SIE is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
+		else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_MAGIC_A1))
+			report_info("a1 does not start with magic for test on cpu%d (hartid = %lx)", cpu, hartid);
+		else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_HARTID_A0))
+			report_info("a0 is not hartid for test on cpu%d (hartid = %lx)", cpu, hartid);
+		else
+			check++;
+	}
+
+	report(count == max_cpus - 1, "all secondary harts non-retentive resumed");
+	report(check == max_cpus - 1, "all secondary harts have expected register values after non-retentive resume");
+
+	report_prefix_pop();
+
+sbi_hsm_hart_stop_tests:
+	report_prefix_push("hart_stop");
+
+	if (ipi_unavailable)
+		on_cpumask_async(&secondary_cpus_mask, stop_cpu, NULL);
+	else
+		memset(sbi_hsm_stop_hart, 1, sizeof(sbi_hsm_stop_hart));
+
+	transition_states = (struct hart_state_transition_info) {
+		.initial_state = SBI_EXT_HSM_STARTED,
+		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
+		.final_state = SBI_EXT_HSM_STOPPED,
+	};
+	count = hart_wait_state_transition(&secondary_cpus_mask, hsm_timer_duration, &transition_states);
+
+	report(count == max_cpus - 1, "all secondary harts stopped");
+
+	report_prefix_pop();
+
+	if (__riscv_xlen == 32 || ipi_unavailable) {
+		local_irq_disable();
+		hsm_timer_teardown();
+		report_prefix_pop();
+		return;
+	}
+
+	report_prefix_push("hart_suspend");
+
+	/* Select one secondary cpu to run the suspend tests with the MSB of suspend_type set */
+	cpu = cpumask_next(-1, &secondary_cpus_mask);
+	hartid = cpus[cpu].hartid;
+	cpumask_clear(&mask);
+	cpumask_set_cpu(cpu, &mask);
+
+	/* Boot up the secondary cpu and let it proceed to the idle loop */
+	on_cpu(cpu, start_cpu, NULL);
+
+	on_cpu_async(cpu, hart_retentive_suspend_with_msb_set, NULL);
+
+	transition_states = (struct hart_state_transition_info) {
+		.initial_state = SBI_EXT_HSM_STARTED,
+		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
+		.final_state = SBI_EXT_HSM_SUSPENDED,
+	};
+	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);
+
+	report(count, "secondary hart retentive suspended with MSB set");
+
+	/* Ignore the return value since we manually validate the status of the hart anyway */
+	sbi_send_ipi_cpu(cpu);
+
+	transition_states = (struct hart_state_transition_info) {
+		.initial_state = SBI_EXT_HSM_SUSPENDED,
+		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
+		.final_state = SBI_EXT_HSM_STARTED,
+	};
+	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);
+
+	report(count, "secondary hart retentive resumed with MSB set");
+
+	/* Reset these flags so that we can reuse them for the non-retentive suspension test */
+	sbi_hsm_stop_hart[cpu] = 0;
+	sbi_hsm_non_retentive_hart_suspend_checks[cpu] = 0;
+
+	on_cpu_async(cpu, hart_non_retentive_suspend_with_msb_set, NULL);
+
+	transition_states = (struct hart_state_transition_info) {
+		.initial_state = SBI_EXT_HSM_STARTED,
+		.intermediate_state = SBI_EXT_HSM_SUSPEND_PENDING,
+		.final_state = SBI_EXT_HSM_SUSPENDED,
+	};
+	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);
+
+	report(count, "secondary hart non-retentive suspended with MSB set");
+
+	/* Ignore the return value since we manually validate the status of the hart anyway */
+	sbi_send_ipi_cpu(cpu);
+
+	transition_states = (struct hart_state_transition_info) {
+		.initial_state = SBI_EXT_HSM_SUSPENDED,
+		.intermediate_state = SBI_EXT_HSM_RESUME_PENDING,
+		.final_state = SBI_EXT_HSM_STARTED,
+	};
+	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);
+	check = 0;
+
+	if (count) {
+		sbi_hsm_timer_fired = false;
+		timer_start(hsm_timer_duration);
+
+		while (!(READ_ONCE(sbi_hsm_non_retentive_hart_suspend_checks[cpu]) & SBI_HSM_TEST_DONE) && !sbi_hsm_timer_fired)
+			cpu_relax();
+
+		timer_stop();
+
+		if (sbi_hsm_timer_fired) {
+			report_info("hsm timer fired before cpu%d (hartid = %lx) is done with non-retentive resume checks", cpu, hartid);
+		} else {
+			if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SATP))
+				report_info("satp is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
+			else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_SIE))
+				report_info("sstatus.SIE is not zero for test on cpu%d (hartid = %lx)", cpu, hartid);
+			else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_MAGIC_A1))
+				report_info("a1 does not start with magic for test on cpu%d (hartid = %lx)", cpu, hartid);
+			else if (!(sbi_hsm_non_retentive_hart_suspend_checks[cpu] & SBI_HSM_TEST_HARTID_A0))
+				report_info("a0 is not hartid for test on cpu%d (hartid = %lx)", cpu, hartid);
+			else
+				check = 1;
+		}
+	}
+
+	report(count, "secondary hart non-retentive resumed with MSB set");
+	report(check, "secondary hart has expected register values after non-retentive resume with MSB set");
+
+	report_prefix_pop();
+
+	report_prefix_push("hart_stop");
+
+	sbi_hsm_stop_hart[cpu] = 1;
+
+	transition_states = (struct hart_state_transition_info) {
+		.initial_state = SBI_EXT_HSM_STARTED,
+		.intermediate_state = SBI_EXT_HSM_STOP_PENDING,
+		.final_state = SBI_EXT_HSM_STOPPED,
+	};
+	count = hart_wait_state_transition(&mask, hsm_timer_duration, &transition_states);
+
+	report(count, "secondary hart stopped after suspension tests with MSB set");
+
+	local_irq_disable();
+	hsm_timer_teardown();
+	report_prefix_popn(2);
+}
+
 #define DBCN_WRITE_TEST_STRING		"DBCN_WRITE_TEST_STRING\n"
 #define DBCN_WRITE_BYTE_TEST_BYTE	((u8)'a')
 
@@ -872,6 +1448,7 @@
 	check_base();
 	check_time();
 	check_ipi();
+	check_hsm();
 	check_dbcn();
 	check_susp();