Merge tag 's390x-2020-04-30' of https://github.com/davidhildenbrand/kvm-unit-tests

New maintainer, reviewer, and cc list. New STSI test. Lots of minor fixes
and cleanups.

diff --git a/x86/access.c b/x86/access.c
index 86d8a72..068b4dc 100644
--- a/x86/access.c
+++ b/x86/access.c
@@ -16,7 +16,7 @@
 static int invalid_mask;
 static int page_table_levels;
 
-#define PT_BASE_ADDR_MASK ((pt_element_t)((((pt_element_t)1 << 40) - 1) & PAGE_MASK))
+#define PT_BASE_ADDR_MASK ((pt_element_t)((((pt_element_t)1 << 36) - 1) & PAGE_MASK))
 #define PT_PSE_BASE_ADDR_MASK (PT_BASE_ADDR_MASK & ~(1ull << 21))
 
 #define CR0_WP_MASK (1UL << 16)
@@ -47,6 +47,7 @@
     AC_PTE_DIRTY_BIT,
     AC_PTE_NX_BIT,
     AC_PTE_BIT51_BIT,
+    AC_PTE_BIT36_BIT,
 
     AC_PDE_PRESENT_BIT,
     AC_PDE_WRITABLE_BIT,
@@ -56,6 +57,7 @@
     AC_PDE_PSE_BIT,
     AC_PDE_NX_BIT,
     AC_PDE_BIT51_BIT,
+    AC_PDE_BIT36_BIT,
     AC_PDE_BIT13_BIT,
 
     AC_PKU_AD_BIT,
@@ -82,6 +84,7 @@
 #define AC_PTE_DIRTY_MASK     (1 << AC_PTE_DIRTY_BIT)
 #define AC_PTE_NX_MASK        (1 << AC_PTE_NX_BIT)
 #define AC_PTE_BIT51_MASK     (1 << AC_PTE_BIT51_BIT)
+#define AC_PTE_BIT36_MASK     (1 << AC_PTE_BIT36_BIT)
 
 #define AC_PDE_PRESENT_MASK   (1 << AC_PDE_PRESENT_BIT)
 #define AC_PDE_WRITABLE_MASK  (1 << AC_PDE_WRITABLE_BIT)
@@ -91,6 +94,7 @@
 #define AC_PDE_PSE_MASK       (1 << AC_PDE_PSE_BIT)
 #define AC_PDE_NX_MASK        (1 << AC_PDE_NX_BIT)
 #define AC_PDE_BIT51_MASK     (1 << AC_PDE_BIT51_BIT)
+#define AC_PDE_BIT36_MASK     (1 << AC_PDE_BIT36_BIT)
 #define AC_PDE_BIT13_MASK     (1 << AC_PDE_BIT13_BIT)
 
 #define AC_PKU_AD_MASK        (1 << AC_PKU_AD_BIT)
@@ -115,6 +119,7 @@
     [AC_PTE_DIRTY_BIT] = "pte.d",
     [AC_PTE_NX_BIT] = "pte.nx",
     [AC_PTE_BIT51_BIT] = "pte.51",
+    [AC_PTE_BIT36_BIT] = "pte.36",
     [AC_PDE_PRESENT_BIT] = "pde.p",
     [AC_PDE_ACCESSED_BIT] = "pde.a",
     [AC_PDE_WRITABLE_BIT] = "pde.rw",
@@ -123,6 +128,7 @@
     [AC_PDE_PSE_BIT] = "pde.pse",
     [AC_PDE_NX_BIT] = "pde.nx",
     [AC_PDE_BIT51_BIT] = "pde.51",
+    [AC_PDE_BIT36_BIT] = "pde.36",
     [AC_PDE_BIT13_BIT] = "pde.13",
     [AC_PKU_AD_BIT] = "pkru.ad",
     [AC_PKU_WD_BIT] = "pkru.wd",
@@ -295,6 +301,14 @@
     if (!F(AC_PDE_PSE) && F(AC_PDE_BIT13))
         return false;
 
+    /*
+     * Shorten the test by not testing too many reserved-bit combinations
+     * at once.
+     */
+    if ((F(AC_PDE_BIT51) + F(AC_PDE_BIT36) + F(AC_PDE_BIT13)) > 1)
+        return false;
+    if ((F(AC_PTE_BIT51) + F(AC_PTE_BIT36)) > 1)
+        return false;
+
     return true;
 }
 
@@ -381,7 +395,7 @@
         at->ignore_pde = PT_ACCESSED_MASK;
 
     pde_valid = F(AC_PDE_PRESENT)
-        && !F(AC_PDE_BIT51) && !F(AC_PDE_BIT13)
+        && !F(AC_PDE_BIT51) && !F(AC_PDE_BIT36) && !F(AC_PDE_BIT13)
         && !(F(AC_PDE_NX) && !F(AC_CPU_EFER_NX));
 
     if (!pde_valid) {
@@ -407,7 +421,7 @@
     at->expected_pde |= PT_ACCESSED_MASK;
 
     pte_valid = F(AC_PTE_PRESENT)
-        && !F(AC_PTE_BIT51)
+        && !F(AC_PTE_BIT51) && !F(AC_PTE_BIT36)
         && !(F(AC_PTE_NX) && !F(AC_CPU_EFER_NX));
 
     if (!pte_valid) {
@@ -516,6 +530,8 @@
 		pte |= PT64_NX_MASK;
 	    if (F(AC_PDE_BIT51))
 		pte |= 1ull << 51;
+	    if (F(AC_PDE_BIT36))
+		pte |= 1ull << 36;
 	    if (F(AC_PDE_BIT13))
 		pte |= 1ull << 13;
 	    at->pdep = &vroot[index];
@@ -538,6 +554,8 @@
 		pte |= PT64_NX_MASK;
 	    if (F(AC_PTE_BIT51))
 		pte |= 1ull << 51;
+	    if (F(AC_PTE_BIT36))
+		pte |= 1ull << 36;
 	    at->ptep = &vroot[index];
 	    break;
 	}
@@ -736,6 +754,7 @@
 	    strcat(line, " ");
 	    strcat(line, ac_names[i]);
 	}
+
     strcat(line, ": ");
     printf("%s", line);
 }
@@ -945,6 +964,15 @@
     shadow_cr4 = read_cr4();
     shadow_efer = rdmsr(MSR_EFER);
 
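+    /*
+     * Address bits below MAXPHYADDR are not reserved, so setting them
+     * cannot be expected to fault; mark those test bits invalid.
+     */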
+    if (cpuid_maxphyaddr() >= 52) {
+        invalid_mask |= AC_PDE_BIT51_MASK;
+        invalid_mask |= AC_PTE_BIT51_MASK;
+    }
+    if (cpuid_maxphyaddr() >= 37) {
+        invalid_mask |= AC_PDE_BIT36_MASK;
+        invalid_mask |= AC_PTE_BIT36_MASK;
+    }
+
     if (this_cpu_has(X86_FEATURE_PKU)) {
         set_cr4_pke(1);
         set_cr4_pke(0);
diff --git a/x86/ioapic.c b/x86/ioapic.c
index 3106531..f315e4b 100644
--- a/x86/ioapic.c
+++ b/x86/ioapic.c
@@ -504,7 +504,8 @@
 	test_ioapic_level_tmr(true);
 	test_ioapic_edge_tmr(true);
 
-	test_ioapic_physical_destination_mode();
+	if (cpu_count() > 1)
+		test_ioapic_physical_destination_mode();
 	if (cpu_count() > 3)
 		test_ioapic_logical_destination_mode();
 
diff --git a/x86/msr.c b/x86/msr.c
index de2cb6d..f7539c3 100644
--- a/x86/msr.c
+++ b/x86/msr.c
@@ -16,6 +16,7 @@
 
 
 #define addr_64 0x0000123456789abcULL
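+// On 32-bit builds the SYSENTER MSR tests must use a value that fits
+// in an unsigned long, hence this truncated variant of addr_64.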
+#define addr_ul ((unsigned long)addr_64)
 
 struct msr_info msr_info[] =
 {
@@ -23,10 +24,10 @@
       .val_pairs = {{ .valid = 1, .value = 0x1234, .expected = 0x1234}}
     },
     { .index = 0x00000175, .name = "MSR_IA32_SYSENTER_ESP",
-      .val_pairs = {{ .valid = 1, .value = addr_64, .expected = addr_64}}
+      .val_pairs = {{ .valid = 1, .value = addr_ul, .expected = addr_ul}}
     },
     { .index = 0x00000176, .name = "IA32_SYSENTER_EIP",
-      .val_pairs = {{ .valid = 1, .value = addr_64, .expected = addr_64}}
+      .val_pairs = {{ .valid = 1, .value = addr_ul, .expected = addr_ul}}
     },
     { .index = 0x000001a0, .name = "MSR_IA32_MISC_ENABLE",
       // reserved: 1:2, 4:6, 8:10, 13:15, 17, 19:21, 24:33, 35:63
diff --git a/x86/svm_tests.c b/x86/svm_tests.c
index 5a571eb..2f53b8f 100644
--- a/x86/svm_tests.c
+++ b/x86/svm_tests.c
@@ -1316,6 +1316,11 @@
             return true;
         }
 
+        /* The guest is not woken up from HLT and RIP still points to it.  */
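+        /* HLT is a one-byte opcode; bumping RIP by one skips it. */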
+        if (get_test_stage(test) == 3) {
+            vmcb->save.rip++;
+        }
+
         irq_enable();
         asm volatile ("nop");
         irq_disable();
@@ -1475,6 +1480,7 @@
     if (!nmi_fired) {
         report(nmi_fired, "intercepted pending NMI not dispatched");
         set_test_stage(test, -1);
+        vmmcall();
     }
 
     set_test_stage(test, 3);
@@ -1501,6 +1507,9 @@
             return true;
         }
 
+        /* The guest is not woken up from HLT and RIP still points to it.  */
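+        /* HLT is a one-byte opcode; bumping RIP by one skips it. */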
+        vmcb->save.rip++;
+
         report(true, "NMI intercept while running guest");
         break;
 
@@ -1528,8 +1537,9 @@
 
 static void exc_inject_prepare(struct svm_test *test)
 {
-	handle_exception(DE_VECTOR, my_isr);
-	handle_exception(NMI_VECTOR, my_isr);
+    default_prepare(test);
+    handle_exception(DE_VECTOR, my_isr);
+    handle_exception(NMI_VECTOR, my_isr);
 }
 
 
diff --git a/x86/unittests.cfg b/x86/unittests.cfg
index d658bc8..bf0d02e 100644
--- a/x86/unittests.cfg
+++ b/x86/unittests.cfg
@@ -116,7 +116,7 @@
 [access]
 file = access.flat
 arch = x86_64
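+# Cap MAXPHYADDR at 36 so that both bit 36 and bit 51 are reserved bits.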
-extra_params = -cpu host
+extra_params = -cpu host,phys-bits=36
 
 [smap]
 file = smap.flat
diff --git a/x86/vmx.c b/x86/vmx.c
index 4c47eec..cbe6876 100644
--- a/x86/vmx.c
+++ b/x86/vmx.c
@@ -32,6 +32,7 @@
 #include "processor.h"
 #include "alloc_page.h"
 #include "vm.h"
+#include "vmalloc.h"
 #include "desc.h"
 #include "vmx.h"
 #include "msr.h"
@@ -387,6 +388,141 @@
 	free_page(vmcs);
 }
 
+ulong finish_fault;
+u8 sentinel;
+bool handler_called;
+
+static void pf_handler(struct ex_regs *regs)
+{
+	/*
+	 * check that RIP was not improperly advanced and that the
+	 * flags value was preserved.
+	 */
+	report(regs->rip < finish_fault, "RIP has not been advanced!");
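+	/*
+	 * SAHF loads AH into SF, ZF, AF, PF and CF (bits 7, 6, 4, 2
+	 * and 0); bit 1 of RFLAGS always reads as 1 and bits 3 and 5
+	 * as 0, hence the "| 2" and the 0xd7 mask below.
+	 */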
+	report(((u8)regs->rflags == ((sentinel | 2) & 0xd7)),
+	       "The low byte of RFLAGS was preserved!");
+	regs->rip = finish_fault;
+	handler_called = true;
+}
+
+static void prep_flags_test_env(void **vpage, struct vmcs **vmcs, handler *old)
+{
+	/*
+	 * get an unbacked address that will cause a #PF
+	 */
+	*vpage = alloc_vpage();
+
+	/*
+	 * set up VMCS so we have something to read from
+	 */
+	*vmcs = alloc_page();
+
+	memset(*vmcs, 0, PAGE_SIZE);
+	(*vmcs)->hdr.revision_id = basic.revision;
+	assert(!vmcs_clear(*vmcs));
+	assert(!make_vmcs_current(*vmcs));
+
+	*old = handle_exception(PF_VECTOR, &pf_handler);
+}
+
+static void test_read_sentinel(void)
+{
+	void *vpage;
+	struct vmcs *vmcs;
+	handler old;
+
+	prep_flags_test_env(&vpage, &vmcs, &old);
+
+	/*
+	 * set the proper label
+	 */
+	extern char finish_read_fault;
+
+	finish_fault = (ulong)&finish_read_fault;
+
+	/*
+	 * execute the vmread instruction that will cause a #PF
+	 */
+	handler_called = false;
+	asm volatile ("movb %[byte], %%ah\n\t"
+		      "sahf\n\t"
+		      "vmread %[enc], %[val]; finish_read_fault:"
+		      : [val] "=m" (*(u64 *)vpage)
+		      : [byte] "Krm" (sentinel),
+		      [enc] "r" ((u64)GUEST_SEL_SS)
+		      : "cc", "ah");
+	report(handler_called, "The #PF handler was invoked");
+
+	/*
+	 * restore the old #PF handler
+	 */
+	handle_exception(PF_VECTOR, old);
+}
+
+static void test_vmread_flags_touch(void)
+{
+	/*
+	 * set up the sentinel value in the flags register. we
+	 * choose these two values because they candy-stripe
+	 * the 5 flags that sahf sets.
+	 */
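+	/* 0x91 exercises SF, AF and CF; 0x45 exercises ZF, PF and CF. */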
+	sentinel = 0x91;
+	test_read_sentinel();
+
+	sentinel = 0x45;
+	test_read_sentinel();
+}
+
+static void test_write_sentinel(void)
+{
+	void *vpage;
+	struct vmcs *vmcs;
+	handler old;
+
+	prep_flags_test_env(&vpage, &vmcs, &old);
+
+	/*
+	 * set the proper label
+	 */
+	extern char finish_write_fault;
+
+	finish_fault = (ulong)&finish_write_fault;
+
+	/*
+	 * execute the vmwrite instruction that will cause a #PF
+	 */
+	handler_called = false;
+	asm volatile ("movb %[byte], %%ah\n\t"
+		      "sahf\n\t"
+		      "vmwrite %[val], %[enc]; finish_write_fault:"
+		      : /* no outputs; vmwrite only reads its operands */
+		      : [val] "m" (*(u64 *)vpage),
+		      [byte] "Krm" (sentinel),
+		      [enc] "r" ((u64)GUEST_SEL_SS)
+		      : "cc", "ah");
+	report(handler_called, "The #PF handler was invoked");
+
+	/*
+	 * restore the old #PF handler
+	 */
+	handle_exception(PF_VECTOR, old);
+}
+
+static void test_vmwrite_flags_touch(void)
+{
+	/*
+	 * set up the sentinel value in the flags register. we
+	 * choose these two values because they candy-stripe
+	 * the 5 flags that sahf sets.
+	 */
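+	/* 0x91 exercises SF, AF and CF; 0x45 exercises ZF, PF and CF. */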
+	sentinel = 0x91;
+	test_write_sentinel();
+
+	sentinel = 0x45;
+	test_write_sentinel();
+}
+
 static void test_vmcs_high(void)
 {
 	struct vmcs *vmcs = alloc_page();
@@ -1988,6 +2124,10 @@
 		test_vmcs_lifecycle();
 	if (test_wanted("test_vmx_caps", argv, argc))
 		test_vmx_caps();
+	if (test_wanted("test_vmread_flags_touch", argv, argc))
+		test_vmread_flags_touch();
+	if (test_wanted("test_vmwrite_flags_touch", argv, argc))
+		test_vmwrite_flags_touch();
 
 	/* Balance vmxon from test_vmxon. */
 	vmx_off();
diff --git a/x86/vmx.h b/x86/vmx.h
index 2e28ecb..08b354d 100644
--- a/x86/vmx.h
+++ b/x86/vmx.h
@@ -521,6 +521,13 @@
 	VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
 };
 
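+/* Exit qualification values reported for a failed VM-entry (Intel SDM). */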
+enum vm_entry_failure_code {
+	ENTRY_FAIL_DEFAULT		= 0,
+	ENTRY_FAIL_PDPTE		= 2,
+	ENTRY_FAIL_NMI			= 3,
+	ENTRY_FAIL_VMCS_LINK_PTR	= 4,
+};
+
 #define SAVE_GPR				\
 	"xchg %rax, regs\n\t"			\
 	"xchg %rcx, regs+0x8\n\t"		\
diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
index a91715f..0909adb 100644
--- a/x86/vmx_tests.c
+++ b/x86/vmx_tests.c
@@ -3570,6 +3570,10 @@
 	for (i = 0; i <= supported_targets + 1; i++)
 		try_cr3_target_count(i, supported_targets);
 	vmcs_write(CR3_TARGET_COUNT, cr3_targets);
+
+	/* VMWRITE to nonexistent target fields should fail. */
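+	/* CR3-target value fields occupy consecutive even-numbered encodings. */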
+	for (i = supported_targets; i < 256; i++)
+		TEST_ASSERT(vmcs_write(CR3_TARGET_0 + i*2, 0));
 }
 
 /*
@@ -5255,7 +5259,8 @@
 
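+	/* A failed VM-entry should report the default (0) exit qualification. */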
 	report(result.exit_reason.failed_vmentry == xfail &&
 	       ((xfail && result.exit_reason.basic == VMX_FAIL_STATE) ||
-	        (!xfail && result.exit_reason.basic == VMX_VMCALL)),
+	        (!xfail && result.exit_reason.basic == VMX_VMCALL)) &&
+		(!xfail || vmcs_read(EXI_QUALIFICATION) == ENTRY_FAIL_DEFAULT),
 	        "%s, %s %lx", test, field_name, field);
 
 	if (!result.exit_reason.failed_vmentry)
@@ -8328,6 +8333,228 @@
 	       msr_entry.value, low, high);
 }
 
+static void vmx_preemption_timer_zero_test_db_handler(struct ex_regs *regs)
+{
+}
+
+static void vmx_preemption_timer_zero_test_guest(void)
+{
+	while (vmx_get_test_stage() < 3)
+		vmcall();
+}
+
+static void vmx_preemption_timer_zero_activate_preemption_timer(void)
+{
+	vmcs_set_bits(PIN_CONTROLS, PIN_PREEMPT);
+	vmcs_write(PREEMPT_TIMER_VALUE, 0);
+}
+
+static void vmx_preemption_timer_zero_advance_past_vmcall(void)
+{
+	vmcs_clear_bits(PIN_CONTROLS, PIN_PREEMPT);
+	enter_guest();
+	skip_exit_vmcall();
+}
+
+static void vmx_preemption_timer_zero_inject_db(bool intercept_db)
+{
+	vmx_preemption_timer_zero_activate_preemption_timer();
+	vmcs_write(ENT_INTR_INFO, INTR_INFO_VALID_MASK |
+		   INTR_TYPE_HARD_EXCEPTION | DB_VECTOR);
+	vmcs_write(EXC_BITMAP, intercept_db ? 1 << DB_VECTOR : 0);
+	enter_guest();
+}
+
+static void vmx_preemption_timer_zero_set_pending_dbg(u32 exception_bitmap)
+{
+	vmx_preemption_timer_zero_activate_preemption_timer();
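+	/* Bit 12 ("enabled breakpoint") plus B1 queues a #DB trap. */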
+	vmcs_write(GUEST_PENDING_DEBUG, BIT(12) | DR_TRAP1);
+	vmcs_write(EXC_BITMAP, exception_bitmap);
+	enter_guest();
+}
+
+static void vmx_preemption_timer_zero_expect_preempt_at_rip(u64 expected_rip)
+{
+	u32 reason = (u32)vmcs_read(EXI_REASON);
+	u64 guest_rip = vmcs_read(GUEST_RIP);
+
+	report(reason == VMX_PREEMPT && guest_rip == expected_rip,
+	       "Exit reason is 0x%x (expected 0x%x) and guest RIP is 0x%lx (expected 0x%lx).",
+	       reason, VMX_PREEMPT, guest_rip, expected_rip);
+}
+
+/*
+ * This test ensures that when the VMX preemption timer is zero at
+ * VM-entry, a VM-exit occurs after any event injection and after any
+ * pending debug exceptions are raised, but before execution of any
+ * guest instructions.
+ */
+static void vmx_preemption_timer_zero_test(void)
+{
+	u64 db_fault_address = (u64)get_idt_addr(&boot_idt[DB_VECTOR]);
+	handler old_db;
+	u32 reason;
+
+	if (!(ctrl_pin_rev.clr & PIN_PREEMPT)) {
+		report_skip("'Activate VMX-preemption timer' not supported");
+		return;
+	}
+
+	/*
+	 * Install a custom #DB handler that doesn't abort.
+	 */
+	old_db = handle_exception(DB_VECTOR,
+				  vmx_preemption_timer_zero_test_db_handler);
+
+	test_set_guest(vmx_preemption_timer_zero_test_guest);
+
+	/*
+	 * VMX-preemption timer should fire after event injection.
+	 */
+	vmx_set_test_stage(0);
+	vmx_preemption_timer_zero_inject_db(0);
+	vmx_preemption_timer_zero_expect_preempt_at_rip(db_fault_address);
+	vmx_preemption_timer_zero_advance_past_vmcall();
+
+	/*
+	 * VMX-preemption timer should fire after event injection.
+	 * Exception bitmap is irrelevant, since you can't intercept
+	 * an event that you injected.
+	 */
+	vmx_set_test_stage(1);
+	vmx_preemption_timer_zero_inject_db(1 << DB_VECTOR);
+	vmx_preemption_timer_zero_expect_preempt_at_rip(db_fault_address);
+	vmx_preemption_timer_zero_advance_past_vmcall();
+
+	/*
+	 * VMX-preemption timer should fire after pending debug exceptions
+	 * have delivered a #DB trap.
+	 */
+	vmx_set_test_stage(2);
+	vmx_preemption_timer_zero_set_pending_dbg(0);
+	vmx_preemption_timer_zero_expect_preempt_at_rip(db_fault_address);
+	vmx_preemption_timer_zero_advance_past_vmcall();
+
+	/*
+	 * VMX-preemption timer would fire after pending debug exceptions
+	 * have delivered a #DB trap, but in this case, the #DB trap is
+	 * intercepted.
+	 */
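+	/* The intercepted #DB should cause an exception VM-exit instead. */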
+	vmx_set_test_stage(3);
+	vmx_preemption_timer_zero_set_pending_dbg(1 << DB_VECTOR);
+	reason = (u32)vmcs_read(EXI_REASON);
+	report(reason == VMX_EXC_NMI, "Exit reason is 0x%x (expected 0x%x)",
+	       reason, VMX_EXC_NMI);
+
+	vmcs_clear_bits(PIN_CONTROLS, PIN_PREEMPT);
+	enter_guest();
+
+	handle_exception(DB_VECTOR, old_db);
+}
+
+static u64 vmx_preemption_timer_tf_test_prev_rip;
+
+static void vmx_preemption_timer_tf_test_db_handler(struct ex_regs *regs)
+{
+	extern char vmx_preemption_timer_tf_test_endloop;
+
+	if (vmx_get_test_stage() == 2) {
+		/*
+		 * Stage 2 means that we're done, one way or another.
+		 * Arrange for the iret to drop us out of the wbinvd
+		 * loop and stop single-stepping.
+		 */
+		regs->rip = (u64)&vmx_preemption_timer_tf_test_endloop;
+		regs->rflags &= ~X86_EFLAGS_TF;
+	} else if (regs->rip == vmx_preemption_timer_tf_test_prev_rip) {
+		/*
+		 * The RIP should alternate between the wbinvd and the
+		 * jmp instruction in the code below. If we ever see
+		 * the same instruction twice in a row, that means a
+		 * single-step trap has been dropped. Let the
+		 * hypervisor know about the failure by executing a
+		 * VMCALL.
+		 */
+		vmcall();
+	}
+	vmx_preemption_timer_tf_test_prev_rip = regs->rip;
+}
+
+static void vmx_preemption_timer_tf_test_guest(void)
+{
+	/*
+	 * The hypervisor doesn't intercept WBINVD, so the loop below
+	 * shouldn't be a problem--it's just two instructions
+	 * executing in VMX non-root mode. However, when the
+	 * hypervisor is running in a virtual environment, the parent
+	 * hypervisor might intercept WBINVD and emulate it. If the
+	 * parent hypervisor is broken, the single-step trap after the
+	 * WBINVD might be lost.
+	 */
+	asm volatile("vmcall\n\t"
+		     "0: wbinvd\n\t"
+		     "1: jmp 0b\n\t"
+		     "vmx_preemption_timer_tf_test_endloop:");
+}
+
+/*
+ * Ensure that the delivery of a "VMX-preemption timer expired"
+ * VM-exit doesn't disrupt single-stepping in the guest. Note that
+ * passing this test doesn't ensure correctness, because the test will
+ * only fail if the VMX-preemption timer fires at the right time (or
+ * the wrong time, as it were).
+ */
+static void vmx_preemption_timer_tf_test(void)
+{
+	handler old_db;
+	u32 reason;
+	int i;
+
+	if (!(ctrl_pin_rev.clr & PIN_PREEMPT)) {
+		report_skip("'Activate VMX-preemption timer' not supported");
+		return;
+	}
+
+	old_db = handle_exception(DB_VECTOR,
+				  vmx_preemption_timer_tf_test_db_handler);
+
+	test_set_guest(vmx_preemption_timer_tf_test_guest);
+
+	enter_guest();
+	skip_exit_vmcall();
+
+	vmx_set_test_stage(1);
+	vmcs_set_bits(PIN_CONTROLS, PIN_PREEMPT);
+	vmcs_write(PREEMPT_TIMER_VALUE, 50000);
+	vmcs_write(GUEST_RFLAGS, X86_EFLAGS_FIXED | X86_EFLAGS_TF);
+
+	/*
+	 * The only exit we should see is "VMX-preemption timer
+	 * expired."  If we get a VMCALL exit, that means the #DB
+	 * handler has detected a missing single-step trap. It doesn't
+	 * matter where the guest RIP is when the VMX-preemption timer
+	 * expires (whether it's in the WBINVD loop or in the #DB
+	 * handler)--a single-step trap should never be discarded.
+	 */
+	for (i = 0; i < 10000; i++) {
+		enter_guest();
+		reason = (u32)vmcs_read(EXI_REASON);
+		if (reason == VMX_PREEMPT)
+			continue;
+		TEST_ASSERT(reason == VMX_VMCALL);
+		skip_exit_insn();
+		break;
+	}
+
+	report(reason == VMX_PREEMPT, "No single-step traps skipped");
+
+	vmx_set_test_stage(2);
+	vmcs_clear_bits(PIN_CONTROLS, PIN_PREEMPT);
+	enter_guest();
+
+	handle_exception(DB_VECTOR, old_db);
+}
+
 static void vmx_db_test_guest(void)
 {
 	/*
@@ -9632,6 +9859,8 @@
 	TEST(vmx_pending_event_test),
 	TEST(vmx_pending_event_hlt_test),
 	TEST(vmx_store_tsc_test),
+	TEST(vmx_preemption_timer_zero_test),
+	TEST(vmx_preemption_timer_tf_test),
 	/* EPT access tests. */
 	TEST(ept_access_test_not_present),
 	TEST(ept_access_test_read_only),