ANDROID: KVM: arm64: Map guest MMIO as device memory
As guests use FWB, assignable MMIO should be mapped as device memory.
One more caveat with FWB is guest DMA, which we assume to be
dma-coherent for now; otherwise we would have to disable FWB for all
protected guests.
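
For illustration only (not part of the diff below), the prot selection
this change introduces for pages mapped into a protected guest's
stage-2 looks roughly like the following, mirroring the new
default_guests_prot() helper:

	/* Sketch: phys is the physical address of the page being mapped. */
	enum kvm_pgtable_prot prot;

	if (addr_is_memory(phys))
		prot = PKVM_HOST_MEM_PROT;	/* RWX normal memory; FWB forces WB */
	else
		prot = PKVM_GUEST_MMIO_PROT;	/* RW + DEVICE, stays non-cacheable */
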
Signed-off-by: Mostafa Saleh <smostafa@google.com>
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index ecc8cb0..6275cc8 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -242,6 +242,8 @@ enum kvm_pgtable_prot {
#define PKVM_HOST_MEM_PROT KVM_PGTABLE_PROT_RWX
#define PKVM_HOST_MMIO_PROT KVM_PGTABLE_PROT_RW
+#define PKVM_GUEST_MMIO_PROT (KVM_PGTABLE_PROT_RW | KVM_PGTABLE_PROT_DEVICE)
+
#define KVM_HOST_S2_DEFAULT_MASK ((KVM_PTE_LEAF_ATTR_HI | \
KVM_PTE_LEAF_ATTR_LO) & \
~(KVM_INVALID_PTE_LOCKED))
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index efd4519..2a8cb2a 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -469,6 +469,12 @@ static enum kvm_pgtable_prot default_host_prot(bool is_memory)
return is_memory ? PKVM_HOST_MEM_PROT : PKVM_HOST_MMIO_PROT;
}
+/* Guests use FWB, so MMIO must be mapped as device memory (non-cacheable). */
+static enum kvm_pgtable_prot default_guests_prot(bool is_memory)
+{
+ return is_memory ? PKVM_HOST_MEM_PROT : PKVM_GUEST_MMIO_PROT;
+}
+
bool addr_is_memory(phys_addr_t phys)
{
struct kvm_mem_range range;
@@ -815,6 +821,7 @@ struct pkvm_mem_share {
struct pkvm_mem_donation {
const struct pkvm_mem_transition tx;
+ const bool is_mmio;
};
struct check_walk_data {
@@ -1193,7 +1200,7 @@ static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr)
return PKVM_NOPAGE;
prot = kvm_pgtable_stage2_pte_prot(pte);
- if (kvm_pte_valid(pte) && ((prot & KVM_PGTABLE_PROT_RWX) != KVM_PGTABLE_PROT_RWX))
+ if (kvm_pte_valid(pte) && ((prot & KVM_PGTABLE_PROT_RW) != KVM_PGTABLE_PROT_RW))
state = PKVM_PAGE_RESTRICTED_PROT;
return state | pkvm_getstate(prot);
@@ -1242,9 +1249,9 @@ static int guest_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
prot, &vcpu->vcpu.arch.pkvm_memcache, 0);
}
-static int guest_complete_donation(u64 addr, const struct pkvm_mem_transition *tx)
+static int guest_complete_donation(u64 addr, const struct pkvm_mem_transition *tx, bool is_mmio)
{
- enum kvm_pgtable_prot prot = pkvm_mkstate(KVM_PGTABLE_PROT_RWX, PKVM_PAGE_OWNED);
+ enum kvm_pgtable_prot prot = pkvm_mkstate(default_guests_prot(!is_mmio), PKVM_PAGE_OWNED);
struct pkvm_hyp_vcpu *vcpu = tx->completer.guest.hyp_vcpu;
struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
phys_addr_t phys = tx->completer.guest.phys;
@@ -1675,7 +1682,7 @@ static int __do_donate(struct pkvm_mem_donation *donation)
ret = hyp_complete_donation(completer_addr, tx);
break;
case PKVM_ID_GUEST:
- ret = guest_complete_donation(completer_addr, tx);
+ ret = guest_complete_donation(completer_addr, tx, donation->is_mmio);
break;
default:
ret = -EINVAL;
@@ -1917,6 +1924,7 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
.id = PKVM_ID_HYP,
},
},
+ .is_mmio = false,
};
host_lock_component();
@@ -1949,6 +1957,7 @@ int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
.id = PKVM_ID_HOST,
},
},
+ .is_mmio = false,
};
host_lock_component();
@@ -2281,6 +2290,7 @@ int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu)
u64 host_addr = hyp_pfn_to_phys(pfn);
u64 guest_addr = hyp_pfn_to_phys(gfn);
struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
+ bool is_mmio = !addr_is_memory(host_addr) && pkvm_hyp_vcpu_is_protected(vcpu);
struct pkvm_mem_share share = {
.tx = {
.nr_pages = 1,
@@ -2299,7 +2309,7 @@ int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu)
},
},
},
- .completer_prot = default_host_prot(addr_is_memory(host_addr)),
+ .completer_prot = default_guests_prot(!is_mmio),
};
host_lock_component();
@@ -2319,6 +2329,7 @@ int __pkvm_host_donate_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu)
u64 host_addr = hyp_pfn_to_phys(pfn);
u64 guest_addr = hyp_pfn_to_phys(gfn);
struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
+ bool is_mmio = !addr_is_memory(host_addr) && pkvm_hyp_vcpu_is_protected(vcpu);
struct pkvm_mem_donation donation = {
.tx = {
.nr_pages = 1,
@@ -2337,13 +2348,14 @@ int __pkvm_host_donate_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu)
},
},
},
+ .is_mmio = is_mmio,
};
host_lock_component();
guest_lock_component(vm);
/* Donation for devices to protected VM must be done in groups. */
- if (!addr_is_memory(host_addr) && pkvm_hyp_vcpu_is_protected(vcpu))
+ if (is_mmio && pkvm_hyp_vcpu_is_protected(vcpu))
ret = pkvm_device_transient_group(host_addr, vm);
if (!ret)