ANDROID: KVM: arm64: Restore memcache representation

Recently, order was added to the memcache, the idea being to encode the
order in the low PAGE_SHIFT bits of the head, as memcache entries are
always page aligned. However, due to wrong usage of the FIELD_*
helpers, the address ends up shifted left by PAGE_SHIFT, which can
cause address corruption under some configurations.
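
As a minimal userspace sketch (assuming PAGE_SHIFT == 12 and a
simplified FIELD_PREP, not the real kernel headers), this illustrates
why feeding the full address to FIELD_PREP corrupts it:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))
	#define GENMASK_ULL(h, l) \
		((~0ULL << (l)) & (~0ULL >> (63 - (h))))
	/* Simplified FIELD_PREP: shift val to the mask's lowest set bit. */
	#define FIELD_PREP(mask, val) \
		(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

	int main(void)
	{
		uint64_t pa = 0x40001000;	/* page-aligned address */

		/* Buggy: the address is shifted left by PAGE_SHIFT. */
		printf("%#llx\n", (unsigned long long)
		       FIELD_PREP(GENMASK_ULL(63, PAGE_SHIFT), pa));
		/* Fixed: the address is already aligned, just mask it. */
		printf("%#llx\n", (unsigned long long)(pa & PAGE_MASK));
		return 0;
	}

This prints 0x40001000000 for the buggy encoding vs 0x40001000 for the
fixed one.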

Also, the naming of the macros makes things confusing; just use
PAGE_MASK, which makes things clearer.
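
Since ~PAGE_MASK covers bits [PAGE_SHIFT-1:0], the FIELD_* helpers on
the order reduce to plain masking with no shift. Illustrative
encode/decode pair under the same assumptions as the sketch above
(mc_encode/mc_addr/mc_order are hypothetical names, not the kernel's):

	/* Encode: aligned address in the high bits, order in the low. */
	static uint64_t mc_encode(uint64_t pa, uint64_t order)
	{
		return (pa & PAGE_MASK) | order;	/* order < PAGE_SIZE */
	}

	static uint64_t mc_addr(uint64_t head)
	{
		return head & PAGE_MASK;
	}

	/* FIELD_GET(~PAGE_MASK, head) == head & ~PAGE_MASK here, as the
	 * field's lowest bit is bit 0.
	 */
	static uint64_t mc_order(uint64_t head)
	{
		return head & ~PAGE_MASK;
	}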

Bug: 357781595
Bug: 333874255
Bug: 277989609
Bug: 278749606

Change-Id: I2782f8995b7230300b524e2197b607426b2ed250
Signed-off-by: Mostafa Saleh <smostafa@google.com>
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 84029d0..f7c47d39 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -85,8 +85,6 @@ void kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
 
 /* Head holds page head and its order. */
-#define HYP_MC_PTR_MASK			GENMASK_ULL(63, PAGE_SHIFT)
-#define HYP_MC_ORDER_MASK		GENMASK_ULL(PAGE_SHIFT - 1, 0)
 struct kvm_hyp_memcache {
 	phys_addr_t head;
 	unsigned long nr_pages;
@@ -99,8 +97,8 @@ static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
 				     unsigned long order)
 {
 	*p = mc->head;
-	mc->head = FIELD_PREP(HYP_MC_PTR_MASK, to_pa(p)) |
-		   FIELD_PREP(HYP_MC_ORDER_MASK, order);
+	mc->head = (to_pa(p) & PAGE_MASK) |
+		   FIELD_PREP(~PAGE_MASK, order);
 	mc->nr_pages++;
 }
 
@@ -108,12 +106,12 @@ static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
 				     void *(*to_va)(phys_addr_t phys),
 				     unsigned long *order)
 {
-	phys_addr_t *p = to_va(FIELD_GET(HYP_MC_PTR_MASK, mc->head));
+	phys_addr_t *p = to_va(mc->head & PAGE_MASK);
 
 	if (!mc->nr_pages)
 		return NULL;
 
-	*order = FIELD_GET(HYP_MC_ORDER_MASK, mc->head);
+	*order = FIELD_GET(~PAGE_MASK, mc->head);
 
 	mc->head = *p;
 	mc->nr_pages--;
diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index 9bab9a2..86ab719 100644
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -478,10 +478,10 @@ static void *admit_host_page(void *arg, unsigned long order)
 	if (!host_mc->nr_pages)
 		return NULL;
 
-	mc_order = FIELD_GET(HYP_MC_ORDER_MASK, host_mc->head);
+	mc_order = FIELD_GET(~PAGE_MASK, host_mc->head);
 	BUG_ON(order != mc_order);
 
-	p = FIELD_GET(HYP_MC_PTR_MASK, host_mc->head);
+	p = host_mc->head & PAGE_MASK;
 	/*
 	 * The host still owns the pages in its memcache, so we need to go
 	 * through a full host-to-hyp donation cycle to change it. Fortunately,
@@ -529,7 +529,7 @@ int refill_hyp_pool(struct hyp_pool *pool, struct kvm_hyp_memcache *host_mc)
 	void *p;
 
 	while (host_mc->nr_pages) {
-		order = host_mc->head & (PAGE_SIZE - 1);
+		order = FIELD_GET(~PAGE_MASK, host_mc->head);
 		p = admit_host_page(host_mc, order);
 		if (!p)
 			return -EINVAL;