// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Quentin Perret <qperret@google.com>
 * Author: Fuad Tabba <tabba@google.com>
 */
#ifndef __ARM64_KVM_PKVM_H__
#define __ARM64_KVM_PKVM_H__

#include <linux/arm_ffa.h>
#include <linux/memblock.h>
#include <linux/scatterlist.h>
#include <asm/kvm_pgtable.h>
#include <asm/sysreg.h>

/* Maximum number of VMs that can co-exist under pKVM. */
#define KVM_MAX_PVMS 255

#define HYP_MEMBLOCK_REGIONS 128

int pkvm_init_host_vm(struct kvm *kvm, unsigned long type);
int pkvm_create_hyp_vm(struct kvm *kvm);
void pkvm_destroy_hyp_vm(struct kvm *kvm);
bool pkvm_is_hyp_created(struct kvm *kvm);
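
/*
 * Illustrative call order only (a sketch, not a prescribed API contract):
 * the host registers a VM with pKVM up front, instantiates the
 * hypervisor-side VM before first run if it does not exist yet, and
 * releases the hypervisor state on teardown:
 *
 *	ret = pkvm_init_host_vm(kvm, type);
 *	...
 *	if (!pkvm_is_hyp_created(kvm))
 *		ret = pkvm_create_hyp_vm(kvm);
 *	...
 *	pkvm_destroy_hyp_vm(kvm);
 */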

/* All HAFGRTR_EL2 bits are AMU */
#define HAFGRTR_AMU __HAFGRTR_EL2_MASK

#define PVM_HAFGRTR_EL2_SET \
	(kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP) ? 0ULL : HAFGRTR_AMU)

#define PVM_HAFGRTR_EL2_CLR (0ULL)
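
/*
 * Note on polarity (an editorial summary, not text from this header's
 * sources): fine-grained trap bits without an "n" prefix trap when set,
 * so the PVM_*_SET masks accumulate traps for features a protected VM
 * does not have. Bits with an "n" prefix trap when clear, so the
 * PVM_*_CLR masks accumulate bits to clear for absent features.
 */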

/* No support for debug, trace, or PMU for protected VMs */
#define PVM_HDFGRTR_EL2_SET __HDFGRTR_EL2_MASK
#define PVM_HDFGRTR_EL2_CLR __HDFGRTR_EL2_nMASK

#define PVM_HDFGWTR_EL2_SET __HDFGWTR_EL2_MASK
#define PVM_HDFGWTR_EL2_CLR __HDFGWTR_EL2_nMASK

#define HFGxTR_RAS_IMP (\
	HFGxTR_EL2_ERXADDR_EL1 | \
	HFGxTR_EL2_ERXPFGF_EL1 | \
	HFGxTR_EL2_ERXMISCn_EL1 | \
	HFGxTR_EL2_ERXSTATUS_EL1 | \
	HFGxTR_EL2_ERXCTLR_EL1 | \
	HFGxTR_EL2_ERXFR_EL1 | \
	HFGxTR_EL2_ERRSELR_EL1 | \
	HFGxTR_EL2_ERRIDR_EL1 \
	)
#define HFGxTR_RAS_V1P1 (\
	HFGxTR_EL2_ERXPFGCDN_EL1 | \
	HFGxTR_EL2_ERXPFGCTL_EL1 \
	)
#define HFGxTR_GIC HFGxTR_EL2_ICC_IGRPENn_EL1
#define HFGxTR_CSV2 (\
	HFGxTR_EL2_SCXTNUM_EL0 | \
	HFGxTR_EL2_SCXTNUM_EL1 \
	)
#define HFGxTR_LOR (\
	HFGxTR_EL2_LORSA_EL1 | \
	HFGxTR_EL2_LORN_EL1 | \
	HFGxTR_EL2_LORID_EL1 | \
	HFGxTR_EL2_LOREA_EL1 | \
	HFGxTR_EL2_LORC_EL1 \
	)
#define HFGxTR_PAUTH (\
	HFGxTR_EL2_APIBKey | \
	HFGxTR_EL2_APIAKey | \
	HFGxTR_EL2_APGAKey | \
	HFGxTR_EL2_APDBKey | \
	HFGxTR_EL2_APDAKey \
	)
#define HFGxTR_nAIE (\
	HFGxTR_EL2_nAMAIR2_EL1 | \
	HFGxTR_EL2_nMAIR2_EL1 \
	)
#define HFGxTR_nS2POE HFGxTR_EL2_nS2POR_EL1
#define HFGxTR_nS1POE (\
	HFGxTR_EL2_nPOR_EL1 | \
	HFGxTR_EL2_nPOR_EL0 \
	)
#define HFGxTR_nS1PIE (\
	HFGxTR_EL2_nPIR_EL1 | \
	HFGxTR_EL2_nPIRE0_EL1 \
	)
#define HFGxTR_nTHE HFGxTR_EL2_nRCWMASK_EL1
#define HFGxTR_nSME (\
	HFGxTR_EL2_nTPIDR2_EL0 | \
	HFGxTR_EL2_nSMPRI_EL1 \
	)
#define HFGxTR_nGCS (\
	HFGxTR_EL2_nGCS_EL1 | \
	HFGxTR_EL2_nGCS_EL0 \
	)
#define HFGxTR_nLS64 HFGxTR_EL2_nACCDATA_EL1

#define PVM_HFGXTR_EL2_SET \
	(kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP) ? 0ULL : HFGxTR_RAS_IMP) | \
	(kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1) ? 0ULL : HFGxTR_RAS_V1P1) | \
	(kvm_has_feat(kvm, ID_AA64PFR0_EL1, GIC, IMP) ? 0ULL : HFGxTR_GIC) | \
	(kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, IMP) ? 0ULL : HFGxTR_CSV2) | \
	(kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP) ? 0ULL : HFGxTR_LOR) | \
	(vcpu_has_ptrauth(vcpu) ? 0ULL : HFGxTR_PAUTH) | \
	0

#define PVM_HFGXTR_EL2_CLR \
	(kvm_has_feat(kvm, ID_AA64MMFR3_EL1, AIE, IMP) ? 0ULL : HFGxTR_nAIE) | \
	(kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S2POE, IMP) ? 0ULL : HFGxTR_nS2POE) | \
	(kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1POE, IMP) ? 0ULL : HFGxTR_nS1POE) | \
	(kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP) ? 0ULL : HFGxTR_nS1PIE) | \
	(kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP) ? 0ULL : HFGxTR_nTHE) | \
	(kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP) ? 0ULL : HFGxTR_nSME) | \
	(kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP) ? 0ULL : HFGxTR_nGCS) | \
	(kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64) ? 0ULL : HFGxTR_nLS64) | \
	0

#define PVM_HFGRTR_EL2_SET PVM_HFGXTR_EL2_SET
#define PVM_HFGWTR_EL2_SET PVM_HFGXTR_EL2_SET
#define PVM_HFGRTR_EL2_CLR PVM_HFGXTR_EL2_CLR
#define PVM_HFGWTR_EL2_CLR PVM_HFGXTR_EL2_CLR
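
/*
 * A minimal usage sketch (illustrative, with hypothetical local names):
 * the PVM_* masks expand to expressions that reference local variables
 * named "kvm" and "vcpu", so they can only be evaluated in a scope that
 * provides both:
 *
 *	struct kvm *kvm = vcpu->kvm;
 *	u64 r_set = PVM_HFGRTR_EL2_SET;
 *	u64 r_clr = PVM_HFGRTR_EL2_CLR;
 */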

#define HFGITR_SPECRES (\
	HFGITR_EL2_CPPRCTX | \
	HFGITR_EL2_DVPRCTX | \
	HFGITR_EL2_CFPRCTX \
	)
#define HFGITR_TLBIOS (\
	HFGITR_EL2_TLBIVAALE1OS | \
	HFGITR_EL2_TLBIVALE1OS | \
	HFGITR_EL2_TLBIVAAE1OS | \
	HFGITR_EL2_TLBIASIDE1OS | \
	HFGITR_EL2_TLBIVAE1OS | \
	HFGITR_EL2_TLBIVMALLE1OS \
	)
#define HFGITR_TLBIRANGE \
	(\
	HFGITR_TLBIOS | \
	HFGITR_EL2_TLBIRVAALE1 | \
	HFGITR_EL2_TLBIRVALE1 | \
	HFGITR_EL2_TLBIRVAAE1 | \
	HFGITR_EL2_TLBIRVAE1 | \
	HFGITR_EL2_TLBIRVAALE1IS | \
	HFGITR_EL2_TLBIRVALE1IS | \
	HFGITR_EL2_TLBIRVAAE1IS | \
	HFGITR_EL2_TLBIRVAE1IS | \
	HFGITR_EL2_TLBIVAALE1IS | \
	HFGITR_EL2_TLBIVALE1IS | \
	HFGITR_EL2_TLBIVAAE1IS | \
	HFGITR_EL2_TLBIASIDE1IS | \
	HFGITR_EL2_TLBIVAE1IS | \
	HFGITR_EL2_TLBIVMALLE1IS | \
	HFGITR_EL2_TLBIRVAALE1OS | \
	HFGITR_EL2_TLBIRVALE1OS | \
	HFGITR_EL2_TLBIRVAAE1OS | \
	HFGITR_EL2_TLBIRVAE1OS \
	)
#define HFGITR_TLB HFGITR_TLBIRANGE
#define HFGITR_PAN2 (\
	HFGITR_EL2_ATS1E1WP | \
	HFGITR_EL2_ATS1E1RP | \
	HFGITR_EL2_ATS1E0W | \
	HFGITR_EL2_ATS1E0R | \
	HFGITR_EL2_ATS1E1W | \
	HFGITR_EL2_ATS1E1R \
	)
#define HFGITR_PAN HFGITR_PAN2
#define HFGITR_DPB2 HFGITR_EL2_DCCVADP
#define HFGITR_DPB_IMP HFGITR_EL2_DCCVAP
#define HFGITR_DPB (HFGITR_DPB_IMP | HFGITR_DPB2)
#define HFGITR_nGCS (\
	HFGITR_EL2_nGCSEPP | \
	HFGITR_EL2_nGCSSTR_EL1 | \
	HFGITR_EL2_nGCSPUSHM_EL1 \
	)
#define HFGITR_nBRBE (\
	HFGITR_EL2_nBRBIALL | \
	HFGITR_EL2_nBRBINJ \
	)

#define PVM_HFGITR_EL2_SET \
	(kvm_has_feat(kvm, ID_AA64ISAR2_EL1, ATS1A, IMP) ? 0ULL : HFGITR_EL2_ATS1E1A) | \
	(kvm_has_feat(kvm, ID_AA64ISAR1_EL1, SPECRES, IMP) ? 0ULL : HFGITR_SPECRES) | \
	(kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS) ? 0ULL : HFGITR_TLB) | \
	(kvm_has_feat(kvm, ID_AA64MMFR1_EL1, PAN, IMP) ? 0ULL : HFGITR_PAN) | \
	(kvm_has_feat(kvm, ID_AA64ISAR1_EL1, DPB, IMP) ? 0ULL : HFGITR_DPB) | \
	0

#define PVM_HFGITR_EL2_CLR \
	(kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP) ? 0ULL : HFGITR_nGCS) | \
	(kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP) ? 0ULL : HFGITR_nBRBE) | \
	0

#define HCRX_NMI HCRX_EL2_TALLINT

#define HCRX_nPAuth_LR HCRX_EL2_PACMEn
#define HCRX_nFPMR HCRX_EL2_EnFPM
#define HCRX_nGCS HCRX_EL2_GCSEn
#define HCRX_nSYSREG128 HCRX_EL2_EnIDCP128
#define HCRX_nADERR HCRX_EL2_EnSDERR
#define HCRX_nDoubleFault2 HCRX_EL2_TMEA
#define HCRX_nANERR HCRX_EL2_EnSNERR
#define HCRX_nD128 HCRX_EL2_D128En
#define HCRX_nTHE HCRX_EL2_PTTWI
#define HCRX_nSCTLR2 HCRX_EL2_SCTLR2En
#define HCRX_nTCR2 HCRX_EL2_TCR2En
#define HCRX_nMOPS (HCRX_EL2_MSCEn | HCRX_EL2_MCE2)
#define HCRX_nCMOW HCRX_EL2_CMOW
#define HCRX_nNMI (HCRX_EL2_VFNMI | HCRX_EL2_VINMI)
#define HCRX_SME HCRX_EL2_SMPME
#define HCRX_nXS (HCRX_EL2_FGTnXS | HCRX_EL2_FnXS)
#define HCRX_nLS64 (HCRX_EL2_EnASR | HCRX_EL2_EnALS | HCRX_EL2_EnAS0)

#define PVM_HCRX_EL2_SET \
	(kvm_has_feat(kvm, ID_AA64PFR1_EL1, NMI, IMP) ? 0ULL : HCRX_NMI) | \
	(kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP) ? 0ULL : HCRX_SME) | \
	0

#define PVM_HCRX_EL2_CLR \
	(vcpu_has_ptrauth(vcpu) ? HCRX_nPAuth_LR : 0ULL) | \
	(kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP) ? 0ULL : HCRX_nGCS) | \
	(kvm_has_feat(kvm, ID_AA64ISAR2_EL1, SYSREG_128, IMP) ? 0ULL : HCRX_nSYSREG128) | \
	(kvm_has_feat(kvm, ID_AA64MMFR3_EL1, ADERR, FEAT_ADERR) ? 0ULL : HCRX_nADERR) | \
	(kvm_has_feat(kvm, ID_AA64PFR1_EL1, DF2, IMP) ? 0ULL : HCRX_nDoubleFault2) | \
	(kvm_has_feat(kvm, ID_AA64MMFR3_EL1, ANERR, FEAT_ANERR) ? 0ULL : HCRX_nANERR) | \
	(true /* trap unless ID_AA64MMFR0_EL1 PARANGE == 0b111 */ ? 0ULL : HCRX_nD128) | \
	(kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP) ? 0ULL : HCRX_nTHE) | \
	(kvm_has_feat(kvm, ID_AA64MMFR3_EL1, SCTLRX, IMP) ? 0ULL : HCRX_nSCTLR2) | \
	(kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP) ? 0ULL : HCRX_nTCR2) | \
	(kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP) ? 0ULL : HCRX_nMOPS) | \
	(kvm_has_feat(kvm, ID_AA64MMFR1_EL1, CMOW, IMP) ? 0ULL : HCRX_nCMOW) | \
	(kvm_has_feat(kvm, ID_AA64PFR1_EL1, NMI, IMP) ? 0ULL : HCRX_nNMI) | \
	(kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP) ? 0ULL : HCRX_nXS) | \
	(kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64) ? 0ULL : HCRX_nLS64) | \
	0
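
/*
 * Purely illustrative sketch of how these masks could be consumed (the
 * surrounding code is hypothetical, not part of this header):
 *
 *	u64 hcrx = read_sysreg_s(SYS_HCRX_EL2);
 *	hcrx |= PVM_HCRX_EL2_SET;
 *	hcrx &= ~(PVM_HCRX_EL2_CLR);
 *	write_sysreg_s(hcrx, SYS_HCRX_EL2);
 *
 * Note the extra parentheses around the CLR macro: the PVM_* macros
 * expand to an unparenthesised '|' chain, so operator precedence matters
 * at the use site.
 */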

extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);

static inline unsigned long
hyp_vmemmap_memblock_size(struct memblock_region *reg, size_t vmemmap_entry_size)
{
	unsigned long nr_pages = reg->size >> PAGE_SHIFT;
	unsigned long start, end;

	start = (reg->base >> PAGE_SHIFT) * vmemmap_entry_size;
	end = start + nr_pages * vmemmap_entry_size;
	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = ALIGN(end, PAGE_SIZE);

	return end - start;
}
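
/*
 * Worked example (illustrative numbers only): with 4 KiB pages and a
 * 32-byte vmemmap entry, a 64 MiB memblock covers 16384 pages and so
 * needs 16384 * 32 = 512 KiB of vmemmap, i.e. 128 pages, plus up to one
 * page of slack on each side from the alignment above.
 */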

static inline unsigned long hyp_vmemmap_pages(size_t vmemmap_entry_size)
{
	unsigned long res = 0, i;

	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		res += hyp_vmemmap_memblock_size(&kvm_nvhe_sym(hyp_memory)[i],
						 vmemmap_entry_size);
	}

	return res >> PAGE_SHIFT;
}

static inline unsigned long hyp_vm_table_pages(void)
{
	return PAGE_ALIGN(KVM_MAX_PVMS * sizeof(void *)) >> PAGE_SHIFT;
}

static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0;
	int i;

	/* Provision the worst case scenario */
	for (i = KVM_PGTABLE_FIRST_LEVEL; i <= KVM_PGTABLE_LAST_LEVEL; i++) {
		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
		total += nr_pages;
	}

	return total;
}
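
/*
 * Worked example (a sketch assuming 4 KiB pages, PTRS_PER_PTE == 512 and
 * four levels): mapping 1 GiB, i.e. 262144 pages, needs
 * DIV_ROUND_UP(262144, 512) = 512 PTE pages, then 1 PMD, 1 PUD and 1 PGD
 * page, for a worst-case total of 515 page-table pages.
 */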

static inline unsigned long __hyp_pgtable_total_pages(void)
{
	unsigned long res = 0, i;

	/* Cover all of memory at page granularity */
	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];

		res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
	}

	return res;
}

static inline unsigned long hyp_s1_pgtable_pages(void)
{
	unsigned long res;

	res = __hyp_pgtable_total_pages();

	/* Allow 1 GiB for private mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

static inline unsigned long host_s2_pgtable_pages(void)
{
	unsigned long res;

	/*
	 * Include an extra 16 pages to safely upper-bound the worst case of
	 * concatenated pgds.
	 */
	res = __hyp_pgtable_total_pages() + 16;

	/* Allow 1 GiB for MMIO mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

#define KVM_FFA_MBOX_NR_PAGES 1

static inline unsigned long hyp_ffa_proxy_pages(void)
{
	size_t desc_max;

	/*
	 * The hypervisor FFA proxy needs enough memory to buffer a fragmented
	 * descriptor returned from EL3 in response to a RETRIEVE_REQ call.
	 */
	desc_max = sizeof(struct ffa_mem_region) +
		   sizeof(struct ffa_mem_region_attributes) +
		   sizeof(struct ffa_composite_mem_region) +
		   SG_MAX_SEGMENTS * sizeof(struct ffa_mem_region_addr_range);

	/* Plus a page each for the hypervisor's RX and TX mailboxes. */
	return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
}
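
/*
 * Shape of the result (illustrative arithmetic; the exact structure
 * sizes come from the FF-A headers): the descriptor buffer is dominated
 * by the SG_MAX_SEGMENTS address-range entries, so with 16-byte range
 * entries and 4 KiB pages the proxy needs roughly
 * 2 + DIV_ROUND_UP(SG_MAX_SEGMENTS * 16, 4096) pages in total.
 */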

static inline size_t pkvm_host_sve_state_size(void)
{
	if (!system_supports_sve())
		return 0;

	return size_add(sizeof(struct cpu_sve_state),
			SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl)));
}

#endif /* __ARM64_KVM_PKVM_H__ */