/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 *	T = __pa_symbol(__hyp_idmap_text_start)
 *	if (T & BIT(VA_BITS - 1))
 *		HYP_VA_MIN = 0  //idmap in upper half
 *	else
 *		HYP_VA_MIN = 1 << (VA_BITS - 1)
 *	HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, and none of this applies in that case.
 */
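
/*
 * Worked example (illustrative only, assuming VA_BITS == 48): if
 * __pa_symbol(__hyp_idmap_text_start) has bit 47 clear (idmap in the
 * bottom half), then:
 *
 *	HYP_VA_MIN = 1 << 47
 *	HYP_VA_MAX = (1 << 48) - 1
 *
 * i.e. the hyp VA range shadows the top half, and kern_hyp_va (below)
 * folds a kernel VA into that window using the mask/tag patched at
 * boot by kvm_update_va_mask().
 */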

#ifdef __ASSEMBLY__

#include <asm/alternative.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * The actual code generation takes place in kvm_update_va_mask, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_va_mask uses the
 * specific registers encoded in the instructions).
 */
.macro kern_hyp_va reg
#ifndef __KVM_VHE_HYPERVISOR__
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask
	and	\reg, \reg, #1		/* mask with va_mask */
	ror	\reg, \reg, #1		/* rotate to the first tag bit */
	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
	ror	\reg, \reg, #63		/* rotate back */
alternative_cb_end
#endif
.endm
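
/*
 * Example use (illustrative): convert a kernel VA held in x0 before
 * dereferencing it at EL2:
 *
 *	kern_hyp_va	x0
 *	ldr		x1, [x0]
 */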

/*
 * Convert a hypervisor VA to a PA
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 */
.macro hyp_pa reg, tmp
	ldr_l	\tmp, hyp_physvirt_offset
	add	\reg, \reg, \tmp
.endm
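
/*
 * e.g. (illustrative): with a hyp VA in x0,
 *
 *	hyp_pa	x0, x1
 *
 * leaves the corresponding PA in x0, clobbering x1.
 */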

/*
 * Convert a hypervisor VA to a kernel image address
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 *
 * The actual code generation takes place in kvm_get_kimage_voffset, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_get_kimage_voffset uses the
 * specific registers encoded in the instructions).
 */
.macro hyp_kimg_va reg, tmp
	/* Convert hyp VA -> PA. */
	hyp_pa	\reg, \tmp

	/* Load kimage_voffset. */
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_get_kimage_voffset
	movz	\tmp, #0
	movk	\tmp, #0, lsl #16
	movk	\tmp, #0, lsl #32
	movk	\tmp, #0, lsl #48
alternative_cb_end

	/* Convert PA -> kimg VA. */
	add	\reg, \reg, \tmp
.endm
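
/*
 * Net effect (sketch): for a hyp VA v,
 *
 *	kimg VA = (v + hyp_physvirt_offset) + kimage_voffset
 *
 * i.e. hyp VA -> PA via the hyp offset (loaded at runtime), then
 * PA -> kernel image VA via kimage_voffset (patched in at boot).
 */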

#else

#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);
void kvm_apply_hyp_relocations(void);

#define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)

static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
#ifndef __KVM_VHE_HYPERVISOR__
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
				    "ror %0, %0, #1\n"
				    "add %0, %0, #0\n"
				    "add %0, %0, #0, lsl 12\n"
				    "ror %0, %0, #63\n",
				    ARM64_ALWAYS_SYSTEM,
				    kvm_update_va_mask)
		     : "+r" (v));
#endif
	return v;
}

#define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
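
/*
 * Example (illustrative): translate a kernel pointer before handing it
 * to the nVHE hypervisor:
 *
 *	struct kvm_s2_mmu *hyp_mmu = kern_hyp_va(mmu);
 *
 * When built for the VHE hypervisor (__KVM_VHE_HYPERVISOR__), the
 * conversion compiles away and the address is returned unchanged.
 */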

/*
 * We currently support using a VM-specified IPA size. For backward
 * compatibility, the default IPA size is fixed to 40 bits.
 */
#define KVM_PHYS_SHIFT	(40)

#define kvm_phys_shift(mmu)		VTCR_EL2_IPA((mmu)->vtcr)
#define kvm_phys_size(mmu)		(_AC(1, ULL) << kvm_phys_shift(mmu))
#define kvm_phys_mask(mmu)		(kvm_phys_size(mmu) - _AC(1, ULL))

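/*
 * For instance (illustrative), a VM using the default 40-bit IPA space
 * gets:
 *
 *	kvm_phys_shift(mmu) == 40
 *	kvm_phys_size(mmu)  == 1ULL << 40	(1 TiB of guest PA space)
 *	kvm_phys_mask(mmu)  == (1ULL << 40) - 1
 */
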
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

int kvm_share_hyp(void *from, void *to);
void kvm_unshare_hyp(void *from, void *to);
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int __create_hyp_mappings(unsigned long start, unsigned long size,
			  unsigned long phys, enum kvm_pgtable_prot prot);
int hyp_alloc_private_va_range(size_t size, unsigned long *haddr);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr);
void __init free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type);
void kvm_uninit_stage2_mmu(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int __init kvm_mmu_init(u32 *hyp_va_bits);

static inline void *__kvm_vector_slot2addr(void *base,
					   enum arm64_hyp_spectre_vector slot)
{
	int idx = slot - (slot != HYP_VECTOR_DIRECT);

	return base + (idx * SZ_2K);
}
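
/*
 * Sketch of the slot -> offset mapping (assuming the enum order in
 * <asm/spectre.h>): HYP_VECTOR_DIRECT and HYP_VECTOR_SPECTRE_DIRECT
 * share the first 2K slot (idx 0), while each subsequent slot sits a
 * further SZ_2K into the vector page, e.g.
 *
 *	__kvm_vector_slot2addr(base, HYP_VECTOR_INDIRECT) == base + SZ_2K
 */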

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	\
	dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	u64 cache_bits = SCTLR_ELx_M | SCTLR_ELx_C;
	int reg;

	if (vcpu_is_el2(vcpu))
		reg = SCTLR_EL2;
	else
		reg = SCTLR_EL1;

	return (vcpu_read_sys_reg(vcpu, reg) & cache_bits) == cache_bits;
}

static inline void __clean_dcache_guest_page(void *va, size_t size)
{
	/*
	 * With FWB, we ensure that the guest always accesses memory using
	 * cacheable attributes, and we don't have to clean to PoC when
	 * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
	 * PoU is not required either in this case.
	 */
	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}

static inline size_t __invalidate_icache_max_range(void)
{
	u8 iminline;
	u64 ctr;

	asm volatile(ALTERNATIVE_CB("movz %0, #0\n"
				    "movk %0, #0, lsl #16\n"
				    "movk %0, #0, lsl #32\n"
				    "movk %0, #0, lsl #48\n",
				    ARM64_ALWAYS_SYSTEM,
				    kvm_compute_final_ctr_el0)
		     : "=r" (ctr));

	iminline = SYS_FIELD_GET(CTR_EL0, IminLine, ctr) + 2;
	return MAX_DVM_OPS << iminline;
}
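
/*
 * Worked example (illustrative): CTR_EL0.IminLine is log2 of the
 * minimum I-cache line size in words, so IminLine == 4 means 64-byte
 * lines (iminline == 6) and a limit of MAX_DVM_OPS << 6 bytes, i.e.
 * at most MAX_DVM_OPS line-by-line invalidation operations.
 */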

static inline void __invalidate_icache_guest_page(void *va, size_t size)
{
	/*
	 * Blow the whole I-cache if it is aliasing (i.e. VIPT) or the
	 * invalidation range exceeds our arbitrary limit on invalidations by
	 * cache line.
	 */
	if (icache_is_aliasing() || size > __invalidate_icache_max_range())
		icache_inval_all_pou();
	else
		icache_inval_pou((unsigned long)va, (unsigned long)va + size);
}

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return get_vmid_bits(reg);
}

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
				       const void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_write_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}
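
/*
 * Example (illustrative): reading a guest structure from a context that
 * may not already hold the SRCU read lock:
 *
 *	u64 entry;
 *	int ret = kvm_read_guest_lock(kvm, gpa, &entry, sizeof(entry));
 */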

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

/*
 * When this is (directly or indirectly) used on the TLB invalidation
 * path, we rely on a previously issued DSB so that page table updates
 * and VMID reads are correctly ordered.
 */
static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
{
	struct kvm_vmid *vmid = &mmu->vmid;
	u64 vmid_field, baddr;
	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

	baddr = mmu->pgd_phys;
	vmid_field = atomic64_read(&vmid->id) << VTTBR_VMID_SHIFT;
	vmid_field &= VTTBR_VMID_MASK(kvm_arm_vmid_bits);
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}
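
/*
 * Resulting VTTBR_EL2 value (sketch): the stage-2 pgd PA in the BADDR
 * field, the current VMID in the top bits (VTTBR_VMID_SHIFT == 48),
 * and the CnP bit (bit 0) when Common-not-Private is supported.
 */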

/*
 * Must be called from hyp code running at EL2 with an updated VTTBR
 * and interrupts disabled.
 */
static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
					  struct kvm_arch *arch)
{
	write_sysreg(mmu->vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL1/EL0 translation regime used by
	 * the guest.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}
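
/*
 * Typical call (illustrative, modelled on the nVHE TLB code): mmu is
 * already a hyp pointer, but mmu->arch still holds a kernel address:
 *
 *	__load_stage2(mmu, kern_hyp_va(mmu->arch));
 */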

static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
{
	return container_of(mmu->arch, struct kvm, arch);
}
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */