/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/hyp_image.h>
#include <asm/insn.h>
#include <asm/virt.h>

#define ARM_EXIT_WITH_SERROR_BIT	31
#define ARM_EXCEPTION_CODE(x)		((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)	(ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)		!!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

#define ARM_EXCEPTION_IRQ		0
#define ARM_EXCEPTION_EL1_SERROR	1
#define ARM_EXCEPTION_TRAP		2
#define ARM_EXCEPTION_IL		3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE		HVC_STUB_ERR
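
/*
 * Decoding sketch ('exit_code' is hypothetical): a world-switch return
 * value packs one of the ARM_EXCEPTION_* codes above in its low bits,
 * with bit 31 flagging a pending SError.
 *
 *	u64 exit_code = ...;
 *	if (ARM_SERROR_PENDING(exit_code))
 *		...;
 *	switch (ARM_EXCEPTION_CODE(exit_code)) {
 *	case ARM_EXCEPTION_IRQ:  ...
 *	case ARM_EXCEPTION_TRAP: ...
 *	}
 */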

#define kvm_arm_exception_type					\
	{ARM_EXCEPTION_IRQ,		"IRQ"		},	\
	{ARM_EXCEPTION_EL1_SERROR,	"SERROR"	},	\
	{ARM_EXCEPTION_TRAP,		"TRAP"		},	\
	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	}
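
/*
 * The list above is laid out as {value, name} pairs so that trace events
 * can pretty-print exit codes; a sketch (assuming a trace event whose
 * 'ret' field holds the exit code):
 *
 *	__print_symbolic(__entry->ret, kvm_arm_exception_type)
 */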

/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)

#define KVM_HOST_SMCCC_ID(id)						\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
			   ARM_SMCCC_SMC_64,				\
			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
			   (id))

#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)

#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init			0
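
/*
 * Worked expansion (constants per include/linux/arm-smccc.h):
 * KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) pastes the name onto
 * __KVM_HOST_SMCCC_FUNC_ (yielding 0 here) and wraps it as a fast SMC64
 * call owned by the vendor hyp service:
 *
 *	KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
 *		== KVM_HOST_SMCCC_ID(0)
 *		== ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64,
 *				      ARM_SMCCC_OWNER_VENDOR_HYP, 0)
 */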

#ifndef __ASSEMBLY__

#include <linux/mm.h>

enum __kvm_host_smccc_func {
	/* Hypercalls available only prior to pKVM finalisation */
	/* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */
	__KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1,
	__KVM_HOST_SMCCC_FUNC___pkvm_init,
	__KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping,
	__KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector,
	__KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config,
	__KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,

	/* Hypercalls available after pKVM finalisation */
	__KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
	__KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
	__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
	__KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
	__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
	__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs,
	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_init_traps,
	__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
	__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
	__KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm,
};
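
/*
 * Invocation sketch: the host turns one of these enum values into an HVC
 * function ID and traps to EL2, which dispatches on it. (The actual
 * wrapper is kvm_call_hyp_nvhe() in asm/kvm_host.h; the direct form below
 * is purely illustrative.)
 *
 *	struct arm_smccc_res res;
 *	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_flush_vm_context), &res);
 */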

#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]

/*
 * Declare a pair of symbols sharing the same name, with one defined in
 * the VHE and the other in the nVHE hyp implementation.
 */
#define DECLARE_KVM_HYP_SYM(sym)		\
	DECLARE_KVM_VHE_SYM(sym);		\
	DECLARE_KVM_NVHE_SYM(sym)

#define DECLARE_KVM_VHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, sym)
#define DECLARE_KVM_NVHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))

#define DECLARE_KVM_HYP_PER_CPU(type, sym)	\
	DECLARE_KVM_VHE_PER_CPU(type, sym);	\
	DECLARE_KVM_NVHE_PER_CPU(type, sym)

/*
 * Compute a pointer to a symbol defined in the nVHE percpu region.
 * Returns NULL if the percpu memory has not been allocated yet.
 */
#define this_cpu_ptr_nvhe_sym(sym)	per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
#define per_cpu_ptr_nvhe_sym(sym, cpu)						\
	({									\
		unsigned long base, off;					\
		base = kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];		\
		off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -			\
		      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);	\
		base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;	\
	})
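
/*
 * Usage sketch ('foo' is a hypothetical nVHE percpu variable): from the
 * kernel proper,
 *
 *	DECLARE_KVM_NVHE_PER_CPU(int, foo);
 *	int *p = per_cpu_ptr_nvhe_sym(foo, 2);	// NULL before hyp percpu
 *						// pages are allocated
 */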

#if defined(__KVM_NVHE_HYPERVISOR__)

#define CHOOSE_NVHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_NVHE_SYM(sym)

/* The nVHE hypervisor shouldn't even try to access VHE symbols */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_VHE_SYM(sym)		__nvhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__nvhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__nvhe_undefined_symbol)

#elif defined(__KVM_VHE_HYPERVISOR__)

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_VHE_SYM(sym)

/* The VHE hypervisor shouldn't even try to access nVHE symbols */
extern void *__vhe_undefined_symbol;
#define CHOOSE_NVHE_SYM(sym)		__vhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__vhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__vhe_undefined_symbol)

#else

/*
 * BIG FAT WARNINGS:
 *
 * - Don't be tempted to change the following is_kernel_in_hyp_mode()
 *   to has_vhe(). has_vhe() is implemented as a *final* capability,
 *   while this is used early at boot time, when the capabilities are
 *   not final yet....
 *
 * - Don't let the nVHE hypervisor have access to this, as it will
 *   pick the *wrong* symbol (yes, it runs at EL2...).
 */
#define CHOOSE_HYP_SYM(sym)		(is_kernel_in_hyp_mode()	\
					   ? CHOOSE_VHE_SYM(sym)	\
					   : CHOOSE_NVHE_SYM(sym))

#define this_cpu_ptr_hyp_sym(sym)	(is_kernel_in_hyp_mode()	\
					   ? this_cpu_ptr(&sym)		\
					   : this_cpu_ptr_nvhe_sym(sym))

#define per_cpu_ptr_hyp_sym(sym, cpu)	(is_kernel_in_hyp_mode()	\
					   ? per_cpu_ptr(&sym, cpu)	\
					   : per_cpu_ptr_nvhe_sym(sym, cpu))

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)

#endif
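
/*
 * Selection sketch: in the kernel proper, the same source line resolves to
 * whichever hyp object is live — the VHE copy when running at EL2, the
 * name-prefixed nVHE copy otherwise. E.g., using the __kvm_hyp_vector
 * symbol declared further down:
 *
 *	void *vec = CHOOSE_HYP_SYM(__kvm_hyp_vector);
 */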

struct kvm_nvhe_init_params {
	unsigned long mair_el2;
	unsigned long tcr_el2;
	unsigned long tpidr_el2;
	unsigned long stack_hyp_va;
	unsigned long stack_pa;
	phys_addr_t pgd_pa;
	unsigned long hcr_el2;
	unsigned long vttbr;
	unsigned long vtcr;
};
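
/*
 * Flow sketch (hedged: the real setup lives in the KVM init code, and the
 * 'kvm_init_params' symbol name is assumed): the host fills one of these
 * structures per CPU and hands it to the __kvm_hyp_init hypercall so that
 * EL2 can configure its own translation regime, stack and sysregs.
 *
 *	struct kvm_nvhe_init_params *params =
 *		this_cpu_ptr_nvhe_sym(kvm_init_params);
 *	params->tpidr_el2 = ...;
 *	params->pgd_pa    = ...;
 */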

/*
 * Used by the host in EL1 to dump the nVHE hypervisor backtrace on
 * hyp_panic() in non-protected mode.
 *
 * @stack_base:			hyp VA of the hyp_stack base.
 * @overflow_stack_base:	hyp VA of the hyp_overflow_stack base.
 * @fp:				hyp FP where the backtrace begins.
 * @pc:				hyp PC where the backtrace begins.
 */
struct kvm_nvhe_stacktrace_info {
	unsigned long stack_base;
	unsigned long overflow_stack_base;
	unsigned long fp;
	unsigned long pc;
};

/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr)						\
	({								\
		void *val = (ptr);					\
		if (!is_kernel_in_hyp_mode())				\
			val = lm_alias((ptr));				\
		val;							\
	 })
#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))
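
/*
 * Usage sketch: symbols such as __kvm_hyp_vector resolve to kernel image
 * addresses; without VHE, hyp maps the linear region rather than the
 * kernel image, so such an address is translated before use:
 *
 *	void *vec = kvm_ksym_ref(__kvm_hyp_vector);
 */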

struct kvm;
struct kvm_vcpu;
struct kvm_s2_mmu;

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)

extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);

DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

extern void __kvm_flush_vm_context(void);
extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				     int level);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);

extern u64 __vgic_v3_get_gic_config(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);

extern u64 __kvm_get_mdcr_el2(void);

#define __KVM_EXTABLE(from, to)						\
	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"


#define __kvm_at(at_op, addr)						\
( {									\
	int __kvm_at_err = 0;						\
	u64 spsr, elr;							\
	asm volatile(							\
	"	mrs	%1, spsr_el2\n"					\
	"	mrs	%2, elr_el2\n"					\
	"1:	at	"at_op", %3\n"					\
	"	isb\n"							\
	"	b	9f\n"						\
	"2:	msr	spsr_el2, %1\n"					\
	"	msr	elr_el2, %2\n"					\
	"	mov	%w0, %4\n"					\
	"9:\n"								\
	__KVM_EXTABLE(1b, 2b)						\
	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)		\
	: "r" (addr), "i" (-EFAULT));					\
	__kvm_at_err;							\
} )
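
/*
 * Usage sketch ('far' is a hypothetical faulting VA): run a stage-1
 * translation at EL2; if the AT instruction itself takes an exception,
 * the extable fixup restores SPSR_EL2/ELR_EL2 and yields -EFAULT, so the
 * result in PAR_EL1 is only read on success.
 *
 *	u64 par;
 *	if (!__kvm_at("s1e1r", far))
 *		par = read_sysreg(par_el1);
 */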

#else /* __ASSEMBLY__ */

.macro get_host_ctxt reg, tmp
	adr_this_cpu \reg, kvm_host_data, \tmp
	add	\reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
	get_host_ctxt \ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro get_loaded_vcpu vcpu, ctxt
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro set_loaded_vcpu vcpu, ctxt, tmp
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
	str	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

/*
 * KVM extable for unexpected exceptions.
 * Creates a struct kvm_exception_table_entry in a section that can be
 * mapped by EL2. The table is not sorted.
 *
 * The caller must ensure that x18 holds the hypervisor value, so that any
 * Shadow-Call-Stack instrumented code can write to it, and that SPSR_EL2
 * and ELR_EL2 are restored by the fixup.
 */
.macro _kvm_extable, from, to
	.pushsection	__kvm_ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
.endm
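
/*
 * Usage sketch (illustrative labels): pair a potentially-faulting
 * instruction with its fixup handler.
 *
 *	1:	ldr	x0, [x1]	// may take an unexpected exception
 *		...
 *	2:	...			// fixup code runs instead
 *	_kvm_extable	1b, 2b
 */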

#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*x)
#define CPU_LR_OFFSET		CPU_XREG_OFFSET(30)
#define CPU_SP_EL0_OFFSET	(CPU_LR_OFFSET + 8)

/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
.macro save_callee_saved_regs ctxt
	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	// \ctxt must not be one of x18-x28
	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro save_sp_el0 ctxt, tmp
	mrs	\tmp, sp_el0
	str	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
	ldr	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
	msr	sp_el0, \tmp
.endm

#endif

#endif /* __ARM_KVM_ASM_H__ */