blob: 87927f7e1ee7072f714958ded4ff2da1126b8fa5 [file] [log] [blame]
Thomas Gleixnercaab2772019-06-03 07:44:50 +02001// SPDX-License-Identifier: GPL-2.0-only
Marc Zyngieraa024c22013-01-20 18:28:13 -05002/*
3 * Copyright (C) 2012 - ARM Ltd
4 * Author: Marc Zyngier <marc.zyngier@arm.com>
Marc Zyngieraa024c22013-01-20 18:28:13 -05005 */
6
Marc Zyngier09e6be12018-02-06 17:56:12 +00007#include <linux/arm-smccc.h>
Christoffer Dallcf5d31882014-10-16 17:00:18 +02008#include <linux/preempt.h>
Marc Zyngieraa024c22013-01-20 18:28:13 -05009#include <linux/kvm_host.h>
Marc Zyngier85bd0ba2018-01-21 16:42:56 +000010#include <linux/uaccess.h>
Marc Zyngieraa024c22013-01-20 18:28:13 -050011#include <linux/wait.h>
12
Marc Zyngier79c64882013-10-18 18:19:03 +010013#include <asm/cputype.h>
Marc Zyngieraa024c22013-01-20 18:28:13 -050014#include <asm/kvm_emulate.h>
Andre Przywara4429fc62014-06-02 15:37:13 +020015#include <asm/kvm_host.h>
Marc Zyngieraa024c22013-01-20 18:28:13 -050016
Marc Zyngier1a2fb942018-02-06 17:56:08 +000017#include <kvm/arm_psci.h>
18
Marc Zyngieraa024c22013-01-20 18:28:13 -050019/*
20 * This is an implementation of the Power State Coordination Interface
21 * as described in ARM document number ARM DEN 0022A.
22 */
23
Anup Patele6bc13c82014-04-29 11:24:21 +053024#define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
25
/* SMCCC: the function ID of a guest HVC call is passed in r0/x0. */
static u32 smccc_get_function(struct kvm_vcpu *vcpu)
{
	return vcpu_get_reg(vcpu, 0);
}
30
/* First SMCCC argument, from guest r1/x1. */
static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
{
	return vcpu_get_reg(vcpu, 1);
}
35
/* Second SMCCC argument, from guest r2/x2. */
static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
{
	return vcpu_get_reg(vcpu, 2);
}
40
/* Third SMCCC argument, from guest r3/x3. */
static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
{
	return vcpu_get_reg(vcpu, 3);
}
45
/*
 * Write the four SMCCC result registers (r0-r3 / x0-x3) back into the
 * guest's register file before returning to it.
 */
static void smccc_set_retval(struct kvm_vcpu *vcpu,
			     unsigned long a0,
			     unsigned long a1,
			     unsigned long a2,
			     unsigned long a3)
{
	vcpu_set_reg(vcpu, 0, a0);
	vcpu_set_reg(vcpu, 1, a1);
	vcpu_set_reg(vcpu, 2, a2);
	vcpu_set_reg(vcpu, 3, a3);
}
57
Anup Patele6bc13c82014-04-29 11:24:21 +053058static unsigned long psci_affinity_mask(unsigned long affinity_level)
59{
60 if (affinity_level <= 3)
61 return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);
62
63 return 0;
64}
65
static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	/*
	 * NOTE: For simplicity, we make VCPU suspend emulation the same
	 * as WFI (Wait-for-interrupt) emulation.
	 *
	 * This means for KVM the wakeup events are interrupts and
	 * this is consistent with intended use of StateID as described
	 * in section 5.4.1 of PSCI v0.2 specification (ARM DEN 0022A).
	 *
	 * Further, we also treat power-down request to be the same as
	 * a stand-by request, as per section 5.4.2 clause 3 of PSCI v0.2
	 * specification (ARM DEN 0022A). This means all suspend states
	 * for KVM will preserve the register state.
	 */
	kvm_vcpu_block(vcpu);
	/* Consume the UNHALT request raised when the block is woken. */
	kvm_clear_request(KVM_REQ_UNHALT, vcpu);

	return PSCI_RET_SUCCESS;
}
86
/*
 * Power off the calling VCPU: mark it powered off, queue a SLEEP
 * request and kick it so a running VCPU notices the request promptly.
 * The flag is set before the request/kick so the sleeping path sees it.
 */
static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}
93
/*
 * Handle PSCI CPU_ON from @source_vcpu: locate the target VCPU by
 * MPIDR, record its boot state (entry point, context id, endianness)
 * and wake it up. Caller holds kvm->lock (see the call sites).
 */
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
	struct vcpu_reset_state *reset_state;
	struct kvm *kvm = source_vcpu->kvm;
	struct kvm_vcpu *vcpu = NULL;
	unsigned long cpu_id;

	cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
	if (vcpu_mode_is_32bit(source_vcpu))
		/* A 32-bit caller can only pass a 32-bit target MPIDR. */
		cpu_id &= ~((u32) 0);

	vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);

	/*
	 * Make sure the caller requested a valid CPU and that the CPU is
	 * turned off.
	 */
	if (!vcpu)
		return PSCI_RET_INVALID_PARAMS;
	if (!vcpu->arch.power_off) {
		/* PSCI 0.1 predates the ALREADY_ON error code. */
		if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
			return PSCI_RET_ALREADY_ON;
		else
			return PSCI_RET_INVALID_PARAMS;
	}

	reset_state = &vcpu->arch.reset_state;

	/* Entry point requested by the caller (arg2 of CPU_ON). */
	reset_state->pc = smccc_get_arg2(source_vcpu);

	/* Propagate caller endianness */
	reset_state->be = kvm_vcpu_is_be(source_vcpu);

	/*
	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
	 * the general purpose registers are undefined upon CPU_ON.
	 */
	reset_state->r0 = smccc_get_arg3(source_vcpu);

	WRITE_ONCE(reset_state->reset, true);
	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);

	/*
	 * Make sure the reset request is observed if the change to
	 * power_state is observed.
	 */
	smp_wmb();

	vcpu->arch.power_off = false;
	kvm_vcpu_wake_up(vcpu);

	return PSCI_RET_SUCCESS;
}
147
Anup Patele6bc13c82014-04-29 11:24:21 +0530148static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
149{
Alexander Spyridakis0c067292015-09-04 17:06:24 +0200150 int i, matching_cpus = 0;
Anup Patele6bc13c82014-04-29 11:24:21 +0530151 unsigned long mpidr;
152 unsigned long target_affinity;
153 unsigned long target_affinity_mask;
154 unsigned long lowest_affinity_level;
155 struct kvm *kvm = vcpu->kvm;
156 struct kvm_vcpu *tmp;
157
Marc Zyngier84684fe2018-02-06 17:56:10 +0000158 target_affinity = smccc_get_arg1(vcpu);
159 lowest_affinity_level = smccc_get_arg2(vcpu);
Anup Patele6bc13c82014-04-29 11:24:21 +0530160
161 /* Determine target affinity mask */
162 target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
163 if (!target_affinity_mask)
164 return PSCI_RET_INVALID_PARAMS;
165
166 /* Ignore other bits of target affinity */
167 target_affinity &= target_affinity_mask;
168
169 /*
170 * If one or more VCPU matching target affinity are running
171 * then ON else OFF
172 */
173 kvm_for_each_vcpu(i, tmp, kvm) {
Andre Przywara4429fc62014-06-02 15:37:13 +0200174 mpidr = kvm_vcpu_get_mpidr_aff(tmp);
Alexander Spyridakis0c067292015-09-04 17:06:24 +0200175 if ((mpidr & target_affinity_mask) == target_affinity) {
176 matching_cpus++;
Eric Auger37815282015-09-25 23:41:14 +0200177 if (!tmp->arch.power_off)
Alexander Spyridakis0c067292015-09-04 17:06:24 +0200178 return PSCI_0_2_AFFINITY_LEVEL_ON;
Anup Patele6bc13c82014-04-29 11:24:21 +0530179 }
180 }
181
Alexander Spyridakis0c067292015-09-04 17:06:24 +0200182 if (!matching_cpus)
183 return PSCI_RET_INVALID_PARAMS;
184
Anup Patele6bc13c82014-04-29 11:24:21 +0530185 return PSCI_0_2_AFFINITY_LEVEL_OFF;
186}
187
/*
 * Park every VCPU and report a system event (shutdown/reset) of @type
 * to userspace through the current VCPU's run structure.
 */
static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
	int i;
	struct kvm_vcpu *tmp;

	/*
	 * The KVM ABI specifies that a system event exit may call KVM_RUN
	 * again and may perform shutdown/reboot at a later time than when
	 * the actual request is made. Since we are implementing PSCI and a
	 * caller of PSCI reboot and shutdown expects that the system shuts
	 * down or reboots immediately, let's make sure that VCPUs are not
	 * run after this call is handled and before the VCPUs have been
	 * re-initialized.
	 */
	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
		tmp->arch.power_off = true;
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
	vcpu->run->system_event.type = type;
	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
210
/* PSCI SYSTEM_OFF: hand a shutdown event to userspace. */
static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
}
215
/* PSCI SYSTEM_RESET: hand a reset event to userspace. */
static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
}
220
/*
 * Handle a PSCI v0.2 function call from the guest. Returns > 0 to
 * resume the guest, 0 to exit to userspace (system events).
 */
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u32 psci_fn = smccc_get_function(vcpu);
	unsigned long val;
	int ret = 1;

	switch (psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		/*
		 * Bits[31:16] = Major Version = 0
		 * Bits[15:0] = Minor Version = 2
		 */
		val = KVM_ARM_PSCI_0_2;
		break;
	case PSCI_0_2_FN_CPU_SUSPEND:
	case PSCI_0_2_FN64_CPU_SUSPEND:
		val = kvm_psci_vcpu_suspend(vcpu);
		break;
	case PSCI_0_2_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case PSCI_0_2_FN_CPU_ON:
	case PSCI_0_2_FN64_CPU_ON:
		/* kvm->lock serializes concurrent CPU_ON requests. */
		mutex_lock(&kvm->lock);
		val = kvm_psci_vcpu_on(vcpu);
		mutex_unlock(&kvm->lock);
		break;
	case PSCI_0_2_FN_AFFINITY_INFO:
	case PSCI_0_2_FN64_AFFINITY_INFO:
		val = kvm_psci_vcpu_affinity_info(vcpu);
		break;
	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
		/*
		 * Trusted OS is MP hence does not require migration
		 * or
		 * Trusted OS is not present
		 */
		val = PSCI_0_2_TOS_MP;
		break;
	case PSCI_0_2_FN_SYSTEM_OFF:
		kvm_psci_system_off(vcpu);
		/*
		 * We shouldn't be going back to the guest VCPU after
		 * receiving a SYSTEM_OFF request.
		 *
		 * If user space accidentally/deliberately resumes the
		 * guest VCPU after a SYSTEM_OFF request then the guest
		 * VCPU should see an internal failure from the PSCI
		 * return value. To achieve this, we preload r0 (or x0)
		 * with PSCI return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	case PSCI_0_2_FN_SYSTEM_RESET:
		kvm_psci_system_reset(vcpu);
		/*
		 * Same reason as SYSTEM_OFF for preloading r0 (or x0)
		 * with PSCI return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return ret;
}
294
/*
 * Handle a PSCI v1.0 function call. Only PSCI_VERSION and
 * PSCI_FEATURES differ from v0.2; everything else is delegated to
 * kvm_psci_0_2_call().
 */
static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
{
	u32 psci_fn = smccc_get_function(vcpu);
	u32 feature;
	unsigned long val;
	int ret = 1;

	switch(psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		val = KVM_ARM_PSCI_1_0;
		break;
	case PSCI_1_0_FN_PSCI_FEATURES:
		feature = smccc_get_arg1(vcpu);
		switch(feature) {
		/* All the functions this implementation supports: */
		case PSCI_0_2_FN_PSCI_VERSION:
		case PSCI_0_2_FN_CPU_SUSPEND:
		case PSCI_0_2_FN64_CPU_SUSPEND:
		case PSCI_0_2_FN_CPU_OFF:
		case PSCI_0_2_FN_CPU_ON:
		case PSCI_0_2_FN64_CPU_ON:
		case PSCI_0_2_FN_AFFINITY_INFO:
		case PSCI_0_2_FN64_AFFINITY_INFO:
		case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
		case PSCI_0_2_FN_SYSTEM_OFF:
		case PSCI_0_2_FN_SYSTEM_RESET:
		case PSCI_1_0_FN_PSCI_FEATURES:
		case ARM_SMCCC_VERSION_FUNC_ID:
			val = 0;
			break;
		default:
			val = PSCI_RET_NOT_SUPPORTED;
			break;
		}
		break;
	default:
		return kvm_psci_0_2_call(vcpu);
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return ret;
}
336
/*
 * Handle a (KVM-specific) PSCI v0.1 function call. Only CPU_OFF and
 * CPU_ON are implemented; always resumes the guest (returns 1).
 */
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u32 psci_fn = smccc_get_function(vcpu);
	unsigned long val;

	switch (psci_fn) {
	case KVM_PSCI_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case KVM_PSCI_FN_CPU_ON:
		/* kvm->lock serializes concurrent CPU_ON requests. */
		mutex_lock(&kvm->lock);
		val = kvm_psci_vcpu_on(vcpu);
		mutex_unlock(&kvm->lock);
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return 1;
}
361
362/**
363 * kvm_psci_call - handle PSCI call if r0 value is in range
364 * @vcpu: Pointer to the VCPU struct
365 *
Dave P Martin24a7f672013-05-01 17:49:28 +0100366 * Handle PSCI calls from guests through traps from HVC instructions.
Anup Patele8e7fcc2014-04-29 11:24:18 +0530367 * The calling convention is similar to SMC calls to the secure world
368 * where the function number is placed in r0.
369 *
370 * This function returns: > 0 (success), 0 (success but exit to user
371 * space), and < 0 (errors)
372 *
373 * Errors:
374 * -EINVAL: Unrecognized PSCI function
Marc Zyngieraa024c22013-01-20 18:28:13 -0500375 */
Marc Zyngier09e6be12018-02-06 17:56:12 +0000376static int kvm_psci_call(struct kvm_vcpu *vcpu)
Marc Zyngieraa024c22013-01-20 18:28:13 -0500377{
Marc Zyngiera4097b32018-02-06 17:56:13 +0000378 switch (kvm_psci_version(vcpu, vcpu->kvm)) {
Marc Zyngier58e0b222018-02-06 17:56:11 +0000379 case KVM_ARM_PSCI_1_0:
380 return kvm_psci_1_0_call(vcpu);
Anup Patel7d0f84a2014-04-29 11:24:16 +0530381 case KVM_ARM_PSCI_0_2:
382 return kvm_psci_0_2_call(vcpu);
383 case KVM_ARM_PSCI_0_1:
384 return kvm_psci_0_1_call(vcpu);
Marc Zyngieraa024c22013-01-20 18:28:13 -0500385 default:
Anup Patele8e7fcc2014-04-29 11:24:18 +0530386 return -EINVAL;
Anup Patel7d0f84a2014-04-29 11:24:16 +0530387 };
Marc Zyngieraa024c22013-01-20 18:28:13 -0500388}
Marc Zyngier09e6be12018-02-06 17:56:12 +0000389
/*
 * Top-level handler for guest HVC calls: answers the SMCCC v1.1
 * version/feature queries (including the Spectre/SSBD workaround
 * discovery functions) and falls back to PSCI for everything else.
 * Returns 1 to resume the guest, or kvm_psci_call()'s result.
 */
int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
{
	u32 func_id = smccc_get_function(vcpu);
	u32 val = SMCCC_RET_NOT_SUPPORTED;
	u32 feature;

	switch (func_id) {
	case ARM_SMCCC_VERSION_FUNC_ID:
		val = ARM_SMCCC_VERSION_1_1;
		break;
	case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
		feature = smccc_get_arg1(vcpu);
		switch(feature) {
		case ARM_SMCCC_ARCH_WORKAROUND_1:
			switch (kvm_arm_harden_branch_predictor()) {
			case KVM_BP_HARDEN_UNKNOWN:
				/* val stays NOT_SUPPORTED */
				break;
			case KVM_BP_HARDEN_WA_NEEDED:
				val = SMCCC_RET_SUCCESS;
				break;
			case KVM_BP_HARDEN_NOT_REQUIRED:
				val = SMCCC_RET_NOT_REQUIRED;
				break;
			}
			break;
		case ARM_SMCCC_ARCH_WORKAROUND_2:
			switch (kvm_arm_have_ssbd()) {
			case KVM_SSBD_FORCE_DISABLE:
			case KVM_SSBD_UNKNOWN:
				/* val stays NOT_SUPPORTED */
				break;
			case KVM_SSBD_KERNEL:
				val = SMCCC_RET_SUCCESS;
				break;
			case KVM_SSBD_FORCE_ENABLE:
			case KVM_SSBD_MITIGATED:
				val = SMCCC_RET_NOT_REQUIRED;
				break;
			}
			break;
		}
		break;
	default:
		return kvm_psci_call(vcpu);
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return 1;
}
Marc Zyngier85bd0ba2018-01-21 16:42:56 +0000438
/* Number of firmware pseudo-registers exposed to userspace. */
int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
{
	return 3;		/* PSCI version and two workaround registers */
}
443
444int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
445{
Andre Przywara99adb5672019-05-03 15:27:49 +0100446 if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices++))
447 return -EFAULT;
448
449 if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1, uindices++))
450 return -EFAULT;
451
452 if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2, uindices++))
Marc Zyngier85bd0ba2018-01-21 16:42:56 +0000453 return -EFAULT;
454
455 return 0;
456}
457
/* Width/mask of the workaround-level field within a firmware register. */
#define KVM_REG_FEATURE_LEVEL_WIDTH	4
#define KVM_REG_FEATURE_LEVEL_MASK	(BIT(KVM_REG_FEATURE_LEVEL_WIDTH) - 1)

/*
 * Convert the workaround level into an easy-to-compare number, where higher
 * values mean better protection.
 */
static int get_kernel_wa_level(u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
		switch (kvm_arm_harden_branch_predictor()) {
		case KVM_BP_HARDEN_UNKNOWN:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
		case KVM_BP_HARDEN_WA_NEEDED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
		case KVM_BP_HARDEN_NOT_REQUIRED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
		}
		/* Unreachable for valid enum values; be conservative. */
		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		switch (kvm_arm_have_ssbd()) {
		case KVM_SSBD_FORCE_DISABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
		case KVM_SSBD_KERNEL:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL;
		case KVM_SSBD_FORCE_ENABLE:
		case KVM_SSBD_MITIGATED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
		case KVM_SSBD_UNKNOWN:
		default:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN;
		}
	}

	return -EINVAL;
}
495
/*
 * KVM_GET_ONE_REG handler for the firmware pseudo-registers. Returns
 * 0 on success, -ENOENT for an unknown register, -EFAULT on a failed
 * copy to userspace.
 */
int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	switch (reg->id) {
	case KVM_REG_ARM_PSCI_VERSION:
		val = kvm_psci_version(vcpu, vcpu->kvm);
		break;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
		val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
		break;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;

		/* Report the per-vcpu enable bit only when the WA is AVAIL. */
		if (val == KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
		    kvm_arm_get_vcpu_workaround_2_flag(vcpu))
			val |= KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
524
/*
 * KVM_SET_ONE_REG handler for the firmware pseudo-registers, used by
 * userspace (e.g. for migration). A workaround register may only be
 * set to a level at or below what this kernel provides. Returns 0 on
 * success, -ENOENT for an unknown register, -EFAULT on a failed copy
 * from userspace, -EINVAL for an invalid value.
 */
int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int wa_level;

	if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg->id) {
	case KVM_REG_ARM_PSCI_VERSION:
	{
		bool wants_02;

		/* Was the VCPU created with PSCI >= 0.2 requested? */
		wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);

		switch (val) {
		case KVM_ARM_PSCI_0_1:
			if (wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		case KVM_ARM_PSCI_0_2:
		case KVM_ARM_PSCI_1_0:
			if (!wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		}
		break;
	}

	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
		if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
			return -EINVAL;

		/* Refuse levels better than what this kernel offers. */
		if (get_kernel_wa_level(reg->id) < val)
			return -EINVAL;

		return 0;

	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		if (val & ~(KVM_REG_FEATURE_LEVEL_MASK |
			    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
			return -EINVAL;

		wa_level = val & KVM_REG_FEATURE_LEVEL_MASK;

		/* Refuse levels better than what this kernel offers. */
		if (get_kernel_wa_level(reg->id) < wa_level)
			return -EINVAL;

		/* The enabled bit must not be set unless the level is AVAIL. */
		if (wa_level != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
		    wa_level != val)
			return -EINVAL;

		/* Are we finished or do we need to check the enable bit ? */
		if (kvm_arm_have_ssbd() != KVM_SSBD_KERNEL)
			return 0;

		/*
		 * If this kernel supports the workaround to be switched on
		 * or off, make sure it matches the requested setting.
		 */
		switch (wa_level) {
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
			kvm_arm_set_vcpu_workaround_2_flag(vcpu,
			    val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED);
			break;
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
			kvm_arm_set_vcpu_workaround_2_flag(vcpu, true);
			break;
		}

		return 0;
	default:
		return -ENOENT;
	}

	return -EINVAL;
}