/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

#define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |	\
					  MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)

#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002

struct kvm_event_hw_type_mapping {
	u8 eventsel;
	u8 unit_mask;
	unsigned event_type;
};

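/*
 * Vendor-specific PMU callbacks, implemented separately for Intel and AMD
 * (see intel_pmu_ops/amd_pmu_ops below) and invoked by the common PMU code.
 */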
struct kvm_pmu_ops {
	bool (*hw_event_available)(struct kvm_pmc *pmc);
	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
					    unsigned int idx, u64 *mask);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
	bool (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
	void (*cleanup)(struct kvm_vcpu *vcpu);
};

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);

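/* Return the mask of valid bits for @pmc, based on its type (GP or fixed). */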
static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}

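/*
 * Read the current counter value, folding in the count accumulated by the
 * backing perf_event (if one is active and not paused) and truncating the
 * result to the counter's width.
 */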
static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;
	if (pmc->perf_event && !pmc->is_paused)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}

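/* Release the host perf_event backing @pmc, if any, and forget its config. */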
static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
		pmc->current_config = 0;
		pmc_to_pmu(pmc)->event_count--;
	}
}

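/* Snapshot the counter value before releasing the backing perf_event. */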
static inline void pmc_stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = pmc_read_counter(pmc);
		pmc_release_perf_event(pmc);
	}
}

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

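/* A write to PERF_GLOBAL_CTRL is valid iff it sets none of the masked bits. */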
static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
					      u64 data)
{
	return !(pmu->global_ctrl_mask & data);
}

/* returns general purpose PMC with the specified MSR. Note that it can be
 * used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
 * parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}

/* returns fixed PMC with the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}

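/*
 * Convert a counter value into the perf sample period, i.e. the number of
 * events remaining until the counter overflows its width.
 */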
static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
{
	u64 sample_period = (-counter_value) & pmc_bitmask(pmc);

	if (!sample_period)
		sample_period = pmc_bitmask(pmc) + 1;
	return sample_period;
}

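/* Propagate the current counter value to the backing perf_event's period. */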
static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event || pmc->is_paused)
		return;

	perf_event_period(pmc->perf_event,
			  get_sample_period(pmc, pmc->counter));
}

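/*
 * Check whether the guest has enabled the counter via its control MSR (the
 * enable bits in FIXED_CTR_CTRL for fixed counters, the EN bit in EVNTSEL
 * for general purpose counters), regardless of whether a perf_event has
 * actually been created for it.
 */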
static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
					pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}

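/* Host PMU capabilities, as reported by perf and clamped by kvm_init_pmu_capability(). */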
extern struct x86_pmu_capability kvm_pmu_cap;

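/*
 * Snapshot the host PMU capabilities from perf, disable the PMU entirely if
 * the host lacks a usable one (no architectural PMU version on Intel, or no
 * general purpose counters), and clamp what KVM will expose to guests.
 */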
static inline void kvm_init_pmu_capability(void)
{
	bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;

	perf_get_x86_pmu_capability(&kvm_pmu_cap);

	/*
	 * For Intel, only support guest architectural pmu
	 * on a host with architectural pmu.
	 */
	if ((is_intel && !kvm_pmu_cap.version) || !kvm_pmu_cap.num_counters_gp)
		enable_pmu = false;

	if (!enable_pmu) {
		memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
		return;
	}

	kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
	kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
					     KVM_PMC_MAX_FIXED);
}

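/* Common PMU entry points, dispatched to the vendor kvm_pmu_ops as needed. */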
void reprogram_counter(struct kvm_pmc *pmc);

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */