// SPDX-License-Identifier: GPL-2.0
/* Marvell CN10K LLC-TAD perf driver
 *
 * Copyright (C) 2021 Marvell
 */

#define pr_fmt(fmt) "tad_pmu: " fmt

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/cpuhotplug.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>

#define TAD_PFC_OFFSET 0x800
#define TAD_PFC(counter) (TAD_PFC_OFFSET | (counter << 3))
#define TAD_PRF_OFFSET 0x900
#define TAD_PRF(counter) (TAD_PRF_OFFSET | (counter << 3))
#define TAD_PRF_CNTSEL_MASK 0xFF
#define TAD_MAX_COUNTERS 8

#define to_tad_pmu(p) (container_of(p, struct tad_pmu, pmu))

struct tad_region {
	void __iomem *base;
};

struct tad_pmu {
	struct pmu pmu;
	struct tad_region *regions;
	u32 region_cnt;
	unsigned int cpu;
	struct hlist_node node;
	struct perf_event *events[TAD_MAX_COUNTERS];
	DECLARE_BITMAP(counters_map, TAD_MAX_COUNTERS);
};

static int tad_pmu_cpuhp_state;

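/*
 * Read the event count by summing the per-region TAD_PFC() counters and
 * accumulating the delta since the previous read.
 */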
static void tad_pmu_event_counter_read(struct perf_event *event)
{
	struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u32 counter_idx = hwc->idx;
	u64 prev, new;
	int i;

	do {
		prev = local64_read(&hwc->prev_count);
		for (i = 0, new = 0; i < tad_pmu->region_cnt; i++)
			new += readq(tad_pmu->regions[i].base +
				     TAD_PFC(counter_idx));
	} while (local64_cmpxchg(&hwc->prev_count, prev, new) != prev);

	local64_add(new - prev, &event->count);
}

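/* Stop counting on all TAD regions and fold in the final count */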
static void tad_pmu_event_counter_stop(struct perf_event *event, int flags)
{
	struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u32 counter_idx = hwc->idx;
	int i;

	/* TAD()_PFC() stops counting on the write
	 * which sets TAD()_PRF()[CNTSEL] == 0
	 */
	for (i = 0; i < tad_pmu->region_cnt; i++) {
		writeq_relaxed(0, tad_pmu->regions[i].base +
			       TAD_PRF(counter_idx));
	}

	tad_pmu_event_counter_read(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static void tad_pmu_event_counter_start(struct perf_event *event, int flags)
{
	struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u32 event_idx = event->attr.config;
	u32 counter_idx = hwc->idx;
	u64 reg_val;
	int i;

	hwc->state = 0;

	/* Typically TAD_PFC() are zeroed to start counting */
	for (i = 0; i < tad_pmu->region_cnt; i++)
		writeq_relaxed(0, tad_pmu->regions[i].base +
			       TAD_PFC(counter_idx));

	/* TAD()_PFC() starts counting on the write
	 * which sets TAD()_PRF()[CNTSEL] != 0
	 */
	for (i = 0; i < tad_pmu->region_cnt; i++) {
		reg_val = event_idx & 0xFF;
		writeq_relaxed(reg_val, tad_pmu->regions[i].base +
			       TAD_PRF(counter_idx));
	}
}

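/* Stop the event and release its hardware counter slot */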
static void tad_pmu_event_counter_del(struct perf_event *event, int flags)
{
	struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	tad_pmu_event_counter_stop(event, flags | PERF_EF_UPDATE);
	tad_pmu->events[idx] = NULL;
	clear_bit(idx, tad_pmu->counters_map);
}

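/* Claim a free hardware counter for the event and start it if requested */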
static int tad_pmu_event_counter_add(struct perf_event *event, int flags)
{
	struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	/* Get a free counter for this event */
	idx = find_first_zero_bit(tad_pmu->counters_map, TAD_MAX_COUNTERS);
	if (idx == TAD_MAX_COUNTERS)
		return -EAGAIN;

	set_bit(idx, tad_pmu->counters_map);

	hwc->idx = idx;
	hwc->state = PERF_HES_STOPPED;
	tad_pmu->events[idx] = event;

	if (flags & PERF_EF_START)
		tad_pmu_event_counter_start(event, flags);

	return 0;
}

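/* Validate the event and bind it to the CPU that owns this PMU */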
static int tad_pmu_event_init(struct perf_event *event)
{
	struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (!event->attr.disabled)
		return -EINVAL;

	if (event->state != PERF_EVENT_STATE_OFF)
		return -EINVAL;

	event->cpu = tad_pmu->cpu;
	event->hw.idx = -1;
	event->hw.config_base = event->attr.config;

	return 0;
}

static ssize_t tad_pmu_event_show(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}

#define TAD_PMU_EVENT_ATTR(name, config) \
	PMU_EVENT_ATTR_ID(name, tad_pmu_event_show, config)

static struct attribute *tad_pmu_event_attrs[] = {
	TAD_PMU_EVENT_ATTR(tad_none, 0x0),
	TAD_PMU_EVENT_ATTR(tad_req_msh_in_any, 0x1),
	TAD_PMU_EVENT_ATTR(tad_req_msh_in_mn, 0x2),
	TAD_PMU_EVENT_ATTR(tad_req_msh_in_exlmn, 0x3),
	TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_any, 0x4),
	TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_mn, 0x5),
	TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_exlmn, 0x6),
	TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_dss, 0x7),
	TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_retry_dss, 0x8),
	TAD_PMU_EVENT_ATTR(tad_dat_msh_in_any, 0x9),
	TAD_PMU_EVENT_ATTR(tad_dat_msh_in_dss, 0xa),
	TAD_PMU_EVENT_ATTR(tad_req_msh_out_any, 0xb),
	TAD_PMU_EVENT_ATTR(tad_req_msh_out_dss_rd, 0xc),
	TAD_PMU_EVENT_ATTR(tad_req_msh_out_dss_wr, 0xd),
	TAD_PMU_EVENT_ATTR(tad_req_msh_out_evict, 0xe),
	TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_any, 0xf),
	TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_retry_exlmn, 0x10),
	TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_retry_mn, 0x11),
	TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_exlmn, 0x12),
	TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_mn, 0x13),
	TAD_PMU_EVENT_ATTR(tad_snp_msh_out_any, 0x14),
	TAD_PMU_EVENT_ATTR(tad_snp_msh_out_mn, 0x15),
	TAD_PMU_EVENT_ATTR(tad_snp_msh_out_exlmn, 0x16),
	TAD_PMU_EVENT_ATTR(tad_dat_msh_out_any, 0x17),
	TAD_PMU_EVENT_ATTR(tad_dat_msh_out_fill, 0x18),
	TAD_PMU_EVENT_ATTR(tad_dat_msh_out_dss, 0x19),
	TAD_PMU_EVENT_ATTR(tad_alloc_dtg, 0x1a),
	TAD_PMU_EVENT_ATTR(tad_alloc_ltg, 0x1b),
	TAD_PMU_EVENT_ATTR(tad_alloc_any, 0x1c),
	TAD_PMU_EVENT_ATTR(tad_hit_dtg, 0x1d),
	TAD_PMU_EVENT_ATTR(tad_hit_ltg, 0x1e),
	TAD_PMU_EVENT_ATTR(tad_hit_any, 0x1f),
	TAD_PMU_EVENT_ATTR(tad_tag_rd, 0x20),
	TAD_PMU_EVENT_ATTR(tad_dat_rd, 0x21),
	TAD_PMU_EVENT_ATTR(tad_dat_rd_byp, 0x22),
	TAD_PMU_EVENT_ATTR(tad_ifb_occ, 0x23),
	TAD_PMU_EVENT_ATTR(tad_req_occ, 0x24),
	NULL
};

static const struct attribute_group tad_pmu_events_attr_group = {
	.name = "events",
	.attrs = tad_pmu_event_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-7");

static struct attribute *tad_pmu_format_attrs[] = {
	&format_attr_event.attr,
	NULL
};

static struct attribute_group tad_pmu_format_attr_group = {
	.name = "format",
	.attrs = tad_pmu_format_attrs,
};

static ssize_t tad_pmu_cpumask_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct tad_pmu *tad_pmu = to_tad_pmu(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(tad_pmu->cpu));
}

static DEVICE_ATTR(cpumask, 0444, tad_pmu_cpumask_show, NULL);

static struct attribute *tad_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL
};

static struct attribute_group tad_pmu_cpumask_attr_group = {
	.attrs = tad_pmu_cpumask_attrs,
};

static const struct attribute_group *tad_pmu_attr_groups[] = {
	&tad_pmu_events_attr_group,
	&tad_pmu_format_attr_group,
	&tad_pmu_cpumask_attr_group,
	NULL
};

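/*
 * Map the PMU register page of each TAD region described by firmware
 * (DT or ACPI), then register the PMU and its hotplug instance.
 */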
static int tad_pmu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tad_region *regions;
	struct tad_pmu *tad_pmu;
	struct resource *res;
	u32 tad_pmu_page_size;
	u32 tad_page_size;
	u32 tad_cnt;
	int i, ret;
	char *name;

	tad_pmu = devm_kzalloc(&pdev->dev, sizeof(*tad_pmu), GFP_KERNEL);
	if (!tad_pmu)
		return -ENOMEM;

	platform_set_drvdata(pdev, tad_pmu);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Mem resource not found\n");
		return -ENODEV;
	}

	ret = device_property_read_u32(dev, "marvell,tad-page-size",
				       &tad_page_size);
	if (ret) {
		dev_err(&pdev->dev, "Can't find tad-page-size property\n");
		return ret;
	}

	ret = device_property_read_u32(dev, "marvell,tad-pmu-page-size",
				       &tad_pmu_page_size);
	if (ret) {
		dev_err(&pdev->dev, "Can't find tad-pmu-page-size property\n");
		return ret;
	}

	ret = device_property_read_u32(dev, "marvell,tad-cnt", &tad_cnt);
	if (ret) {
		dev_err(&pdev->dev, "Can't find tad-cnt property\n");
		return ret;
	}

	regions = devm_kcalloc(&pdev->dev, tad_cnt,
			       sizeof(*regions), GFP_KERNEL);
	if (!regions)
		return -ENOMEM;

	/* ioremap the distributed TAD pmu regions */
	for (i = 0; i < tad_cnt && res->start < res->end; i++) {
		regions[i].base = devm_ioremap(&pdev->dev,
					       res->start,
					       tad_pmu_page_size);
		if (!regions[i].base) {
			dev_err(&pdev->dev, "TAD%d ioremap fail\n", i);
			return -ENOMEM;
		}
		res->start += tad_page_size;
	}

	tad_pmu->regions = regions;
	tad_pmu->region_cnt = tad_cnt;

	tad_pmu->pmu = (struct pmu) {

		.module = THIS_MODULE,
		.attr_groups = tad_pmu_attr_groups,
		.capabilities = PERF_PMU_CAP_NO_EXCLUDE |
				PERF_PMU_CAP_NO_INTERRUPT,
		.task_ctx_nr = perf_invalid_context,

		.event_init = tad_pmu_event_init,
		.add = tad_pmu_event_counter_add,
		.del = tad_pmu_event_counter_del,
		.start = tad_pmu_event_counter_start,
		.stop = tad_pmu_event_counter_stop,
		.read = tad_pmu_event_counter_read,
	};

	tad_pmu->cpu = raw_smp_processor_id();

	/* Register pmu instance for cpu hotplug */
	ret = cpuhp_state_add_instance_nocalls(tad_pmu_cpuhp_state,
					       &tad_pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
		return ret;
	}

	name = "tad";
	ret = perf_pmu_register(&tad_pmu->pmu, name, -1);
	if (ret)
		cpuhp_state_remove_instance_nocalls(tad_pmu_cpuhp_state,
						    &tad_pmu->node);

	return ret;
}

static int tad_pmu_remove(struct platform_device *pdev)
{
	struct tad_pmu *pmu = platform_get_drvdata(pdev);

	cpuhp_state_remove_instance_nocalls(tad_pmu_cpuhp_state,
					    &pmu->node);
	perf_pmu_unregister(&pmu->pmu);

	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id tad_pmu_of_match[] = {
	{ .compatible = "marvell,cn10k-tad-pmu", },
	{},
};
#endif

#ifdef CONFIG_ACPI
static const struct acpi_device_id tad_pmu_acpi_match[] = {
	{"MRVL000B", 0},
	{},
};
MODULE_DEVICE_TABLE(acpi, tad_pmu_acpi_match);
#endif

static struct platform_driver tad_pmu_driver = {
	.driver = {
		.name = "cn10k_tad_pmu",
		.of_match_table = of_match_ptr(tad_pmu_of_match),
		.acpi_match_table = ACPI_PTR(tad_pmu_acpi_match),
		.suppress_bind_attrs = true,
	},
	.probe = tad_pmu_probe,
	.remove = tad_pmu_remove,
};

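/*
 * Migrate the perf context to another online CPU when the CPU that
 * owns this PMU goes offline.
 */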
static int tad_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct tad_pmu *pmu = hlist_entry_safe(node, struct tad_pmu, node);
	unsigned int target;

	if (cpu != pmu->cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&pmu->pmu, cpu, target);
	pmu->cpu = target;

	return 0;
}

static int __init tad_pmu_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/cn10k/tadpmu:online",
				      NULL,
				      tad_pmu_offline_cpu);
	if (ret < 0)
		return ret;
	tad_pmu_cpuhp_state = ret;
	ret = platform_driver_register(&tad_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(tad_pmu_cpuhp_state);

	return ret;
}

static void __exit tad_pmu_exit(void)
{
	platform_driver_unregister(&tad_pmu_driver);
	cpuhp_remove_multi_state(tad_pmu_cpuhp_state);
}

module_init(tad_pmu_init);
module_exit(tad_pmu_exit);

MODULE_DESCRIPTION("Marvell CN10K LLC-TAD Perf driver");
MODULE_AUTHOR("Bhaskara Budiredla <bbudiredla@marvell.com>");
MODULE_LICENSE("GPL v2");