| |
| /* SPDX-License-Identifier: GPL-2.0 */ |
| /* THIS FILE WAS AUTOGENERATED BY jevents.py arch=none model=none ! */ |
| |
#include <pmu-events/pmu-events.h>
#include "util/header.h"
#include "util/pmu.h"
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
| |
/*
 * A compressed event/metric record: a single byte offset into big_c_string
 * where the record's '\0'-separated fields begin.
 */
struct compact_pmu_event {
	int offset;
};
| |
/* All events or metrics belonging to one named PMU. */
struct pmu_table_entry {
	const struct compact_pmu_event *entries; /* sorted by name for binary search */
	uint32_t num_entries;
	struct compact_pmu_event pmu_name;       /* offset of the PMU's name string */
};
| |
/*
 * All strings for events, metrics and PMU names, concatenated into one pool.
 * Event records are '\0'-separated field lists (name, topic, desc, event,
 * compat, deprecated/perpkg digits, unit, long_desc — see decompress_event);
 * metric records use the layout read by decompress_metric. Offsets in the
 * tables below index into this string.
 */
static const char *const big_c_string =
/* offset=0 */ "default_core\000"
/* offset=13 */ "bp_l1_btb_correct\000branch\000L1 BTB Correction\000event=0x8a\000\00000\000\000"
/* offset=72 */ "bp_l2_btb_correct\000branch\000L2 BTB Correction\000event=0x8b\000\00000\000\000"
/* offset=131 */ "l3_cache_rd\000cache\000L3 cache access, read\000event=0x40\000\00000\000Attributable Level 3 cache access, read\000"
/* offset=226 */ "segment_reg_loads.any\000other\000Number of segment register loads\000event=6,period=200000,umask=0x80\000\00000\000\000"
/* offset=325 */ "dispatch_blocked.any\000other\000Memory cluster signals to block micro-op dispatch for any reason\000event=9,period=200000,umask=0x20\000\00000\000\000"
/* offset=455 */ "eist_trans\000other\000Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions\000event=0x3a,period=200000\000\00000\000\000"
/* offset=570 */ "hisi_sccl,ddrc\000"
/* offset=585 */ "uncore_hisi_ddrc.flux_wcmd\000uncore\000DDRC write commands\000event=2\000\00000\000DDRC write commands\000"
/* offset=671 */ "uncore_cbox\000"
/* offset=683 */ "unc_cbo_xsnp_response.miss_eviction\000uncore\000A cross-core snoop resulted from L3 Eviction which misses in some processor core\000event=0x22,umask=0x81\000\00000\000A cross-core snoop resulted from L3 Eviction which misses in some processor core\000"
/* offset=914 */ "event-hyphen\000uncore\000UNC_CBO_HYPHEN\000event=0xe0\000\00000\000UNC_CBO_HYPHEN\000"
/* offset=979 */ "event-two-hyph\000uncore\000UNC_CBO_TWO_HYPH\000event=0xc0\000\00000\000UNC_CBO_TWO_HYPH\000"
/* offset=1050 */ "hisi_sccl,l3c\000"
/* offset=1064 */ "uncore_hisi_l3c.rd_hit_cpipe\000uncore\000Total read hits\000event=7\000\00000\000Total read hits\000"
/* offset=1144 */ "uncore_imc_free_running\000"
/* offset=1168 */ "uncore_imc_free_running.cache_miss\000uncore\000Total cache misses\000event=0x12\000\00000\000Total cache misses\000"
/* offset=1263 */ "uncore_imc\000"
/* offset=1274 */ "uncore_imc.cache_hits\000uncore\000Total cache hits\000event=0x34\000\00000\000Total cache hits\000"
/* offset=1352 */ "uncore_sys_ddr_pmu\000"
/* offset=1371 */ "sys_ddr_pmu.write_cycles\000uncore\000ddr write-cycles event\000event=0x2b\000v8\00000\000\000"
/* offset=1444 */ "uncore_sys_ccn_pmu\000"
/* offset=1463 */ "sys_ccn_pmu.read_cycles\000uncore\000ccn read-cycles event\000config=0x2c\0000x01\00000\000\000"
/* offset=1537 */ "uncore_sys_cmn_pmu\000"
/* offset=1556 */ "sys_cmn_pmu.hnf_cache_miss\000uncore\000Counts total cache misses in first lookup result (high priority)\000eventid=1,type=5\000(434|436|43c|43a).*\00000\000\000"
/* offset=1696 */ "CPI\000\0001 / IPC\000\000\000\000\000\000\000\00000"
/* offset=1718 */ "IPC\000group1\000inst_retired.any / cpu_clk_unhalted.thread\000\000\000\000\000\000\000\00000"
/* offset=1781 */ "Frontend_Bound_SMT\000\000idq_uops_not_delivered.core / (4 * (cpu_clk_unhalted.thread / 2 * (1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk)))\000\000\000\000\000\000\000\00000"
/* offset=1947 */ "dcache_miss_cpi\000\000l1d\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\00000"
/* offset=2011 */ "icache_miss_cycles\000\000l1i\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\00000"
/* offset=2078 */ "cache_miss_cycles\000group1\000dcache_miss_cpi + icache_miss_cycles\000\000\000\000\000\000\000\00000"
/* offset=2149 */ "DCache_L2_All_Hits\000\000l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit\000\000\000\000\000\000\000\00000"
/* offset=2243 */ "DCache_L2_All_Miss\000\000max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + l2_rqsts.pf_miss + l2_rqsts.rfo_miss\000\000\000\000\000\000\000\00000"
/* offset=2377 */ "DCache_L2_All\000\000DCache_L2_All_Hits + DCache_L2_All_Miss\000\000\000\000\000\000\000\00000"
/* offset=2441 */ "DCache_L2_Hits\000\000d_ratio(DCache_L2_All_Hits, DCache_L2_All)\000\000\000\000\000\000\000\00000"
/* offset=2509 */ "DCache_L2_Misses\000\000d_ratio(DCache_L2_All_Miss, DCache_L2_All)\000\000\000\000\000\000\000\00000"
/* offset=2579 */ "M1\000\000ipc + M2\000\000\000\000\000\000\000\00000"
/* offset=2601 */ "M2\000\000ipc + M1\000\000\000\000\000\000\000\00000"
/* offset=2623 */ "M3\000\0001 / M3\000\000\000\000\000\000\000\00000"
/* offset=2643 */ "L1D_Cache_Fill_BW\000\00064 * l1d.replacement / 1e9 / duration_time\000\000\000\000\000\000\000\00000"
;
| |
/* Core events, sorted by event name (required by the binary search in
 * pmu_events_table__find_event_pmu). */
static const struct compact_pmu_event pmu_events__test_soc_cpu_default_core[] = {
{ 13 }, /* bp_l1_btb_correct\000branch\000L1 BTB Correction\000event=0x8a\000\00000\000\000 */
{ 72 }, /* bp_l2_btb_correct\000branch\000L2 BTB Correction\000event=0x8b\000\00000\000\000 */
{ 325 }, /* dispatch_blocked.any\000other\000Memory cluster signals to block micro-op dispatch for any reason\000event=9,period=200000,umask=0x20\000\00000\000\000 */
{ 455 }, /* eist_trans\000other\000Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions\000event=0x3a,period=200000\000\00000\000\000 */
{ 131 }, /* l3_cache_rd\000cache\000L3 cache access, read\000event=0x40\000\00000\000Attributable Level 3 cache access, read\000 */
{ 226 }, /* segment_reg_loads.any\000other\000Number of segment register loads\000event=6,period=200000,umask=0x80\000\00000\000\000 */
};
/* Events for the hisi_sccl,ddrc uncore PMU. */
static const struct compact_pmu_event pmu_events__test_soc_cpu_hisi_sccl_ddrc[] = {
{ 585 }, /* uncore_hisi_ddrc.flux_wcmd\000uncore\000DDRC write commands\000event=2\000\00000\000DDRC write commands\000 */
};
/* Events for the hisi_sccl,l3c uncore PMU. */
static const struct compact_pmu_event pmu_events__test_soc_cpu_hisi_sccl_l3c[] = {
{ 1064 }, /* uncore_hisi_l3c.rd_hit_cpipe\000uncore\000Total read hits\000event=7\000\00000\000Total read hits\000 */
};
/* Events for the uncore_cbox PMU, sorted by event name. */
static const struct compact_pmu_event pmu_events__test_soc_cpu_uncore_cbox[] = {
{ 914 }, /* event-hyphen\000uncore\000UNC_CBO_HYPHEN\000event=0xe0\000\00000\000UNC_CBO_HYPHEN\000 */
{ 979 }, /* event-two-hyph\000uncore\000UNC_CBO_TWO_HYPH\000event=0xc0\000\00000\000UNC_CBO_TWO_HYPH\000 */
{ 683 }, /* unc_cbo_xsnp_response.miss_eviction\000uncore\000A cross-core snoop resulted from L3 Eviction which misses in some processor core\000event=0x22,umask=0x81\000\00000\000A cross-core snoop resulted from L3 Eviction which misses in some processor core\000 */
};
/* Events for the uncore_imc PMU. */
static const struct compact_pmu_event pmu_events__test_soc_cpu_uncore_imc[] = {
{ 1274 }, /* uncore_imc.cache_hits\000uncore\000Total cache hits\000event=0x34\000\00000\000Total cache hits\000 */
};
/* Events for the uncore_imc_free_running PMU. */
static const struct compact_pmu_event pmu_events__test_soc_cpu_uncore_imc_free_running[] = {
{ 1168 }, /* uncore_imc_free_running.cache_miss\000uncore\000Total cache misses\000event=0x12\000\00000\000Total cache misses\000 */

};
| |
/* Per-PMU event tables for the test SoC, sorted by PMU name. */
const struct pmu_table_entry pmu_events__test_soc_cpu[] = {
{
     .entries = pmu_events__test_soc_cpu_default_core,
     .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_default_core),
     .pmu_name = { 0 /* default_core\000 */ },
},
{
     .entries = pmu_events__test_soc_cpu_hisi_sccl_ddrc,
     .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_hisi_sccl_ddrc),
     .pmu_name = { 570 /* hisi_sccl,ddrc\000 */ },
},
{
     .entries = pmu_events__test_soc_cpu_hisi_sccl_l3c,
     .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_hisi_sccl_l3c),
     .pmu_name = { 1050 /* hisi_sccl,l3c\000 */ },
},
{
     .entries = pmu_events__test_soc_cpu_uncore_cbox,
     .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_uncore_cbox),
     .pmu_name = { 671 /* uncore_cbox\000 */ },
},
{
     .entries = pmu_events__test_soc_cpu_uncore_imc,
     .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_uncore_imc),
     .pmu_name = { 1263 /* uncore_imc\000 */ },
},
{
     .entries = pmu_events__test_soc_cpu_uncore_imc_free_running,
     .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_uncore_imc_free_running),
     .pmu_name = { 1144 /* uncore_imc_free_running\000 */ },
},
};
| |
/* Core metrics, sorted by metric name. */
static const struct compact_pmu_event pmu_metrics__test_soc_cpu_default_core[] = {
{ 1696 }, /* CPI\000\0001 / IPC\000\000\000\000\000\000\000\00000 */
{ 2377 }, /* DCache_L2_All\000\000DCache_L2_All_Hits + DCache_L2_All_Miss\000\000\000\000\000\000\000\00000 */
{ 2149 }, /* DCache_L2_All_Hits\000\000l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit\000\000\000\000\000\000\000\00000 */
{ 2243 }, /* DCache_L2_All_Miss\000\000max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + l2_rqsts.pf_miss + l2_rqsts.rfo_miss\000\000\000\000\000\000\000\00000 */
{ 2441 }, /* DCache_L2_Hits\000\000d_ratio(DCache_L2_All_Hits, DCache_L2_All)\000\000\000\000\000\000\000\00000 */
{ 2509 }, /* DCache_L2_Misses\000\000d_ratio(DCache_L2_All_Miss, DCache_L2_All)\000\000\000\000\000\000\000\00000 */
{ 1781 }, /* Frontend_Bound_SMT\000\000idq_uops_not_delivered.core / (4 * (cpu_clk_unhalted.thread / 2 * (1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk)))\000\000\000\000\000\000\000\00000 */
{ 1718 }, /* IPC\000group1\000inst_retired.any / cpu_clk_unhalted.thread\000\000\000\000\000\000\000\00000 */
{ 2643 }, /* L1D_Cache_Fill_BW\000\00064 * l1d.replacement / 1e9 / duration_time\000\000\000\000\000\000\000\00000 */
{ 2579 }, /* M1\000\000ipc + M2\000\000\000\000\000\000\000\00000 */
{ 2601 }, /* M2\000\000ipc + M1\000\000\000\000\000\000\000\00000 */
{ 2623 }, /* M3\000\0001 / M3\000\000\000\000\000\000\000\00000 */
{ 2078 }, /* cache_miss_cycles\000group1\000dcache_miss_cpi + icache_miss_cycles\000\000\000\000\000\000\000\00000 */
{ 1947 }, /* dcache_miss_cpi\000\000l1d\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\00000 */
{ 2011 }, /* icache_miss_cycles\000\000l1i\\-loads\\-misses / inst_retired.any\000\000\000\000\000\000\000\00000 */

};
| |
/* Per-PMU metric tables for the test SoC; only the core PMU has metrics. */
const struct pmu_table_entry pmu_metrics__test_soc_cpu[] = {
{
     .entries = pmu_metrics__test_soc_cpu_default_core,
     .num_entries = ARRAY_SIZE(pmu_metrics__test_soc_cpu_default_core),
     .pmu_name = { 0 /* default_core\000 */ },
},
};
| |
/* System (SoC-wide) events for the uncore_sys_ccn_pmu PMU. */
static const struct compact_pmu_event pmu_events__test_soc_sys_uncore_sys_ccn_pmu[] = {
{ 1463 }, /* sys_ccn_pmu.read_cycles\000uncore\000ccn read-cycles event\000config=0x2c\0000x01\00000\000\000 */
};
/* System (SoC-wide) events for the uncore_sys_cmn_pmu PMU. */
static const struct compact_pmu_event pmu_events__test_soc_sys_uncore_sys_cmn_pmu[] = {
{ 1556 }, /* sys_cmn_pmu.hnf_cache_miss\000uncore\000Counts total cache misses in first lookup result (high priority)\000eventid=1,type=5\000(434|436|43c|43a).*\00000\000\000 */
};
/* System (SoC-wide) events for the uncore_sys_ddr_pmu PMU. */
static const struct compact_pmu_event pmu_events__test_soc_sys_uncore_sys_ddr_pmu[] = {
{ 1371 }, /* sys_ddr_pmu.write_cycles\000uncore\000ddr write-cycles event\000event=0x2b\000v8\00000\000\000 */

};
| |
/* Per-PMU system event tables, sorted by PMU name. */
const struct pmu_table_entry pmu_events__test_soc_sys[] = {
{
     .entries = pmu_events__test_soc_sys_uncore_sys_ccn_pmu,
     .num_entries = ARRAY_SIZE(pmu_events__test_soc_sys_uncore_sys_ccn_pmu),
     .pmu_name = { 1444 /* uncore_sys_ccn_pmu\000 */ },
},
{
     .entries = pmu_events__test_soc_sys_uncore_sys_cmn_pmu,
     .num_entries = ARRAY_SIZE(pmu_events__test_soc_sys_uncore_sys_cmn_pmu),
     .pmu_name = { 1537 /* uncore_sys_cmn_pmu\000 */ },
},
{
     .entries = pmu_events__test_soc_sys_uncore_sys_ddr_pmu,
     .num_entries = ARRAY_SIZE(pmu_events__test_soc_sys_uncore_sys_ddr_pmu),
     .pmu_name = { 1352 /* uncore_sys_ddr_pmu\000 */ },
},
};
| |
| |
/* Struct used to make the PMU event table implementation opaque to callers. */
struct pmu_events_table {
	const struct pmu_table_entry *pmus; /* one entry per PMU, sorted by name */
	uint32_t num_pmus;
};
| |
/* Struct used to make the PMU metric table implementation opaque to callers. */
struct pmu_metrics_table {
	const struct pmu_table_entry *pmus; /* one entry per PMU, sorted by name */
	uint32_t num_pmus;
};
| |
| /* |
| * Map a CPU to its table of PMU events. The CPU is identified by the |
| * cpuid field, which is an arch-specific identifier for the CPU. |
| * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile |
| * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c) |
| * |
| * The cpuid can contain any character other than the comma. |
| */ |
| struct pmu_events_map { |
| const char *arch; |
| const char *cpuid; |
| struct pmu_events_table event_table; |
| struct pmu_metrics_table metric_table; |
| }; |
| |
| /* |
| * Global table mapping each known CPU for the architecture to its |
| * table of PMU events. |
| */ |
| const struct pmu_events_map pmu_events_map[] = { |
| { |
| .arch = "testarch", |
| .cpuid = "testcpu", |
| .event_table = { |
| .pmus = pmu_events__test_soc_cpu, |
| .num_pmus = ARRAY_SIZE(pmu_events__test_soc_cpu), |
| }, |
| .metric_table = { |
| .pmus = pmu_metrics__test_soc_cpu, |
| .num_pmus = ARRAY_SIZE(pmu_metrics__test_soc_cpu), |
| } |
| }, |
| { |
| .arch = 0, |
| .cpuid = 0, |
| .event_table = { 0, 0 }, |
| .metric_table = { 0, 0 }, |
| } |
| }; |
| |
/* A named collection of system (SoC-wide, not per-CPU) event/metric tables. */
struct pmu_sys_events {
	const char *name;
	struct pmu_events_table event_table;
	struct pmu_metrics_table metric_table;
};
| |
/*
 * System event tables, terminated by a name == NULL sentinel. The first
 * entry's metric_table is implicitly zero-initialized (no sys metrics).
 */
static const struct pmu_sys_events pmu_sys_event_tables[] = {
	{
		.event_table = {
			.pmus = pmu_events__test_soc_sys,
			.num_pmus = ARRAY_SIZE(pmu_events__test_soc_sys)
		},
		.name = "pmu_events__test_soc_sys",
	},
	{
		/* Sentinel. */
		.event_table = { 0, 0 },
		.metric_table = { 0, 0 },
	},
};
| |
| static void decompress_event(int offset, struct pmu_event *pe) |
| { |
| const char *p = &big_c_string[offset]; |
| |
| pe->name = (*p == '\0' ? NULL : p); |
| while (*p++); |
| pe->topic = (*p == '\0' ? NULL : p); |
| while (*p++); |
| pe->desc = (*p == '\0' ? NULL : p); |
| while (*p++); |
| pe->event = (*p == '\0' ? NULL : p); |
| while (*p++); |
| pe->compat = (*p == '\0' ? NULL : p); |
| while (*p++); |
| pe->deprecated = *p - '0'; |
| p++; |
| pe->perpkg = *p - '0'; |
| p++; |
| pe->unit = (*p == '\0' ? NULL : p); |
| while (*p++); |
| pe->long_desc = (*p == '\0' ? NULL : p); |
| } |
| |
| static void decompress_metric(int offset, struct pmu_metric *pm) |
| { |
| const char *p = &big_c_string[offset]; |
| |
| pm->metric_name = (*p == '\0' ? NULL : p); |
| while (*p++); |
| pm->metric_group = (*p == '\0' ? NULL : p); |
| while (*p++); |
| pm->metric_expr = (*p == '\0' ? NULL : p); |
| while (*p++); |
| pm->metric_threshold = (*p == '\0' ? NULL : p); |
| while (*p++); |
| pm->desc = (*p == '\0' ? NULL : p); |
| while (*p++); |
| pm->long_desc = (*p == '\0' ? NULL : p); |
| while (*p++); |
| pm->unit = (*p == '\0' ? NULL : p); |
| while (*p++); |
| pm->compat = (*p == '\0' ? NULL : p); |
| while (*p++); |
| pm->metricgroup_no_group = (*p == '\0' ? NULL : p); |
| while (*p++); |
| pm->default_metricgroup_name = (*p == '\0' ? NULL : p); |
| while (*p++); |
| pm->aggr_mode = *p - '0'; |
| p++; |
| pm->event_grouping = *p - '0'; |
| } |
| |
| static int pmu_events_table__for_each_event_pmu(const struct pmu_events_table *table, |
| const struct pmu_table_entry *pmu, |
| pmu_event_iter_fn fn, |
| void *data) |
| { |
| int ret; |
| struct pmu_event pe = { |
| .pmu = &big_c_string[pmu->pmu_name.offset], |
| }; |
| |
| for (uint32_t i = 0; i < pmu->num_entries; i++) { |
| decompress_event(pmu->entries[i].offset, &pe); |
| if (!pe.name) |
| continue; |
| ret = fn(&pe, table, data); |
| if (ret) |
| return ret; |
| } |
| return 0; |
| } |
| |
| static int pmu_events_table__find_event_pmu(const struct pmu_events_table *table, |
| const struct pmu_table_entry *pmu, |
| const char *name, |
| pmu_event_iter_fn fn, |
| void *data) |
| { |
| struct pmu_event pe = { |
| .pmu = &big_c_string[pmu->pmu_name.offset], |
| }; |
| int low = 0, high = pmu->num_entries - 1; |
| |
| while (low <= high) { |
| int cmp, mid = (low + high) / 2; |
| |
| decompress_event(pmu->entries[mid].offset, &pe); |
| |
| if (!pe.name && !name) |
| goto do_call; |
| |
| if (!pe.name && name) { |
| low = mid + 1; |
| continue; |
| } |
| if (pe.name && !name) { |
| high = mid - 1; |
| continue; |
| } |
| |
| cmp = strcasecmp(pe.name, name); |
| if (cmp < 0) { |
| low = mid + 1; |
| continue; |
| } |
| if (cmp > 0) { |
| high = mid - 1; |
| continue; |
| } |
| do_call: |
| return fn ? fn(&pe, table, data) : 0; |
| } |
| return PMU_EVENTS__NOT_FOUND; |
| } |
| |
| int pmu_events_table__for_each_event(const struct pmu_events_table *table, |
| struct perf_pmu *pmu, |
| pmu_event_iter_fn fn, |
| void *data) |
| { |
| for (size_t i = 0; i < table->num_pmus; i++) { |
| const struct pmu_table_entry *table_pmu = &table->pmus[i]; |
| const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset]; |
| int ret; |
| |
| if (pmu && !pmu__name_match(pmu, pmu_name)) |
| continue; |
| |
| ret = pmu_events_table__for_each_event_pmu(table, table_pmu, fn, data); |
| if (pmu || ret) |
| return ret; |
| } |
| return 0; |
| } |
| |
| int pmu_events_table__find_event(const struct pmu_events_table *table, |
| struct perf_pmu *pmu, |
| const char *name, |
| pmu_event_iter_fn fn, |
| void *data) |
| { |
| for (size_t i = 0; i < table->num_pmus; i++) { |
| const struct pmu_table_entry *table_pmu = &table->pmus[i]; |
| const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset]; |
| int ret; |
| |
| if (!pmu__name_match(pmu, pmu_name)) |
| continue; |
| |
| ret = pmu_events_table__find_event_pmu(table, table_pmu, name, fn, data); |
| if (ret != PMU_EVENTS__NOT_FOUND) |
| return ret; |
| } |
| return PMU_EVENTS__NOT_FOUND; |
| } |
| |
| size_t pmu_events_table__num_events(const struct pmu_events_table *table, |
| struct perf_pmu *pmu) |
| { |
| size_t count = 0; |
| |
| for (size_t i = 0; i < table->num_pmus; i++) { |
| const struct pmu_table_entry *table_pmu = &table->pmus[i]; |
| const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset]; |
| |
| if (pmu__name_match(pmu, pmu_name)) |
| count += table_pmu->num_entries; |
| } |
| return count; |
| } |
| |
| static int pmu_metrics_table__for_each_metric_pmu(const struct pmu_metrics_table *table, |
| const struct pmu_table_entry *pmu, |
| pmu_metric_iter_fn fn, |
| void *data) |
| { |
| int ret; |
| struct pmu_metric pm = { |
| .pmu = &big_c_string[pmu->pmu_name.offset], |
| }; |
| |
| for (uint32_t i = 0; i < pmu->num_entries; i++) { |
| decompress_metric(pmu->entries[i].offset, &pm); |
| if (!pm.metric_expr) |
| continue; |
| ret = fn(&pm, table, data); |
| if (ret) |
| return ret; |
| } |
| return 0; |
| } |
| |
| int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table, |
| pmu_metric_iter_fn fn, |
| void *data) |
| { |
| for (size_t i = 0; i < table->num_pmus; i++) { |
| int ret = pmu_metrics_table__for_each_metric_pmu(table, &table->pmus[i], |
| fn, data); |
| |
| if (ret) |
| return ret; |
| } |
| return 0; |
| } |
| |
/*
 * Find the pmu_events_map for @pmu's cpuid, with two levels of memoization:
 * a per-PMU cache (last_result) and a per-cpuid cache (last_map_search).
 * Returns NULL when the PMU has no cpuid or the cpuid matches no map entry.
 *
 * NOTE(review): the static caches are mutated without synchronization —
 * presumably callers are single-threaded; confirm before concurrent use.
 */
static const struct pmu_events_map *map_for_pmu(struct perf_pmu *pmu)
{
	static struct {
		const struct pmu_events_map *map;
		struct perf_pmu *pmu;
	} last_result;
	static struct {
		const struct pmu_events_map *map;
		char *cpuid;
	} last_map_search;
	static bool has_last_result, has_last_map_search;
	const struct pmu_events_map *map = NULL;
	char *cpuid = NULL;
	size_t i;

	/* Fast path: same PMU as last call. */
	if (has_last_result && last_result.pmu == pmu)
		return last_result.map;

	cpuid = perf_pmu__getcpuid(pmu);

	/*
	 * On some platforms which uses cpus map, cpuid can be NULL for
	 * PMUs other than CORE PMUs.
	 */
	if (!cpuid)
		goto out_update_last_result;

	if (has_last_map_search && !strcmp(last_map_search.cpuid, cpuid)) {
		/* Same cpuid as last search: reuse the cached map. */
		map = last_map_search.map;
		free(cpuid);
	} else {
		/* Linear scan of pmu_events_map up to its NULL-arch sentinel. */
		i = 0;
		for (;;) {
			map = &pmu_events_map[i++];

			if (!map->arch) {
				map = NULL;
				break;
			}

			if (!strcmp_cpuid_str(map->cpuid, cpuid))
				break;
		}
		/* Cache takes ownership of cpuid; the previous one is freed. */
		free(last_map_search.cpuid);
		last_map_search.cpuid = cpuid;
		last_map_search.map = map;
		has_last_map_search = true;
	}
out_update_last_result:
	last_result.pmu = pmu;
	last_result.map = map;
	has_last_result = true;
	return map;
}
| |
| const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu) |
| { |
| const struct pmu_events_map *map = map_for_pmu(pmu); |
| |
| if (!map) |
| return NULL; |
| |
| if (!pmu) |
| return &map->event_table; |
| |
| for (size_t i = 0; i < map->event_table.num_pmus; i++) { |
| const struct pmu_table_entry *table_pmu = &map->event_table.pmus[i]; |
| const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset]; |
| |
| if (pmu__name_match(pmu, pmu_name)) |
| return &map->event_table; |
| } |
| return NULL; |
| } |
| |
| const struct pmu_metrics_table *perf_pmu__find_metrics_table(struct perf_pmu *pmu) |
| { |
| const struct pmu_events_map *map = map_for_pmu(pmu); |
| |
| if (!map) |
| return NULL; |
| |
| if (!pmu) |
| return &map->metric_table; |
| |
| for (size_t i = 0; i < map->metric_table.num_pmus; i++) { |
| const struct pmu_table_entry *table_pmu = &map->metric_table.pmus[i]; |
| const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset]; |
| |
| if (pmu__name_match(pmu, pmu_name)) |
| return &map->metric_table; |
| } |
| return NULL; |
| } |
| |
| const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid) |
| { |
| for (const struct pmu_events_map *tables = &pmu_events_map[0]; |
| tables->arch; |
| tables++) { |
| if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid)) |
| return &tables->event_table; |
| } |
| return NULL; |
| } |
| |
| const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid) |
| { |
| for (const struct pmu_events_map *tables = &pmu_events_map[0]; |
| tables->arch; |
| tables++) { |
| if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid)) |
| return &tables->metric_table; |
| } |
| return NULL; |
| } |
| |
| int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data) |
| { |
| for (const struct pmu_events_map *tables = &pmu_events_map[0]; |
| tables->arch; |
| tables++) { |
| int ret = pmu_events_table__for_each_event(&tables->event_table, |
| /*pmu=*/ NULL, fn, data); |
| |
| if (ret) |
| return ret; |
| } |
| return 0; |
| } |
| |
| int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data) |
| { |
| for (const struct pmu_events_map *tables = &pmu_events_map[0]; |
| tables->arch; |
| tables++) { |
| int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data); |
| |
| if (ret) |
| return ret; |
| } |
| return 0; |
| } |
| |
| const struct pmu_events_table *find_sys_events_table(const char *name) |
| { |
| for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0]; |
| tables->name; |
| tables++) { |
| if (!strcmp(tables->name, name)) |
| return &tables->event_table; |
| } |
| return NULL; |
| } |
| |
| int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data) |
| { |
| for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0]; |
| tables->name; |
| tables++) { |
| int ret = pmu_events_table__for_each_event(&tables->event_table, |
| /*pmu=*/ NULL, fn, data); |
| |
| if (ret) |
| return ret; |
| } |
| return 0; |
| } |
| |
| int pmu_for_each_sys_metric(pmu_metric_iter_fn fn, void *data) |
| { |
| for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0]; |
| tables->name; |
| tables++) { |
| int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data); |
| |
| if (ret) |
| return ret; |
| } |
| return 0; |
| } |
| |
/*
 * {group-name offset, description offset} pairs into big_c_string, sorted by
 * group name for the binary search in describe_metricgroup. Empty here: this
 * test model defines no metric groups.
 */
static const int metricgroups[][2] = {

};
| |
| const char *describe_metricgroup(const char *group) |
| { |
| int low = 0, high = (int)ARRAY_SIZE(metricgroups) - 1; |
| |
| while (low <= high) { |
| int mid = (low + high) / 2; |
| const char *mgroup = &big_c_string[metricgroups[mid][0]]; |
| int cmp = strcmp(mgroup, group); |
| |
| if (cmp == 0) { |
| return &big_c_string[metricgroups[mid][1]]; |
| } else if (cmp < 0) { |
| low = mid + 1; |
| } else { |
| high = mid - 1; |
| } |
| } |
| return NULL; |
| } |