// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARM64 cacheinfo support
 *
 * Copyright (C) 2015 ARM Ltd.
 * All Rights Reserved
 */

#include <linux/acpi.h>
#include <linux/cacheinfo.h>
#include <linux/of.h>

#define MAX_CACHE_LEVEL		7	/* Max 7 levels supported */

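/*
 * Report the largest cache line size in the system. If firmware (DT or
 * ACPI) described the caches, coherency_max_size is presumably the
 * largest coherency line size recorded by the generic cacheinfo core
 * across all described leaves; otherwise fall back to the CPU's own
 * view via cache_line_size_of_cpu(), which on arm64 is derived from the
 * CTR_EL0 cache geometry fields.
 */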
int cache_line_size(void)
{
	if (coherency_max_size != 0)
		return coherency_max_size;

	return cache_line_size_of_cpu();
}
EXPORT_SYMBOL_GPL(cache_line_size);

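/*
 * CLIDR_EL1 describes the cache hierarchy with one 3-bit Ctype field per
 * level: Ctype<n> sits at bits [3n-1:3n-3], so CLIDR_CTYPE() is in
 * essence (clidr >> (3 * (level - 1))) & 0x7. The architectural encoding
 * (0 = no cache, 1 = I-cache only, 2 = D-cache only, 3 = separate I/D,
 * 4 = unified) lines up with enum cache_type, which is why the field can
 * be returned directly.
 */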
static inline enum cache_type get_cache_type(int level)
{
	u64 clidr;

	if (level > MAX_CACHE_LEVEL)
		return CACHE_TYPE_NOCACHE;
	clidr = read_sysreg(clidr_el1);
	return CLIDR_CTYPE(clidr, level);
}

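/*
 * Only the level and type are recorded here; the remaining geometry
 * (size, ways, line size) and shared-CPU information are presumably
 * filled in later by the generic cacheinfo core from DT or ACPI PPTT
 * data.
 */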
static void ci_leaf_init(struct cacheinfo *this_leaf,
			 enum cache_type type, unsigned int level)
{
	this_leaf->level = level;
	this_leaf->type = type;
}

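/*
 * Size the per-CPU cacheinfo tables: count the levels and leaves the CPU
 * itself reports in CLIDR_EL1, then let firmware (DT or ACPI PPTT)
 * extend the hierarchy with any system-level caches the core cannot see.
 * For example, a hypothetical part with separate L1 I/D caches and a
 * unified L2 in CLIDR_EL1 plus a DT-described L3 would end up with
 * num_levels = 3 and num_leaves = 4.
 */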
int init_cache_level(unsigned int cpu)
{
	unsigned int ctype, level, leaves;
	int fw_level, ret;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

	for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) {
		ctype = get_cache_type(level);
		if (ctype == CACHE_TYPE_NOCACHE) {
			level--;
			break;
		}
		/* Separate instruction and data caches */
		leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
	}

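	/*
	 * Ask firmware how deep the hierarchy really is:
	 * of_find_last_cache_level() walks the next-level-cache chain
	 * from the CPU's DT node, while acpi_get_cache_info() reads the
	 * ACPI PPTT table. A negative ACPI result is treated as "no
	 * extra levels".
	 */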
	if (acpi_disabled) {
		fw_level = of_find_last_cache_level(cpu);
	} else {
		ret = acpi_get_cache_info(cpu, &fw_level, NULL);
		if (ret < 0)
			fw_level = 0;
	}

	if (level < fw_level) {
		/*
		 * Some external caches are not specified in CLIDR_EL1,
		 * but their description may be available from firmware;
		 * only unified external caches are considered here.
		 */
		leaves += (fw_level - level);
		level = fw_level;
	}

	this_cpu_ci->num_levels = level;
	this_cpu_ci->num_leaves = leaves;
	return 0;
}

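/*
 * Fill in the info_list slots that init_cache_level() sized, walking the
 * levels in order and emitting two leaves (data, then instruction) for a
 * level with separate caches. The populated list is then presumably
 * exposed by the generic cacheinfo code under
 * /sys/devices/system/cpu/cpuN/cache/indexM/.
 */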
int populate_cache_leaves(unsigned int cpu)
{
	unsigned int level, idx;
	enum cache_type type;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;

	for (idx = 0, level = 1; level <= this_cpu_ci->num_levels &&
	     idx < this_cpu_ci->num_leaves; idx++, level++) {
		type = get_cache_type(level);
		if (type == CACHE_TYPE_SEPARATE) {
			ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
			ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
		} else {
			ci_leaf_init(this_leaf++, type, level);
		}
	}
	return 0;
}