/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "df_v3_6.h"

#include "df/df_3_6_default.h"
#include "df/df_3_6_offset.h"
#include "df/df_3_6_sh_mask.h"

#define DF_3_6_SMN_REG_INST_DIST	0x8
#define DF_3_6_INST_CNT			8

/* Defined in global_features.h as FTI_PERFMON_VISIBLE */
#define DF_V3_6_MAX_COUNTERS		4

/* extract the event, instance and unit-mask fields from a df perfmon config */
#define DF_V3_6_GET_EVENT(x)		(x & 0xFFUL)
#define DF_V3_6_GET_INSTANCE(x)		((x >> 8) & 0xFFUL)
#define DF_V3_6_GET_UNITMASK(x)		((x >> 16) & 0xFFUL)
#define DF_V3_6_PERFMON_OVERFLOW	0xFFFFFFFFFFFFULL

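/*
 * Illustrative example of the config layout implied by the macros above:
 * config = 0x00040207 selects event 0x07 on instance 0x02 with unit
 * mask 0x04.
 */
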
static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0,
				       16, 32, 0, 0, 0, 2, 4, 8};

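/*
 * df_v3_6_get_fica - read a DF register through the fabric indirect config
 * access (FICA) interface: program the access address, then read back the
 * low and high data words under the pcie index/data lock.
 */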
static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
				 uint32_t ficaa_val)
{
	unsigned long flags, address, data;
	uint32_t ficadl_val, ficadh_val;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
	WREG32(data, ficaa_val);

	WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3);
	ficadl_val = RREG32(data);

	WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3);
	ficadh_val = RREG32(data);

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return (((uint64_t)ficadh_val << 32) | ficadl_val);
}

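/*
 * df_v3_6_set_fica - write a DF register through the FICA interface; the low
 * and high data words are written after programming the access address.
 */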
static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val,
			     uint32_t ficadl_val, uint32_t ficadh_val)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
	WREG32(data, ficaa_val);

	WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3);
	WREG32(data, ficadl_val);

	WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3);
	WREG32(data, ficadh_val);

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/*
 * df_v3_6_perfmon_rreg - read perfmon lo and hi
 *
 * Must be atomic: there is no MMIO method, so the back-to-back reads of the
 * lo and hi words must not be interleaved with other accesses in order to
 * preserve the DF finite state machine.
 */
static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev,
				 uint32_t lo_addr, uint32_t *lo_val,
				 uint32_t hi_addr, uint32_t *hi_val)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, lo_addr);
	*lo_val = RREG32(data);
	WREG32(address, hi_addr);
	*hi_val = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/*
 * df_v3_6_perfmon_wreg - write perfmon lo and hi
 *
 * Must be atomic: there is no MMIO method, so no other access may occur
 * between the lo and hi data writes in order to preserve the data fabric's
 * finite state machine.
 */
static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
				 uint32_t lo_val, uint32_t hi_addr, uint32_t hi_val)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, lo_addr);
	WREG32(data, lo_val);
	WREG32(address, hi_addr);
	WREG32(data, hi_val);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/* same as df_v3_6_perfmon_wreg, but reads the values back and returns
 * -EBUSY if the written values did not stick.
 */
static int df_v3_6_perfmon_arm_with_status(struct amdgpu_device *adev,
					   uint32_t lo_addr, uint32_t lo_val,
					   uint32_t hi_addr, uint32_t hi_val)
{
	unsigned long flags, address, data;
	uint32_t lo_val_rb, hi_val_rb;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, lo_addr);
	WREG32(data, lo_val);
	WREG32(address, hi_addr);
	WREG32(data, hi_val);

	WREG32(address, lo_addr);
	lo_val_rb = RREG32(data);
	WREG32(address, hi_addr);
	hi_val_rb = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	if (!(lo_val == lo_val_rb && hi_val == hi_val_rb))
		return -EBUSY;

	return 0;
}

/*
 * Retry arming the counters every 100 usec within a 1 millisecond timeout;
 * if arming still fails after the timeout, return an error.
 */
#define ARM_RETRY_USEC_TIMEOUT	1000
#define ARM_RETRY_USEC_INTERVAL	100
static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev,
					  uint32_t lo_addr, uint32_t lo_val,
					  uint32_t hi_addr, uint32_t hi_val)
{
	int countdown = ARM_RETRY_USEC_TIMEOUT;

	while (countdown) {
		if (!df_v3_6_perfmon_arm_with_status(adev, lo_addr, lo_val,
						     hi_addr, hi_val))
			break;

		countdown -= ARM_RETRY_USEC_INTERVAL;
		udelay(ARM_RETRY_USEC_INTERVAL);
	}

	return countdown > 0 ? 0 : -ETIME;
}

/* get the number of df counters available */
static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct amdgpu_device *adev;
	struct drm_device *ddev;
	int i, count;

	ddev = dev_get_drvdata(dev);
	adev = drm_to_adev(ddev);
	count = 0;

	for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
		if (adev->df_perfmon_config_assign_mask[i] == 0)
			count++;
	}

	return sysfs_emit(buf, "%i\n", count);
}

/* device attr for available perfmon counters */
static DEVICE_ATTR(df_cntr_avail, S_IRUGO, df_v3_6_get_df_cntr_avail, NULL);

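/*
 * df_v3_6_query_hashes - cache whether 64K/2M/1G DF address hashing is
 * enabled, based on the fb channel encoding and the DfGlobalCtrl fields.
 */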
static void df_v3_6_query_hashes(struct amdgpu_device *adev)
{
	u32 tmp;

	adev->df.hash_status.hash_64k = false;
	adev->df.hash_status.hash_2m = false;
	adev->df.hash_status.hash_1g = false;

	/* encoding for hash-enabled on Arcturus and Aldebaran */
	if ((adev->asic_type == CHIP_ARCTURUS &&
	     adev->df.funcs->get_fb_channel_number(adev) == 0xe) ||
	    (adev->asic_type == CHIP_ALDEBARAN &&
	     adev->df.funcs->get_fb_channel_number(adev) == 0x1e)) {
		tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DfGlobalCtrl);
		adev->df.hash_status.hash_64k = REG_GET_FIELD(tmp,
						DF_CS_UMC_AON0_DfGlobalCtrl,
						GlbHashIntlvCtl64K);
		adev->df.hash_status.hash_2m = REG_GET_FIELD(tmp,
						DF_CS_UMC_AON0_DfGlobalCtrl,
						GlbHashIntlvCtl2M);
		adev->df.hash_status.hash_1g = REG_GET_FIELD(tmp,
						DF_CS_UMC_AON0_DfGlobalCtrl,
						GlbHashIntlvCtl1G);
	}
}

/* init perfmons */
static void df_v3_6_sw_init(struct amdgpu_device *adev)
{
	int i, ret;

	ret = device_create_file(adev->dev, &dev_attr_df_cntr_avail);
	if (ret)
		DRM_ERROR("failed to create file for available df counters\n");

	for (i = 0; i < AMDGPU_MAX_DF_PERFMONS; i++)
		adev->df_perfmon_config_assign_mask[i] = 0;

	df_v3_6_query_hashes(adev);
}

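/* remove the perfmon sysfs attribute created in df_v3_6_sw_init() */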
static void df_v3_6_sw_fini(struct amdgpu_device *adev)
{
	device_remove_file(adev->dev, &dev_attr_df_cntr_avail);
}

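/*
 * df_v3_6_enable_broadcast_mode - clear CfgRegInstAccEn so DF register
 * accesses broadcast to all instances; restore the register default
 * (per-instance access) when disabling.
 */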
static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev,
					  bool enable)
{
	u32 tmp;

	if (enable) {
		tmp = RREG32_SOC15(DF, 0, mmFabricConfigAccessControl);
		tmp &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
		WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, tmp);
	} else
		WREG32_SOC15(DF, 0, mmFabricConfigAccessControl,
			     mmFabricConfigAccessControl_DEFAULT);
}

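/*
 * df_v3_6_get_fb_channel_number - return the raw IntLvNumChan encoding from
 * the DRAM base address register (Aldebaran uses a different register and
 * mask than earlier ASICs).
 */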
static u32 df_v3_6_get_fb_channel_number(struct amdgpu_device *adev)
{
	u32 tmp;

	if (adev->asic_type == CHIP_ALDEBARAN) {
		tmp = RREG32_SOC15(DF, 0, mmDF_GCM_AON0_DramMegaBaseAddress0);
		tmp &=
		ALDEBARAN_DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK;
	} else {
		tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DramBaseAddress0);
		tmp &= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK;
	}
	tmp >>= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;

	return tmp;
}

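/*
 * df_v3_6_get_hbm_channel_number - translate the IntLvNumChan encoding into
 * a channel count via the df_v3_6_channel_number table.
 */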
static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev)
{
	int fb_channel_number;

	fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
	if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number))
		fb_channel_number = 0;

	return df_v3_6_channel_number[fb_channel_number];
}

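/*
 * df_v3_6_update_medium_grain_clock_gating - program DfGlobalClkGater (with
 * DF in broadcast mode) to enable or disable DF medium-grain clock gating.
 */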
static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						     bool enable)
{
	u32 tmp;

	if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) {
		/* Put DF on broadcast mode */
		adev->df.funcs->enable_broadcast_mode(adev, true);

		if (enable) {
			tmp = RREG32_SOC15(DF, 0,
					mmDF_PIE_AON0_DfGlobalClkGater);
			tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
			tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
			WREG32_SOC15(DF, 0,
					mmDF_PIE_AON0_DfGlobalClkGater, tmp);
		} else {
			tmp = RREG32_SOC15(DF, 0,
					mmDF_PIE_AON0_DfGlobalClkGater);
			tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
			tmp |= DF_V3_6_MGCG_DISABLE;
			WREG32_SOC15(DF, 0,
					mmDF_PIE_AON0_DfGlobalClkGater, tmp);
		}

		/* Exit broadcast mode */
		adev->df.funcs->enable_broadcast_mode(adev, false);
	}
}

static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev,
					  u64 *flags)
{
	u32 tmp;

	/* AMD_CG_SUPPORT_DF_MGCG */
	tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
	if (tmp & DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY)
		*flags |= AMD_CG_SUPPORT_DF_MGCG;
}

/* check whether the given counter index is assigned to this perfmon config */
static bool df_v3_6_pmc_has_counter(struct amdgpu_device *adev,
				    uint64_t config,
				    int counter_idx)
{
	return ((config & 0x0FFFFFFUL) ==
			adev->df_perfmon_config_assign_mask[counter_idx]);
}

/* get address based on counter assignment */
static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
				 uint64_t config,
				 int counter_idx,
				 int is_ctrl,
				 uint32_t *lo_base_addr,
				 uint32_t *hi_base_addr)
{
	if (!df_v3_6_pmc_has_counter(adev, config, counter_idx))
		return;

	switch (counter_idx) {
	case 0:
		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4;
		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4;
		break;
	case 1:
		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5;
		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5;
		break;
	case 2:
		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6;
		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6;
		break;
	case 3:
		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7;
		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7;
		break;
	}
}

/* get read counter address */
static void df_v3_6_pmc_get_read_settings(struct amdgpu_device *adev,
					  uint64_t config,
					  int counter_idx,
					  uint32_t *lo_base_addr,
					  uint32_t *hi_base_addr)
{
	df_v3_6_pmc_get_addr(adev, config, counter_idx, 0, lo_base_addr,
			     hi_base_addr);
}

/* get control counter settings i.e. address and values to set */
static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev,
					 uint64_t config,
					 int counter_idx,
					 uint32_t *lo_base_addr,
					 uint32_t *hi_base_addr,
					 uint32_t *lo_val,
					 uint32_t *hi_val,
					 bool is_enable)
{
	uint32_t eventsel, instance, unitmask;
	uint32_t instance_10, instance_5432, instance_76;

	df_v3_6_pmc_get_addr(adev, config, counter_idx, 1, lo_base_addr,
			     hi_base_addr);

	if ((*lo_base_addr == 0) || (*hi_base_addr == 0)) {
		DRM_ERROR("[DF PMC] addressing not retrieved! Lo: %x, Hi: %x",
			  *lo_base_addr, *hi_base_addr);
		return -ENXIO;
	}

	eventsel = DF_V3_6_GET_EVENT(config) & 0x3f;
	unitmask = DF_V3_6_GET_UNITMASK(config) & 0xf;
	instance = DF_V3_6_GET_INSTANCE(config);

	instance_10 = instance & 0x3;
	instance_5432 = (instance >> 2) & 0xf;
	instance_76 = (instance >> 6) & 0x3;

	*lo_val = (unitmask << 8) | (instance_10 << 6) | eventsel;
	*lo_val = is_enable ? *lo_val | (1 << 22) : *lo_val & ~(1 << 22);
	*hi_val = (instance_76 << 29) | instance_5432;

	DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x",
			 config, *lo_base_addr, *hi_base_addr, *lo_val, *hi_val);

	return 0;
}

/* assign a free df perfmon counter to the given config and return its index */
static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
				uint64_t config)
{
	int i;

	for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
		if (adev->df_perfmon_config_assign_mask[i] == 0U) {
			adev->df_perfmon_config_assign_mask[i] =
							config & 0x0FFFFFFUL;
			return i;
		}
	}

	return -ENOSPC;
}

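/*
 * A counter that could not be armed is flagged as deferred by setting
 * DEFERRED_ARM_MASK in its assign-mask entry; a later read retries the arm
 * before trusting the count value.
 */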
#define DEFERRED_ARM_MASK	(1 << 31)
static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
				    uint64_t config, int counter_idx,
				    bool is_deferred)
{
	if (!df_v3_6_pmc_has_counter(adev, config, counter_idx))
		return -EINVAL;

	if (is_deferred)
		adev->df_perfmon_config_assign_mask[counter_idx] |=
							DEFERRED_ARM_MASK;
	else
		adev->df_perfmon_config_assign_mask[counter_idx] &=
							~DEFERRED_ARM_MASK;

	return 0;
}

static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
				    uint64_t config,
				    int counter_idx)
{
	return (df_v3_6_pmc_has_counter(adev, config, counter_idx) &&
			(adev->df_perfmon_config_assign_mask[counter_idx]
				& DEFERRED_ARM_MASK));
}

/* release performance counter */
static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
				     uint64_t config,
				     int counter_idx)
{
	if (df_v3_6_pmc_has_counter(adev, config, counter_idx))
		adev->df_perfmon_config_assign_mask[counter_idx] = 0ULL;
}

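/* zero the hardware count registers of an assigned counter */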
static void df_v3_6_reset_perfmon_cntr(struct amdgpu_device *adev,
					uint64_t config,
					int counter_idx)
{
	uint32_t lo_base_addr = 0, hi_base_addr = 0;

	df_v3_6_pmc_get_read_settings(adev, config, counter_idx, &lo_base_addr,
				      &hi_base_addr);

	if ((lo_base_addr == 0) || (hi_base_addr == 0))
		return;

	df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);
}

/*
 * df_v3_6_pmc_start - when is_add is set, assign a free counter and return
 * its index; otherwise arm the assigned counter and return the arm status
 * (a failed arm is marked deferred and retried on the next read).
 */
static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
			     int counter_idx, int is_add)
{
	uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
	int err = 0, ret = 0;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
		if (is_add)
			return df_v3_6_pmc_add_cntr(adev, config);

		ret = df_v3_6_pmc_get_ctrl_settings(adev,
					config,
					counter_idx,
					&lo_base_addr,
					&hi_base_addr,
					&lo_val,
					&hi_val,
					true);

		if (ret)
			return ret;

		err = df_v3_6_perfmon_arm_with_retry(adev,
						     lo_base_addr,
						     lo_val,
						     hi_base_addr,
						     hi_val);

		if (err)
			ret = df_v3_6_pmc_set_deferred(adev, config,
						       counter_idx, true);

		break;
	default:
		break;
	}

	return ret;
}

static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
			    int counter_idx, int is_remove)
{
	uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
	int ret = 0;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
		ret = df_v3_6_pmc_get_ctrl_settings(adev,
					config,
					counter_idx,
					&lo_base_addr,
					&hi_base_addr,
					&lo_val,
					&hi_val,
					false);

		if (ret)
			return ret;

		df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val,
				     hi_base_addr, hi_val);

		if (is_remove) {
			df_v3_6_reset_perfmon_cntr(adev, config, counter_idx);
			df_v3_6_pmc_release_cntr(adev, config, counter_idx);
		}

		break;
	default:
		break;
	}

	return ret;
}

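/*
 * df_v3_6_pmc_get_count - read back the count of an assigned counter; a
 * deferred counter is re-armed first, and the count is discarded if the
 * re-arm fails or the value has reached the overflow threshold.
 */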
static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
				  uint64_t config,
				  int counter_idx,
				  uint64_t *count)
{
	uint32_t lo_base_addr = 0, hi_base_addr = 0, lo_val = 0, hi_val = 0;
	*count = 0;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
		df_v3_6_pmc_get_read_settings(adev, config, counter_idx,
					      &lo_base_addr, &hi_base_addr);

		if ((lo_base_addr == 0) || (hi_base_addr == 0))
			return;

		/* rearm the counter or throw away count value on failure */
		if (df_v3_6_pmc_is_deferred(adev, config, counter_idx)) {
			int rearm_err = df_v3_6_perfmon_arm_with_status(adev,
							lo_base_addr, lo_val,
							hi_base_addr, hi_val);

			if (rearm_err)
				return;

			df_v3_6_pmc_set_deferred(adev, config, counter_idx,
						 false);
		}

		df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val,
				     hi_base_addr, &hi_val);

		*count = ((hi_val | 0ULL) << 32) | (lo_val | 0ULL);

		if (*count >= DF_V3_6_PERFMON_OVERFLOW)
			*count = 0;

		DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x",
				 config, lo_base_addr, hi_base_addr, lo_val, hi_val);

		break;
	default:
		break;
	}
}

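/*
 * df_v3_6_query_ras_poison_mode - poison mode is reported as enabled only
 * when all four hardware-assert mask bits agree; an inconsistent setting is
 * warned about and treated as disabled.
 */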
static bool df_v3_6_query_ras_poison_mode(struct amdgpu_device *adev)
{
	uint32_t hw_assert_msklo, hw_assert_mskhi;
	uint32_t v0, v1, v28, v31;

	hw_assert_msklo = RREG32_SOC15(DF, 0,
				mmDF_CS_UMC_AON0_HardwareAssertMaskLow);
	hw_assert_mskhi = RREG32_SOC15(DF, 0,
				mmDF_NCS_PG0_HardwareAssertMaskHigh);

	v0 = REG_GET_FIELD(hw_assert_msklo,
		DF_CS_UMC_AON0_HardwareAssertMaskLow, HWAssertMsk0);
	v1 = REG_GET_FIELD(hw_assert_msklo,
		DF_CS_UMC_AON0_HardwareAssertMaskLow, HWAssertMsk1);
	v28 = REG_GET_FIELD(hw_assert_mskhi,
		DF_NCS_PG0_HardwareAssertMaskHigh, HWAssertMsk28);
	v31 = REG_GET_FIELD(hw_assert_mskhi,
		DF_NCS_PG0_HardwareAssertMaskHigh, HWAssertMsk31);

	if (v0 && v1 && v28 && v31)
		return true;
	else if (!v0 && !v1 && !v28 && !v31)
		return false;
	else {
		dev_warn(adev->dev, "DF poison setting is inconsistent(%d:%d:%d:%d)!\n",
				v0, v1, v28, v31);
		return false;
	}
}

const struct amdgpu_df_funcs df_v3_6_funcs = {
	.sw_init = df_v3_6_sw_init,
	.sw_fini = df_v3_6_sw_fini,
	.enable_broadcast_mode = df_v3_6_enable_broadcast_mode,
	.get_fb_channel_number = df_v3_6_get_fb_channel_number,
	.get_hbm_channel_number = df_v3_6_get_hbm_channel_number,
	.update_medium_grain_clock_gating =
			df_v3_6_update_medium_grain_clock_gating,
	.get_clockgating_state = df_v3_6_get_clockgating_state,
	.pmc_start = df_v3_6_pmc_start,
	.pmc_stop = df_v3_6_pmc_stop,
	.pmc_get_count = df_v3_6_pmc_get_count,
	.get_fica = df_v3_6_get_fica,
	.set_fica = df_v3_6_set_fica,
	.query_ras_poison_mode = df_v3_6_query_ras_poison_mode,
};