/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "pp_debug.h"
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include <drm/amdgpu_drm.h>
#include "ppatomctrl.h"
#include "atombios.h"
#include "pptable_v1_0.h"
#include "pppcielanes.h"
#include "amd_pcie_helpers.h"
#include "hardwaremanager.h"
#include "process_pptables_v1_0.h"
#include "cgs_common.h"

#include "smu7_common.h"

#include "hwmgr.h"
#include "smu7_hwmgr.h"
#include "smu_ucode_xfer_vi.h"
#include "smu7_powertune.h"
#include "smu7_dyn_defaults.h"
#include "smu7_thermal.h"
#include "smu7_clockpowergating.h"
#include "processpptables.h"
#include "pp_thermal.h"
#include "smu7_baco.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define MC_CG_SEQ_DRAMCONF_S0       0x05
#define MC_CG_SEQ_DRAMCONF_S1       0x06
#define MC_CG_SEQ_YCLK_SUSPEND      0x04
#define MC_CG_SEQ_YCLK_RESUME       0x0a

#define SMC_CG_IND_START            0xc0030000
#define SMC_CG_IND_END              0xc0040000

#define MEM_FREQ_LOW_LATENCY        25000
#define MEM_FREQ_HIGH_LATENCY       80000

#define MEM_LATENCY_HIGH            45
#define MEM_LATENCY_LOW             35
#define MEM_LATENCY_ERR             0xFFFF

#define MC_SEQ_MISC0_GDDR5_SHIFT    28
#define MC_SEQ_MISC0_GDDR5_MASK     0xf0000000
#define MC_SEQ_MISC0_GDDR5_VALUE    5

#define PCIE_BUS_CLK                10000
#define TCLK                        (PCIE_BUS_CLK / 10)

static struct profile_mode_setting smu7_profiling[7] =
			{{0, 0, 0, 0, 0, 0, 0, 0},
			 {1, 0, 100, 30, 1, 0, 100, 10},
			 {1, 10, 0, 30, 0, 0, 0, 0},
			 {0, 0, 0, 0, 1, 10, 16, 31},
			 {1, 0, 11, 50, 1, 0, 100, 10},
			 {1, 0, 5, 30, 0, 0, 0, 0},
			 {0, 0, 0, 0, 0, 0, 0, 0},
			};
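
/*
 * Layout note (an assumption from the struct definition, not spelled out
 * here): each row above follows the field order of struct
 * profile_mode_setting, i.e. {bupdate_sclk, sclk_up_hyst, sclk_down_hyst,
 * sclk_activity, bupdate_mclk, mclk_up_hyst, mclk_down_hyst,
 * mclk_activity}, with one row per PP_SMC_POWER_PROFILE_* mode
 * (bootup default, 3D full screen, power saving, video, VR, compute,
 * custom).
 */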

#define PPSMC_MSG_SetVBITimeout_VEGAM    ((uint16_t) 0x310)

#define ixPWR_SVI2_PLANE1_LOAD                     0xC0200280
#define PWR_SVI2_PLANE1_LOAD__PSI1_MASK            0x00000020L
#define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK         0x00000040L
#define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT          0x00000005
#define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT       0x00000006

/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
enum DPM_EVENT_SRC {
	DPM_EVENT_SRC_ANALOG = 0,
	DPM_EVENT_SRC_EXTERNAL = 1,
	DPM_EVENT_SRC_DIGITAL = 2,
	DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
};

static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask);

static struct smu7_power_state *cast_phw_smu7_power_state(
				  struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				return NULL);

	return (struct smu7_power_state *)hw_ps;
}

static const struct smu7_power_state *cast_const_phw_smu7_power_state(
				 const struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				return NULL);

	return (const struct smu7_power_state *)hw_ps;
}

/**
 * Find the MC microcode version and store it in the HwMgr struct
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
{
	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);

	hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);

	return 0;
}

static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
{
	uint32_t speedCntl = 0;

	/* mmPCIE_PORT_INDEX renamed to mmPCIE_INDEX */
	speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
			ixPCIE_LC_SPEED_CNTL);
	return((uint16_t)PHM_GET_FIELD(speedCntl,
			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
}

static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
{
	uint32_t link_width;

	/* mmPCIE_PORT_INDEX renamed to mmPCIE_INDEX */
	link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
			PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);

	PP_ASSERT_WITH_CODE((7 >= link_width),
			"Invalid PCIe lane width!", return 0);

	return decode_pcie_lane_width(link_width);
}
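
/*
 * A note on the two helpers above (inferred from how the values are used
 * elsewhere, not stated in this file): LC_CURRENT_DATA_RATE is commonly
 * documented as encoding the link speed as 0/1/2 for PCIe Gen1/Gen2/Gen3,
 * and decode_pcie_lane_width() (pppcielanes.c) maps the encoded
 * LC_LINK_WIDTH_RD field to an actual lane count (1, 2, 4, 8, 12 or 16)
 * via a small lookup table.
 */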

/**
 * Enable voltage control on the SMC
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
	if (hwmgr->chip_id == CHIP_VEGAM) {
		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
				CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0);
		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
				CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0);
	}

	if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable);

	return 0;
}

/**
 * Checks if we want to support voltage control
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 */
static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
{
	const struct smu7_hwmgr *data =
			(const struct smu7_hwmgr *)(hwmgr->backend);

	return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
}

/**
 * Enable voltage control
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
{
	/* enable voltage control */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);

	return 0;
}

static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
		struct phm_clock_voltage_dependency_table *voltage_dependency_table
		)
{
	uint32_t i;

	PP_ASSERT_WITH_CODE((NULL != voltage_table),
			"Voltage Dependency Table empty.", return -EINVAL;);

	voltage_table->mask_low = 0;
	voltage_table->phase_delay = 0;
	voltage_table->count = voltage_dependency_table->count;

	for (i = 0; i < voltage_dependency_table->count; i++) {
		voltage_table->entries[i].value =
			voltage_dependency_table->entries[i].v;
		voltage_table->entries[i].smio_low = 0;
	}

	return 0;
}

/**
 * Create Voltage Tables.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	int result = 0;
	uint32_t tmp;

	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
				&(data->mvdd_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve MVDD table.",
				return result);
	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
		if (hwmgr->pp_table_version == PP_TABLE_V1)
			result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
					table_info->vdd_dep_on_mclk);
		else if (hwmgr->pp_table_version == PP_TABLE_V0)
			result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
					hwmgr->dyn_state.mvdd_dependency_on_mclk);

		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 MVDD table from dependency table.",
				return result;);
	}

	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
				&(data->vddci_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve VDDCI table.",
				return result);
	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
		if (hwmgr->pp_table_version == PP_TABLE_V1)
			result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
					table_info->vdd_dep_on_mclk);
		else if (hwmgr->pp_table_version == PP_TABLE_V0)
			result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
					hwmgr->dyn_state.vddci_dependency_on_mclk);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 VDDCI table from dependency table.",
				return result);
	}

	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
		/* VDDGFX has only SVI2 voltage control */
		result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
					table_info->vddgfx_lookup_table);
		PP_ASSERT_WITH_CODE((0 == result),
			"Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
	}


	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
					VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
					&data->vddc_voltage_table);
		PP_ASSERT_WITH_CODE((0 == result),
			"Failed to retrieve VDDC table.", return result;);
	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {

		if (hwmgr->pp_table_version == PP_TABLE_V0)
			result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
					hwmgr->dyn_state.vddc_dependency_on_mclk);
		else if (hwmgr->pp_table_version == PP_TABLE_V1)
			result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
				table_info->vddc_lookup_table);

		PP_ASSERT_WITH_CODE((0 == result),
			"Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
	}

	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
	PP_ASSERT_WITH_CODE(
			(data->vddc_voltage_table.count <= tmp),
		"Too many voltage values for VDDC. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
				&(data->vddc_voltage_table)));

	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
	PP_ASSERT_WITH_CODE(
			(data->vddgfx_voltage_table.count <= tmp),
		"Too many voltage values for VDDGFX. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
				&(data->vddgfx_voltage_table)));

	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
	PP_ASSERT_WITH_CODE(
			(data->vddci_voltage_table.count <= tmp),
		"Too many voltage values for VDDCI. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
				&(data->vddci_voltage_table)));

	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
	PP_ASSERT_WITH_CODE(
			(data->mvdd_voltage_table.count <= tmp),
		"Too many voltage values for MVDD. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
				&(data->mvdd_voltage_table)));

	return 0;
}

/**
 * Programs static screen detection parameters
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_program_static_screen_threshold_parameters(
							struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* Set static screen threshold unit */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
			data->static_screen_threshold_unit);
	/* Set static screen threshold */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
			data->static_screen_threshold);

	return 0;
}

/**
 * Setup display gap for glitch-free memory clock switching.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
{
	uint32_t display_gap =
			cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixCG_DISPLAY_GAP_CNTL);

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
			DISP_GAP, DISPLAY_GAP_IGNORE);

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
			DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_DISPLAY_GAP_CNTL, display_gap);

	return 0;
}

/**
 * Programs activity state transition voting clients
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	int i;

	/* Clear reset for voting clients before enabling DPM */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);

	for (i = 0; i < 8; i++)
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixCG_FREQ_TRAN_VOTING_0 + i * 4,
					data->voting_rights_clients[i]);
	return 0;
}

static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
{
	int i;

	/* Reset voting clients before disabling DPM */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);

	for (i = 0; i < 8; i++)
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0);

	return 0;
}

/* Copy one arb setting to another and then switch the active set.
 * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
 */
static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
		uint32_t arb_src, uint32_t arb_dest)
{
	uint32_t mc_arb_dram_timing;
	uint32_t mc_arb_dram_timing2;
	uint32_t burst_time;
	uint32_t mc_cg_config;

	switch (arb_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
		break;
	default:
		return -EINVAL;
	}

	switch (arb_dest) {
	case MC_CG_ARB_FREQ_F0:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
		break;
	case MC_CG_ARB_FREQ_F1:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
		break;
	default:
		return -EINVAL;
	}

	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
	mc_cg_config |= 0x0000000F;
	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);

	return 0;
}

static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults);
}

/**
 * Initial switch from ARB F0->F1
 *
 * This function is to be called from the SetPowerState table.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
{
	return smu7_copy_and_switch_arb_sets(hwmgr,
			MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}

static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
{
	uint32_t tmp;

	tmp = (cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
			0x0000ff00) >> 8;

	if (tmp == MC_CG_ARB_FREQ_F0)
		return 0;

	return smu7_copy_and_switch_arb_sets(hwmgr,
			tmp, MC_CG_ARB_FREQ_F0);
}

static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_pcie_table *pcie_table = NULL;

	uint32_t i, max_entry;
	uint32_t tmp;

	PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
			data->use_pcie_power_saving_levels), "No pcie performance levels!",
			return -EINVAL);

	if (table_info != NULL)
		pcie_table = table_info->pcie_table;

	if (data->use_pcie_performance_levels &&
			!data->use_pcie_power_saving_levels) {
		data->pcie_gen_power_saving = data->pcie_gen_performance;
		data->pcie_lane_power_saving = data->pcie_lane_performance;
	} else if (!data->use_pcie_performance_levels &&
			data->use_pcie_power_saving_levels) {
		data->pcie_gen_performance = data->pcie_gen_power_saving;
		data->pcie_lane_performance = data->pcie_lane_power_saving;
	}
	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK);
	phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
					tmp,
					MAX_REGULAR_DPM_NUMBER);

	if (pcie_table != NULL) {
		/* max_entry is used to make sure we reserve one PCIE level
		 * for boot level (fix for A+A PSPP issue).
		 * If the PCIE table from the PPTable has a ULV entry plus
		 * 8 entries, then ignore the last entry.
		 */
		max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
		for (i = 1; i < max_entry; i++) {
			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
					get_pcie_gen_support(data->pcie_gen_cap,
							pcie_table->entries[i].gen_speed),
					get_pcie_lane_support(data->pcie_lane_cap,
							pcie_table->entries[i].lane_width));
		}
		data->dpm_table.pcie_speed_table.count = max_entry - 1;
		smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
	} else {
		/* Hardcode Pcie Table */
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));

		data->dpm_table.pcie_speed_table.count = 6;
	}
	/* Populate last level for boot PCIE level, but do not increment count. */
	if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
		for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++)
			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				data->vbios_boot_state.pcie_lane_bootup_value);
	} else {
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
				data->dpm_table.pcie_speed_table.count,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
	}
	return 0;
}

static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));

	phm_reset_single_dpm_table(
			&data->dpm_table.sclk_table,
			smum_get_mac_definition(hwmgr,
				SMU_MAX_LEVELS_GRAPHICS),
			MAX_REGULAR_DPM_NUMBER);
	phm_reset_single_dpm_table(
			&data->dpm_table.mclk_table,
			smum_get_mac_definition(hwmgr,
				SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);

	phm_reset_single_dpm_table(
			&data->dpm_table.vddc_table,
			smum_get_mac_definition(hwmgr,
				SMU_MAX_LEVELS_VDDC),
			MAX_REGULAR_DPM_NUMBER);
	phm_reset_single_dpm_table(
			&data->dpm_table.vddci_table,
			smum_get_mac_definition(hwmgr,
				SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);

	phm_reset_single_dpm_table(
			&data->dpm_table.mvdd_table,
			smum_get_mac_definition(hwmgr,
				SMU_MAX_LEVELS_MVDD),
			MAX_REGULAR_DPM_NUMBER);
	return 0;
}
/*
 * This function is to initialize all DPM state tables
 * for SMU7 based on the dependency table.
 * Dynamic state patching function will then trim these
 * state tables to the allowed range based
 * on the power policy or external client requests,
 * such as UVD request, etc.
 */

static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
		hwmgr->dyn_state.vddc_dependency_on_sclk;
	struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
		hwmgr->dyn_state.vddc_dependency_on_mclk;
	struct phm_cac_leakage_table *std_voltage_table =
		hwmgr->dyn_state.cac_leakage_table;
	uint32_t i;

	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
		"SCLK dependency table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
		"SCLK dependency table is empty. This table is mandatory", return -EINVAL);

	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
		"MCLK dependency table is empty. This table is mandatory", return -EINVAL);


	/* Initialize Sclk DPM table based on allowed Sclk values */
	data->dpm_table.sclk_table.count = 0;

	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
				allowed_vdd_sclk_table->entries[i].clk) {
			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
				allowed_vdd_sclk_table->entries[i].clk;
			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
			data->dpm_table.sclk_table.count++;
		}
	}

	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
	/* Initialize Mclk DPM table based on allowed Mclk values */
	data->dpm_table.mclk_table.count = 0;
	for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
		if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
			allowed_vdd_mclk_table->entries[i].clk) {
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
				allowed_vdd_mclk_table->entries[i].clk;
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
			data->dpm_table.mclk_table.count++;
		}
	}

	/* Initialize Vddc DPM table based on allowed Vddc values. And populate corresponding std values. */
	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
		data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
		data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
		/* param1 is for corresponding std voltage */
		data->dpm_table.vddc_table.dpm_levels[i].enabled = true;
	}

	data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
	allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;

	if (NULL != allowed_vdd_mclk_table) {
		/* Initialize Vddci DPM table based on allowed Mclk values */
		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
			data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
			data->dpm_table.vddci_table.dpm_levels[i].enabled = true;
		}
		data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
	}

	allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;

	if (NULL != allowed_vdd_mclk_table) {
		/*
		 * Initialize MVDD DPM table based on allowed Mclk
		 * values
		 */
		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
			data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
			data->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
		}
		data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
	}

	return 0;
}

static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;

	if (table_info == NULL)
		return -EINVAL;

	dep_sclk_table = table_info->vdd_dep_on_sclk;
	dep_mclk_table = table_info->vdd_dep_on_mclk;

	PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
			"SCLK dependency table is missing.",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
			"SCLK dependency table count is 0.",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
			"MCLK dependency table is missing.",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
			"MCLK dependency table count is 0.",
			return -EINVAL);

	/* Initialize Sclk DPM table based on allowed Sclk values */
	data->dpm_table.sclk_table.count = 0;
	for (i = 0; i < dep_sclk_table->count; i++) {
		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
						dep_sclk_table->entries[i].clk) {

			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
					dep_sclk_table->entries[i].clk;

			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
					(i == 0) ? true : false;
			data->dpm_table.sclk_table.count++;
		}
	}
	if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
		hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk;
	/* Initialize Mclk DPM table based on allowed Mclk values */
	data->dpm_table.mclk_table.count = 0;
	for (i = 0; i < dep_mclk_table->count; i++) {
		if (i == 0 || data->dpm_table.mclk_table.dpm_levels
				[data->dpm_table.mclk_table.count - 1].value !=
						dep_mclk_table->entries[i].clk) {
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
							dep_mclk_table->entries[i].clk;
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
							(i == 0) ? true : false;
			data->dpm_table.mclk_table.count++;
		}
	}

	if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
		hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk;
	return 0;
}

static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
	struct phm_odn_performance_level *entries;

	if (table_info == NULL)
		return -EINVAL;

	dep_sclk_table = table_info->vdd_dep_on_sclk;
	dep_mclk_table = table_info->vdd_dep_on_mclk;

	odn_table->odn_core_clock_dpm_levels.num_of_pl =
						data->golden_dpm_table.sclk_table.count;
	entries = odn_table->odn_core_clock_dpm_levels.entries;
	for (i = 0; i < data->golden_dpm_table.sclk_table.count; i++) {
		entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
		entries[i].enabled = true;
		entries[i].vddc = dep_sclk_table->entries[i].vddc;
	}

	smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table,
		(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));

	odn_table->odn_memory_clock_dpm_levels.num_of_pl =
						data->golden_dpm_table.mclk_table.count;
	entries = odn_table->odn_memory_clock_dpm_levels.entries;
	for (i = 0; i < data->golden_dpm_table.mclk_table.count; i++) {
		entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
		entries[i].enabled = true;
		entries[i].vddc = dep_mclk_table->entries[i].vddc;
	}

	smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table,
		(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));

	return 0;
}

static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t min_vddc = 0;
	uint32_t max_vddc = 0;

	if (!table_info)
		return;

	dep_sclk_table = table_info->vdd_dep_on_sclk;

	atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc);

	if (min_vddc == 0 || min_vddc > 2000
		|| min_vddc > dep_sclk_table->entries[0].vddc)
		min_vddc = dep_sclk_table->entries[0].vddc;

	if (max_vddc == 0 || max_vddc > 2000
		|| max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc)
		max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc;

	data->odn_dpm_table.min_vddc = min_vddc;
	data->odn_dpm_table.max_vddc = max_vddc;
}

static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;

	if (table_info == NULL)
		return;

	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
		if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
					data->dpm_table.sclk_table.dpm_levels[i].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
			break;
		}
	}

	for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
		if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
					data->dpm_table.mclk_table.dpm_levels[i].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
			break;
		}
	}

	dep_table = table_info->vdd_dep_on_mclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);

	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
			return;
		}
	}

	dep_table = table_info->vdd_dep_on_sclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
			return;
		}
	}
	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
	}
}

static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	smu7_reset_dpm_tables(hwmgr);

	if (hwmgr->pp_table_version == PP_TABLE_V1)
		smu7_setup_dpm_tables_v1(hwmgr);
	else if (hwmgr->pp_table_version == PP_TABLE_V0)
		smu7_setup_dpm_tables_v0(hwmgr);

	smu7_setup_default_pcie_table(hwmgr);

	/* save a copy of the default DPM table */
	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
			sizeof(struct smu7_dpm_table));

	/* initialize ODN table */
	if (hwmgr->od_enabled) {
		if (data->odn_dpm_table.max_vddc) {
			smu7_check_dpm_table_updated(hwmgr);
		} else {
			smu7_setup_voltage_range_from_vbios(hwmgr);
			smu7_odn_initial_default_setting(hwmgr);
		}
	}
	return 0;
}

static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot))
		return smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_EnableVRHotGPIOInterrupt);

	return 0;
}

static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			SCLK_PWRMGT_OFF, 0);
	return 0;
}

static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (data->ulv_supported)
		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV);

	return 0;
}

static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (data->ulv_supported)
		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV);

	return 0;
}

static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep)) {
		if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to enable Master Deep Sleep switch failed!",
					return -EINVAL);
	} else {
		if (smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_MASTER_DeepSleep_OFF)) {
			PP_ASSERT_WITH_CODE(false,
					"Attempt to disable Master Deep Sleep switch failed!",
					return -EINVAL);
		}
	}

	return 0;
}

static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep)) {
		if (smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_MASTER_DeepSleep_OFF)) {
			PP_ASSERT_WITH_CODE(false,
					"Attempt to disable Master Deep Sleep switch failed!",
					return -EINVAL);
		}
	}

	return 0;
}

static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t soft_register_value = 0;
	uint32_t handshake_disables_offset = data->soft_regs_start
				+ smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, HandshakeDisables);

	soft_register_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, handshake_disables_offset);
	soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE;
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			handshake_disables_offset, soft_register_value);
	return 0;
}

static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t soft_register_value = 0;
	uint32_t handshake_disables_offset = data->soft_regs_start
				+ smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, HandshakeDisables);

	soft_register_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, handshake_disables_offset);
	soft_register_value |= smum_get_mac_definition(hwmgr,
					SMU_UVD_MCLK_HANDSHAKE_DISABLE);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			handshake_disables_offset, soft_register_value);
	return 0;
}

static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* enable SCLK dpm */
	if (!data->sclk_dpm_key_disabled) {
		if (hwmgr->chip_id == CHIP_VEGAM)
			smu7_disable_sclk_vce_handshake(hwmgr);

		PP_ASSERT_WITH_CODE(
		(0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)),
				"Failed to enable SCLK DPM during DPM Start Function!",
				return -EINVAL);
	}

	/* enable MCLK dpm */
	if (0 == data->mclk_dpm_key_disabled) {
		if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
			smu7_disable_handshake_uvd(hwmgr);

		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_MCLKDPM_Enable)),
				"Failed to enable MCLK DPM during DPM Start Function!",
				return -EINVAL);

		if (hwmgr->chip_id != CHIP_VEGAM)
			PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);

		if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
| 1113 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5); |
| 1114 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5); |
| 1115 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005); |
| 1116 | udelay(10); |
| 1117 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005); |
| 1118 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005); |
| 1119 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005); |
| 1120 | } else { |
| 1121 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5); |
| 1122 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5); |
| 1123 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005); |
| 1124 | udelay(10); |
Eric Huang | 0c24e7e | 2018-04-11 15:38:11 -0500 | [diff] [blame] | 1125 | if (hwmgr->chip_id == CHIP_VEGAM) { |
| 1126 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009); |
| 1127 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009); |
| 1128 | } else { |
| 1129 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005); |
| 1130 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005); |
| 1131 | } |
Rex Zhu | 86457c3 | 2017-09-14 21:05:18 +0800 | [diff] [blame] | 1132 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005); |
| 1133 | } |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1134 | } |
| 1135 | |
| 1136 | return 0; |
| 1137 | } |
| 1138 | |
| 1139 | static int smu7_start_dpm(struct pp_hwmgr *hwmgr) |
| 1140 | { |
| 1141 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1142 | |
| 1143 | 	/* enable general power management */ |
| 1144 | |
| 1145 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, |
| 1146 | GLOBAL_PWRMGT_EN, 1); |
| 1147 | |
| 1148 | /* enable sclk deep sleep */ |
| 1149 | |
| 1150 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, |
| 1151 | DYNAMIC_PM_EN, 1); |
| 1152 | |
| 1153 | /* prepare for PCIE DPM */ |
| 1154 | |
| 1155 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
| 1156 | data->soft_regs_start + |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 1157 | smum_get_offsetof(hwmgr, SMU_SoftRegisters, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1158 | VoltageChangeTimeout), 0x1000); |
| 1159 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, |
| 1160 | SWRST_COMMAND_1, RESETLC, 0x0); |
| 1161 | |
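| | 	/* |
| | 	 * On CI parts, clear bit 0 of raw register 0x1488 (no symbolic |
| | 	 * name in this driver) before enabling SCLK/MCLK DPM. |
| | 	 */ |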
Rex Zhu | 86457c3 | 2017-09-14 21:05:18 +0800 | [diff] [blame] | 1162 | if (hwmgr->chip_family == AMDGPU_FAMILY_CI) |
| 1163 | cgs_write_register(hwmgr->device, 0x1488, |
| 1164 | (cgs_read_register(hwmgr->device, 0x1488) & ~0x1)); |
| 1165 | |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1166 | if (smu7_enable_sclk_mclk_dpm(hwmgr)) { |
Huang Rui | b5c11b8 | 2016-12-26 15:00:22 +0800 | [diff] [blame] | 1167 | pr_err("Failed to enable Sclk DPM and Mclk DPM!"); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1168 | return -EINVAL; |
| 1169 | } |
| 1170 | |
| 1171 | /* enable PCIE dpm */ |
| 1172 | 	if (!data->pcie_dpm_key_disabled) { |
| 1173 | PP_ASSERT_WITH_CODE( |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 1174 | (0 == smum_send_msg_to_smc(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1175 | PPSMC_MSG_PCIeDPM_Enable)), |
| 1176 | "Failed to enable pcie DPM during DPM Start Function!", |
| 1177 | return -EINVAL); |
| 1178 | } |
| 1179 | |
| 1180 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
| 1181 | PHM_PlatformCaps_Falcon_QuickTransition)) { |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 1182 | PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1183 | PPSMC_MSG_EnableACDCGPIOInterrupt)), |
| 1184 | "Failed to enable AC DC GPIO Interrupt!", |
| 1185 | ); |
| 1186 | } |
| 1187 | |
| 1188 | return 0; |
| 1189 | } |
| 1190 | |
| 1191 | static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) |
| 1192 | { |
| 1193 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1194 | |
| 1195 | /* disable SCLK dpm */ |
Rex Zhu | f28a9b6 | 2016-10-13 15:24:12 +0800 | [diff] [blame] | 1196 | if (!data->sclk_dpm_key_disabled) { |
| 1197 | PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), |
| 1198 | "Trying to disable SCLK DPM when DPM is disabled", |
| 1199 | return 0); |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 1200 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable); |
Rex Zhu | f28a9b6 | 2016-10-13 15:24:12 +0800 | [diff] [blame] | 1201 | } |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1202 | |
| 1203 | /* disable MCLK dpm */ |
| 1204 | if (!data->mclk_dpm_key_disabled) { |
Rex Zhu | f28a9b6 | 2016-10-13 15:24:12 +0800 | [diff] [blame] | 1205 | PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), |
| 1206 | "Trying to disable MCLK DPM when DPM is disabled", |
| 1207 | return 0); |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 1208 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1209 | } |
| 1210 | |
| 1211 | return 0; |
| 1212 | } |
| 1213 | |
| 1214 | static int smu7_stop_dpm(struct pp_hwmgr *hwmgr) |
| 1215 | { |
| 1216 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1217 | |
| 1218 | /* disable general power management */ |
| 1219 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, |
| 1220 | GLOBAL_PWRMGT_EN, 0); |
| 1221 | /* disable sclk deep sleep */ |
| 1222 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, |
| 1223 | DYNAMIC_PM_EN, 0); |
| 1224 | |
| 1225 | /* disable PCIE dpm */ |
| 1226 | if (!data->pcie_dpm_key_disabled) { |
| 1227 | PP_ASSERT_WITH_CODE( |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 1228 | (smum_send_msg_to_smc(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1229 | PPSMC_MSG_PCIeDPM_Disable) == 0), |
| 1230 | "Failed to disable pcie DPM during DPM Stop Function!", |
| 1231 | return -EINVAL); |
| 1232 | } |
| 1233 | |
Rex Zhu | f28a9b6 | 2016-10-13 15:24:12 +0800 | [diff] [blame] | 1234 | smu7_disable_sclk_mclk_dpm(hwmgr); |
| 1235 | |
| 1236 | PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), |
| 1237 | "Trying to disable voltage DPM when DPM is disabled", |
| 1238 | return 0); |
| 1239 | |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 1240 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1241 | |
| 1242 | return 0; |
| 1243 | } |
| 1244 | |
| 1245 | static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources) |
| 1246 | { |
| 1247 | bool protection; |
| 1248 | enum DPM_EVENT_SRC src; |
| 1249 | |
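| | 	/* |
| | 	 * 'sources' is a bitmask of PHM_AutoThrottleSource bits; thermal |
| | 	 * protection is armed only for the exact combinations handled |
| | 	 * below, and an unrecognized mask falls through to case 0, which |
| | 	 * leaves protection disabled. |
| | 	 */ |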
| 1250 | switch (sources) { |
| 1251 | default: |
Huang Rui | b5c11b8 | 2016-12-26 15:00:22 +0800 | [diff] [blame] | 1252 | pr_err("Unknown throttling event sources."); |
Joe Perches | 45ce19e | 2020-03-10 21:51:39 -0700 | [diff] [blame] | 1253 | fallthrough; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1254 | case 0: |
| 1255 | protection = false; |
| 1256 | /* src is unused */ |
| 1257 | break; |
| 1258 | case (1 << PHM_AutoThrottleSource_Thermal): |
| 1259 | protection = true; |
| 1260 | src = DPM_EVENT_SRC_DIGITAL; |
| 1261 | break; |
| 1262 | case (1 << PHM_AutoThrottleSource_External): |
| 1263 | protection = true; |
| 1264 | src = DPM_EVENT_SRC_EXTERNAL; |
| 1265 | break; |
| 1266 | case (1 << PHM_AutoThrottleSource_External) | |
| 1267 | (1 << PHM_AutoThrottleSource_Thermal): |
| 1268 | protection = true; |
| 1269 | src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL; |
| 1270 | break; |
| 1271 | } |
| 1272 | /* Order matters - don't enable thermal protection for the wrong source. */ |
| 1273 | if (protection) { |
| 1274 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, |
| 1275 | DPM_EVENT_SRC, src); |
| 1276 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, |
| 1277 | THERMAL_PROTECTION_DIS, |
| 1278 | !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
| 1279 | PHM_PlatformCaps_ThermalController)); |
| 1280 | 	} else { |
| 1281 | 		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, |
| 1282 | 				THERMAL_PROTECTION_DIS, 1); |
| | 	} |
| 1283 | } |
| 1284 | |
| 1285 | static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr, |
| 1286 | PHM_AutoThrottleSource source) |
| 1287 | { |
| 1288 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1289 | |
| 1290 | if (!(data->active_auto_throttle_sources & (1 << source))) { |
| 1291 | data->active_auto_throttle_sources |= 1 << source; |
| 1292 | smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); |
| 1293 | } |
| 1294 | return 0; |
| 1295 | } |
| 1296 | |
| 1297 | static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) |
| 1298 | { |
| 1299 | return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); |
| 1300 | } |
| 1301 | |
| 1302 | static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr, |
| 1303 | PHM_AutoThrottleSource source) |
| 1304 | { |
| 1305 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1306 | |
| 1307 | if (data->active_auto_throttle_sources & (1 << source)) { |
| 1308 | data->active_auto_throttle_sources &= ~(1 << source); |
| 1309 | smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); |
| 1310 | } |
| 1311 | return 0; |
| 1312 | } |
| 1313 | |
| 1314 | static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) |
| 1315 | { |
| 1316 | return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); |
| 1317 | } |
| 1318 | |
Baoyou Xie | f8a4c11 | 2016-09-30 17:58:42 +0800 | [diff] [blame] | 1319 | static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1320 | { |
| 1321 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1322 | data->pcie_performance_request = true; |
| 1323 | |
| 1324 | return 0; |
| 1325 | } |
| 1326 | |
Baoyou Xie | f8a4c11 | 2016-09-30 17:58:42 +0800 | [diff] [blame] | 1327 | static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1328 | { |
| 1329 | int tmp_result = 0; |
| 1330 | int result = 0; |
| 1331 | |
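| | 	/* |
| | 	 * DPM bring-up sequence. Each step logs its own failure through |
| | 	 * PP_ASSERT_WITH_CODE and records it in 'result', but bring-up |
| | 	 * continues; note the function returns 0 even on partial failure. |
| | 	 */ |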
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1332 | if (smu7_voltage_control(hwmgr)) { |
| 1333 | tmp_result = smu7_enable_voltage_control(hwmgr); |
| 1334 | PP_ASSERT_WITH_CODE(tmp_result == 0, |
| 1335 | "Failed to enable voltage control!", |
| 1336 | result = tmp_result); |
| 1337 | |
| 1338 | tmp_result = smu7_construct_voltage_tables(hwmgr); |
| 1339 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
Colin Ian King | 3d3c4f1 | 2018-04-28 23:21:55 +0100 | [diff] [blame] | 1340 | "Failed to construct voltage tables!", |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1341 | result = tmp_result); |
| 1342 | } |
| 1343 | smum_initialize_mc_reg_table(hwmgr); |
| 1344 | |
| 1345 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
| 1346 | PHM_PlatformCaps_EngineSpreadSpectrumSupport)) |
| 1347 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
| 1348 | GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1); |
| 1349 | |
| 1350 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
| 1351 | PHM_PlatformCaps_ThermalController)) |
| 1352 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
| 1353 | GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0); |
| 1354 | |
| 1355 | tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr); |
| 1356 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1357 | "Failed to program static screen threshold parameters!", |
| 1358 | result = tmp_result); |
| 1359 | |
| 1360 | tmp_result = smu7_enable_display_gap(hwmgr); |
| 1361 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1362 | "Failed to enable display gap!", result = tmp_result); |
| 1363 | |
| 1364 | tmp_result = smu7_program_voting_clients(hwmgr); |
| 1365 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1366 | "Failed to program voting clients!", result = tmp_result); |
| 1367 | |
| 1368 | tmp_result = smum_process_firmware_header(hwmgr); |
| 1369 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1370 | "Failed to process firmware header!", result = tmp_result); |
| 1371 | |
Eric Huang | 0c24e7e | 2018-04-11 15:38:11 -0500 | [diff] [blame] | 1372 | if (hwmgr->chip_id != CHIP_VEGAM) { |
| 1373 | tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr); |
| 1374 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1375 | "Failed to initialize switch from ArbF0 to F1!", |
| 1376 | result = tmp_result); |
| 1377 | } |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1378 | |
| 1379 | result = smu7_setup_default_dpm_tables(hwmgr); |
| 1380 | PP_ASSERT_WITH_CODE(0 == result, |
| 1381 | "Failed to setup default DPM tables!", return result); |
| 1382 | |
| 1383 | tmp_result = smum_init_smc_table(hwmgr); |
| 1384 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1385 | "Failed to initialize SMC table!", result = tmp_result); |
| 1386 | |
| 1387 | tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr); |
| 1388 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1389 | "Failed to enable VR hot GPIO interrupt!", result = tmp_result); |
| 1390 | |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 1391 | smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1392 | |
| 1393 | tmp_result = smu7_enable_sclk_control(hwmgr); |
| 1394 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1395 | "Failed to enable SCLK control!", result = tmp_result); |
| 1396 | |
| 1397 | tmp_result = smu7_enable_smc_voltage_controller(hwmgr); |
| 1398 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1399 | "Failed to enable voltage control!", result = tmp_result); |
| 1400 | |
| 1401 | tmp_result = smu7_enable_ulv(hwmgr); |
| 1402 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1403 | "Failed to enable ULV!", result = tmp_result); |
| 1404 | |
| 1405 | tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr); |
| 1406 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1407 | "Failed to enable deep sleep master switch!", result = tmp_result); |
| 1408 | |
| 1409 | tmp_result = smu7_enable_didt_config(hwmgr); |
| 1410 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1411 | 			"Failed to enable DIDT config!", result = tmp_result); |
| 1412 | |
| 1413 | tmp_result = smu7_start_dpm(hwmgr); |
| 1414 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1415 | "Failed to start DPM!", result = tmp_result); |
| 1416 | |
| 1417 | tmp_result = smu7_enable_smc_cac(hwmgr); |
| 1418 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1419 | "Failed to enable SMC CAC!", result = tmp_result); |
| 1420 | |
| 1421 | tmp_result = smu7_enable_power_containment(hwmgr); |
| 1422 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1423 | "Failed to enable power containment!", result = tmp_result); |
| 1424 | |
| 1425 | tmp_result = smu7_power_control_set_level(hwmgr); |
| 1426 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1427 | "Failed to power control set level!", result = tmp_result); |
| 1428 | |
| 1429 | tmp_result = smu7_enable_thermal_auto_throttle(hwmgr); |
| 1430 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1431 | "Failed to enable thermal auto throttle!", result = tmp_result); |
| 1432 | |
| 1433 | tmp_result = smu7_pcie_performance_request(hwmgr); |
| 1434 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 1435 | 			"PCIe performance request failed!", result = tmp_result); |
| 1436 | |
| 1437 | return 0; |
| 1438 | } |
| 1439 | |
Rex Zhu | 3c9d1fde | 2018-01-02 15:20:55 +0800 | [diff] [blame] | 1440 | static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable) |
| 1441 | { |
Rex Zhu | 116af450 | 2018-03-09 18:07:59 +0800 | [diff] [blame] | 1442 | if (!hwmgr->avfs_supported) |
Rex Zhu | 3c9d1fde | 2018-01-02 15:20:55 +0800 | [diff] [blame] | 1443 | return 0; |
| 1444 | |
| 1445 | if (enable) { |
| 1446 | if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, |
| 1447 | CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) { |
| 1448 | PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc( |
| 1449 | hwmgr, PPSMC_MSG_EnableAvfs), |
| 1450 | "Failed to enable AVFS!", |
| 1451 | return -EINVAL); |
| 1452 | } |
| 1453 | } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, |
| 1454 | CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) { |
| 1455 | PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc( |
| 1456 | hwmgr, PPSMC_MSG_DisableAvfs), |
| 1457 | "Failed to disable AVFS!", |
| 1458 | return -EINVAL); |
| 1459 | } |
| 1460 | |
| 1461 | return 0; |
| 1462 | } |
| 1463 | |
| 1464 | static int smu7_update_avfs(struct pp_hwmgr *hwmgr) |
| 1465 | { |
Rex Zhu | 3c9d1fde | 2018-01-02 15:20:55 +0800 | [diff] [blame] | 1466 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1467 | |
Rex Zhu | 116af450 | 2018-03-09 18:07:59 +0800 | [diff] [blame] | 1468 | if (!hwmgr->avfs_supported) |
Rex Zhu | 3c9d1fde | 2018-01-02 15:20:55 +0800 | [diff] [blame] | 1469 | return 0; |
| 1470 | |
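| | 	/* |
| | 	 * A user VDDC override keeps AVFS disabled, an SCLK override |
| | 	 * bounces AVFS off and back on so the curve is re-applied, and |
| | 	 * anything else simply makes sure AVFS is running. |
| | 	 */ |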
| 1471 | if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { |
| 1472 | smu7_avfs_control(hwmgr, false); |
| 1473 | } else if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { |
| 1474 | smu7_avfs_control(hwmgr, false); |
| 1475 | smu7_avfs_control(hwmgr, true); |
| 1476 | } else { |
| 1477 | smu7_avfs_control(hwmgr, true); |
| 1478 | } |
| 1479 | |
| 1480 | return 0; |
| 1481 | } |
| 1482 | |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1483 | int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr) |
| 1484 | { |
| 1485 | int tmp_result, result = 0; |
| 1486 | |
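| | 	/* |
| | 	 * Tear-down roughly mirrors smu7_enable_dpm_tasks; each step |
| | 	 * records its failure in 'result' but the sequence continues. |
| | 	 */ |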
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1487 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
| 1488 | PHM_PlatformCaps_ThermalController)) |
| 1489 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
| 1490 | GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1); |
| 1491 | |
| 1492 | tmp_result = smu7_disable_power_containment(hwmgr); |
| 1493 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1494 | "Failed to disable power containment!", result = tmp_result); |
| 1495 | |
| 1496 | tmp_result = smu7_disable_smc_cac(hwmgr); |
| 1497 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1498 | "Failed to disable SMC CAC!", result = tmp_result); |
| 1499 | |
Rex Zhu | 7f61bed | 2016-12-21 20:40:53 +0800 | [diff] [blame] | 1500 | tmp_result = smu7_disable_didt_config(hwmgr); |
| 1501 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1502 | "Failed to disable DIDT!", result = tmp_result); |
| 1503 | |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1504 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
| 1505 | CG_SPLL_SPREAD_SPECTRUM, SSEN, 0); |
| 1506 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
| 1507 | GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0); |
| 1508 | |
| 1509 | tmp_result = smu7_disable_thermal_auto_throttle(hwmgr); |
| 1510 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1511 | "Failed to disable thermal auto throttle!", result = tmp_result); |
| 1512 | |
Eric Huang | 35011d3 | 2017-03-01 16:49:06 -0500 | [diff] [blame] | 1513 | tmp_result = smu7_avfs_control(hwmgr, false); |
| 1514 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1515 | "Failed to disable AVFS!", result = tmp_result); |
Rex Zhu | f28a9b6 | 2016-10-13 15:24:12 +0800 | [diff] [blame] | 1516 | |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1517 | tmp_result = smu7_stop_dpm(hwmgr); |
| 1518 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1519 | "Failed to stop DPM!", result = tmp_result); |
| 1520 | |
| 1521 | tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr); |
| 1522 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1523 | "Failed to disable deep sleep master switch!", result = tmp_result); |
| 1524 | |
| 1525 | tmp_result = smu7_disable_ulv(hwmgr); |
| 1526 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1527 | "Failed to disable ULV!", result = tmp_result); |
| 1528 | |
| 1529 | tmp_result = smu7_clear_voting_clients(hwmgr); |
| 1530 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1531 | "Failed to clear voting clients!", result = tmp_result); |
| 1532 | |
| 1533 | tmp_result = smu7_reset_to_default(hwmgr); |
| 1534 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1535 | "Failed to reset to default!", result = tmp_result); |
| 1536 | |
| 1537 | tmp_result = smu7_force_switch_to_arbf0(hwmgr); |
| 1538 | PP_ASSERT_WITH_CODE((tmp_result == 0), |
| 1539 | 			"Failed to force switch to ArbF0!", result = tmp_result); |
| 1540 | |
| 1541 | return result; |
| 1542 | } |
| 1543 | |
| 1544 | int smu7_reset_asic_tasks(struct pp_hwmgr *hwmgr) |
| 1545 | { |
| 1547 | return 0; |
| 1548 | } |
| 1549 | |
| 1550 | static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) |
| 1551 | { |
| 1552 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1553 | struct phm_ppt_v1_information *table_info = |
| 1554 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
Rex Zhu | ada6770 | 2018-02-27 19:15:08 +0800 | [diff] [blame] | 1555 | struct amdgpu_device *adev = hwmgr->adev; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1556 | |
| 1557 | data->dll_default_on = false; |
| 1558 | data->mclk_dpm0_activity_target = 0xa; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1559 | data->vddc_vddgfx_delta = 300; |
| 1560 | data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT; |
| 1561 | data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT; |
Rex Zhu | 0596df6 | 2017-09-15 16:30:52 +0800 | [diff] [blame] | 1562 | data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0; |
| 1563 | 	data->voting_rights_clients[1] = SMU7_VOTINGRIGHTSCLIENTS_DFLT1; |
| 1564 | 	data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2; |
| 1565 | 	data->voting_rights_clients[3] = SMU7_VOTINGRIGHTSCLIENTS_DFLT3; |
| 1566 | 	data->voting_rights_clients[4] = SMU7_VOTINGRIGHTSCLIENTS_DFLT4; |
| 1567 | 	data->voting_rights_clients[5] = SMU7_VOTINGRIGHTSCLIENTS_DFLT5; |
| 1568 | 	data->voting_rights_clients[6] = SMU7_VOTINGRIGHTSCLIENTS_DFLT6; |
| 1569 | 	data->voting_rights_clients[7] = SMU7_VOTINGRIGHTSCLIENTS_DFLT7; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1570 | |
| 1571 | data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true; |
| 1572 | data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true; |
| 1573 | data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true; |
| 1574 | /* need to set voltage control types before EVV patching */ |
| 1575 | data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE; |
| 1576 | data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE; |
| 1577 | data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE; |
| 1578 | data->enable_tdc_limit_feature = true; |
| 1579 | data->enable_pkg_pwr_tracking_feature = true; |
| 1580 | data->force_pcie_gen = PP_PCIEGenInvalid; |
| 1581 | data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false; |
Rex Zhu | c7429b3 | 2018-01-23 17:12:29 +0800 | [diff] [blame] | 1582 | data->current_profile_setting.bupdate_sclk = 1; |
| 1583 | data->current_profile_setting.sclk_up_hyst = 0; |
| 1584 | data->current_profile_setting.sclk_down_hyst = 100; |
| 1585 | data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT; |
Alex Deucher | 9861023 | 2018-06-28 12:52:43 -0500 | [diff] [blame] | 1586 | data->current_profile_setting.bupdate_mclk = 1; |
Rex Zhu | c7429b3 | 2018-01-23 17:12:29 +0800 | [diff] [blame] | 1587 | data->current_profile_setting.mclk_up_hyst = 0; |
| 1588 | data->current_profile_setting.mclk_down_hyst = 100; |
| 1589 | data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT; |
Rex Zhu | 052fe96 | 2018-03-02 20:09:11 +0800 | [diff] [blame] | 1590 | hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D]; |
| 1591 | hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D; |
| 1592 | hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1593 | |
Rex Zhu | b3b0305 | 2017-09-26 13:28:27 -0400 | [diff] [blame] | 1594 | if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) { |
Rex Zhu | 187368a | 2017-02-15 15:48:40 +0800 | [diff] [blame] | 1595 | uint8_t tmp1, tmp2; |
| 1596 | uint16_t tmp3 = 0; |
| 1597 | atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2, |
| 1598 | &tmp3); |
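| | 		/* |
| | 		 * The SVI2 info packs the phase-shed select in bits 6:5 of |
| | 		 * tmp3; extract the two-bit field and swap its bits to form |
| | 		 * the control value. |
| | 		 */ |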
| 1599 | tmp3 = (tmp3 >> 5) & 0x3; |
| 1600 | data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3; |
Rex Zhu | 86457c3 | 2017-09-14 21:05:18 +0800 | [diff] [blame] | 1601 | } else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) { |
| 1602 | data->vddc_phase_shed_control = 1; |
| 1603 | } else { |
| 1604 | data->vddc_phase_shed_control = 0; |
| 1605 | } |
| 1606 | |
| 1607 | if (hwmgr->chip_id == CHIP_HAWAII) { |
| 1608 | data->thermal_temp_setting.temperature_low = 94500; |
| 1609 | data->thermal_temp_setting.temperature_high = 95000; |
| 1610 | data->thermal_temp_setting.temperature_shutdown = 104000; |
| 1611 | } else { |
| 1612 | data->thermal_temp_setting.temperature_low = 99500; |
| 1613 | data->thermal_temp_setting.temperature_high = 100000; |
| 1614 | data->thermal_temp_setting.temperature_shutdown = 104000; |
Rex Zhu | 187368a | 2017-02-15 15:48:40 +0800 | [diff] [blame] | 1615 | } |
| 1616 | |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1617 | data->fast_watermark_threshold = 100; |
Rex Zhu | e71b7ae6 | 2017-09-08 14:31:26 +0800 | [diff] [blame] | 1618 | if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1619 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) |
| 1620 | data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; |
Rex Zhu | 86457c3 | 2017-09-14 21:05:18 +0800 | [diff] [blame] | 1621 | else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, |
| 1622 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT)) |
| 1623 | data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1624 | |
| 1625 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
| 1626 | PHM_PlatformCaps_ControlVDDGFX)) { |
Rex Zhu | e71b7ae6 | 2017-09-08 14:31:26 +0800 | [diff] [blame] | 1627 | if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1628 | VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) { |
| 1629 | data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; |
| 1630 | } |
| 1631 | } |
| 1632 | |
| 1633 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
| 1634 | PHM_PlatformCaps_EnableMVDDControl)) { |
Rex Zhu | e71b7ae6 | 2017-09-08 14:31:26 +0800 | [diff] [blame] | 1635 | if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1636 | VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) |
| 1637 | data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; |
Rex Zhu | e71b7ae6 | 2017-09-08 14:31:26 +0800 | [diff] [blame] | 1638 | else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1639 | VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) |
| 1640 | data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; |
| 1641 | } |
| 1642 | |
Rex Zhu | 86457c3 | 2017-09-14 21:05:18 +0800 | [diff] [blame] | 1643 | if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1644 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
| 1645 | PHM_PlatformCaps_ControlVDDGFX); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1646 | |
| 1647 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
| 1648 | PHM_PlatformCaps_ControlVDDCI)) { |
Rex Zhu | e71b7ae6 | 2017-09-08 14:31:26 +0800 | [diff] [blame] | 1649 | if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1650 | VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) |
| 1651 | data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; |
Rex Zhu | e71b7ae6 | 2017-09-08 14:31:26 +0800 | [diff] [blame] | 1652 | else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1653 | VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) |
| 1654 | data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; |
| 1655 | } |
| 1656 | |
| 1657 | if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE) |
| 1658 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
| 1659 | PHM_PlatformCaps_EnableMVDDControl); |
| 1660 | |
| 1661 | if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) |
| 1662 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
| 1663 | PHM_PlatformCaps_ControlVDDCI); |
| 1664 | |
Rex Zhu | 53b963b | 2016-10-27 17:48:49 +0800 | [diff] [blame] | 1665 | if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1666 | && (table_info->cac_dtp_table->usClockStretchAmount != 0)) |
| 1667 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
| 1668 | PHM_PlatformCaps_ClockStretcher); |
| 1669 | |
| 1670 | data->pcie_gen_performance.max = PP_PCIEGen1; |
| 1671 | data->pcie_gen_performance.min = PP_PCIEGen3; |
| 1672 | data->pcie_gen_power_saving.max = PP_PCIEGen1; |
| 1673 | data->pcie_gen_power_saving.min = PP_PCIEGen3; |
| 1674 | data->pcie_lane_performance.max = 0; |
| 1675 | data->pcie_lane_performance.min = 16; |
| 1676 | data->pcie_lane_power_saving.max = 0; |
| 1677 | data->pcie_lane_power_saving.min = 16; |
Tom St Denis | 97f40ef | 2016-09-30 10:58:44 -0400 | [diff] [blame] | 1678 | |
| 1680 | if (adev->pg_flags & AMD_PG_SUPPORT_UVD) |
| 1681 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
| 1682 | PHM_PlatformCaps_UVDPowerGating); |
| 1683 | if (adev->pg_flags & AMD_PG_SUPPORT_VCE) |
| 1684 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
| 1685 | PHM_PlatformCaps_VCEPowerGating); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1686 | } |
| 1687 | |
| 1688 | /** |
| 1689 | * Get Leakage VDDC based on leakage ID. |
| 1690 | * |
| 1691 | * @param hwmgr the address of the powerplay hardware manager. |
| 1692 |  * @return 0 on success; -EINVAL if an invalid EVV voltage is read. |
| 1693 | */ |
| 1694 | static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) |
| 1695 | { |
| 1696 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1697 | uint16_t vv_id; |
| 1698 | uint16_t vddc = 0; |
| 1699 | uint16_t vddgfx = 0; |
| 1700 | uint16_t i, j; |
| 1701 | uint32_t sclk = 0; |
| 1702 | struct phm_ppt_v1_information *table_info = |
| 1703 | (struct phm_ppt_v1_information *)hwmgr->pptable; |
| 1704 | struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL; |
| 1706 | |
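| | 	/* |
| | 	 * Walk the virtual leakage voltage IDs (0xff01..0xff08) and |
| | 	 * resolve each one to a real voltage through EVV. |
| | 	 */ |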
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1707 | for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { |
| 1708 | vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; |
| 1709 | |
| 1710 | if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { |
Alex Deucher | 0f12f73 | 2016-11-09 17:52:42 -0500 | [diff] [blame] | 1711 | if ((hwmgr->pp_table_version == PP_TABLE_V1) |
| 1712 | && !phm_get_sclk_for_voltage_evv(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1713 | table_info->vddgfx_lookup_table, vv_id, &sclk)) { |
| 1714 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
| 1715 | PHM_PlatformCaps_ClockStretcher)) { |
Alex Deucher | 0f12f73 | 2016-11-09 17:52:42 -0500 | [diff] [blame] | 1716 | sclk_table = table_info->vdd_dep_on_sclk; |
| 1717 | |
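| | 				/* |
| | 				 * If this sclk matches a level with clock |
| | 				 * stretching disabled, evaluate EVV slightly |
| | 				 * above it (+5000 in pptable clock units). |
| | 				 */ |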
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1718 | for (j = 1; j < sclk_table->count; j++) { |
| 1719 | if (sclk_table->entries[j].clk == sclk && |
| 1720 | sclk_table->entries[j].cks_enable == 0) { |
| 1721 | sclk += 5000; |
| 1722 | break; |
| 1723 | } |
| 1724 | } |
| 1725 | } |
| 1726 | if (0 == atomctrl_get_voltage_evv_on_sclk |
| 1727 | (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk, |
| 1728 | vv_id, &vddgfx)) { |
| 1729 | 					/* need to make sure vddgfx is less than 2 V, or else it could damage the ASIC. */ |
| 1730 | PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL); |
| 1731 | |
| 1732 | /* the voltage should not be zero nor equal to leakage ID */ |
| 1733 | if (vddgfx != 0 && vddgfx != vv_id) { |
| 1734 | data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx; |
| 1735 | data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id; |
| 1736 | data->vddcgfx_leakage.count++; |
| 1737 | } |
| 1738 | } else { |
Huang Rui | b5c11b8 | 2016-12-26 15:00:22 +0800 | [diff] [blame] | 1739 | pr_info("Error retrieving EVV voltage value!\n"); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1740 | } |
| 1741 | } |
| 1742 | } else { |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1743 | if ((hwmgr->pp_table_version == PP_TABLE_V0) |
| 1744 | || !phm_get_sclk_for_voltage_evv(hwmgr, |
| 1745 | table_info->vddc_lookup_table, vv_id, &sclk)) { |
| 1746 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
| 1747 | PHM_PlatformCaps_ClockStretcher)) { |
Alex Deucher | 0f12f73 | 2016-11-09 17:52:42 -0500 | [diff] [blame] | 1748 | if (table_info == NULL) |
| 1749 | return -EINVAL; |
| 1750 | sclk_table = table_info->vdd_dep_on_sclk; |
| 1751 | |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1752 | for (j = 1; j < sclk_table->count; j++) { |
| 1753 | if (sclk_table->entries[j].clk == sclk && |
| 1754 | sclk_table->entries[j].cks_enable == 0) { |
| 1755 | sclk += 5000; |
| 1756 | break; |
| 1757 | } |
| 1758 | } |
| 1759 | } |
| 1760 | |
| 1761 | if (phm_get_voltage_evv_on_sclk(hwmgr, |
| 1762 | VOLTAGE_TYPE_VDDC, |
| 1763 | sclk, vv_id, &vddc) == 0) { |
| 1764 | if (vddc >= 2000 || vddc == 0) |
| 1765 | return -EINVAL; |
| 1766 | } else { |
Rex Zhu | 89c6769 | 2017-09-15 11:09:20 +0800 | [diff] [blame] | 1767 | 					pr_debug("failed to retrieve EVV voltage!\n"); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1768 | continue; |
| 1769 | } |
| 1770 | |
| 1771 | /* the voltage should not be zero nor equal to leakage ID */ |
| 1772 | if (vddc != 0 && vddc != vv_id) { |
| 1773 | data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc); |
| 1774 | data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id; |
| 1775 | data->vddc_leakage.count++; |
| 1776 | } |
| 1777 | } |
| 1778 | } |
| 1779 | } |
| 1780 | |
| 1781 | return 0; |
| 1782 | } |
| 1783 | |
| 1784 | /** |
| 1785 | * Change virtual leakage voltage to actual value. |
| 1786 | * |
| 1787 | * @param hwmgr the address of the powerplay hardware manager. |
| 1788 |  * @param voltage        pointer to the voltage value to patch |
| 1789 |  * @param leakage_table  pointer to the leakage voltage table |
| 1790 | */ |
| 1791 | static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr, |
| 1792 | uint16_t *voltage, struct smu7_leakage_voltage *leakage_table) |
| 1793 | { |
| 1794 | uint32_t index; |
| 1795 | |
| 1796 | /* search for leakage voltage ID 0xff01 ~ 0xff08 */ |
| 1797 | for (index = 0; index < leakage_table->count; index++) { |
| 1798 | /* if this voltage matches a leakage voltage ID */ |
| 1799 | /* patch with actual leakage voltage */ |
| 1800 | if (leakage_table->leakage_id[index] == *voltage) { |
| 1801 | *voltage = leakage_table->actual_voltage[index]; |
| 1802 | break; |
| 1803 | } |
| 1804 | } |
| 1805 | |
| 1806 | if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) |
Huang Rui | b5c11b8 | 2016-12-26 15:00:22 +0800 | [diff] [blame] | 1807 | 		pr_err("Voltage value looks like a leakage ID but was not patched\n"); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1808 | } |
| 1809 | |
| 1810 | /** |
| 1811 | * Patch voltage lookup table by EVV leakages. |
| 1812 | * |
| 1813 | * @param hwmgr the address of the powerplay hardware manager. |
| 1814 |  * @param lookup_table   pointer to the voltage lookup table |
| 1815 |  * @param leakage_table  pointer to the leakage voltage table |
| 1816 | * @return always 0 |
| 1817 | */ |
| 1818 | static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr, |
| 1819 | phm_ppt_v1_voltage_lookup_table *lookup_table, |
| 1820 | struct smu7_leakage_voltage *leakage_table) |
| 1821 | { |
| 1822 | uint32_t i; |
| 1823 | |
| 1824 | for (i = 0; i < lookup_table->count; i++) |
| 1825 | smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, |
| 1826 | &lookup_table->entries[i].us_vdd, leakage_table); |
| 1827 | |
| 1828 | return 0; |
| 1829 | } |
| 1830 | |
| 1831 | static int smu7_patch_clock_voltage_limits_with_vddc_leakage( |
| 1832 | struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table, |
| 1833 | uint16_t *vddc) |
| 1834 | { |
| 1835 | struct phm_ppt_v1_information *table_info = |
| 1836 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 1837 | smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table); |
| 1838 | hwmgr->dyn_state.max_clock_voltage_on_dc.vddc = |
| 1839 | table_info->max_clock_voltage_on_dc.vddc; |
| 1840 | return 0; |
| 1841 | } |
| 1842 | |
| 1843 | static int smu7_patch_voltage_dependency_tables_with_lookup_table( |
| 1844 | struct pp_hwmgr *hwmgr) |
| 1845 | { |
| 1846 | uint8_t entry_id; |
| 1847 | uint8_t voltage_id; |
| 1848 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1849 | struct phm_ppt_v1_information *table_info = |
| 1850 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 1851 | |
| 1852 | struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = |
| 1853 | table_info->vdd_dep_on_sclk; |
| 1854 | struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = |
| 1855 | table_info->vdd_dep_on_mclk; |
| 1856 | struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = |
| 1857 | table_info->mm_dep_table; |
| 1858 | |
| 1859 | if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { |
| 1860 | for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { |
| 1861 | voltage_id = sclk_table->entries[entry_id].vddInd; |
| 1862 | sclk_table->entries[entry_id].vddgfx = |
| 1863 | table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd; |
| 1864 | } |
| 1865 | } else { |
| 1866 | for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { |
| 1867 | voltage_id = sclk_table->entries[entry_id].vddInd; |
| 1868 | sclk_table->entries[entry_id].vddc = |
| 1869 | table_info->vddc_lookup_table->entries[voltage_id].us_vdd; |
| 1870 | } |
| 1871 | } |
| 1872 | |
| 1873 | for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) { |
| 1874 | voltage_id = mclk_table->entries[entry_id].vddInd; |
| 1875 | mclk_table->entries[entry_id].vddc = |
| 1876 | table_info->vddc_lookup_table->entries[voltage_id].us_vdd; |
| 1877 | } |
| 1878 | |
| 1879 | for (entry_id = 0; entry_id < mm_table->count; ++entry_id) { |
| 1880 | voltage_id = mm_table->entries[entry_id].vddcInd; |
| 1881 | mm_table->entries[entry_id].vddc = |
| 1882 | table_info->vddc_lookup_table->entries[voltage_id].us_vdd; |
| 1883 | } |
| 1884 | |
| 1885 | return 0; |
| 1886 | |
| 1887 | } |
| 1888 | |
| 1889 | static int phm_add_voltage(struct pp_hwmgr *hwmgr, |
| 1890 | phm_ppt_v1_voltage_lookup_table *look_up_table, |
| 1891 | phm_ppt_v1_voltage_lookup_record *record) |
| 1892 | { |
| 1893 | uint32_t i; |
| 1894 | |
| 1895 | PP_ASSERT_WITH_CODE((NULL != look_up_table), |
| 1896 | "Lookup Table empty.", return -EINVAL); |
| 1897 | PP_ASSERT_WITH_CODE((0 != look_up_table->count), |
| 1898 | "Lookup Table empty.", return -EINVAL); |
| 1899 | |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 1900 | i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1901 | PP_ASSERT_WITH_CODE((i >= look_up_table->count), |
| 1902 | "Lookup Table is full.", return -EINVAL); |
| 1903 | |
| 1904 | /* This is to avoid entering duplicate calculated records. */ |
| 1905 | for (i = 0; i < look_up_table->count; i++) { |
| 1906 | if (look_up_table->entries[i].us_vdd == record->us_vdd) { |
| 1907 | if (look_up_table->entries[i].us_calculated == 1) |
| 1908 | return 0; |
| 1909 | break; |
| 1910 | } |
| 1911 | } |
| 1912 | |
| 1913 | look_up_table->entries[i].us_calculated = 1; |
| 1914 | look_up_table->entries[i].us_vdd = record->us_vdd; |
| 1915 | look_up_table->entries[i].us_cac_low = record->us_cac_low; |
| 1916 | look_up_table->entries[i].us_cac_mid = record->us_cac_mid; |
| 1917 | look_up_table->entries[i].us_cac_high = record->us_cac_high; |
| 1918 | /* Only increment the count when we're appending, not replacing duplicate entry. */ |
| 1919 | if (i == look_up_table->count) |
| 1920 | look_up_table->count++; |
| 1921 | |
| 1922 | return 0; |
| 1923 | } |
| 1924 | |
| 1925 | |
| 1926 | static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr) |
| 1927 | { |
| 1928 | uint8_t entry_id; |
| 1929 | struct phm_ppt_v1_voltage_lookup_record v_record; |
| 1930 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1931 | struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 1932 | |
| 1933 | phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk; |
| 1934 | phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk; |
| 1935 | |
| 1936 | if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { |
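| | 		/* |
| | 		 * vdd_offset is a 16-bit field: bit 15 set marks a negative |
| | 		 * offset, which is applied below by adding the raw value and |
| | 		 * subtracting 0xFFFF. |
| | 		 */ |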
| 1937 | for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { |
| 1938 | if (sclk_table->entries[entry_id].vdd_offset & (1 << 15)) |
| 1939 | v_record.us_vdd = sclk_table->entries[entry_id].vddgfx + |
| 1940 | sclk_table->entries[entry_id].vdd_offset - 0xFFFF; |
| 1941 | else |
| 1942 | v_record.us_vdd = sclk_table->entries[entry_id].vddgfx + |
| 1943 | sclk_table->entries[entry_id].vdd_offset; |
| 1944 | |
| 1945 | sclk_table->entries[entry_id].vddc = |
| 1946 | v_record.us_cac_low = v_record.us_cac_mid = |
| 1947 | v_record.us_cac_high = v_record.us_vdd; |
| 1948 | |
| 1949 | phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record); |
| 1950 | } |
| 1951 | |
| 1952 | for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) { |
| 1953 | if (mclk_table->entries[entry_id].vdd_offset & (1 << 15)) |
| 1954 | v_record.us_vdd = mclk_table->entries[entry_id].vddc + |
| 1955 | mclk_table->entries[entry_id].vdd_offset - 0xFFFF; |
| 1956 | else |
| 1957 | v_record.us_vdd = mclk_table->entries[entry_id].vddc + |
| 1958 | mclk_table->entries[entry_id].vdd_offset; |
| 1959 | |
| 1960 | mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low = |
| 1961 | v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd; |
| 1962 | phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record); |
| 1963 | } |
| 1964 | } |
| 1965 | return 0; |
| 1966 | } |
| 1967 | |
| 1968 | static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr) |
| 1969 | { |
| 1970 | uint8_t entry_id; |
| 1971 | struct phm_ppt_v1_voltage_lookup_record v_record; |
| 1972 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 1973 | struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 1974 | phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table; |
| 1975 | |
| 1976 | if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { |
| 1977 | for (entry_id = 0; entry_id < mm_table->count; entry_id++) { |
| 1978 | if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15)) |
| 1979 | v_record.us_vdd = mm_table->entries[entry_id].vddc + |
| 1980 | mm_table->entries[entry_id].vddgfx_offset - 0xFFFF; |
| 1981 | else |
| 1982 | v_record.us_vdd = mm_table->entries[entry_id].vddc + |
| 1983 | mm_table->entries[entry_id].vddgfx_offset; |
| 1984 | |
| 1985 | /* Add the calculated VDDGFX to the VDDGFX lookup table */ |
| 1986 | mm_table->entries[entry_id].vddgfx = v_record.us_cac_low = |
| 1987 | v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd; |
| 1988 | phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record); |
| 1989 | } |
| 1990 | } |
| 1991 | return 0; |
| 1992 | } |
| 1993 | |
| 1994 | static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr, |
| 1995 | struct phm_ppt_v1_voltage_lookup_table *lookup_table) |
| 1996 | { |
| 1997 | uint32_t table_size, i, j; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 1998 | table_size = lookup_table->count; |
| 1999 | |
| 2000 | PP_ASSERT_WITH_CODE(0 != lookup_table->count, |
| 2001 | "Lookup table is empty", return -EINVAL); |
| 2002 | |
| 2003 | 	/* Sort entries in ascending us_vdd order (insertion sort) */ |
| 2004 | for (i = 0; i < table_size - 1; i++) { |
| 2005 | for (j = i + 1; j > 0; j--) { |
| 2006 | if (lookup_table->entries[j].us_vdd < |
| 2007 | lookup_table->entries[j - 1].us_vdd) { |
Ville Syrjälä | ff06184 | 2019-10-10 16:11:58 +0300 | [diff] [blame] | 2008 | swap(lookup_table->entries[j - 1], |
| 2009 | lookup_table->entries[j]); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2010 | } |
| 2011 | } |
| 2012 | } |
| 2013 | |
| 2014 | return 0; |
| 2015 | } |
| 2016 | |
| 2017 | static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr) |
| 2018 | { |
| 2019 | int result = 0; |
| 2020 | int tmp_result; |
| 2021 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2022 | struct phm_ppt_v1_information *table_info = |
| 2023 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 2024 | |
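| | 	/* |
| | 	 * Patch pipeline: replace leakage IDs in the lookup tables with |
| | 	 * real voltages, propagate lookup values into the clock dependency |
| | 	 * tables, derive the calculated entries, then sort both lookup |
| | 	 * tables by voltage. |
| | 	 */ |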
| 2025 | if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { |
| 2026 | tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr, |
| 2027 | table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage)); |
| 2028 | if (tmp_result != 0) |
| 2029 | result = tmp_result; |
| 2030 | |
| 2031 | smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, |
| 2032 | &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage)); |
| 2033 | } else { |
| 2035 | tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr, |
| 2036 | table_info->vddc_lookup_table, &(data->vddc_leakage)); |
| 2037 | if (tmp_result) |
| 2038 | result = tmp_result; |
| 2039 | |
| 2040 | tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr, |
| 2041 | &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc); |
| 2042 | if (tmp_result) |
| 2043 | result = tmp_result; |
| 2044 | } |
| 2045 | |
| 2046 | tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr); |
| 2047 | if (tmp_result) |
| 2048 | result = tmp_result; |
| 2049 | |
| 2050 | tmp_result = smu7_calc_voltage_dependency_tables(hwmgr); |
| 2051 | if (tmp_result) |
| 2052 | result = tmp_result; |
| 2053 | |
| 2054 | tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr); |
| 2055 | if (tmp_result) |
| 2056 | result = tmp_result; |
| 2057 | |
| 2058 | tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table); |
| 2059 | if (tmp_result) |
| 2060 | result = tmp_result; |
| 2061 | |
| 2062 | tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table); |
| 2063 | if (tmp_result) |
| 2064 | result = tmp_result; |
| 2065 | |
| 2066 | return result; |
| 2067 | } |
| 2068 | |
| 2069 | static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr) |
| 2070 | { |
| 2071 | struct phm_ppt_v1_information *table_info = |
| 2072 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 2073 | |
| 2074 | struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table = |
| 2075 | table_info->vdd_dep_on_sclk; |
| 2076 | struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table = |
| 2077 | table_info->vdd_dep_on_mclk; |
| 2078 | |
| 2079 | PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL, |
| 2080 | "VDD dependency on SCLK table is missing.", |
| 2081 | return -EINVAL); |
| 2082 | PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1, |
| 2083 | 			"VDD dependency on SCLK table must have at least one entry.", |
| 2084 | return -EINVAL); |
| 2085 | |
| 2086 | PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL, |
| 2087 | "VDD dependency on MCLK table is missing", |
| 2088 | return -EINVAL); |
| 2089 | PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1, |
| 2090 | 			"VDD dependency on MCLK table must have at least one entry.", |
| 2091 | return -EINVAL); |
| 2092 | |
| 2093 | table_info->max_clock_voltage_on_ac.sclk = |
| 2094 | allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk; |
| 2095 | table_info->max_clock_voltage_on_ac.mclk = |
| 2096 | allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk; |
| 2097 | table_info->max_clock_voltage_on_ac.vddc = |
| 2098 | allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc; |
| 2099 | table_info->max_clock_voltage_on_ac.vddci = |
| 2100 | allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci; |
| 2101 | |
| 2102 | hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk; |
| 2103 | hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk; |
| 2104 | hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc; |
| 2105 | hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci; |
| 2106 | |
| 2107 | return 0; |
| 2108 | } |
| 2109 | |
Baoyou Xie | f8a4c11 | 2016-09-30 17:58:42 +0800 | [diff] [blame] | 2110 | static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2111 | { |
| 2112 | struct phm_ppt_v1_information *table_info = |
| 2113 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 2114 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; |
| 2115 | struct phm_ppt_v1_voltage_lookup_table *lookup_table; |
| 2116 | uint32_t i; |
| 2117 | uint32_t hw_revision, sub_vendor_id, sub_sys_id; |
Rex Zhu | ada6770 | 2018-02-27 19:15:08 +0800 | [diff] [blame] | 2118 | struct amdgpu_device *adev = hwmgr->adev; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2119 | |
| 2120 | if (table_info != NULL) { |
| 2121 | dep_mclk_table = table_info->vdd_dep_on_mclk; |
| 2122 | lookup_table = table_info->vddc_lookup_table; |
| 2123 | 	} else { |
| 2124 | 		return 0; |
| | 	} |
| 2125 | |
Rex Zhu | ada6770 | 2018-02-27 19:15:08 +0800 | [diff] [blame] | 2126 | hw_revision = adev->pdev->revision; |
| 2127 | sub_sys_id = adev->pdev->subsystem_device; |
| 2128 | sub_vendor_id = adev->pdev->subsystem_vendor; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2129 | |
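| | 	/* |
| | 	 * Board quirk: on these Polaris10 rev 0xC7 boards, make sure the |
| | 	 * highest MCLK DPM level requests a real voltage of at least |
| | 	 * 1000 mV by repointing its vddInd at the first qualifying lookup |
| | 	 * entry. |
| | 	 */ |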
| 2130 | if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 && |
| 2131 | ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) || |
| 2132 | (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) || |
| 2133 | (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) { |
| 2134 | if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000) |
| 2135 | return 0; |
| 2136 | |
| 2137 | for (i = 0; i < lookup_table->count; i++) { |
| 2138 | if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) { |
| 2139 | dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i; |
| 2140 | return 0; |
| 2141 | } |
| 2142 | } |
| 2143 | } |
| 2144 | return 0; |
| 2145 | } |
| 2146 | |
| 2147 | static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr) |
| 2148 | { |
| 2149 | struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; |
| 2150 | uint32_t temp_reg; |
| 2151 | struct phm_ppt_v1_information *table_info = |
| 2152 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 2154 | |
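| | 	/* |
| | 	 * If a VDDC PCC GPIO pin is assigned, program CNB_PWRMGT_CNTL so |
| | 	 * that pin triggers the matching slow-down/NB-PS1 response. |
| | 	 */ |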
| 2155 | if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) { |
| 2156 | temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL); |
| 2157 | switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) { |
| 2158 | case 0: |
| 2159 | temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1); |
| 2160 | break; |
| 2161 | case 1: |
| 2162 | temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2); |
| 2163 | break; |
| 2164 | case 2: |
| 2165 | temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1); |
| 2166 | break; |
| 2167 | case 3: |
| 2168 | temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1); |
| 2169 | break; |
| 2170 | case 4: |
| 2171 | temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1); |
| 2172 | break; |
| 2173 | default: |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2174 | break; |
| 2175 | } |
| 2176 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg); |
| 2177 | } |
| 2178 | |
| 2179 | if (table_info == NULL) |
| 2180 | return 0; |
| 2181 | |
| 2182 | if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 && |
| 2183 | hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) { |
| 2184 | hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit = |
| 2185 | (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit; |
| 2186 | |
| 2187 | hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit = |
| 2188 | (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; |
| 2189 | |
| 2190 | hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1; |
| 2191 | |
| 2192 | hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100; |
| 2193 | |
| 2194 | hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit = |
| 2195 | (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit; |
| 2196 | |
| 2197 | hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1; |
| 2198 | |
| 2199 | table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ? |
| 2200 | (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0; |
| 2201 | |
| 2202 | table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp; |
| 2203 | table_info->cac_dtp_table->usOperatingTempStep = 1; |
| 2204 | table_info->cac_dtp_table->usOperatingTempHyst = 1; |
| 2205 | |
| 2206 | hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = |
| 2207 | hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; |
| 2208 | |
| 2209 | hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = |
| 2210 | hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM; |
| 2211 | |
| 2212 | hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit = |
| 2213 | table_info->cac_dtp_table->usOperatingTempMinLimit; |
| 2214 | |
| 2215 | hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit = |
| 2216 | table_info->cac_dtp_table->usOperatingTempMaxLimit; |
| 2217 | |
| 2218 | hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp = |
| 2219 | table_info->cac_dtp_table->usDefaultTargetOperatingTemp; |
| 2220 | |
| 2221 | hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep = |
| 2222 | table_info->cac_dtp_table->usOperatingTempStep; |
| 2223 | |
| 2224 | hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp = |
| 2225 | table_info->cac_dtp_table->usTargetOperatingTemp; |
Rex Zhu | cf54d6d | 2016-11-02 13:18:54 +0800 | [diff] [blame] | 2226 | if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK) |
| 2227 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
| 2228 | PHM_PlatformCaps_ODFuzzyFanControlSupport); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2229 | } |
| 2230 | |
| 2231 | return 0; |
| 2232 | } |
| 2233 | |
| 2234 | /** |
| 2235 | * Change virtual leakage voltage to actual value. |
| 2236 | * |
| 2237 | * @param hwmgr the address of the powerplay hardware manager. |
| 2238 |  * @param voltage        pointer to the voltage value to patch |
| 2239 |  * @param leakage_table  pointer to the leakage voltage table |
| 2240 | */ |
| 2241 | static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr, |
| 2242 | uint32_t *voltage, struct smu7_leakage_voltage *leakage_table) |
| 2243 | { |
| 2244 | uint32_t index; |
| 2245 | |
| 2246 | /* search for leakage voltage ID 0xff01 ~ 0xff08 */ |
| 2247 | for (index = 0; index < leakage_table->count; index++) { |
| 2248 | /* if this voltage matches a leakage voltage ID */ |
| 2249 | /* patch with actual leakage voltage */ |
| 2250 | if (leakage_table->leakage_id[index] == *voltage) { |
| 2251 | *voltage = leakage_table->actual_voltage[index]; |
| 2252 | break; |
| 2253 | } |
| 2254 | } |
| 2255 | |
| 2256 | if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) |
Huang Rui | b5c11b8 | 2016-12-26 15:00:22 +0800 | [diff] [blame] | 2257 | 		pr_err("Voltage value looks like a leakage ID but was not patched\n"); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2258 | } |
| 2259 | |
| 2260 | |
| 2261 | static int smu7_patch_vddc(struct pp_hwmgr *hwmgr, |
| 2262 | struct phm_clock_voltage_dependency_table *tab) |
| 2263 | { |
| 2264 | uint16_t i; |
| 2265 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2266 | |
| 2267 | if (tab) |
| 2268 | for (i = 0; i < tab->count; i++) |
| 2269 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, |
| 2270 | &data->vddc_leakage); |
| 2271 | |
| 2272 | return 0; |
| 2273 | } |
| 2274 | |
| 2275 | static int smu7_patch_vddci(struct pp_hwmgr *hwmgr, |
| 2276 | struct phm_clock_voltage_dependency_table *tab) |
| 2277 | { |
| 2278 | uint16_t i; |
| 2279 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2280 | |
| 2281 | if (tab) |
| 2282 | for (i = 0; i < tab->count; i++) |
| 2283 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, |
| 2284 | &data->vddci_leakage); |
| 2285 | |
| 2286 | return 0; |
| 2287 | } |
| 2288 | |
| 2289 | static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr, |
| 2290 | struct phm_vce_clock_voltage_dependency_table *tab) |
| 2291 | { |
| 2292 | uint16_t i; |
| 2293 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2294 | |
| 2295 | if (tab) |
| 2296 | for (i = 0; i < tab->count; i++) |
| 2297 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, |
| 2298 | &data->vddc_leakage); |
| 2299 | |
| 2300 | return 0; |
| 2301 | } |
| 2302 | |
| 2303 | |
| 2304 | static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr, |
| 2305 | struct phm_uvd_clock_voltage_dependency_table *tab) |
| 2306 | { |
| 2307 | uint16_t i; |
| 2308 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2309 | |
| 2310 | if (tab) |
| 2311 | for (i = 0; i < tab->count; i++) |
| 2312 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, |
| 2313 | &data->vddc_leakage); |
| 2314 | |
| 2315 | return 0; |
| 2316 | } |
| 2317 | |
| 2318 | static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr, |
| 2319 | struct phm_phase_shedding_limits_table *tab) |
| 2320 | { |
| 2321 | uint16_t i; |
| 2322 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2323 | |
| 2324 | if (tab) |
| 2325 | for (i = 0; i < tab->count; i++) |
| 2326 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage, |
| 2327 | &data->vddc_leakage); |
| 2328 | |
| 2329 | return 0; |
| 2330 | } |
| 2331 | |
| 2332 | static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr, |
| 2333 | struct phm_samu_clock_voltage_dependency_table *tab) |
| 2334 | { |
| 2335 | uint16_t i; |
| 2336 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2337 | |
| 2338 | if (tab) |
| 2339 | for (i = 0; i < tab->count; i++) |
| 2340 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, |
| 2341 | &data->vddc_leakage); |
| 2342 | |
| 2343 | return 0; |
| 2344 | } |
| 2345 | |
| 2346 | static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr, |
| 2347 | struct phm_acp_clock_voltage_dependency_table *tab) |
| 2348 | { |
| 2349 | uint16_t i; |
| 2350 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2351 | |
| 2352 | if (tab) |
| 2353 | for (i = 0; i < tab->count; i++) |
| 2354 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, |
| 2355 | &data->vddc_leakage); |
| 2356 | |
| 2357 | return 0; |
| 2358 | } |
| 2359 | |
| 2360 | static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr, |
Alex Deucher | 77f7f71 | 2016-10-14 11:11:06 -0400 | [diff] [blame] | 2361 | struct phm_clock_and_voltage_limits *tab) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2362 | { |
Alex Deucher | 77f7f71 | 2016-10-14 11:11:06 -0400 | [diff] [blame] | 2363 | uint32_t vddc, vddci; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2364 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2365 | |
| 2366 | if (tab) { |
Arnd Bergmann | a29d126 | 2016-11-08 14:52:18 +0100 | [diff] [blame] | 2367 | vddc = tab->vddc; |
Alex Deucher | 77f7f71 | 2016-10-14 11:11:06 -0400 | [diff] [blame] | 2368 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, |
| 2369 | &data->vddc_leakage); |
| 2370 | tab->vddc = vddc; |
Arnd Bergmann | a29d126 | 2016-11-08 14:52:18 +0100 | [diff] [blame] | 2371 | vddci = tab->vddci; |
Alex Deucher | 77f7f71 | 2016-10-14 11:11:06 -0400 | [diff] [blame] | 2372 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci, |
| 2373 | &data->vddci_leakage); |
| 2374 | tab->vddci = vddci; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2375 | } |
| 2376 | |
| 2377 | return 0; |
| 2378 | } |
| 2379 | |
| 2380 | static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab) |
| 2381 | { |
| 2382 | uint32_t i; |
| 2383 | uint32_t vddc; |
| 2384 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2385 | |
| 2386 | if (tab) { |
| 2387 | for (i = 0; i < tab->count; i++) { |
| 2388 | vddc = (uint32_t)(tab->entries[i].Vddc); |
| 2389 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage); |
| 2390 | tab->entries[i].Vddc = (uint16_t)vddc; |
| 2391 | } |
| 2392 | } |
| 2393 | |
| 2394 | return 0; |
| 2395 | } |
| 2396 | |
| 2397 | static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr) |
| 2398 | { |
| 2399 | int tmp; |
| 2400 | |
| 2401 | tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk); |
| 2402 | if (tmp) |
| 2403 | return -EINVAL; |
| 2404 | |
| 2405 | tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk); |
| 2406 | if (tmp) |
| 2407 | return -EINVAL; |
| 2408 | |
| 2409 | tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl); |
| 2410 | if (tmp) |
| 2411 | return -EINVAL; |
| 2412 | |
| 2413 | tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk); |
| 2414 | if (tmp) |
| 2415 | return -EINVAL; |
| 2416 | |
| 2417 | tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table); |
| 2418 | if (tmp) |
| 2419 | return -EINVAL; |
| 2420 | |
| 2421 | tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table); |
| 2422 | if (tmp) |
| 2423 | return -EINVAL; |
| 2424 | |
| 2425 | tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table); |
| 2426 | if (tmp) |
| 2427 | return -EINVAL; |
| 2428 | |
| 2429 | tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table); |
| 2430 | if (tmp) |
| 2431 | return -EINVAL; |
| 2432 | |
| 2433 | tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table); |
| 2434 | if (tmp) |
| 2435 | return -EINVAL; |
| 2436 | |
| 2437 | tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac); |
| 2438 | if (tmp) |
| 2439 | return -EINVAL; |
| 2440 | |
| 2441 | tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc); |
| 2442 | if (tmp) |
| 2443 | return -EINVAL; |
| 2444 | |
| 2445 | tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table); |
| 2446 | if (tmp) |
| 2447 | return -EINVAL; |
| 2448 | |
| 2449 | return 0; |
| 2450 | } |
| 2451 | |
| 2452 | |
| 2453 | static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr) |
| 2454 | { |
| 2455 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2456 | |
| 2457 | struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk; |
| 2458 | struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk; |
| 2459 | struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk; |
| 2460 | |
| 2461 | PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL, |
Julia Lawall | 1446413 | 2017-12-27 15:51:44 +0100 | [diff] [blame] | 2462 | "VDDC dependency on SCLK table is missing. This table is mandatory", |
| 2463 | return -EINVAL); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2464 | PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1, |
Julia Lawall | 1446413 | 2017-12-27 15:51:44 +0100 | [diff] [blame] | 2465 | "VDDC dependency on SCLK table has to have at least 1 entry. This table is mandatory", |
| 2466 | return -EINVAL); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2467 | |
| 2468 | PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL, |
Julia Lawall | 1446413 | 2017-12-27 15:51:44 +0100 | [diff] [blame] | 2469 | "VDDC dependency on MCLK table is missing. This table is mandatory", |
| 2470 | return -EINVAL); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2471 | PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1, |
Julia Lawall | 1446413 | 2017-12-27 15:51:44 +0100 | [diff] [blame] | 2472 | "VDD dependency on MCLK table has to have at least 1 entry. This table is mandatory", |
| 2473 | return -EINVAL); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2474 | |
| 2475 | data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v; |
| 2476 | data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; |
| 2477 | |
| 2478 | hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = |
| 2479 | allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; |
| 2480 | hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = |
| 2481 | allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk; |
| 2482 | hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = |
| 2483 | allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; |
| 2484 | |
| 2485 | if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) { |
| 2486 | data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v; |
| 2487 | data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; |
| 2488 | } |
| 2489 | |
Rex Zhu | 86457c3 | 2017-09-14 21:05:18 +0800 | [diff] [blame] | 2490 | if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2491 | hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v; |
| 2492 | |
| 2493 | return 0; |
| 2494 | } |
| 2495 | |
Rex Zhu | a0aa704 | 2016-12-28 20:15:45 +0800 | [diff] [blame] | 2496 | static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) |
| 2497 | { |
Himanshu Jha | ebe02de | 2017-08-29 18:42:27 +0530 | [diff] [blame] | 2498 | kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); |
| 2499 | hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; |
Himanshu Jha | ebe02de | 2017-08-29 18:42:27 +0530 | [diff] [blame] | 2500 | kfree(hwmgr->backend); |
| 2501 | hwmgr->backend = NULL; |
Rex Zhu | a0aa704 | 2016-12-28 20:15:45 +0800 | [diff] [blame] | 2502 | |
| 2503 | return 0; |
| 2504 | } |
| 2505 | |
Rex Zhu | 86457c3 | 2017-09-14 21:05:18 +0800 | [diff] [blame] | 2506 | static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr) |
| 2507 | { |
| 2508 | uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id; |
| 2509 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2510 | int i; |
| 2511 | |
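 | | /* Read the leakage ID from the efuse, then translate each virtual voltage ID (ATOM_VIRTUAL_VOLTAGE_ID0 + i) into the real VDDC/VDDCI leakage voltages. */ |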
| 2512 | if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) { |
| 2513 | for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { |
| 2514 | virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; |
| 2515 | if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci, |
| 2516 | virtual_voltage_id, |
| 2517 | efuse_voltage_id) == 0) { |
| 2518 | if (vddc != 0 && vddc != virtual_voltage_id) { |
| 2519 | data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc; |
| 2520 | data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id; |
| 2521 | data->vddc_leakage.count++; |
| 2522 | } |
| 2523 | if (vddci != 0 && vddci != virtual_voltage_id) { |
| 2524 | data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci; |
| 2525 | data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id; |
| 2526 | data->vddci_leakage.count++; |
| 2527 | } |
| 2528 | } |
| 2529 | } |
| 2530 | } |
| 2531 | return 0; |
| 2532 | } |
| 2533 | |
Baoyou Xie | f8a4c11 | 2016-09-30 17:58:42 +0800 | [diff] [blame] | 2534 | static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2535 | { |
| 2536 | struct smu7_hwmgr *data; |
Rex Zhu | 86457c3 | 2017-09-14 21:05:18 +0800 | [diff] [blame] | 2537 | int result = 0; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2538 | |
| 2539 | data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL); |
| 2540 | if (data == NULL) |
| 2541 | return -ENOMEM; |
| 2542 | |
| 2543 | hwmgr->backend = data; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2544 | smu7_patch_voltage_workaround(hwmgr); |
| 2545 | smu7_init_dpm_defaults(hwmgr); |
| 2546 | |
| 2547 | /* Get leakage voltage based on leakage ID. */ |
Rex Zhu | 86457c3 | 2017-09-14 21:05:18 +0800 | [diff] [blame] | 2548 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
| 2549 | PHM_PlatformCaps_EVV)) { |
| 2550 | result = smu7_get_evv_voltages(hwmgr); |
| 2551 | if (result) { |
| 2552 | pr_info("Get EVV Voltage Failed. Abort Driver loading!\n"); |
| 2553 | return -EINVAL; |
| 2554 | } |
| 2555 | } else { |
| 2556 | smu7_get_elb_voltages(hwmgr); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2557 | } |
| 2558 | |
| 2559 | if (hwmgr->pp_table_version == PP_TABLE_V1) { |
| 2560 | smu7_complete_dependency_tables(hwmgr); |
| 2561 | smu7_set_private_data_based_on_pptable_v1(hwmgr); |
| 2562 | } else if (hwmgr->pp_table_version == PP_TABLE_V0) { |
| 2563 | smu7_patch_dependency_tables_with_leakage(hwmgr); |
| 2564 | smu7_set_private_data_based_on_pptable_v0(hwmgr); |
| 2565 | } |
| 2566 | |
 | 2567 | /* Initialize Dynamic State Adjustment Rule Settings */ |
| 2568 | result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr); |
| 2569 | |
| 2570 | if (0 == result) { |
Rex Zhu | ada6770 | 2018-02-27 19:15:08 +0800 | [diff] [blame] | 2571 | struct amdgpu_device *adev = hwmgr->adev; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2572 | |
| 2573 | data->is_tlu_enabled = false; |
| 2574 | |
| 2575 | hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = |
| 2576 | SMU7_MAX_HARDWARE_POWERLEVELS; |
| 2577 | hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; |
| 2578 | hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; |
| 2579 | |
Rex Zhu | ada6770 | 2018-02-27 19:15:08 +0800 | [diff] [blame] | 2580 | data->pcie_gen_cap = adev->pm.pcie_gen_mask; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2581 | if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) |
| 2582 | data->pcie_spc_cap = 20; |
Rex Zhu | ada6770 | 2018-02-27 19:15:08 +0800 | [diff] [blame] | 2583 | data->pcie_lane_cap = adev->pm.pcie_mlw_mask; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2584 | |
| 2585 | hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ |
 | 2586 | /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5 MHz (500 in 10 kHz units). */ |
| 2587 | hwmgr->platform_descriptor.clockStep.engineClock = 500; |
| 2588 | hwmgr->platform_descriptor.clockStep.memoryClock = 500; |
| 2589 | smu7_thermal_parameter_init(hwmgr); |
| 2590 | } else { |
| 2591 | /* Ignore return value in here, we are cleaning up a mess. */ |
Rex Zhu | a0aa704 | 2016-12-28 20:15:45 +0800 | [diff] [blame] | 2592 | smu7_hwmgr_backend_fini(hwmgr); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2593 | } |
| 2594 | |
| 2595 | return 0; |
| 2596 | } |
| 2597 | |
| 2598 | static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr) |
| 2599 | { |
| 2600 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2601 | uint32_t level, tmp; |
| 2602 | |
| 2603 | if (!data->pcie_dpm_key_disabled) { |
| 2604 | if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { |
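 | | /* Find the index of the highest set bit in the enable mask, i.e. the highest enabled DPM level; the same pattern repeats for sclk and mclk below. */ |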
| 2605 | level = 0; |
| 2606 | tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask; |
| 2607 | while (tmp >>= 1) |
| 2608 | level++; |
| 2609 | |
| 2610 | if (level) |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 2611 | smum_send_msg_to_smc_with_parameter(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2612 | PPSMC_MSG_PCIeDPM_ForceLevel, level); |
| 2613 | } |
| 2614 | } |
| 2615 | |
| 2616 | if (!data->sclk_dpm_key_disabled) { |
| 2617 | if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { |
| 2618 | level = 0; |
| 2619 | tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask; |
| 2620 | while (tmp >>= 1) |
| 2621 | level++; |
| 2622 | |
| 2623 | if (level) |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 2624 | smum_send_msg_to_smc_with_parameter(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2625 | PPSMC_MSG_SCLKDPM_SetEnabledMask, |
| 2626 | (1 << level)); |
| 2627 | } |
| 2628 | } |
| 2629 | |
| 2630 | if (!data->mclk_dpm_key_disabled) { |
| 2631 | if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { |
| 2632 | level = 0; |
| 2633 | tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask; |
| 2634 | while (tmp >>= 1) |
| 2635 | level++; |
| 2636 | |
| 2637 | if (level) |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 2638 | smum_send_msg_to_smc_with_parameter(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2639 | PPSMC_MSG_MCLKDPM_SetEnabledMask, |
| 2640 | (1 << level)); |
| 2641 | } |
| 2642 | } |
| 2643 | |
| 2644 | return 0; |
| 2645 | } |
| 2646 | |
| 2647 | static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) |
| 2648 | { |
| 2649 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2650 | |
| 2651 | if (hwmgr->pp_table_version == PP_TABLE_V1) |
| 2652 | phm_apply_dal_min_voltage_request(hwmgr); |
 | 2653 | /* TODO: for v0 iceland and CI */ |
| 2654 | |
| 2655 | if (!data->sclk_dpm_key_disabled) { |
| 2656 | if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 2657 | smum_send_msg_to_smc_with_parameter(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2658 | PPSMC_MSG_SCLKDPM_SetEnabledMask, |
| 2659 | data->dpm_level_enable_mask.sclk_dpm_enable_mask); |
| 2660 | } |
| 2661 | |
| 2662 | if (!data->mclk_dpm_key_disabled) { |
| 2663 | if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 2664 | smum_send_msg_to_smc_with_parameter(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2665 | PPSMC_MSG_MCLKDPM_SetEnabledMask, |
| 2666 | data->dpm_level_enable_mask.mclk_dpm_enable_mask); |
| 2667 | } |
| 2668 | |
| 2669 | return 0; |
| 2670 | } |
| 2671 | |
| 2672 | static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr) |
| 2673 | { |
| 2674 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2675 | |
| 2676 | if (!smum_is_dpm_running(hwmgr)) |
| 2677 | return -EINVAL; |
| 2678 | |
| 2679 | if (!data->pcie_dpm_key_disabled) { |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 2680 | smum_send_msg_to_smc(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2681 | PPSMC_MSG_PCIeDPM_UnForceLevel); |
| 2682 | } |
| 2683 | |
| 2684 | return smu7_upload_dpm_level_enable_mask(hwmgr); |
| 2685 | } |
| 2686 | |
| 2687 | static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr) |
| 2688 | { |
| 2689 | struct smu7_hwmgr *data = |
| 2690 | (struct smu7_hwmgr *)(hwmgr->backend); |
| 2691 | uint32_t level; |
| 2692 | |
 | 2693 | if (!data->sclk_dpm_key_disabled) { |
 | 2694 | if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { |
 | 2695 | level = phm_get_lowest_enabled_level(hwmgr, |
 | 2696 | data->dpm_level_enable_mask.sclk_dpm_enable_mask); |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 2697 | smum_send_msg_to_smc_with_parameter(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2698 | PPSMC_MSG_SCLKDPM_SetEnabledMask, |
 | 2699 | (1 << level)); |
 | 2700 | } |
 | 2701 | } |
| 2702 | |
| 2703 | if (!data->mclk_dpm_key_disabled) { |
| 2704 | if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { |
| 2705 | level = phm_get_lowest_enabled_level(hwmgr, |
| 2706 | data->dpm_level_enable_mask.mclk_dpm_enable_mask); |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 2707 | smum_send_msg_to_smc_with_parameter(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2708 | PPSMC_MSG_MCLKDPM_SetEnabledMask, |
| 2709 | (1 << level)); |
| 2710 | } |
| 2711 | } |
| 2712 | |
| 2713 | if (!data->pcie_dpm_key_disabled) { |
| 2714 | if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { |
| 2715 | level = phm_get_lowest_enabled_level(hwmgr, |
| 2716 | data->dpm_level_enable_mask.pcie_dpm_enable_mask); |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 2717 | smum_send_msg_to_smc_with_parameter(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2718 | PPSMC_MSG_PCIeDPM_ForceLevel, |
| 2719 | (level)); |
| 2720 | } |
| 2721 | } |
| 2722 | |
| 2723 | return 0; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2724 | } |
Rex Zhu | 570272d | 2017-01-06 13:32:49 +0800 | [diff] [blame] | 2725 | |
| 2726 | static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level, |
| 2727 | uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask) |
| 2728 | { |
| 2729 | uint32_t percentage; |
| 2730 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2731 | struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table; |
| 2732 | int32_t tmp_mclk; |
| 2733 | int32_t tmp_sclk; |
| 2734 | int32_t count; |
| 2735 | |
| 2736 | if (golden_dpm_table->mclk_table.count < 1) |
| 2737 | return -EINVAL; |
| 2738 | |
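 | | /* Derive a profiling sclk from the chosen mclk using the golden sclk:mclk ratio of the top DPM levels; with a single mclk level, fall back to a fixed 70%. */ |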
| 2739 | percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value / |
| 2740 | golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value; |
| 2741 | |
| 2742 | if (golden_dpm_table->mclk_table.count == 1) { |
| 2743 | percentage = 70; |
| 2744 | tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value; |
| 2745 | *mclk_mask = golden_dpm_table->mclk_table.count - 1; |
| 2746 | } else { |
| 2747 | tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value; |
| 2748 | *mclk_mask = golden_dpm_table->mclk_table.count - 2; |
| 2749 | } |
| 2750 | |
| 2751 | tmp_sclk = tmp_mclk * percentage / 100; |
| 2752 | |
| 2753 | if (hwmgr->pp_table_version == PP_TABLE_V0) { |
| 2754 | for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; |
| 2755 | count >= 0; count--) { |
| 2756 | if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) { |
| 2757 | tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk; |
| 2758 | *sclk_mask = count; |
| 2759 | break; |
| 2760 | } |
| 2761 | } |
Rex Zhu | dd70949 | 2018-01-05 19:02:48 +0800 | [diff] [blame] | 2762 | if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { |
Rex Zhu | 570272d | 2017-01-06 13:32:49 +0800 | [diff] [blame] | 2763 | *sclk_mask = 0; |
Rex Zhu | dd70949 | 2018-01-05 19:02:48 +0800 | [diff] [blame] | 2764 | tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk; |
| 2765 | } |
Rex Zhu | 570272d | 2017-01-06 13:32:49 +0800 | [diff] [blame] | 2766 | |
| 2767 | if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) |
| 2768 | *sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; |
| 2769 | } else if (hwmgr->pp_table_version == PP_TABLE_V1) { |
| 2770 | struct phm_ppt_v1_information *table_info = |
| 2771 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 2772 | |
| 2773 | for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) { |
| 2774 | if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) { |
| 2775 | tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk; |
| 2776 | *sclk_mask = count; |
| 2777 | break; |
| 2778 | } |
| 2779 | } |
Rex Zhu | dd70949 | 2018-01-05 19:02:48 +0800 | [diff] [blame] | 2780 | if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { |
Rex Zhu | 570272d | 2017-01-06 13:32:49 +0800 | [diff] [blame] | 2781 | *sclk_mask = 0; |
Rex Zhu | dd70949 | 2018-01-05 19:02:48 +0800 | [diff] [blame] | 2782 | tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; |
| 2783 | } |
Rex Zhu | 570272d | 2017-01-06 13:32:49 +0800 | [diff] [blame] | 2784 | |
| 2785 | if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) |
| 2786 | *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; |
| 2787 | } |
| 2788 | |
| 2789 | if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) |
| 2790 | *mclk_mask = 0; |
| 2791 | else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) |
| 2792 | *mclk_mask = golden_dpm_table->mclk_table.count - 1; |
| 2793 | |
| 2794 | *pcie_mask = data->dpm_table.pcie_speed_table.count - 1; |
Rex Zhu | dd70949 | 2018-01-05 19:02:48 +0800 | [diff] [blame] | 2795 | hwmgr->pstate_sclk = tmp_sclk; |
| 2796 | hwmgr->pstate_mclk = tmp_mclk; |
| 2797 | |
Rex Zhu | 570272d | 2017-01-06 13:32:49 +0800 | [diff] [blame] | 2798 | return 0; |
| 2799 | } |
| 2800 | |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2801 | static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr, |
| 2802 | enum amd_dpm_forced_level level) |
| 2803 | { |
| 2804 | int ret = 0; |
Rex Zhu | 570272d | 2017-01-06 13:32:49 +0800 | [diff] [blame] | 2805 | uint32_t sclk_mask = 0; |
| 2806 | uint32_t mclk_mask = 0; |
| 2807 | uint32_t pcie_mask = 0; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2808 | |
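 | | /* pstate_sclk/pstate_mclk are cached by smu7_get_profiling_clk(), so the profiling clocks only need to be computed once. */ |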
Rex Zhu | dd70949 | 2018-01-05 19:02:48 +0800 | [diff] [blame] | 2809 | if (hwmgr->pstate_sclk == 0) |
| 2810 | smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask); |
| 2811 | |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2812 | switch (level) { |
| 2813 | case AMD_DPM_FORCED_LEVEL_HIGH: |
| 2814 | ret = smu7_force_dpm_highest(hwmgr); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2815 | break; |
| 2816 | case AMD_DPM_FORCED_LEVEL_LOW: |
| 2817 | ret = smu7_force_dpm_lowest(hwmgr); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2818 | break; |
| 2819 | case AMD_DPM_FORCED_LEVEL_AUTO: |
| 2820 | ret = smu7_unforce_dpm_levels(hwmgr); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2821 | break; |
Rex Zhu | 570272d | 2017-01-06 13:32:49 +0800 | [diff] [blame] | 2822 | case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: |
| 2823 | case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: |
| 2824 | case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: |
| 2825 | case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: |
| 2826 | ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask); |
| 2827 | if (ret) |
| 2828 | return ret; |
Rex Zhu | 570272d | 2017-01-06 13:32:49 +0800 | [diff] [blame] | 2829 | smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask); |
| 2830 | smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask); |
| 2831 | smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask); |
| 2832 | break; |
Rex Zhu | cb256cc | 2017-01-24 17:47:36 +0800 | [diff] [blame] | 2833 | case AMD_DPM_FORCED_LEVEL_MANUAL: |
Rex Zhu | 570272d | 2017-01-06 13:32:49 +0800 | [diff] [blame] | 2834 | case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2835 | default: |
| 2836 | break; |
| 2837 | } |
| 2838 | |
Rex Zhu | 9947f70 | 2017-08-29 16:08:56 +0800 | [diff] [blame] | 2839 | if (!ret) { |
| 2840 | if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) |
| 2841 | smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100); |
| 2842 | else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) |
| 2843 | smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr); |
| 2844 | } |
| 2845 | return ret; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2846 | } |
| 2847 | |
| 2848 | static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr) |
| 2849 | { |
| 2850 | return sizeof(struct smu7_power_state); |
| 2851 | } |
| 2852 | |
Alex Deucher | 09be4a5 | 2017-05-11 13:46:12 -0400 | [diff] [blame] | 2853 | static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr, |
| 2854 | uint32_t vblank_time_us) |
| 2855 | { |
| 2856 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2857 | uint32_t switch_limit_us; |
| 2858 | |
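 | | /* switch_limit_us is the shortest vblank (in us) that can still hide an mclk switch; the caller disables mclk switching when vblank is shorter than this. */ |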
| 2859 | switch (hwmgr->chip_id) { |
| 2860 | case CHIP_POLARIS10: |
| 2861 | case CHIP_POLARIS11: |
| 2862 | case CHIP_POLARIS12: |
Alex Deucher | 7d98e1e | 2018-11-29 19:20:28 -0500 | [diff] [blame] | 2863 | if (hwmgr->is_kicker) |
| 2864 | switch_limit_us = data->is_memory_gddr5 ? 450 : 150; |
| 2865 | else |
| 2866 | switch_limit_us = data->is_memory_gddr5 ? 190 : 150; |
Alex Deucher | 09be4a5 | 2017-05-11 13:46:12 -0400 | [diff] [blame] | 2867 | break; |
Eric Huang | 0c24e7e | 2018-04-11 15:38:11 -0500 | [diff] [blame] | 2868 | case CHIP_VEGAM: |
| 2869 | switch_limit_us = 30; |
| 2870 | break; |
Alex Deucher | 09be4a5 | 2017-05-11 13:46:12 -0400 | [diff] [blame] | 2871 | default: |
| 2872 | switch_limit_us = data->is_memory_gddr5 ? 450 : 150; |
| 2873 | break; |
| 2874 | } |
| 2875 | |
 | 2876 | return vblank_time_us < switch_limit_us; |
| 2880 | } |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2881 | |
| 2882 | static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, |
| 2883 | struct pp_power_state *request_ps, |
| 2884 | const struct pp_power_state *current_ps) |
| 2885 | { |
Rex Zhu | 600ae89 | 2018-06-04 16:39:38 +0800 | [diff] [blame] | 2886 | struct amdgpu_device *adev = hwmgr->adev; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2887 | struct smu7_power_state *smu7_ps = |
| 2888 | cast_phw_smu7_power_state(&request_ps->hardware); |
| 2889 | uint32_t sclk; |
| 2890 | uint32_t mclk; |
| 2891 | struct PP_Clocks minimum_clocks = {0}; |
| 2892 | bool disable_mclk_switching; |
| 2893 | bool disable_mclk_switching_for_frame_lock; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2894 | const struct phm_clock_and_voltage_limits *max_limits; |
| 2895 | uint32_t i; |
| 2896 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 2897 | struct phm_ppt_v1_information *table_info = |
| 2898 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 2899 | int32_t count; |
| 2900 | int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; |
| 2901 | |
| 2902 | data->battery_state = (PP_StateUILabel_Battery == |
| 2903 | request_ps->classification.ui_label); |
| 2904 | |
| 2905 | PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2, |
| 2906 | "VI should always have 2 performance levels", |
| 2907 | ); |
| 2908 | |
Rex Zhu | 600ae89 | 2018-06-04 16:39:38 +0800 | [diff] [blame] | 2909 | max_limits = adev->pm.ac_power ? |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2910 | &(hwmgr->dyn_state.max_clock_voltage_on_ac) : |
| 2911 | &(hwmgr->dyn_state.max_clock_voltage_on_dc); |
| 2912 | |
| 2913 | /* Cap clock DPM tables at DC MAX if it is in DC. */ |
Rex Zhu | 600ae89 | 2018-06-04 16:39:38 +0800 | [diff] [blame] | 2914 | if (!adev->pm.ac_power) { |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2915 | for (i = 0; i < smu7_ps->performance_level_count; i++) { |
| 2916 | if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk) |
| 2917 | smu7_ps->performance_levels[i].memory_clock = max_limits->mclk; |
| 2918 | if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk) |
| 2919 | smu7_ps->performance_levels[i].engine_clock = max_limits->sclk; |
| 2920 | } |
| 2921 | } |
| 2922 | |
Rex Zhu | 555fd70 | 2018-03-27 13:32:02 +0800 | [diff] [blame] | 2923 | minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock; |
| 2924 | minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2925 | |
| 2926 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
| 2927 | PHM_PlatformCaps_StablePState)) { |
| 2928 | max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac); |
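 | | /* A stable P-state pins sclk to 75% of the maximum AC sclk (snapped below to a DPM table entry) and mclk to the AC maximum. */ |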
| 2929 | stable_pstate_sclk = (max_limits->sclk * 75) / 100; |
| 2930 | |
| 2931 | for (count = table_info->vdd_dep_on_sclk->count - 1; |
| 2932 | count >= 0; count--) { |
| 2933 | if (stable_pstate_sclk >= |
| 2934 | table_info->vdd_dep_on_sclk->entries[count].clk) { |
| 2935 | stable_pstate_sclk = |
| 2936 | table_info->vdd_dep_on_sclk->entries[count].clk; |
| 2937 | break; |
| 2938 | } |
| 2939 | } |
| 2940 | |
| 2941 | if (count < 0) |
| 2942 | stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; |
| 2943 | |
| 2944 | stable_pstate_mclk = max_limits->mclk; |
| 2945 | |
| 2946 | minimum_clocks.engineClock = stable_pstate_sclk; |
| 2947 | minimum_clocks.memoryClock = stable_pstate_mclk; |
| 2948 | } |
| 2949 | |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2950 | disable_mclk_switching_for_frame_lock = phm_cap_enabled( |
| 2951 | hwmgr->platform_descriptor.platformCaps, |
| 2952 | PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); |
| 2953 | |
| 2954 | |
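 | | /* Keep mclk switching with zero or one display; disable it for multiple unsynchronized displays, frame lock, or a vblank too short to hide the switch. */ |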
Rex Zhu | 555fd70 | 2018-03-27 13:32:02 +0800 | [diff] [blame] | 2955 | if (hwmgr->display_config->num_display == 0) |
Alex Deucher | a9b3c00 | 2018-02-13 14:26:54 -0500 | [diff] [blame] | 2956 | disable_mclk_switching = false; |
| 2957 | else |
Alex Deucher | bb6897f | 2019-08-08 00:47:49 -0500 | [diff] [blame] | 2958 | disable_mclk_switching = ((1 < hwmgr->display_config->num_display) && |
| 2959 | !hwmgr->display_config->multi_monitor_in_sync) || |
| 2960 | disable_mclk_switching_for_frame_lock || |
| 2961 | smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 2962 | |
| 2963 | sclk = smu7_ps->performance_levels[0].engine_clock; |
| 2964 | mclk = smu7_ps->performance_levels[0].memory_clock; |
| 2965 | |
| 2966 | if (disable_mclk_switching) |
| 2967 | mclk = smu7_ps->performance_levels |
| 2968 | [smu7_ps->performance_level_count - 1].memory_clock; |
| 2969 | |
| 2970 | if (sclk < minimum_clocks.engineClock) |
| 2971 | sclk = (minimum_clocks.engineClock > max_limits->sclk) ? |
| 2972 | max_limits->sclk : minimum_clocks.engineClock; |
| 2973 | |
| 2974 | if (mclk < minimum_clocks.memoryClock) |
| 2975 | mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? |
| 2976 | max_limits->mclk : minimum_clocks.memoryClock; |
| 2977 | |
| 2978 | smu7_ps->performance_levels[0].engine_clock = sclk; |
| 2979 | smu7_ps->performance_levels[0].memory_clock = mclk; |
| 2980 | |
| 2981 | smu7_ps->performance_levels[1].engine_clock = |
| 2982 | (smu7_ps->performance_levels[1].engine_clock >= |
| 2983 | smu7_ps->performance_levels[0].engine_clock) ? |
| 2984 | smu7_ps->performance_levels[1].engine_clock : |
| 2985 | smu7_ps->performance_levels[0].engine_clock; |
| 2986 | |
| 2987 | if (disable_mclk_switching) { |
| 2988 | if (mclk < smu7_ps->performance_levels[1].memory_clock) |
| 2989 | mclk = smu7_ps->performance_levels[1].memory_clock; |
| 2990 | |
| 2991 | smu7_ps->performance_levels[0].memory_clock = mclk; |
| 2992 | smu7_ps->performance_levels[1].memory_clock = mclk; |
| 2993 | } else { |
| 2994 | if (smu7_ps->performance_levels[1].memory_clock < |
| 2995 | smu7_ps->performance_levels[0].memory_clock) |
| 2996 | smu7_ps->performance_levels[1].memory_clock = |
| 2997 | smu7_ps->performance_levels[0].memory_clock; |
| 2998 | } |
| 2999 | |
| 3000 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
| 3001 | PHM_PlatformCaps_StablePState)) { |
| 3002 | for (i = 0; i < smu7_ps->performance_level_count; i++) { |
| 3003 | smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk; |
| 3004 | smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk; |
| 3005 | smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max; |
 | 3006 | smu7_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max; |
| 3007 | } |
| 3008 | } |
| 3009 | return 0; |
| 3010 | } |
| 3011 | |
| 3012 | |
Rex Zhu | f93f0c3 | 2017-09-06 16:08:03 +0800 | [diff] [blame] | 3013 | static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3014 | { |
| 3015 | struct pp_power_state *ps; |
| 3016 | struct smu7_power_state *smu7_ps; |
| 3017 | |
| 3018 | if (hwmgr == NULL) |
| 3019 | return -EINVAL; |
| 3020 | |
| 3021 | ps = hwmgr->request_ps; |
| 3022 | |
| 3023 | if (ps == NULL) |
| 3024 | return -EINVAL; |
| 3025 | |
| 3026 | smu7_ps = cast_phw_smu7_power_state(&ps->hardware); |
| 3027 | |
| 3028 | if (low) |
| 3029 | return smu7_ps->performance_levels[0].memory_clock; |
| 3030 | else |
| 3031 | return smu7_ps->performance_levels |
| 3032 | [smu7_ps->performance_level_count-1].memory_clock; |
| 3033 | } |
| 3034 | |
Rex Zhu | f93f0c3 | 2017-09-06 16:08:03 +0800 | [diff] [blame] | 3035 | static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3036 | { |
| 3037 | struct pp_power_state *ps; |
| 3038 | struct smu7_power_state *smu7_ps; |
| 3039 | |
| 3040 | if (hwmgr == NULL) |
| 3041 | return -EINVAL; |
| 3042 | |
| 3043 | ps = hwmgr->request_ps; |
| 3044 | |
| 3045 | if (ps == NULL) |
| 3046 | return -EINVAL; |
| 3047 | |
| 3048 | smu7_ps = cast_phw_smu7_power_state(&ps->hardware); |
| 3049 | |
| 3050 | if (low) |
| 3051 | return smu7_ps->performance_levels[0].engine_clock; |
| 3052 | else |
| 3053 | return smu7_ps->performance_levels |
| 3054 | [smu7_ps->performance_level_count-1].engine_clock; |
| 3055 | } |
| 3056 | |
| 3057 | static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, |
| 3058 | struct pp_hw_power_state *hw_ps) |
| 3059 | { |
| 3060 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3061 | struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps; |
| 3062 | ATOM_FIRMWARE_INFO_V2_2 *fw_info; |
| 3063 | uint16_t size; |
| 3064 | uint8_t frev, crev; |
| 3065 | int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); |
| 3066 | |
| 3067 | /* First retrieve the Boot clocks and VDDC from the firmware info table. |
| 3068 | * We assume here that fw_info is unchanged if this call fails. |
| 3069 | */ |
Rex Zhu | b3892e2 | 2018-03-26 18:49:35 +0800 | [diff] [blame] | 3070 | fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3071 | &size, &frev, &crev); |
| 3072 | if (!fw_info) |
| 3073 | /* During a test, there is no firmware info table. */ |
| 3074 | return 0; |
| 3075 | |
| 3076 | /* Patch the state. */ |
| 3077 | data->vbios_boot_state.sclk_bootup_value = |
| 3078 | le32_to_cpu(fw_info->ulDefaultEngineClock); |
| 3079 | data->vbios_boot_state.mclk_bootup_value = |
| 3080 | le32_to_cpu(fw_info->ulDefaultMemoryClock); |
| 3081 | data->vbios_boot_state.mvdd_bootup_value = |
| 3082 | le16_to_cpu(fw_info->usBootUpMVDDCVoltage); |
| 3083 | data->vbios_boot_state.vddc_bootup_value = |
| 3084 | le16_to_cpu(fw_info->usBootUpVDDCVoltage); |
| 3085 | data->vbios_boot_state.vddci_bootup_value = |
| 3086 | le16_to_cpu(fw_info->usBootUpVDDCIVoltage); |
| 3087 | data->vbios_boot_state.pcie_gen_bootup_value = |
| 3088 | smu7_get_current_pcie_speed(hwmgr); |
| 3089 | |
| 3090 | data->vbios_boot_state.pcie_lane_bootup_value = |
| 3091 | (uint16_t)smu7_get_current_pcie_lane_number(hwmgr); |
| 3092 | |
| 3093 | /* set boot power state */ |
| 3094 | ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value; |
| 3095 | ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value; |
| 3096 | ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value; |
| 3097 | ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value; |
| 3098 | |
| 3099 | return 0; |
| 3100 | } |
| 3101 | |
| 3102 | static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr) |
| 3103 | { |
| 3104 | int result; |
| 3105 | unsigned long ret = 0; |
| 3106 | |
| 3107 | if (hwmgr->pp_table_version == PP_TABLE_V0) { |
| 3108 | result = pp_tables_get_num_of_entries(hwmgr, &ret); |
| 3109 | return result ? 0 : ret; |
| 3110 | } else if (hwmgr->pp_table_version == PP_TABLE_V1) { |
| 3111 | result = get_number_of_powerplay_table_entries_v1_0(hwmgr); |
| 3112 | return result; |
| 3113 | } |
| 3114 | return 0; |
| 3115 | } |
| 3116 | |
| 3117 | static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr, |
| 3118 | void *state, struct pp_power_state *power_state, |
| 3119 | void *pp_table, uint32_t classification_flag) |
| 3120 | { |
| 3121 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3122 | struct smu7_power_state *smu7_power_state = |
| 3123 | (struct smu7_power_state *)(&(power_state->hardware)); |
| 3124 | struct smu7_performance_level *performance_level; |
| 3125 | ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; |
| 3126 | ATOM_Tonga_POWERPLAYTABLE *powerplay_table = |
| 3127 | (ATOM_Tonga_POWERPLAYTABLE *)pp_table; |
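 | | /* Sub-tables are located via byte offsets relative to the start of the powerplay table. */ |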
| 3128 | PPTable_Generic_SubTable_Header *sclk_dep_table = |
| 3129 | (PPTable_Generic_SubTable_Header *) |
| 3130 | (((unsigned long)powerplay_table) + |
| 3131 | le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); |
| 3132 | |
| 3133 | ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = |
| 3134 | (ATOM_Tonga_MCLK_Dependency_Table *) |
| 3135 | (((unsigned long)powerplay_table) + |
| 3136 | le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); |
| 3137 | |
 | 3138 | /* The following fields are not initialized here: id, orderedList, allStatesList */ |
| 3139 | power_state->classification.ui_label = |
| 3140 | (le16_to_cpu(state_entry->usClassification) & |
| 3141 | ATOM_PPLIB_CLASSIFICATION_UI_MASK) >> |
| 3142 | ATOM_PPLIB_CLASSIFICATION_UI_SHIFT; |
| 3143 | power_state->classification.flags = classification_flag; |
| 3144 | /* NOTE: There is a classification2 flag in BIOS that is not being used right now */ |
| 3145 | |
| 3146 | power_state->classification.temporary_state = false; |
| 3147 | power_state->classification.to_be_deleted = false; |
| 3148 | |
| 3149 | power_state->validation.disallowOnDC = |
| 3150 | (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & |
| 3151 | ATOM_Tonga_DISALLOW_ON_DC)); |
| 3152 | |
| 3153 | power_state->pcie.lanes = 0; |
| 3154 | |
| 3155 | power_state->display.disableFrameModulation = false; |
| 3156 | power_state->display.limitRefreshrate = false; |
| 3157 | power_state->display.enableVariBright = |
| 3158 | (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & |
| 3159 | ATOM_Tonga_ENABLE_VARIBRIGHT)); |
| 3160 | |
| 3161 | power_state->validation.supportedPowerLevels = 0; |
| 3162 | power_state->uvd_clocks.VCLK = 0; |
| 3163 | power_state->uvd_clocks.DCLK = 0; |
| 3164 | power_state->temperatures.min = 0; |
| 3165 | power_state->temperatures.max = 0; |
| 3166 | |
| 3167 | performance_level = &(smu7_power_state->performance_levels |
| 3168 | [smu7_power_state->performance_level_count++]); |
| 3169 | |
| 3170 | PP_ASSERT_WITH_CODE( |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 3171 | (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)), |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3172 | "Performance levels exceed SMC limit!", |
| 3173 | return -EINVAL); |
| 3174 | |
| 3175 | PP_ASSERT_WITH_CODE( |
| 3176 | (smu7_power_state->performance_level_count <= |
| 3177 | hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), |
| 3178 | "Performance levels exceeds Driver limit!", |
| 3179 | return -EINVAL); |
| 3180 | |
| 3181 | /* Performance levels are arranged from low to high. */ |
| 3182 | performance_level->memory_clock = mclk_dep_table->entries |
| 3183 | [state_entry->ucMemoryClockIndexLow].ulMclk; |
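 | | /* ucRevId selects the SCLK dependency table layout: 0 is the Tonga format, 1 the extended Polaris format. */ |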
| 3184 | if (sclk_dep_table->ucRevId == 0) |
| 3185 | performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries |
| 3186 | [state_entry->ucEngineClockIndexLow].ulSclk; |
| 3187 | else if (sclk_dep_table->ucRevId == 1) |
| 3188 | performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries |
| 3189 | [state_entry->ucEngineClockIndexLow].ulSclk; |
| 3190 | performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, |
| 3191 | state_entry->ucPCIEGenLow); |
| 3192 | performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, |
Alex Deucher | ed54d95 | 2018-06-28 13:21:12 -0500 | [diff] [blame] | 3193 | state_entry->ucPCIELaneLow); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3194 | |
| 3195 | performance_level = &(smu7_power_state->performance_levels |
| 3196 | [smu7_power_state->performance_level_count++]); |
| 3197 | performance_level->memory_clock = mclk_dep_table->entries |
| 3198 | [state_entry->ucMemoryClockIndexHigh].ulMclk; |
| 3199 | |
| 3200 | if (sclk_dep_table->ucRevId == 0) |
| 3201 | performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries |
| 3202 | [state_entry->ucEngineClockIndexHigh].ulSclk; |
| 3203 | else if (sclk_dep_table->ucRevId == 1) |
| 3204 | performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries |
| 3205 | [state_entry->ucEngineClockIndexHigh].ulSclk; |
| 3206 | |
| 3207 | performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, |
| 3208 | state_entry->ucPCIEGenHigh); |
| 3209 | performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, |
| 3210 | state_entry->ucPCIELaneHigh); |
| 3211 | |
| 3212 | return 0; |
| 3213 | } |
| 3214 | |
| 3215 | static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr, |
| 3216 | unsigned long entry_index, struct pp_power_state *state) |
| 3217 | { |
| 3218 | int result; |
| 3219 | struct smu7_power_state *ps; |
| 3220 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3221 | struct phm_ppt_v1_information *table_info = |
| 3222 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 3223 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = |
| 3224 | table_info->vdd_dep_on_mclk; |
| 3225 | |
| 3226 | state->hardware.magic = PHM_VIslands_Magic; |
| 3227 | |
| 3228 | ps = (struct smu7_power_state *)(&state->hardware); |
| 3229 | |
| 3230 | result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state, |
| 3231 | smu7_get_pp_table_entry_callback_func_v1); |
| 3232 | |
 | 3233 | /* This is the earliest time we have all the dependency tables and the VBIOS boot state, |
 | 3234 | * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state. |
 | 3235 | * If there is only one VDDCI/MCLK level, check whether it matches the VBIOS boot state. |
 | 3236 | */ |
| 3237 | if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { |
| 3238 | if (dep_mclk_table->entries[0].clk != |
| 3239 | data->vbios_boot_state.mclk_bootup_value) |
Rex Zhu | 89c6769 | 2017-09-15 11:09:20 +0800 | [diff] [blame] | 3240 | pr_debug("Single MCLK entry VDDCI/MCLK dependency table " |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3241 | "does not match VBIOS boot MCLK level"); |
| 3242 | if (dep_mclk_table->entries[0].vddci != |
| 3243 | data->vbios_boot_state.vddci_bootup_value) |
Rex Zhu | 89c6769 | 2017-09-15 11:09:20 +0800 | [diff] [blame] | 3244 | pr_debug("Single VDDCI entry VDDCI/MCLK dependency table " |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3245 | "does not match VBIOS boot VDDCI level"); |
| 3246 | } |
| 3247 | |
| 3248 | /* set DC compatible flag if this state supports DC */ |
| 3249 | if (!state->validation.disallowOnDC) |
| 3250 | ps->dc_compatible = true; |
| 3251 | |
| 3252 | if (state->classification.flags & PP_StateClassificationFlag_ACPI) |
| 3253 | data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; |
| 3254 | |
| 3255 | ps->uvd_clks.vclk = state->uvd_clocks.VCLK; |
| 3256 | ps->uvd_clks.dclk = state->uvd_clocks.DCLK; |
| 3257 | |
| 3258 | if (!result) { |
| 3259 | uint32_t i; |
| 3260 | |
| 3261 | switch (state->classification.ui_label) { |
| 3262 | case PP_StateUILabel_Performance: |
| 3263 | data->use_pcie_performance_levels = true; |
| 3264 | for (i = 0; i < ps->performance_level_count; i++) { |
| 3265 | if (data->pcie_gen_performance.max < |
| 3266 | ps->performance_levels[i].pcie_gen) |
| 3267 | data->pcie_gen_performance.max = |
| 3268 | ps->performance_levels[i].pcie_gen; |
| 3269 | |
| 3270 | if (data->pcie_gen_performance.min > |
| 3271 | ps->performance_levels[i].pcie_gen) |
| 3272 | data->pcie_gen_performance.min = |
| 3273 | ps->performance_levels[i].pcie_gen; |
| 3274 | |
| 3275 | if (data->pcie_lane_performance.max < |
| 3276 | ps->performance_levels[i].pcie_lane) |
| 3277 | data->pcie_lane_performance.max = |
| 3278 | ps->performance_levels[i].pcie_lane; |
| 3279 | if (data->pcie_lane_performance.min > |
| 3280 | ps->performance_levels[i].pcie_lane) |
| 3281 | data->pcie_lane_performance.min = |
| 3282 | ps->performance_levels[i].pcie_lane; |
| 3283 | } |
| 3284 | break; |
| 3285 | case PP_StateUILabel_Battery: |
| 3286 | data->use_pcie_power_saving_levels = true; |
| 3287 | |
| 3288 | for (i = 0; i < ps->performance_level_count; i++) { |
| 3289 | if (data->pcie_gen_power_saving.max < |
| 3290 | ps->performance_levels[i].pcie_gen) |
| 3291 | data->pcie_gen_power_saving.max = |
| 3292 | ps->performance_levels[i].pcie_gen; |
| 3293 | |
| 3294 | if (data->pcie_gen_power_saving.min > |
| 3295 | ps->performance_levels[i].pcie_gen) |
| 3296 | data->pcie_gen_power_saving.min = |
| 3297 | ps->performance_levels[i].pcie_gen; |
| 3298 | |
| 3299 | if (data->pcie_lane_power_saving.max < |
| 3300 | ps->performance_levels[i].pcie_lane) |
| 3301 | data->pcie_lane_power_saving.max = |
| 3302 | ps->performance_levels[i].pcie_lane; |
| 3303 | |
| 3304 | if (data->pcie_lane_power_saving.min > |
| 3305 | ps->performance_levels[i].pcie_lane) |
| 3306 | data->pcie_lane_power_saving.min = |
| 3307 | ps->performance_levels[i].pcie_lane; |
| 3308 | } |
| 3309 | break; |
| 3310 | default: |
| 3311 | break; |
| 3312 | } |
| 3313 | } |
| 3314 | return 0; |
| 3315 | } |
| 3316 | |
| 3317 | static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr, |
| 3318 | struct pp_hw_power_state *power_state, |
| 3319 | unsigned int index, const void *clock_info) |
| 3320 | { |
| 3321 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3322 | struct smu7_power_state *ps = cast_phw_smu7_power_state(power_state); |
| 3323 | const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info; |
| 3324 | struct smu7_performance_level *performance_level; |
| 3325 | uint32_t engine_clock, memory_clock; |
| 3326 | uint16_t pcie_gen_from_bios; |
| 3327 | |
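 | | /* Each clock is stored split in the BIOS: a 16-bit low word plus an 8-bit high byte. */ |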
| 3328 | engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow; |
| 3329 | memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow; |
| 3330 | |
| 3331 | if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk) |
| 3332 | data->highest_mclk = memory_clock; |
| 3333 | |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3334 | PP_ASSERT_WITH_CODE( |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 3335 | (ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)), |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3336 | "Performance levels exceed SMC limit!", |
| 3337 | return -EINVAL); |
| 3338 | |
| 3339 | PP_ASSERT_WITH_CODE( |
Rex Zhu | da7800a | 2016-11-14 16:36:08 +0800 | [diff] [blame] | 3340 | (ps->performance_level_count < |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3341 | hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), |
Rex Zhu | da7800a | 2016-11-14 16:36:08 +0800 | [diff] [blame] | 3342 | "Performance levels exceed Driver limit, skip!", |
| 3343 | return 0); |
| 3344 | |
| 3345 | performance_level = &(ps->performance_levels |
| 3346 | [ps->performance_level_count++]); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3347 | |
| 3348 | /* Performance levels are arranged from low to high. */ |
| 3349 | performance_level->memory_clock = memory_clock; |
| 3350 | performance_level->engine_clock = engine_clock; |
| 3351 | |
| 3352 | pcie_gen_from_bios = visland_clk_info->ucPCIEGen; |
| 3353 | |
| 3354 | performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios); |
| 3355 | performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane); |
| 3356 | |
| 3357 | return 0; |
| 3358 | } |
| 3359 | |
| 3360 | static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr, |
| 3361 | unsigned long entry_index, struct pp_power_state *state) |
| 3362 | { |
| 3363 | int result; |
| 3364 | struct smu7_power_state *ps; |
| 3365 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3366 | struct phm_clock_voltage_dependency_table *dep_mclk_table = |
| 3367 | hwmgr->dyn_state.vddci_dependency_on_mclk; |
| 3368 | |
| 3369 | memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state)); |
| 3370 | |
| 3371 | state->hardware.magic = PHM_VIslands_Magic; |
| 3372 | |
| 3373 | ps = (struct smu7_power_state *)(&state->hardware); |
| 3374 | |
| 3375 | result = pp_tables_get_entry(hwmgr, entry_index, state, |
| 3376 | smu7_get_pp_table_entry_callback_func_v0); |
| 3377 | |
 | 3378 | /* |
 | 3379 | * This is the earliest time we have all the dependency tables |
 | 3380 | * and the VBIOS boot state, as |
 | 3381 | * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot |
 | 3382 | * state. If there is only one VDDCI/MCLK level, check whether |
 | 3383 | * it matches the VBIOS boot state. |
 | 3384 | */ |
| 3385 | if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { |
| 3386 | if (dep_mclk_table->entries[0].clk != |
| 3387 | data->vbios_boot_state.mclk_bootup_value) |
Rex Zhu | 89c6769 | 2017-09-15 11:09:20 +0800 | [diff] [blame] | 3388 | pr_debug("Single MCLK entry VDDCI/MCLK dependency table " |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3389 | "does not match VBIOS boot MCLK level"); |
| 3390 | if (dep_mclk_table->entries[0].v != |
| 3391 | data->vbios_boot_state.vddci_bootup_value) |
Rex Zhu | 89c6769 | 2017-09-15 11:09:20 +0800 | [diff] [blame] | 3392 | pr_debug("Single VDDCI entry VDDCI/MCLK dependency table " |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3393 | "does not match VBIOS boot VDDCI level"); |
| 3394 | } |
| 3395 | |
| 3396 | /* set DC compatible flag if this state supports DC */ |
| 3397 | if (!state->validation.disallowOnDC) |
| 3398 | ps->dc_compatible = true; |
| 3399 | |
| 3400 | if (state->classification.flags & PP_StateClassificationFlag_ACPI) |
| 3401 | data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; |
| 3402 | |
| 3403 | ps->uvd_clks.vclk = state->uvd_clocks.VCLK; |
| 3404 | ps->uvd_clks.dclk = state->uvd_clocks.DCLK; |
| 3405 | |
| 3406 | if (!result) { |
| 3407 | uint32_t i; |
| 3408 | |
| 3409 | switch (state->classification.ui_label) { |
| 3410 | case PP_StateUILabel_Performance: |
| 3411 | data->use_pcie_performance_levels = true; |
| 3412 | |
| 3413 | for (i = 0; i < ps->performance_level_count; i++) { |
| 3414 | if (data->pcie_gen_performance.max < |
| 3415 | ps->performance_levels[i].pcie_gen) |
| 3416 | data->pcie_gen_performance.max = |
| 3417 | ps->performance_levels[i].pcie_gen; |
| 3418 | |
| 3419 | if (data->pcie_gen_performance.min > |
| 3420 | ps->performance_levels[i].pcie_gen) |
| 3421 | data->pcie_gen_performance.min = |
| 3422 | ps->performance_levels[i].pcie_gen; |
| 3423 | |
| 3424 | if (data->pcie_lane_performance.max < |
| 3425 | ps->performance_levels[i].pcie_lane) |
| 3426 | data->pcie_lane_performance.max = |
| 3427 | ps->performance_levels[i].pcie_lane; |
| 3428 | |
| 3429 | if (data->pcie_lane_performance.min > |
| 3430 | ps->performance_levels[i].pcie_lane) |
| 3431 | data->pcie_lane_performance.min = |
| 3432 | ps->performance_levels[i].pcie_lane; |
| 3433 | } |
| 3434 | break; |
| 3435 | case PP_StateUILabel_Battery: |
| 3436 | data->use_pcie_power_saving_levels = true; |
| 3437 | |
| 3438 | for (i = 0; i < ps->performance_level_count; i++) { |
| 3439 | if (data->pcie_gen_power_saving.max < |
| 3440 | ps->performance_levels[i].pcie_gen) |
| 3441 | data->pcie_gen_power_saving.max = |
| 3442 | ps->performance_levels[i].pcie_gen; |
| 3443 | |
| 3444 | if (data->pcie_gen_power_saving.min > |
| 3445 | ps->performance_levels[i].pcie_gen) |
| 3446 | data->pcie_gen_power_saving.min = |
| 3447 | ps->performance_levels[i].pcie_gen; |
| 3448 | |
| 3449 | if (data->pcie_lane_power_saving.max < |
| 3450 | ps->performance_levels[i].pcie_lane) |
| 3451 | data->pcie_lane_power_saving.max = |
| 3452 | ps->performance_levels[i].pcie_lane; |
| 3453 | |
| 3454 | if (data->pcie_lane_power_saving.min > |
| 3455 | ps->performance_levels[i].pcie_lane) |
| 3456 | data->pcie_lane_power_saving.min = |
| 3457 | ps->performance_levels[i].pcie_lane; |
| 3458 | } |
| 3459 | break; |
| 3460 | default: |
| 3461 | break; |
| 3462 | } |
| 3463 | } |
| 3464 | return 0; |
| 3465 | } |
| 3466 | |
| 3467 | static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr, |
| 3468 | unsigned long entry_index, struct pp_power_state *state) |
| 3469 | { |
| 3470 | if (hwmgr->pp_table_version == PP_TABLE_V0) |
| 3471 | return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state); |
| 3472 | else if (hwmgr->pp_table_version == PP_TABLE_V1) |
| 3473 | return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state); |
| 3474 | |
| 3475 | return 0; |
| 3476 | } |
| 3477 | |
Rex Zhu | 5b79d04 | 2018-04-04 15:37:35 +0800 | [diff] [blame] | 3478 | static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query) |
Eric Huang | 2245b60 | 2017-02-07 11:46:21 -0500 | [diff] [blame] | 3479 | { |
Evan Quan | 93a09aa | 2019-11-14 15:30:39 +0800 | [diff] [blame] | 3480 | struct amdgpu_device *adev = hwmgr->adev; |
Rex Zhu | b89c71d | 2018-04-04 14:17:09 +0800 | [diff] [blame] | 3481 | int i; |
Rex Zhu | 5b79d04 | 2018-04-04 15:37:35 +0800 | [diff] [blame] | 3482 | u32 tmp = 0; |
Eric Huang | 2245b60 | 2017-02-07 11:46:21 -0500 | [diff] [blame] | 3483 | |
Rex Zhu | b89c71d | 2018-04-04 14:17:09 +0800 | [diff] [blame] | 3484 | if (!query) |
| 3485 | return -EINVAL; |
Eric Huang | 2245b60 | 2017-02-07 11:46:21 -0500 | [diff] [blame] | 3486 | |
Evan Quan | 93a09aa | 2019-11-14 15:30:39 +0800 | [diff] [blame] | 3487 | /* |
| 3488 | * PPSMC_MSG_GetCurrPkgPwr is not supported on: |
| 3489 | * - Hawaii |
| 3490 | * - Bonaire |
| 3491 | * - Fiji |
| 3492 | * - Tonga |
| 3493 | */ |
| 3494 | if ((adev->asic_type != CHIP_HAWAII) && |
| 3495 | (adev->asic_type != CHIP_BONAIRE) && |
| 3496 | (adev->asic_type != CHIP_FIJI) && |
| 3497 | (adev->asic_type != CHIP_TONGA)) { |
| 3498 | smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0); |
| 3499 | tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); |
| 3500 | *query = tmp; |
Rex Zhu | b89c71d | 2018-04-04 14:17:09 +0800 | [diff] [blame] | 3501 | |
Evan Quan | 93a09aa | 2019-11-14 15:30:39 +0800 | [diff] [blame] | 3502 | if (tmp != 0) |
| 3503 | return 0; |
| 3504 | } |
Rex Zhu | b89c71d | 2018-04-04 14:17:09 +0800 | [diff] [blame] | 3505 | |
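| | /* |
| |  * Fallback path: start PM status logging and clear the |
| |  * SMU_PM_STATUS_95 accumulator before polling it below. |
| |  */ |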
| 3506 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart); |
| 3507 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
Evan Quan | f5742ec | 2019-02-25 16:44:36 +0800 | [diff] [blame] | 3508 | ixSMU_PM_STATUS_95, 0); |
Rex Zhu | b89c71d | 2018-04-04 14:17:09 +0800 | [diff] [blame] | 3509 | |
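| | /* Poll every 500 ms, up to ~5 seconds, breaking out early once a sample lands. */ |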
Rex Zhu | 5b79d04 | 2018-04-04 15:37:35 +0800 | [diff] [blame] | 3510 | for (i = 0; i < 10; i++) { |
Yrjan Skrimstad | 69064bb | 2019-05-30 02:08:21 +0200 | [diff] [blame] | 3511 | msleep(500); |
Rex Zhu | b89c71d | 2018-04-04 14:17:09 +0800 | [diff] [blame] | 3512 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample); |
Rex Zhu | 5b79d04 | 2018-04-04 15:37:35 +0800 | [diff] [blame] | 3513 | tmp = cgs_read_ind_register(hwmgr->device, |
Rex Zhu | b89c71d | 2018-04-04 14:17:09 +0800 | [diff] [blame] | 3514 | CGS_IND_REG__SMC, |
Evan Quan | f5742ec | 2019-02-25 16:44:36 +0800 | [diff] [blame] | 3515 | ixSMU_PM_STATUS_95); |
Rex Zhu | 5b79d04 | 2018-04-04 15:37:35 +0800 | [diff] [blame] | 3516 | if (tmp != 0) |
Rex Zhu | b89c71d | 2018-04-04 14:17:09 +0800 | [diff] [blame] | 3517 | break; |
| 3518 | } |
Rex Zhu | 5b79d04 | 2018-04-04 15:37:35 +0800 | [diff] [blame] | 3519 | *query = tmp; |
Eric Huang | 2245b60 | 2017-02-07 11:46:21 -0500 | [diff] [blame] | 3520 | |
| 3521 | return 0; |
| 3522 | } |
| 3523 | |
Tom St Denis | 9f8df7d | 2017-02-09 14:29:01 -0500 | [diff] [blame] | 3524 | static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, |
| 3525 | void *value, int *size) |
Tom St Denis | a6e3695 | 2016-09-15 10:07:34 -0400 | [diff] [blame] | 3526 | { |
| 3527 | uint32_t sclk, mclk, activity_percent; |
Rex Zhu | 8487725 | 2018-01-03 17:21:28 +0800 | [diff] [blame] | 3528 | uint32_t offset, val_vid; |
Tom St Denis | a6e3695 | 2016-09-15 10:07:34 -0400 | [diff] [blame] | 3529 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3530 | |
Tom St Denis | 9f8df7d | 2017-02-09 14:29:01 -0500 | [diff] [blame] | 3531 | /* size must be at least 4 bytes for all sensors */ |
| 3532 | if (*size < 4) |
| 3533 | return -EINVAL; |
| 3534 | |
Tom St Denis | a6e3695 | 2016-09-15 10:07:34 -0400 | [diff] [blame] | 3535 | switch (idx) { |
| 3536 | case AMDGPU_PP_SENSOR_GFX_SCLK: |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 3537 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency); |
Tom St Denis | a6e3695 | 2016-09-15 10:07:34 -0400 | [diff] [blame] | 3538 | sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); |
Eric Huang | cd7b0c6 | 2017-02-07 16:37:48 -0500 | [diff] [blame] | 3539 | *((uint32_t *)value) = sclk; |
Tom St Denis | 9f8df7d | 2017-02-09 14:29:01 -0500 | [diff] [blame] | 3540 | *size = 4; |
Tom St Denis | a6e3695 | 2016-09-15 10:07:34 -0400 | [diff] [blame] | 3541 | return 0; |
| 3542 | case AMDGPU_PP_SENSOR_GFX_MCLK: |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 3543 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency); |
Tom St Denis | a6e3695 | 2016-09-15 10:07:34 -0400 | [diff] [blame] | 3544 | mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); |
Eric Huang | cd7b0c6 | 2017-02-07 16:37:48 -0500 | [diff] [blame] | 3545 | *((uint32_t *)value) = mclk; |
Tom St Denis | 9f8df7d | 2017-02-09 14:29:01 -0500 | [diff] [blame] | 3546 | *size = 4; |
Tom St Denis | a6e3695 | 2016-09-15 10:07:34 -0400 | [diff] [blame] | 3547 | return 0; |
| 3548 | case AMDGPU_PP_SENSOR_GPU_LOAD: |
Evan Quan | 767fb6b | 2019-04-24 15:46:50 +0800 | [diff] [blame] | 3549 | case AMDGPU_PP_SENSOR_MEM_LOAD: |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 3550 | offset = data->soft_regs_start + smum_get_offsetof(hwmgr, |
Tom St Denis | a6e3695 | 2016-09-15 10:07:34 -0400 | [diff] [blame] | 3551 | SMU_SoftRegisters, |
Evan Quan | 767fb6b | 2019-04-24 15:46:50 +0800 | [diff] [blame] | 3552 | (idx == AMDGPU_PP_SENSOR_GPU_LOAD) ? |
| 3553 | AverageGraphicsActivity: |
| 3554 | AverageMemoryActivity); |
Tom St Denis | a6e3695 | 2016-09-15 10:07:34 -0400 | [diff] [blame] | 3555 | |
| 3556 | activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); |
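| | /* |
| |  * The activity counter appears to be an 8.8 fixed-point percentage: |
| |  * add 0x80 to round, shift down to an integer, then clamp to 100. |
| |  */ |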
| 3557 | activity_percent += 0x80; |
| 3558 | activity_percent >>= 8; |
Eric Huang | cd7b0c6 | 2017-02-07 16:37:48 -0500 | [diff] [blame] | 3559 | *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent; |
Tom St Denis | 9f8df7d | 2017-02-09 14:29:01 -0500 | [diff] [blame] | 3560 | *size = 4; |
Tom St Denis | a6e3695 | 2016-09-15 10:07:34 -0400 | [diff] [blame] | 3561 | return 0; |
| 3562 | case AMDGPU_PP_SENSOR_GPU_TEMP: |
Eric Huang | cd7b0c6 | 2017-02-07 16:37:48 -0500 | [diff] [blame] | 3563 | *((uint32_t *)value) = smu7_thermal_get_temperature(hwmgr); |
Tom St Denis | 9f8df7d | 2017-02-09 14:29:01 -0500 | [diff] [blame] | 3564 | *size = 4; |
Tom St Denis | a6e3695 | 2016-09-15 10:07:34 -0400 | [diff] [blame] | 3565 | return 0; |
Tom St Denis | 3de4ec5 | 2016-09-19 12:48:52 -0400 | [diff] [blame] | 3566 | case AMDGPU_PP_SENSOR_UVD_POWER: |
Eric Huang | cd7b0c6 | 2017-02-07 16:37:48 -0500 | [diff] [blame] | 3567 | *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1; |
Tom St Denis | 9f8df7d | 2017-02-09 14:29:01 -0500 | [diff] [blame] | 3568 | *size = 4; |
Tom St Denis | 3de4ec5 | 2016-09-19 12:48:52 -0400 | [diff] [blame] | 3569 | return 0; |
| 3570 | case AMDGPU_PP_SENSOR_VCE_POWER: |
Eric Huang | cd7b0c6 | 2017-02-07 16:37:48 -0500 | [diff] [blame] | 3571 | *((uint32_t *)value) = data->vce_power_gated ? 0 : 1; |
Tom St Denis | 9f8df7d | 2017-02-09 14:29:01 -0500 | [diff] [blame] | 3572 | *size = 4; |
Tom St Denis | 3de4ec5 | 2016-09-19 12:48:52 -0400 | [diff] [blame] | 3573 | return 0; |
Eric Huang | 2245b60 | 2017-02-07 11:46:21 -0500 | [diff] [blame] | 3574 | case AMDGPU_PP_SENSOR_GPU_POWER: |
Rex Zhu | 5b79d04 | 2018-04-04 15:37:35 +0800 | [diff] [blame] | 3575 | return smu7_get_gpu_power(hwmgr, (uint32_t *)value); |
Rex Zhu | 8487725 | 2018-01-03 17:21:28 +0800 | [diff] [blame] | 3576 | case AMDGPU_PP_SENSOR_VDDGFX: |
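| | /* |
| |  * The low byte of vr_config selects the SVI2 voltage plane: a value |
| |  * of 2 means VDDGFX is regulated on plane 2, otherwise read plane 1. |
| |  */ |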
| 3577 | if ((data->vr_config & 0xff) == 0x2) |
| 3578 | val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device, |
| 3579 | CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID); |
| 3580 | else |
| 3581 | val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device, |
| 3582 | CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE1_VID); |
| 3583 | |
| 3584 | *((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid); |
| 3585 | return 0; |
Tom St Denis | a6e3695 | 2016-09-15 10:07:34 -0400 | [diff] [blame] | 3586 | default: |
| 3587 | return -EINVAL; |
| 3588 | } |
| 3589 | } |
| 3590 | |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3591 | static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) |
| 3592 | { |
| 3593 | const struct phm_set_power_state_input *states = |
| 3594 | (const struct phm_set_power_state_input *)input; |
| 3595 | const struct smu7_power_state *smu7_ps = |
| 3596 | cast_const_phw_smu7_power_state(states->pnew_state); |
| 3597 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3598 | struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); |
| 3599 | uint32_t sclk = smu7_ps->performance_levels |
| 3600 | [smu7_ps->performance_level_count - 1].engine_clock; |
| 3601 | struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); |
| 3602 | uint32_t mclk = smu7_ps->performance_levels |
| 3603 | [smu7_ps->performance_level_count - 1].memory_clock; |
| 3604 | struct PP_Clocks min_clocks = {0}; |
| 3605 | uint32_t i; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3606 | |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3607 | for (i = 0; i < sclk_table->count; i++) { |
| 3608 | if (sclk == sclk_table->dpm_levels[i].value) |
| 3609 | break; |
| 3610 | } |
| 3611 | |
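| | /* |
| |  * The requested SCLK is not in the table: if it exceeds the highest |
| |  * level, treat it as an overdrive request and stretch the top level. |
| |  */ |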
Rex Zhu | 17c7c7e | 2018-10-22 13:27:37 +0800 | [diff] [blame] | 3612 | if (i >= sclk_table->count) { |
tianci yin | 1b3b27b | 2018-12-04 16:07:18 +0800 | [diff] [blame] | 3613 | if (sclk > sclk_table->dpm_levels[i-1].value) { |
| 3614 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; |
| 3615 | sclk_table->dpm_levels[i-1].value = sclk; |
| 3616 | } |
Rex Zhu | 17c7c7e | 2018-10-22 13:27:37 +0800 | [diff] [blame] | 3617 | } else { |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3618 | /* TODO: Check SCLK in DAL's minimum clocks |
| 3619 | * in case DeepSleep divider update is required. |
| 3620 | */ |
| 3621 | if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR && |
| 3622 | (min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK || |
| 3623 | data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK)) |
| 3624 | data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; |
| 3625 | } |
| 3626 | |
| 3627 | for (i = 0; i < mclk_table->count; i++) { |
| 3628 | if (mclk == mclk_table->dpm_levels[i].value) |
| 3629 | break; |
| 3630 | } |
| 3631 | |
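| | /* Apply the same overdrive handling to the requested MCLK. */ |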
Rex Zhu | 17c7c7e | 2018-10-22 13:27:37 +0800 | [diff] [blame] | 3632 | if (i >= mclk_table->count) { |
tianci yin | 1b3b27b | 2018-12-04 16:07:18 +0800 | [diff] [blame] | 3633 | if (mclk > mclk_table->dpm_levels[i-1].value) { |
| 3634 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; |
| 3635 | mclk_table->dpm_levels[i-1].value = mclk; |
| 3636 | } |
Rex Zhu | 17c7c7e | 2018-10-22 13:27:37 +0800 | [diff] [blame] | 3637 | } |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3638 | |
Rex Zhu | 555fd70 | 2018-03-27 13:32:02 +0800 | [diff] [blame] | 3639 | if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3640 | data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; |
| 3641 | |
| 3642 | return 0; |
| 3643 | } |
| 3644 | |
| 3645 | static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr, |
| 3646 | const struct smu7_power_state *smu7_ps) |
| 3647 | { |
| 3648 | uint32_t i; |
| 3649 | uint32_t sclk, max_sclk = 0; |
| 3650 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3651 | struct smu7_dpm_table *dpm_table = &data->dpm_table; |
| 3652 | |
| 3653 | for (i = 0; i < smu7_ps->performance_level_count; i++) { |
| 3654 | sclk = smu7_ps->performance_levels[i].engine_clock; |
| 3655 | if (max_sclk < sclk) |
| 3656 | max_sclk = sclk; |
| 3657 | } |
| 3658 | |
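| | /* |
| |  * Map the index of the highest engine clock onto the PCIe speed |
| |  * table, clamping to its last entry when the SCLK table is longer. |
| |  */ |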
| 3659 | for (i = 0; i < dpm_table->sclk_table.count; i++) { |
| 3660 | if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk) |
| 3661 | return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ? |
| 3662 | dpm_table->pcie_speed_table.dpm_levels |
| 3663 | [dpm_table->pcie_speed_table.count - 1].value : |
| 3664 | dpm_table->pcie_speed_table.dpm_levels[i].value); |
| 3665 | } |
| 3666 | |
| 3667 | return 0; |
| 3668 | } |
| 3669 | |
| 3670 | static int smu7_request_link_speed_change_before_state_change( |
| 3671 | struct pp_hwmgr *hwmgr, const void *input) |
| 3672 | { |
| 3673 | const struct phm_set_power_state_input *states = |
| 3674 | (const struct phm_set_power_state_input *)input; |
| 3675 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3676 | const struct smu7_power_state *smu7_nps = |
| 3677 | cast_const_phw_smu7_power_state(states->pnew_state); |
| 3678 | const struct smu7_power_state *smu7_cps = |
| 3679 | cast_const_phw_smu7_power_state(states->pcurrent_state); |
| 3680 | |
| 3681 | uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps); |
| 3682 | uint16_t current_link_speed; |
| 3683 | |
| 3684 | if (data->force_pcie_gen == PP_PCIEGenInvalid) |
| 3685 | current_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_cps); |
| 3686 | else |
| 3687 | current_link_speed = data->force_pcie_gen; |
| 3688 | |
| 3689 | data->force_pcie_gen = PP_PCIEGenInvalid; |
| 3690 | data->pspp_notify_required = false; |
| 3691 | |
| 3692 | if (target_link_speed > current_link_speed) { |
| 3693 | switch (target_link_speed) { |
Rex Zhu | 37a9479 | 2018-03-06 14:42:24 +0800 | [diff] [blame] | 3694 | #ifdef CONFIG_ACPI |
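| | /* |
| |  * Request the highest gen first; if the ACPI PSPP request fails, |
| |  * fall through and retry at the next lower gen. |
| |  */ |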
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3695 | case PP_PCIEGen3: |
Rex Zhu | e1deba2 | 2018-02-27 18:27:54 +0800 | [diff] [blame] | 3696 | if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN3, false)) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3697 | break; |
| 3698 | data->force_pcie_gen = PP_PCIEGen2; |
| 3699 | if (current_link_speed == PP_PCIEGen2) |
| 3700 | break; |
Joe Perches | 45ce19e | 2020-03-10 21:51:39 -0700 | [diff] [blame] | 3701 | fallthrough; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3702 | case PP_PCIEGen2: |
Rex Zhu | e1deba2 | 2018-02-27 18:27:54 +0800 | [diff] [blame] | 3703 | if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false)) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3704 | break; |
Joe Perches | 45ce19e | 2020-03-10 21:51:39 -0700 | [diff] [blame] | 3705 | fallthrough; |
Joe Perches | 3738de3 | 2020-03-13 05:57:37 -0700 | [diff] [blame] | 3706 | #endif |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3707 | default: |
| 3708 | data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr); |
| 3709 | break; |
| 3710 | } |
| 3711 | } else { |
| 3712 | if (target_link_speed < current_link_speed) |
| 3713 | data->pspp_notify_required = true; |
| 3714 | } |
| 3715 | |
| 3716 | return 0; |
| 3717 | } |
| 3718 | |
| 3719 | static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) |
| 3720 | { |
| 3721 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3722 | |
| 3723 | if (0 == data->need_update_smu7_dpm_table) |
| 3724 | return 0; |
| 3725 | |
| 3726 | if ((0 == data->sclk_dpm_key_disabled) && |
| 3727 | (data->need_update_smu7_dpm_table & |
| 3728 | (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) { |
| 3729 | PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), |
| 3730 | "Trying to freeze SCLK DPM when DPM is disabled", |
| 3731 | ); |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 3732 | PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3733 | PPSMC_MSG_SCLKDPM_FreezeLevel), |
| 3734 | "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", |
| 3735 | return -EINVAL); |
| 3736 | } |
| 3737 | |
| 3738 | if ((0 == data->mclk_dpm_key_disabled) && |
| 3739 | (data->need_update_smu7_dpm_table & |
| 3740 | DPMTABLE_OD_UPDATE_MCLK)) { |
| 3741 | PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), |
| 3742 | "Trying to freeze MCLK DPM when DPM is disabled", |
| 3743 | ); |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 3744 | PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3745 | PPSMC_MSG_MCLKDPM_FreezeLevel), |
| 3746 | "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", |
| 3747 | return -EINVAL); |
| 3748 | } |
| 3749 | |
| 3750 | return 0; |
| 3751 | } |
| 3752 | |
| 3753 | static int smu7_populate_and_upload_sclk_mclk_dpm_levels( |
| 3754 | struct pp_hwmgr *hwmgr, const void *input) |
| 3755 | { |
| 3756 | int result = 0; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3757 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3758 | struct smu7_dpm_table *dpm_table = &data->dpm_table; |
Rex Zhu | 49fd66e | 2018-01-17 16:49:29 +0800 | [diff] [blame] | 3759 | uint32_t count; |
| 3760 | struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table); |
| 3761 | struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels); |
| 3762 | struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3763 | |
| 3764 | if (0 == data->need_update_smu7_dpm_table) |
| 3765 | return 0; |
| 3766 | |
Rex Zhu | 49fd66e | 2018-01-17 16:49:29 +0800 | [diff] [blame] | 3767 | if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { |
| 3768 | for (count = 0; count < dpm_table->sclk_table.count; count++) { |
| 3769 | dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled; |
| 3770 | dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3771 | } |
| 3772 | } |
| 3773 | |
Rex Zhu | 49fd66e | 2018-01-17 16:49:29 +0800 | [diff] [blame] | 3774 | if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { |
| 3775 | for (count = 0; count < dpm_table->mclk_table.count; count++) { |
| 3776 | dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled; |
| 3777 | dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3778 | } |
| 3779 | } |
| 3780 | |
| 3781 | if (data->need_update_smu7_dpm_table & |
| 3782 | (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) { |
| 3783 | result = smum_populate_all_graphic_levels(hwmgr); |
| 3784 | PP_ASSERT_WITH_CODE((0 == result), |
| 3785 | "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", |
| 3786 | return result); |
| 3787 | } |
| 3788 | |
| 3789 | if (data->need_update_smu7_dpm_table & |
| 3790 | (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) { |
| 3791 | /* populate the MCLK DPM table to SMU7 */ |
| 3792 | result = smum_populate_all_memory_levels(hwmgr); |
| 3793 | PP_ASSERT_WITH_CODE((0 == result), |
| 3794 | "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", |
| 3795 | return result); |
| 3796 | } |
| 3797 | |
| 3798 | return result; |
| 3799 | } |
| 3800 | |
| 3801 | static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr, |
| 3802 | struct smu7_single_dpm_table *dpm_table, |
| 3803 | uint32_t low_limit, uint32_t high_limit) |
| 3804 | { |
| 3805 | uint32_t i; |
| 3806 | |
| 3807 | for (i = 0; i < dpm_table->count; i++) { |
Rex Zhu | ecfee95 | 2018-06-13 18:26:38 +0800 | [diff] [blame] | 3808 | /* skip the trim if OD is enabled */ |
| 3809 | if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit |
| 3810 | || dpm_table->dpm_levels[i].value > high_limit)) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3811 | dpm_table->dpm_levels[i].enabled = false; |
| 3812 | else |
| 3813 | dpm_table->dpm_levels[i].enabled = true; |
| 3814 | } |
| 3815 | |
| 3816 | return 0; |
| 3817 | } |
| 3818 | |
| 3819 | static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr, |
| 3820 | const struct smu7_power_state *smu7_ps) |
| 3821 | { |
| 3822 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3823 | uint32_t high_limit_count; |
| 3824 | |
| 3825 | PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1), |
| 3826 | "power state did not have any performance level", |
| 3827 | return -EINVAL); |
| 3828 | |
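| | /* A single-level state uses level 0 as both the low and the high limit. */ |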
| 3829 | high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1; |
| 3830 | |
| 3831 | smu7_trim_single_dpm_states(hwmgr, |
| 3832 | &(data->dpm_table.sclk_table), |
| 3833 | smu7_ps->performance_levels[0].engine_clock, |
| 3834 | smu7_ps->performance_levels[high_limit_count].engine_clock); |
| 3835 | |
| 3836 | smu7_trim_single_dpm_states(hwmgr, |
| 3837 | &(data->dpm_table.mclk_table), |
| 3838 | smu7_ps->performance_levels[0].memory_clock, |
| 3839 | smu7_ps->performance_levels[high_limit_count].memory_clock); |
| 3840 | |
| 3841 | return 0; |
| 3842 | } |
| 3843 | |
| 3844 | static int smu7_generate_dpm_level_enable_mask( |
| 3845 | struct pp_hwmgr *hwmgr, const void *input) |
| 3846 | { |
Kenneth Feng | 5c16f36 | 2018-06-12 15:07:37 +0800 | [diff] [blame] | 3847 | int result = 0; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3848 | const struct phm_set_power_state_input *states = |
| 3849 | (const struct phm_set_power_state_input *)input; |
| 3850 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3851 | const struct smu7_power_state *smu7_ps = |
| 3852 | cast_const_phw_smu7_power_state(states->pnew_state); |
| 3853 | |
Kenneth Feng | 5c16f36 | 2018-06-12 15:07:37 +0800 | [diff] [blame] | 3854 | |
Rex Zhu | ecfee95 | 2018-06-13 18:26:38 +0800 | [diff] [blame] | 3855 | result = smu7_trim_dpm_states(hwmgr, smu7_ps); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3856 | if (result) |
| 3857 | return result; |
| 3858 | |
| 3859 | data->dpm_level_enable_mask.sclk_dpm_enable_mask = |
| 3860 | phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table); |
| 3861 | data->dpm_level_enable_mask.mclk_dpm_enable_mask = |
| 3862 | phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table); |
| 3863 | data->dpm_level_enable_mask.pcie_dpm_enable_mask = |
| 3864 | phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table); |
| 3865 | |
| 3866 | return 0; |
| 3867 | } |
| 3868 | |
| 3869 | static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) |
| 3870 | { |
| 3871 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3872 | |
| 3873 | if (0 == data->need_update_smu7_dpm_table) |
| 3874 | return 0; |
| 3875 | |
| 3876 | if ((0 == data->sclk_dpm_key_disabled) && |
| 3877 | (data->need_update_smu7_dpm_table & |
| 3878 | (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) { |
| 3879 | |
| 3880 | PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), |
| 3881 | "Trying to Unfreeze SCLK DPM when DPM is disabled", |
| 3882 | ); |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 3883 | PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3884 | PPSMC_MSG_SCLKDPM_UnfreezeLevel), |
| 3885 | "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", |
| 3886 | return -EINVAL); |
| 3887 | } |
| 3888 | |
| 3889 | if ((0 == data->mclk_dpm_key_disabled) && |
| 3890 | (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { |
| 3891 | |
| 3892 | PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), |
| 3893 | "Trying to Unfreeze MCLK DPM when DPM is disabled", |
| 3894 | ); |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 3895 | PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, |
Eric Huang | fd78e6a | 2017-11-16 11:14:21 -0500 | [diff] [blame] | 3896 | PPSMC_MSG_MCLKDPM_UnfreezeLevel), |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3897 | "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", |
| 3898 | return -EINVAL); |
| 3899 | } |
| 3900 | |
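| | /* |
| |  * Keep only the VDDC overdrive flag; smu7_check_states_equal still |
| |  * consults it after this point. |
| |  */ |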
Rex Zhu | 49fd66e | 2018-01-17 16:49:29 +0800 | [diff] [blame] | 3901 | data->need_update_smu7_dpm_table &= DPMTABLE_OD_UPDATE_VDDC; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3902 | |
| 3903 | return 0; |
| 3904 | } |
| 3905 | |
| 3906 | static int smu7_notify_link_speed_change_after_state_change( |
| 3907 | struct pp_hwmgr *hwmgr, const void *input) |
| 3908 | { |
| 3909 | const struct phm_set_power_state_input *states = |
| 3910 | (const struct phm_set_power_state_input *)input; |
| 3911 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3912 | const struct smu7_power_state *smu7_ps = |
| 3913 | cast_const_phw_smu7_power_state(states->pnew_state); |
| 3914 | uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps); |
| 3915 | uint8_t request; |
| 3916 | |
| 3917 | if (data->pspp_notify_required) { |
| 3918 | if (target_link_speed == PP_PCIEGen3) |
| 3919 | request = PCIE_PERF_REQ_GEN3; |
| 3920 | else if (target_link_speed == PP_PCIEGen2) |
| 3921 | request = PCIE_PERF_REQ_GEN2; |
| 3922 | else |
| 3923 | request = PCIE_PERF_REQ_GEN1; |
| 3924 | |
| 3925 | if (request == PCIE_PERF_REQ_GEN1 && |
| 3926 | smu7_get_current_pcie_speed(hwmgr) > 0) |
| 3927 | return 0; |
| 3928 | |
Dave Airlie | 62ccb65 | 2018-03-14 10:54:55 +1000 | [diff] [blame] | 3929 | #ifdef CONFIG_ACPI |
Rex Zhu | e1deba2 | 2018-02-27 18:27:54 +0800 | [diff] [blame] | 3930 | if (amdgpu_acpi_pcie_performance_request(hwmgr->adev, request, false)) { |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3931 | if (PP_PCIEGen2 == target_link_speed) |
Huang Rui | b5c11b8 | 2016-12-26 15:00:22 +0800 | [diff] [blame] | 3932 | pr_info("PSPP request to switch to Gen2 from Gen3 Failed!"); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3933 | else |
Huang Rui | b5c11b8 | 2016-12-26 15:00:22 +0800 | [diff] [blame] | 3934 | pr_info("PSPP request to switch to Gen1 from Gen2 Failed!"); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3935 | } |
Dave Airlie | 62ccb65 | 2018-03-14 10:54:55 +1000 | [diff] [blame] | 3936 | #endif |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3937 | } |
| 3938 | |
| 3939 | return 0; |
| 3940 | } |
| 3941 | |
| 3942 | static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr) |
| 3943 | { |
| 3944 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3945 | |
Eric Huang | 0c24e7e | 2018-04-11 15:38:11 -0500 | [diff] [blame] | 3946 | if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) { |
| 3947 | if (hwmgr->chip_id == CHIP_VEGAM) |
| 3948 | smum_send_msg_to_smc_with_parameter(hwmgr, |
| 3949 | (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2); |
| 3950 | else |
| 3951 | smum_send_msg_to_smc_with_parameter(hwmgr, |
| 3952 | (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2); |
| 3953 | } |
Tom St Denis | 1756f1b | 2017-10-04 13:44:52 -0400 | [diff] [blame] | 3954 | return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3955 | } |
| 3956 | |
| 3957 | static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) |
| 3958 | { |
| 3959 | int tmp_result, result = 0; |
| 3960 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 3961 | |
| 3962 | tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input); |
| 3963 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 3964 | "Failed to find DPM states clocks in DPM table!", |
| 3965 | result = tmp_result); |
| 3966 | |
| 3967 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
| 3968 | PHM_PlatformCaps_PCIEPerformanceRequest)) { |
| 3969 | tmp_result = |
| 3970 | smu7_request_link_speed_change_before_state_change(hwmgr, input); |
| 3971 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 3972 | "Failed to request link speed change before state change!", |
| 3973 | result = tmp_result); |
| 3974 | } |
| 3975 | |
| 3976 | tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr); |
| 3977 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 3978 | "Failed to freeze SCLK MCLK DPM!", result = tmp_result); |
| 3979 | |
| 3980 | tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); |
| 3981 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 3982 | "Failed to populate and upload SCLK MCLK DPM levels!", |
| 3983 | result = tmp_result); |
| 3984 | |
Alex Deucher | 90124562 | 2019-11-08 11:15:17 -0500 | [diff] [blame] | 3985 | /* |
| 3986 | * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag. |
| 3987 | * That effectively disables AVFS feature. |
| 3988 | */ |
| 3989 | if (hwmgr->hardcode_pp_table != NULL) |
| 3990 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; |
| 3991 | |
Rex Zhu | 3c9d1fde | 2018-01-02 15:20:55 +0800 | [diff] [blame] | 3992 | tmp_result = smu7_update_avfs(hwmgr); |
| 3993 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 3994 | "Failed to update avfs voltages!", |
| 3995 | result = tmp_result); |
| 3996 | |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 3997 | tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input); |
| 3998 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 3999 | "Failed to generate DPM level enabled mask!", |
| 4000 | result = tmp_result); |
| 4001 | |
| 4002 | tmp_result = smum_update_sclk_threshold(hwmgr); |
| 4003 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4004 | "Failed to update SCLK threshold!", |
| 4005 | result = tmp_result); |
| 4006 | |
| 4007 | tmp_result = smu7_notify_smc_display(hwmgr); |
| 4008 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4009 | "Failed to notify smc display settings!", |
| 4010 | result = tmp_result); |
| 4011 | |
| 4012 | tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr); |
| 4013 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4014 | "Failed to unfreeze SCLK MCLK DPM!", |
| 4015 | result = tmp_result); |
| 4016 | |
| 4017 | tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr); |
| 4018 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4019 | "Failed to upload DPM level enabled mask!", |
| 4020 | result = tmp_result); |
| 4021 | |
| 4022 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
| 4023 | PHM_PlatformCaps_PCIEPerformanceRequest)) { |
| 4024 | tmp_result = |
| 4025 | smu7_notify_link_speed_change_after_state_change(hwmgr, input); |
| 4026 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4027 | "Failed to notify link speed change after state change!", |
| 4028 | result = tmp_result); |
| 4029 | } |
| 4030 | data->apply_optimized_settings = false; |
| 4031 | return result; |
| 4032 | } |
| 4033 | |
| 4034 | static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) |
| 4035 | { |
| 4036 | hwmgr->thermal_controller. |
| 4037 | advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; |
| 4038 | |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 4039 | return smum_send_msg_to_smc_with_parameter(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4040 | PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm); |
| 4041 | } |
| 4042 | |
Baoyou Xie | f8a4c11 | 2016-09-30 17:58:42 +0800 | [diff] [blame] | 4043 | static int |
| 4044 | smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4045 | { |
| 4046 | PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; |
| 4047 | |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 4048 | return (smum_send_msg_to_smc(hwmgr, msg) == 0) ? 0 : -1; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4049 | } |
| 4050 | |
Baoyou Xie | f8a4c11 | 2016-09-30 17:58:42 +0800 | [diff] [blame] | 4051 | static int |
| 4052 | smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4053 | { |
Rex Zhu | 555fd70 | 2018-03-27 13:32:02 +0800 | [diff] [blame] | 4054 | if (hwmgr->display_config->num_display > 1 && |
| 4055 | !hwmgr->display_config->multi_monitor_in_sync) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4056 | smu7_notify_smc_display_change(hwmgr, false); |
| 4057 | |
| 4058 | return 0; |
| 4059 | } |
| 4060 | |
| 4061 | /** |
| 4062 | * Programs the display gap |
| 4063 | * |
| 4064 | * @param hwmgr the address of the powerplay hardware manager. |
| 4065 | * @return always OK |
| 4066 | */ |
Baoyou Xie | f8a4c11 | 2016-09-30 17:58:42 +0800 | [diff] [blame] | 4067 | static int smu7_program_display_gap(struct pp_hwmgr *hwmgr) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4068 | { |
| 4069 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4070 | uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); |
| 4071 | uint32_t display_gap2; |
| 4072 | uint32_t pre_vbi_time_in_us; |
| 4073 | uint32_t frame_time_in_us; |
Rex Zhu | 555fd70 | 2018-03-27 13:32:02 +0800 | [diff] [blame] | 4074 | uint32_t ref_clock, refresh_rate; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4075 | |
Rex Zhu | 555fd70 | 2018-03-27 13:32:02 +0800 | [diff] [blame] | 4076 | display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4077 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap); |
| 4078 | |
Rex Zhu | 2538090 | 2018-03-16 16:56:58 +0800 | [diff] [blame] | 4079 | ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev); |
Rex Zhu | 555fd70 | 2018-03-27 13:32:02 +0800 | [diff] [blame] | 4080 | refresh_rate = hwmgr->display_config->vrefresh; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4081 | |
| 4082 | if (0 == refresh_rate) |
| 4083 | refresh_rate = 60; |
| 4084 | |
| 4085 | frame_time_in_us = 1000000 / refresh_rate; |
| 4086 | |
Rex Zhu | 555fd70 | 2018-03-27 13:32:02 +0800 | [diff] [blame] | 4087 | pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time; |
Rex Zhu | 8b95f4f | 2017-10-20 15:07:41 +0800 | [diff] [blame] | 4088 | |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4089 | data->frame_time_x2 = frame_time_in_us * 2 / 100; |
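| | /* |
| |  * frame_time_x2 is twice the frame time in 100 us units (333 at |
| |  * 60 Hz), presumably the unit PPSMC_MSG_SetVBITimeout expects. |
| |  */ |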
| 4090 | |
Ahzo | f659bb6 | 2019-08-05 21:14:18 +0200 | [diff] [blame] | 4091 | if (data->frame_time_x2 < 280) { |
| 4092 | pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2); |
| 4093 | data->frame_time_x2 = 280; |
| 4094 | } |
| 4095 | |
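| | /* |
| |  * xclk is reported in 10 kHz units, so ref_clock / 100 gives |
| |  * ticks per microsecond for the pre-VBI gap conversion. |
| |  */ |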
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4096 | display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); |
| 4097 | |
| 4098 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); |
| 4099 | |
| 4100 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 4101 | data->soft_regs_start + smum_get_offsetof(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4102 | SMU_SoftRegisters, |
| 4103 | PreVBlankGap), 0x64); |
| 4104 | |
| 4105 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 4106 | data->soft_regs_start + smum_get_offsetof(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4107 | SMU_SoftRegisters, |
| 4108 | VBlankTimeout), |
| 4109 | (frame_time_in_us - pre_vbi_time_in_us)); |
| 4110 | |
| 4111 | return 0; |
| 4112 | } |
| 4113 | |
Baoyou Xie | f8a4c11 | 2016-09-30 17:58:42 +0800 | [diff] [blame] | 4114 | static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4115 | { |
| 4116 | return smu7_program_display_gap(hwmgr); |
| 4117 | } |
| 4118 | |
| 4119 | /** |
| 4120 |  * Set the maximum target operating fan output RPM |
| 4121 |  * |
| 4122 |  * @param hwmgr the address of the powerplay hardware manager. |
| 4123 |  * @param us_max_fan_rpm the maximum operating fan RPM value. |
| 4124 |  * @return The response that came from the SMC. |
| 4125 |  */ |
| 4126 | static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm) |
| 4127 | { |
| 4128 | hwmgr->thermal_controller. |
| 4129 | advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm; |
| 4130 | |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 4131 | return smum_send_msg_to_smc_with_parameter(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4132 | PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm); |
| 4133 | } |
| 4134 | |
Rex Zhu | 031ec94 | 2018-03-21 16:19:21 +0800 | [diff] [blame] | 4135 | static const struct amdgpu_irq_src_funcs smu7_irq_funcs = { |
| 4136 | .process = phm_irq_process, |
| 4137 | }; |
| 4138 | |
Rex Zhu | 4d20037 | 2018-03-21 13:11:27 +0800 | [diff] [blame] | 4139 | static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4140 | { |
Rex Zhu | 031ec94 | 2018-03-21 16:19:21 +0800 | [diff] [blame] | 4141 | struct amdgpu_irq_src *source = |
| 4142 | kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL); |
| 4143 | |
| 4144 | if (!source) |
| 4145 | return -ENOMEM; |
| 4146 | |
| 4147 | source->funcs = &smu7_irq_funcs; |
| 4148 | |
| 4149 | amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), |
Christian König | 1ffdeca | 2018-09-17 15:29:28 +0200 | [diff] [blame] | 4150 | AMDGPU_IRQ_CLIENTID_LEGACY, |
Andrey Grodzovsky | 091aec0 | 2018-05-25 10:06:52 -0400 | [diff] [blame] | 4151 | VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH, |
Rex Zhu | 031ec94 | 2018-03-21 16:19:21 +0800 | [diff] [blame] | 4152 | source); |
| 4153 | amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), |
Christian König | 1ffdeca | 2018-09-17 15:29:28 +0200 | [diff] [blame] | 4154 | AMDGPU_IRQ_CLIENTID_LEGACY, |
Andrey Grodzovsky | 091aec0 | 2018-05-25 10:06:52 -0400 | [diff] [blame] | 4155 | VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW, |
Rex Zhu | 031ec94 | 2018-03-21 16:19:21 +0800 | [diff] [blame] | 4156 | source); |
| 4157 | |
| 4158 | /* Register the CTF (GPIO_19) interrupt */ |
| 4159 | amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), |
Christian König | 1ffdeca | 2018-09-17 15:29:28 +0200 | [diff] [blame] | 4160 | AMDGPU_IRQ_CLIENTID_LEGACY, |
Andrey Grodzovsky | 091aec0 | 2018-05-25 10:06:52 -0400 | [diff] [blame] | 4161 | VISLANDS30_IV_SRCID_GPIO_19, |
Rex Zhu | 031ec94 | 2018-03-21 16:19:21 +0800 | [diff] [blame] | 4162 | source); |
| 4163 | |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4164 | return 0; |
| 4165 | } |
| 4166 | |
Baoyou Xie | f8a4c11 | 2016-09-30 17:58:42 +0800 | [diff] [blame] | 4167 | static bool |
| 4168 | smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4169 | { |
| 4170 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4171 | bool is_update_required = false; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4172 | |
Rex Zhu | 555fd70 | 2018-03-27 13:32:02 +0800 | [diff] [blame] | 4173 | if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4174 | is_update_required = true; |
| 4175 | |
Alex Deucher | ec2e082 | 2018-08-09 14:24:08 -0500 | [diff] [blame] | 4176 | if (data->display_timing.vrefresh != hwmgr->display_config->vrefresh) |
| 4177 | is_update_required = true; |
| 4178 | |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4179 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { |
Rex Zhu | 555fd70 | 2018-03-27 13:32:02 +0800 | [diff] [blame] | 4180 | if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr && |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4181 | (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK || |
Rex Zhu | 555fd70 | 2018-03-27 13:32:02 +0800 | [diff] [blame] | 4182 | hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK)) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4183 | is_update_required = true; |
| 4184 | } |
| 4185 | return is_update_required; |
| 4186 | } |
| 4187 | |
| 4188 | static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1, |
| 4189 | const struct smu7_performance_level *pl2) |
| 4190 | { |
| 4191 | return ((pl1->memory_clock == pl2->memory_clock) && |
| 4192 | (pl1->engine_clock == pl2->engine_clock) && |
| 4193 | (pl1->pcie_gen == pl2->pcie_gen) && |
| 4194 | (pl1->pcie_lane == pl2->pcie_lane)); |
| 4195 | } |
| 4196 | |
Baoyou Xie | f8a4c11 | 2016-09-30 17:58:42 +0800 | [diff] [blame] | 4197 | static int smu7_check_states_equal(struct pp_hwmgr *hwmgr, |
| 4198 | const struct pp_hw_power_state *pstate1, |
| 4199 | const struct pp_hw_power_state *pstate2, bool *equal) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4200 | { |
Rex Zhu | 9faa6b0 | 2016-10-11 18:51:16 +0800 | [diff] [blame] | 4201 | const struct smu7_power_state *psa; |
| 4202 | const struct smu7_power_state *psb; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4203 | int i; |
Rex Zhu | 49fd66e | 2018-01-17 16:49:29 +0800 | [diff] [blame] | 4204 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4205 | |
| 4206 | if (pstate1 == NULL || pstate2 == NULL || equal == NULL) |
| 4207 | return -EINVAL; |
| 4208 | |
Rex Zhu | 9faa6b0 | 2016-10-11 18:51:16 +0800 | [diff] [blame] | 4209 | psa = cast_const_phw_smu7_power_state(pstate1); |
| 4210 | psb = cast_const_phw_smu7_power_state(pstate2); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4211 | /* If the two states don't even have the same number of performance levels they cannot be the same state. */ |
| 4212 | if (psa->performance_level_count != psb->performance_level_count) { |
| 4213 | *equal = false; |
| 4214 | return 0; |
| 4215 | } |
| 4216 | |
| 4217 | for (i = 0; i < psa->performance_level_count; i++) { |
| 4218 | if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) { |
| 4219 | /* If we have found even one performance level pair that is different the states are different. */ |
| 4220 | *equal = false; |
| 4221 | return 0; |
| 4222 | } |
| 4223 | } |
| 4224 | |
| 4225 | /* If all performance levels are the same, try to use the UVD clocks to break the tie. */ |
| 4226 | *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk)); |
| 4227 | *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk)); |
| 4228 | *equal &= (psa->sclk_threshold == psb->sclk_threshold); |
Rex Zhu | 49fd66e | 2018-01-17 16:49:29 +0800 | [diff] [blame] | 4229 | /* For OD call, set value based on flag */ |
| 4230 | *equal &= !(data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | |
| 4231 | DPMTABLE_OD_UPDATE_MCLK | |
| 4232 | DPMTABLE_OD_UPDATE_VDDC)); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4233 | |
| 4234 | return 0; |
| 4235 | } |
| 4236 | |
Alex Deucher | 9da0063 | 2018-04-11 18:09:39 -0500 | [diff] [blame] | 4237 | static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4238 | { |
| 4239 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4240 | |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4241 | uint32_t tmp; |
| 4242 | |
| 4243 | /* |
| 4244 |  * Read MC indirect register offset 0x9F bits [3:0] to see if the |
| 4245 |  * VBIOS has already loaded a full version of MC ucode or not. |
| 4246 |  */ |
| 4247 | |
| 4248 | smu7_get_mc_microcode_version(hwmgr); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4249 | |
| 4250 | data->need_long_memory_training = false; |
| 4251 | |
| 4252 | cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, |
| 4253 | ixMC_IO_DEBUG_UP_13); |
| 4254 | tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA); |
| 4255 | |
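| | /* |
| |  * Bit 23 of MC_IO_DEBUG_UP_13 picks which memory latency pair to |
| |  * report and, on Polaris parts, whether FFC is enabled or disabled. |
| |  */ |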
| 4256 | if (tmp & (1 << 23)) { |
| 4257 | data->mem_latency_high = MEM_LATENCY_HIGH; |
| 4258 | data->mem_latency_low = MEM_LATENCY_LOW; |
Alex Deucher | 34c08da | 2018-11-29 19:35:14 -0500 | [diff] [blame] | 4259 | if ((hwmgr->chip_id == CHIP_POLARIS10) || |
| 4260 | (hwmgr->chip_id == CHIP_POLARIS11) || |
| 4261 | (hwmgr->chip_id == CHIP_POLARIS12)) |
| 4262 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4263 | } else { |
| 4264 | data->mem_latency_high = 330; |
| 4265 | data->mem_latency_low = 330; |
Alex Deucher | 34c08da | 2018-11-29 19:35:14 -0500 | [diff] [blame] | 4266 | if ((hwmgr->chip_id == CHIP_POLARIS10) || |
| 4267 | (hwmgr->chip_id == CHIP_POLARIS11) || |
| 4268 | (hwmgr->chip_id == CHIP_POLARIS12)) |
| 4269 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4270 | } |
| 4271 | |
| 4272 | return 0; |
| 4273 | } |
| 4274 | |
| 4275 | static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr) |
| 4276 | { |
| 4277 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4278 | |
| 4279 | data->clock_registers.vCG_SPLL_FUNC_CNTL = |
| 4280 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL); |
| 4281 | data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = |
| 4282 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2); |
| 4283 | data->clock_registers.vCG_SPLL_FUNC_CNTL_3 = |
| 4284 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3); |
| 4285 | data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = |
| 4286 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4); |
| 4287 | data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM = |
| 4288 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM); |
| 4289 | data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 = |
| 4290 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2); |
| 4291 | data->clock_registers.vDLL_CNTL = |
| 4292 | cgs_read_register(hwmgr->device, mmDLL_CNTL); |
| 4293 | data->clock_registers.vMCLK_PWRMGT_CNTL = |
| 4294 | cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL); |
| 4295 | data->clock_registers.vMPLL_AD_FUNC_CNTL = |
| 4296 | cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL); |
| 4297 | data->clock_registers.vMPLL_DQ_FUNC_CNTL = |
| 4298 | cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL); |
| 4299 | data->clock_registers.vMPLL_FUNC_CNTL = |
| 4300 | cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL); |
| 4301 | data->clock_registers.vMPLL_FUNC_CNTL_1 = |
| 4302 | cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1); |
| 4303 | data->clock_registers.vMPLL_FUNC_CNTL_2 = |
| 4304 | cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2); |
| 4305 | data->clock_registers.vMPLL_SS1 = |
| 4306 | cgs_read_register(hwmgr->device, mmMPLL_SS1); |
| 4307 | data->clock_registers.vMPLL_SS2 = |
| 4308 | cgs_read_register(hwmgr->device, mmMPLL_SS2); |
| 4309 | return 0; |
| 4310 | |
| 4311 | } |
| 4312 | |
| 4313 | /** |
| 4314 | * Find out if memory is GDDR5. |
| 4315 | * |
| 4316 | * @param hwmgr the address of the powerplay hardware manager. |
| 4317 | * @return always 0 |
| 4318 | */ |
| 4319 | static int smu7_get_memory_type(struct pp_hwmgr *hwmgr) |
| 4320 | { |
| 4321 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
Alex Deucher | c73a362 | 2018-04-11 17:57:13 -0500 | [diff] [blame] | 4322 | struct amdgpu_device *adev = hwmgr->adev; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4323 | |
Alex Deucher | c73a362 | 2018-04-11 17:57:13 -0500 | [diff] [blame] | 4324 | data->is_memory_gddr5 = (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4325 | |
| 4326 | return 0; |
| 4327 | } |
| 4328 | |
| 4329 | /** |
| 4330 | * Enables Dynamic Power Management by SMC |
| 4331 | * |
| 4332 | * @param hwmgr the address of the powerplay hardware manager. |
| 4333 | * @return always 0 |
| 4334 | */ |
| 4335 | static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr) |
| 4336 | { |
| 4337 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
| 4338 | GENERAL_PWRMGT, STATIC_PM_EN, 1); |
| 4339 | |
| 4340 | return 0; |
| 4341 | } |
| 4342 | |
| 4343 | /** |
| 4344 | * Initialize PowerGating States for different engines |
| 4345 | * |
| 4346 | * @param hwmgr the address of the powerplay hardware manager. |
| 4347 | * @return always 0 |
| 4348 | */ |
| 4349 | static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr) |
| 4350 | { |
| 4351 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4352 | |
| 4353 | data->uvd_power_gated = false; |
| 4354 | data->vce_power_gated = false; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4355 | |
| 4356 | return 0; |
| 4357 | } |
| 4358 | |
| 4359 | static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr) |
| 4360 | { |
| 4361 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4362 | |
| 4363 | data->low_sclk_interrupt_threshold = 0; |
| 4364 | return 0; |
| 4365 | } |
| 4366 | |
Baoyou Xie | f8a4c11 | 2016-09-30 17:58:42 +0800 | [diff] [blame] | 4367 | static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr) |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4368 | { |
| 4369 | int tmp_result, result = 0; |
| 4370 | |
Alex Deucher | 9da0063 | 2018-04-11 18:09:39 -0500 | [diff] [blame] | 4371 | smu7_check_mc_firmware(hwmgr); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4372 | |
| 4373 | tmp_result = smu7_read_clock_registers(hwmgr); |
| 4374 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4375 | "Failed to read clock registers!", result = tmp_result); |
| 4376 | |
| 4377 | tmp_result = smu7_get_memory_type(hwmgr); |
| 4378 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4379 | "Failed to get memory type!", result = tmp_result); |
| 4380 | |
| 4381 | tmp_result = smu7_enable_acpi_power_management(hwmgr); |
| 4382 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4383 | "Failed to enable ACPI power management!", result = tmp_result); |
| 4384 | |
| 4385 | tmp_result = smu7_init_power_gate_state(hwmgr); |
| 4386 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4387 | "Failed to init power gate state!", result = tmp_result); |
| 4388 | |
| 4389 | tmp_result = smu7_get_mc_microcode_version(hwmgr); |
| 4390 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4391 | "Failed to get MC microcode version!", result = tmp_result); |
| 4392 | |
| 4393 | tmp_result = smu7_init_sclk_threshold(hwmgr); |
| 4394 | PP_ASSERT_WITH_CODE((0 == tmp_result), |
| 4395 | "Failed to init sclk threshold!", result = tmp_result); |
| 4396 | |
| 4397 | return result; |
| 4398 | } |
| 4399 | |
| 4400 | static int smu7_force_clock_level(struct pp_hwmgr *hwmgr, |
| 4401 | enum pp_clock_type type, uint32_t mask) |
| 4402 | { |
| 4403 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4404 | |
Rex Zhu | 61e208b | 2018-01-25 18:42:08 +0800 | [diff] [blame] | 4405 | if (mask == 0) |
| 4406 | return -EINVAL; |
| 4407 | |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4408 | switch (type) { |
| 4409 | case PP_SCLK: |
| 4410 | if (!data->sclk_dpm_key_disabled) |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 4411 | smum_send_msg_to_smc_with_parameter(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4412 | PPSMC_MSG_SCLKDPM_SetEnabledMask, |
| 4413 | data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); |
| 4414 | break; |
| 4415 | case PP_MCLK: |
| 4416 | if (!data->mclk_dpm_key_disabled) |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 4417 | smum_send_msg_to_smc_with_parameter(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4418 | PPSMC_MSG_MCLKDPM_SetEnabledMask, |
| 4419 | data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); |
| 4420 | break; |
| 4421 | case PP_PCIE: |
| 4422 | { |
| 4423 | uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4424 | |
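| | /* |
| |  * fls(tmp) != ffs(tmp) means more than one PCIe level is still |
| |  * enabled in the mask, so let DPM manage them; a single set bit |
| |  * forces that exact level. |
| |  */ |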
Rex Zhu | 61e208b | 2018-01-25 18:42:08 +0800 | [diff] [blame] | 4425 | if (!data->pcie_dpm_key_disabled) { |
| 4426 | if (fls(tmp) != ffs(tmp)) |
| 4427 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel); |
| 4428 | else |
| 4429 | smum_send_msg_to_smc_with_parameter(hwmgr, |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4430 | PPSMC_MSG_PCIeDPM_ForceLevel, |
Rex Zhu | 61e208b | 2018-01-25 18:42:08 +0800 | [diff] [blame] | 4431 | fls(tmp) - 1); |
| 4432 | } |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4433 | break; |
| 4434 | } |
| 4435 | default: |
| 4436 | break; |
| 4437 | } |
| 4438 | |
| 4439 | return 0; |
| 4440 | } |
| 4441 | |
| 4442 | static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, |
| 4443 | enum pp_clock_type type, char *buf) |
| 4444 | { |
| 4445 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
| 4446 | struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); |
| 4447 | struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); |
| 4448 | struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); |
Rex Zhu | 6df21b7 | 2018-01-15 18:01:35 +0800 | [diff] [blame] | 4449 | struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table); |
| 4450 | struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels); |
| 4451 | struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4452 | int i, now, size = 0; |
| 4453 | uint32_t clock, pcie_speed; |
| 4454 | |
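| | /* Clock values are stored in 10 kHz units; divide by 100 for MHz. */ |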
| 4455 | switch (type) { |
| 4456 | case PP_SCLK: |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 4457 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4458 | clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); |
| 4459 | |
| 4460 | for (i = 0; i < sclk_table->count; i++) { |
| 4461 | if (clock > sclk_table->dpm_levels[i].value) |
| 4462 | continue; |
| 4463 | break; |
| 4464 | } |
| 4465 | now = i; |
| 4466 | |
| 4467 | for (i = 0; i < sclk_table->count; i++) |
| 4468 | size += sprintf(buf + size, "%d: %uMhz %s\n", |
| 4469 | i, sclk_table->dpm_levels[i].value / 100, |
| 4470 | (i == now) ? "*" : ""); |
| 4471 | break; |
| 4472 | case PP_MCLK: |
Rex Zhu | d3f8c0a | 2017-09-20 11:22:56 +0800 | [diff] [blame] | 4473 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency); |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4474 | clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); |
| 4475 | |
| 4476 | for (i = 0; i < mclk_table->count; i++) { |
| 4477 | if (clock > mclk_table->dpm_levels[i].value) |
| 4478 | continue; |
| 4479 | break; |
| 4480 | } |
| 4481 | now = i; |
| 4482 | |
| 4483 | for (i = 0; i < mclk_table->count; i++) |
| 4484 | size += sprintf(buf + size, "%d: %uMhz %s\n", |
| 4485 | i, mclk_table->dpm_levels[i].value / 100, |
| 4486 | (i == now) ? "*" : ""); |
| 4487 | break; |
| 4488 | case PP_PCIE: |
| 4489 | pcie_speed = smu7_get_current_pcie_speed(hwmgr); |
| 4490 | for (i = 0; i < pcie_table->count; i++) { |
| 4491 | if (pcie_speed != pcie_table->dpm_levels[i].value) |
| 4492 | continue; |
| 4493 | break; |
| 4494 | } |
| 4495 | now = i; |
| 4496 | |
| 4497 | for (i = 0; i < pcie_table->count; i++) |
| 4498 | size += sprintf(buf + size, "%d: %s %s\n", i, |
Evan Quan | 7413d2f | 2017-10-26 17:29:34 +0800 | [diff] [blame] | 4499 | (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" : |
| 4500 | (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" : |
| 4501 | (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "", |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4502 | (i == now) ? "*" : ""); |
| 4503 | break; |
Rex Zhu | 6df21b7 | 2018-01-15 18:01:35 +0800 | [diff] [blame] | 4504 | case OD_SCLK: |
| 4505 | if (hwmgr->od_enabled) { |
Rex Zhu | a3c991f | 2018-04-19 10:39:17 +0800 | [diff] [blame] | 4506 | size = sprintf(buf, "%s:\n", "OD_SCLK"); |
Rex Zhu | 6df21b7 | 2018-01-15 18:01:35 +0800 | [diff] [blame] | 4507 | for (i = 0; i < odn_sclk_table->num_of_pl; i++) |
Rex Zhu | a3c991f | 2018-04-19 10:39:17 +0800 | [diff] [blame] | 4508 | size += sprintf(buf + size, "%d: %10uMHz %10umV\n", |
| 4509 | i, odn_sclk_table->entries[i].clock/100, |
Rex Zhu | 6df21b7 | 2018-01-15 18:01:35 +0800 | [diff] [blame] | 4510 | odn_sclk_table->entries[i].vddc); |
| 4511 | } |
| 4512 | break; |
| 4513 | case OD_MCLK: |
| 4514 | if (hwmgr->od_enabled) { |
Rex Zhu | a3c991f | 2018-04-19 10:39:17 +0800 | [diff] [blame] | 4515 | size = sprintf(buf, "%s:\n", "OD_MCLK"); |
Rex Zhu | 6df21b7 | 2018-01-15 18:01:35 +0800 | [diff] [blame] | 4516 | for (i = 0; i < odn_mclk_table->num_of_pl; i++) |
Rex Zhu | a3c991f | 2018-04-19 10:39:17 +0800 | [diff] [blame] | 4517 | size += sprintf(buf + size, "%d: %10uMHz %10umV\n", |
| 4518 | i, odn_mclk_table->entries[i].clock/100, |
Rex Zhu | 6df21b7 | 2018-01-15 18:01:35 +0800 | [diff] [blame] | 4519 | odn_mclk_table->entries[i].vddc); |
| 4520 | } |
| 4521 | break; |
Rex Zhu | a3c991f | 2018-04-19 10:39:17 +0800 | [diff] [blame] | 4522 | case OD_RANGE: |
| 4523 | if (hwmgr->od_enabled) { |
| 4524 | size = sprintf(buf, "%s:\n", "OD_RANGE"); |
| 4525 | size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", |
| 4526 | data->golden_dpm_table.sclk_table.dpm_levels[0].value/100, |
| 4527 | hwmgr->platform_descriptor.overdriveLimit.engineClock/100); |
| 4528 | size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n", |
| 4529 | data->golden_dpm_table.mclk_table.dpm_levels[0].value/100, |
| 4530 | hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); |
| 4531 | size += sprintf(buf + size, "VDDC: %7umV %11umV\n", |
| 4532 | data->odn_dpm_table.min_vddc, |
| 4533 | data->odn_dpm_table.max_vddc); |
| 4534 | } |
| 4535 | break; |
Rex Zhu | 599a7e9 | 2016-09-09 13:25:22 +0800 | [diff] [blame] | 4536 | default: |
| 4537 | break; |
| 4538 | } |
| 4539 | return size; |
| 4540 | } |
| 4541 | |
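
/*
 * NONE pins the fan at 100% duty cycle, MANUAL stops the SMC fan
 * controller (when microcode fan control is in use) so a
 * user-programmed speed sticks, and AUTO hands control back to the SMC.
 */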
static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
		break;
	case AMD_FAN_CTRL_MANUAL:
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MicrocodeFanControl))
			smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
		break;
	case AMD_FAN_CTRL_AUTO:
		if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode))
			smu7_fan_ctrl_start_smc_fan_control(hwmgr);
		break;
	default:
		break;
	}
}

static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
	return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL;
}
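
/*
 * Legacy sclk/mclk overdrive, expressed as the percentage by which the
 * top DPM level exceeds the golden (stock) top level, clamped to 20 on
 * the set side.  For example, a 1100 MHz top level against a 1000 MHz
 * stock level reads back as 10; writing 10 requests stock * 1.10 on the
 * highest performance level of the current power state.  The mclk
 * variants below follow the same pattern for the memory clock.
 */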
static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct smu7_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.sclk_table);
	int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
	int golden_value = golden_sclk_table->dpm_levels
			[golden_sclk_table->count - 1].value;

	value -= golden_value;
	value = DIV_ROUND_UP(value * 100, golden_value);

	return value;
}

static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.sclk_table);
	struct pp_power_state *ps;
	struct smu7_power_state *smu7_ps;

	if (value > 20)
		value = 20;

	ps = hwmgr->request_ps;

	if (ps == NULL)
		return -EINVAL;

	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);

	smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
			value / 100 +
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	return 0;
}

static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	struct smu7_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mclk_table);
	int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
	int golden_value = golden_mclk_table->dpm_levels
			[golden_mclk_table->count - 1].value;

	value -= golden_value;
	value = DIV_ROUND_UP(value * 100, golden_value);

	return value;
}

static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mclk_table);
	struct pp_power_state *ps;
	struct smu7_power_state *smu7_ps;

	if (value > 20)
		value = 20;

	ps = hwmgr->request_ps;

	if (ps == NULL)
		return -EINVAL;

	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);

	smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
			value / 100 +
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	return 0;
}
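
/*
 * Collect the valid engine clocks: v1 pptables keep them in
 * table_info->vdd_dep_on_sclk, v0 tables in dyn_state.  Entries are
 * stored in 10 kHz units; the * 10 converts them to kHz, which appears
 * to be what the amd_pp_clocks consumers expect.
 */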
static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
	struct phm_clock_voltage_dependency_table *sclk_table;
	int i;

	if (hwmgr->pp_table_version == PP_TABLE_V1) {
		if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
			return -EINVAL;
		dep_sclk_table = table_info->vdd_dep_on_sclk;
		for (i = 0; i < dep_sclk_table->count; i++)
			clocks->clock[i] = dep_sclk_table->entries[i].clk * 10;
		clocks->count = dep_sclk_table->count;
	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
		sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
		for (i = 0; i < sclk_table->count; i++)
			clocks->clock[i] = sclk_table->entries[i].clk * 10;
		clocks->count = sclk_table->count;
	}

	return 0;
}
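
/*
 * Rough memory-latency bins reported alongside the mclk list: clocks at
 * or above MEM_FREQ_HIGH_LATENCY return the low latency value,
 * mid-range clocks the high one, and anything below
 * MEM_FREQ_LOW_LATENCY is rejected with MEM_LATENCY_ERR.
 */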
static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
		return data->mem_latency_high;
	else if (clk >= MEM_FREQ_HIGH_LATENCY)
		return data->mem_latency_low;
	else
		return MEM_LATENCY_ERR;
}

static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
	int i;
	struct phm_clock_voltage_dependency_table *mclk_table;

	if (hwmgr->pp_table_version == PP_TABLE_V1) {
		if (table_info == NULL)
			return -EINVAL;
		dep_mclk_table = table_info->vdd_dep_on_mclk;
		for (i = 0; i < dep_mclk_table->count; i++) {
			clocks->clock[i] = dep_mclk_table->entries[i].clk * 10;
			clocks->latency[i] = smu7_get_mem_latency(hwmgr,
						dep_mclk_table->entries[i].clk);
		}
		clocks->count = dep_mclk_table->count;
	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
		mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
		for (i = 0; i < mclk_table->count; i++)
			clocks->clock[i] = mclk_table->entries[i].clk * 10;
		clocks->count = mclk_table->count;
	}
	return 0;
}

static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
						struct amd_pp_clocks *clocks)
{
	switch (type) {
	case amd_pp_sys_clock:
		smu7_get_sclks(hwmgr, clocks);
		break;
	case amd_pp_mem_clock:
		smu7_get_mclks(hwmgr, clocks);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
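
/*
 * Hand the SMC the location of the DRAM log buffer: the MC (bus)
 * address, the virtual address and the buffer size are written into the
 * SMU soft-register file at the DRAM_LOG_* offsets.
 */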
static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
					uint32_t virtual_addr_low,
					uint32_t virtual_addr_hi,
					uint32_t mc_addr_low,
					uint32_t mc_addr_hi,
					uint32_t size)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_ADDR_H),
					mc_addr_hi);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_ADDR_L),
					mc_addr_low);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H),
					virtual_addr_hi);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L),
					virtual_addr_low);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE),
					size);
	return 0;
}
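
/*
 * Report the highest engine/memory clocks in the DPM tables (level 0
 * when a table only has a single level), still in 10 kHz units.
 */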
static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr,
					struct amd_pp_simple_clock_info *clocks)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);

	if (clocks == NULL)
		return -EINVAL;

	clocks->memory_max_clock = mclk_table->count > 1 ?
				mclk_table->dpm_levels[mclk_table->count-1].value :
				mclk_table->dpm_levels[0].value;
	clocks->engine_max_clock = sclk_table->count > 1 ?
				sclk_table->dpm_levels[sclk_table->count-1].value :
				sclk_table->dpm_levels[0].value;
	return 0;
}
static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
		struct PP_TemperatureRange *thermal_data)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;

	memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));

	if (hwmgr->pp_table_version == PP_TABLE_V1)
		thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	else if (hwmgr->pp_table_version == PP_TABLE_V0)
		thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return 0;
}
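
/*
 * Validate one user-requested OD point: the voltage must fall inside
 * [min_vddc, max_vddc] and the clock between the stock level-0 clock
 * and the overdrive limit for the domain being edited; anything else is
 * rejected with a hint in the kernel log.
 */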
static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
					enum PP_OD_DPM_TABLE_COMMAND type,
					uint32_t clk,
					uint32_t voltage)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (voltage < data->odn_dpm_table.min_vddc || voltage > data->odn_dpm_table.max_vddc) {
		pr_info("OD voltage is out of range [%d - %d] mV\n",
			data->odn_dpm_table.min_vddc,
			data->odn_dpm_table.max_vddc);
		return false;
	}

	if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
		if (data->golden_dpm_table.sclk_table.dpm_levels[0].value > clk ||
			hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
			pr_info("OD engine clock is out of range [%d - %d] MHz\n",
				data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
			return false;
		}
	} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
		if (data->golden_dpm_table.mclk_table.dpm_levels[0].value > clk ||
			hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
			pr_info("OD memory clock is out of range [%d - %d] MHz\n",
				data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
			return false;
		}
	} else {
		return false;
	}

	return true;
}
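
/*
 * Back end of the pp_od_clk_voltage file.  By the time we get here the
 * sysfs layer has parsed the leading 's'/'m' into 'type' and the rest
 * of the write into (level, clock MHz, voltage mV) triplets in input[],
 * so e.g. "s 0 500 820" edits SCLK level 0.  RESTORE re-seeds the ODN
 * tables from defaults and COMMIT pushes pending edits to the hardware.
 */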
static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
					enum PP_OD_DPM_TABLE_COMMAND type,
					long *input, uint32_t size)
{
	uint32_t i;
	struct phm_odn_clock_levels *podn_dpm_table_in_backend = NULL;
	struct smu7_odn_clock_voltage_dependency_table *podn_vdd_dep_in_backend = NULL;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	uint32_t input_clk;
	uint32_t input_vol;
	uint32_t input_level;

	PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
				return -EINVAL);

	if (!hwmgr->od_enabled) {
		pr_info("OverDrive feature not enabled\n");
		return -EINVAL;
	}

	if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
		podn_dpm_table_in_backend = &data->odn_dpm_table.odn_core_clock_dpm_levels;
		podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_sclk;
		PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
				"Failed to get ODN SCLK and Voltage tables",
				return -EINVAL);
	} else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
		podn_dpm_table_in_backend = &data->odn_dpm_table.odn_memory_clock_dpm_levels;
		podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_mclk;

		PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
				"Failed to get ODN MCLK and Voltage tables",
				return -EINVAL);
	} else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
		smu7_odn_initial_default_setting(hwmgr);
		return 0;
	} else if (PP_OD_COMMIT_DPM_TABLE == type) {
		smu7_check_dpm_table_updated(hwmgr);
		return 0;
	} else {
		return -EINVAL;
	}

	for (i = 0; i < size; i += 3) {
		if (i + 3 > size || input[i] >= podn_dpm_table_in_backend->num_of_pl) {
			pr_info("invalid clock voltage input\n");
			return 0;
		}
		input_level = input[i];
		input_clk = input[i+1] * 100;
		input_vol = input[i+2];

		if (smu7_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
			podn_dpm_table_in_backend->entries[input_level].clock = input_clk;
			podn_vdd_dep_in_backend->entries[input_level].clk = input_clk;
			podn_dpm_table_in_backend->entries[input_level].vddc = input_vol;
			podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol;
			podn_vdd_dep_in_backend->entries[input_level].vddgfx = input_vol;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}
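
/*
 * Dump the power-profile table for pp_power_profile_mode.  The active
 * profile is printed (and starred) from current_profile_setting; the
 * others come from the static smu7_profiling[] defaults, with '-' for
 * the sclk/mclk domains a profile leaves untouched.
 */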
static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t i, size = 0;
	uint32_t len;

	static const char *profile_name[7] = {"BOOTUP_DEFAULT",
					"3D_FULL_SCREEN",
					"POWER_SAVING",
					"VIDEO",
					"VR",
					"COMPUTE",
					"CUSTOM"};

	static const char *title[8] = {"NUM",
			"MODE_NAME",
			"SCLK_UP_HYST",
			"SCLK_DOWN_HYST",
			"SCLK_ACTIVE_LEVEL",
			"MCLK_UP_HYST",
			"MCLK_DOWN_HYST",
			"MCLK_ACTIVE_LEVEL"};

	if (!buf)
		return -EINVAL;

	size += sprintf(buf + size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
			title[0], title[1], title[2], title[3],
			title[4], title[5], title[6], title[7]);

	len = ARRAY_SIZE(smu7_profiling);

	for (i = 0; i < len; i++) {
		if (i == hwmgr->power_profile_mode) {
			size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
					i, profile_name[i], "*",
					data->current_profile_setting.sclk_up_hyst,
					data->current_profile_setting.sclk_down_hyst,
					data->current_profile_setting.sclk_activity,
					data->current_profile_setting.mclk_up_hyst,
					data->current_profile_setting.mclk_down_hyst,
					data->current_profile_setting.mclk_activity);
			continue;
		}
		if (smu7_profiling[i].bupdate_sclk)
			size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ",
					i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
					smu7_profiling[i].sclk_down_hyst,
					smu7_profiling[i].sclk_activity);
		else
			size += sprintf(buf + size, "%3d %16s: %8s %16s %16s ",
					i, profile_name[i], "-", "-", "-");

		if (smu7_profiling[i].bupdate_mclk)
			size += sprintf(buf + size, "%16d %16d %16d\n",
					smu7_profiling[i].mclk_up_hyst,
					smu7_profiling[i].mclk_down_hyst,
					smu7_profiling[i].mclk_activity);
		else
			size += sprintf(buf + size, "%16s %16s %16s\n",
					"-", "-", "-");
	}

	return size;
}
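
/*
 * COMPUTE wants sclk pinned high: on entering it, force the top two
 * enabled sclk levels (3 << (level-1), assuming a contiguous enable
 * mask); on leaving it, restore the full enabled mask.
 */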
static void smu7_patch_compute_profile_mode(struct pp_hwmgr *hwmgr,
					enum PP_SMC_POWER_PROFILE request)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t tmp, level;

	if (request == PP_SMC_POWER_PROFILE_COMPUTE) {
		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			level = 0;
			tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
			while (tmp >>= 1)
				level++;
			if (level > 0)
				smu7_force_clock_level(hwmgr, PP_SCLK, 3 << (level-1));
		}
	} else if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
		smu7_force_clock_level(hwmgr, PP_SCLK, data->dpm_level_enable_mask.sclk_dpm_enable_mask);
	}
}
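
/*
 * Switch power profiles.  input[] carries the CUSTOM payload (the
 * bupdate/up_hyst/down_hyst/activity quartet for sclk, then mclk) with
 * the mode index at input[size]; size == 0 reuses the previously saved
 * CUSTOM settings.  A profile only becomes current once the SMU accepts
 * it through smum_update_dpm_settings().
 */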
static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct profile_mode_setting tmp;
	enum PP_SMC_POWER_PROFILE mode;

	if (input == NULL)
		return -EINVAL;

	mode = input[size];
	switch (mode) {
	case PP_SMC_POWER_PROFILE_CUSTOM:
		if (size < 8 && size != 0)
			return -EINVAL;
		/* If only CUSTOM is passed in, use the saved values. Check
		 * that we actually have a CUSTOM profile by ensuring that
		 * the "use sclk" or the "use mclk" bits are set
		 */
		tmp = smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM];
		if (size == 0) {
			if (tmp.bupdate_sclk == 0 && tmp.bupdate_mclk == 0)
				return -EINVAL;
		} else {
			tmp.bupdate_sclk = input[0];
			tmp.sclk_up_hyst = input[1];
			tmp.sclk_down_hyst = input[2];
			tmp.sclk_activity = input[3];
			tmp.bupdate_mclk = input[4];
			tmp.mclk_up_hyst = input[5];
			tmp.mclk_down_hyst = input[6];
			tmp.mclk_activity = input[7];
			smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM] = tmp;
		}
		if (!smum_update_dpm_settings(hwmgr, &tmp)) {
			memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
			hwmgr->power_profile_mode = mode;
		}
		break;
	case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
	case PP_SMC_POWER_PROFILE_POWERSAVING:
	case PP_SMC_POWER_PROFILE_VIDEO:
	case PP_SMC_POWER_PROFILE_VR:
	case PP_SMC_POWER_PROFILE_COMPUTE:
		if (mode == hwmgr->power_profile_mode)
			return 0;

		memcpy(&tmp, &smu7_profiling[mode], sizeof(struct profile_mode_setting));
		if (!smum_update_dpm_settings(hwmgr, &tmp)) {
			if (tmp.bupdate_sclk) {
				data->current_profile_setting.bupdate_sclk = tmp.bupdate_sclk;
				data->current_profile_setting.sclk_up_hyst = tmp.sclk_up_hyst;
				data->current_profile_setting.sclk_down_hyst = tmp.sclk_down_hyst;
				data->current_profile_setting.sclk_activity = tmp.sclk_activity;
			}
			if (tmp.bupdate_mclk) {
				data->current_profile_setting.bupdate_mclk = tmp.bupdate_mclk;
				data->current_profile_setting.mclk_up_hyst = tmp.mclk_up_hyst;
				data->current_profile_setting.mclk_down_hyst = tmp.mclk_down_hyst;
				data->current_profile_setting.mclk_activity = tmp.mclk_activity;
			}
			smu7_patch_compute_profile_mode(hwmgr, mode);
			hwmgr->power_profile_mode = mode;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
				PHM_PerformanceLevelDesignation designation, uint32_t index,
				PHM_PerformanceLevel *level)
{
	const struct smu7_power_state *ps;
	uint32_t i;

	if (level == NULL || hwmgr == NULL || state == NULL)
		return -EINVAL;

	ps = cast_const_phw_smu7_power_state(state);

	i = index > ps->performance_level_count - 1 ?
			ps->performance_level_count - 1 : index;

	level->coreClock = ps->performance_levels[i].engine_clock;
	level->memory_clock = ps->performance_levels[i].memory_clock;

	return 0;
}

static int smu7_power_off_asic(struct pp_hwmgr *hwmgr)
{
	int result;

	result = smu7_disable_dpm_tasks(hwmgr);
	PP_ASSERT_WITH_CODE((0 == result),
			"[disable_dpm_tasks] Failed to disable DPM!",
			);

	return result;
}
static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
	.backend_init = &smu7_hwmgr_backend_init,
	.backend_fini = &smu7_hwmgr_backend_fini,
	.asic_setup = &smu7_setup_asic_task,
	.dynamic_state_management_enable = &smu7_enable_dpm_tasks,
	.apply_state_adjust_rules = smu7_apply_state_adjust_rules,
	.force_dpm_level = &smu7_force_dpm_level,
	.power_state_set = smu7_set_power_state_tasks,
	.get_power_state_size = smu7_get_power_state_size,
	.get_mclk = smu7_dpm_get_mclk,
	.get_sclk = smu7_dpm_get_sclk,
	.patch_boot_state = smu7_dpm_patch_boot_state,
	.get_pp_table_entry = smu7_get_pp_table_entry,
	.get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
	.powerdown_uvd = smu7_powerdown_uvd,
	.powergate_uvd = smu7_powergate_uvd,
	.powergate_vce = smu7_powergate_vce,
	.disable_clock_power_gating = smu7_disable_clock_power_gating,
	.update_clock_gatings = smu7_update_clock_gatings,
	.notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
	.display_config_changed = smu7_display_configuration_changed_task,
	.set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
	.set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
	.stop_thermal_controller = smu7_thermal_stop_thermal_controller,
	.get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
	.set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
	.reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
	.get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
	.set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
	.uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
	.register_irq_handlers = smu7_register_irq_handlers,
	.check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
	.check_states_equal = smu7_check_states_equal,
	.set_fan_control_mode = smu7_set_fan_control_mode,
	.get_fan_control_mode = smu7_get_fan_control_mode,
	.force_clock_level = smu7_force_clock_level,
	.print_clock_levels = smu7_print_clock_levels,
	.powergate_gfx = smu7_powergate_gfx,
	.get_sclk_od = smu7_get_sclk_od,
	.set_sclk_od = smu7_set_sclk_od,
	.get_mclk_od = smu7_get_mclk_od,
	.set_mclk_od = smu7_set_mclk_od,
	.get_clock_by_type = smu7_get_clock_by_type,
	.read_sensor = smu7_read_sensor,
	.dynamic_state_management_disable = smu7_disable_dpm_tasks,
	.avfs_control = smu7_avfs_control,
	.disable_smc_firmware_ctf = smu7_thermal_disable_alert,
	.start_thermal_controller = smu7_start_thermal_controller,
	.notify_cac_buffer_info = smu7_notify_cac_buffer_info,
	.get_max_high_clocks = smu7_get_max_high_clocks,
	.get_thermal_temperature_range = smu7_get_thermal_temperature_range,
	.odn_edit_dpm_table = smu7_odn_edit_dpm_table,
	.set_power_limit = smu7_set_power_limit,
	.get_power_profile_mode = smu7_get_power_profile_mode,
	.set_power_profile_mode = smu7_set_power_profile_mode,
	.get_performance_level = smu7_get_performance_level,
	.get_asic_baco_capability = smu7_baco_get_capability,
	.get_asic_baco_state = smu7_baco_get_state,
	.set_asic_baco_state = smu7_baco_set_state,
	.power_off_asic = smu7_power_off_asic,
};
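
/*
 * Pick the deepest sleep divider such that the divided clock still
 * meets the caller's floor (or the global minimum engine clock): e.g.
 * with a 30 MHz floor, an 800 MHz sclk yields divider ID 4, since
 * 800 >> 4 = 50 MHz passes while 800 >> 5 = 25 MHz would not.
 */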
uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
		uint32_t clock_insr)
{
	uint8_t i;
	uint32_t temp;
	uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);

	PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
	for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
		temp = clock >> i;

		if (temp >= min || i == 0)
			break;
	}
	return i;
}

int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
{
	hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
	if (hwmgr->pp_table_version == PP_TABLE_V0)
		hwmgr->pptable_func = &pptable_funcs;
	else if (hwmgr->pp_table_version == PP_TABLE_V1)
		hwmgr->pptable_func = &pptable_v1_0_funcs;

	return 0;
}