blob: 31776b12e4c45e70fd570a19aa72d93685a16ec1 [file] [log] [blame]
Huang Ruia7e91bd2020-08-27 12:02:37 -04001/*
2 * Copyright 2020 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include "amdgpu.h"
24#include "amdgpu_atombios.h"
25#include "nbio_v7_2.h"
26
27#include "nbio/nbio_7_2_0_offset.h"
28#include "nbio/nbio_7_2_0_sh_mask.h"
29#include <uapi/linux/kfd_ioctl.h>
30
Aaron Liu011b5142020-08-25 10:27:59 +080031#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_YC 0x0015
32#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_YC_BASE_IDX 2
33#define regBIF_BX0_BIF_FB_EN_YC 0x0100
34#define regBIF_BX0_BIF_FB_EN_YC_BASE_IDX 2
35#define regBIF1_PCIE_MST_CTRL_3 0x4601c6
36#define regBIF1_PCIE_MST_CTRL_3_BASE_IDX 5
37#define BIF1_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_MODE__SHIFT \
38 0x1b
39#define BIF1_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV__SHIFT \
40 0x1c
41#define BIF1_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_MODE_MASK \
42 0x08000000L
43#define BIF1_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV_MASK \
44 0x30000000L
45#define regBIF1_PCIE_TX_POWER_CTRL_1 0x460187
46#define regBIF1_PCIE_TX_POWER_CTRL_1_BASE_IDX 5
47#define BIF1_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK 0x00000001L
48#define BIF1_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK 0x00000008L
49
/*
 * Remap the HDP MEM/REG flush control registers into the MMIO remap
 * window at adev->rmmio_remap.reg_offset, at the fixed KFD_MMIO_REMAP_*
 * offsets (from kfd_ioctl.h) so user space can reach them there.
 */
static void nbio_v7_2_remap_hdp_registers(struct amdgpu_device *adev)
{
	WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
	WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}
57
58static u32 nbio_v7_2_get_rev_id(struct amdgpu_device *adev)
59{
Aaron Liu011b5142020-08-25 10:27:59 +080060 u32 tmp;
61
Tim Huangd726d432022-01-25 11:12:07 +080062 switch (adev->ip_versions[NBIO_HWIP][0]) {
63 case IP_VERSION(7, 2, 1):
Yifan Zhang935ad3a2022-01-21 18:40:39 +080064 case IP_VERSION(7, 3, 0):
Tim Huangd726d432022-01-25 11:12:07 +080065 case IP_VERSION(7, 5, 0):
Aaron Liu011b5142020-08-25 10:27:59 +080066 tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_YC);
Tim Huangd726d432022-01-25 11:12:07 +080067 break;
68 default:
Aaron Liu011b5142020-08-25 10:27:59 +080069 tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
Tim Huangd726d432022-01-25 11:12:07 +080070 break;
71 }
Huang Ruia7e91bd2020-08-27 12:02:37 -040072
73 tmp &= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
74 tmp >>= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
75
76 return tmp;
77}
78
79static void nbio_v7_2_mc_access_enable(struct amdgpu_device *adev, bool enable)
80{
Tim Huangd726d432022-01-25 11:12:07 +080081 switch (adev->ip_versions[NBIO_HWIP][0]) {
82 case IP_VERSION(7, 2, 1):
Yifan Zhang935ad3a2022-01-21 18:40:39 +080083 case IP_VERSION(7, 3, 0):
Tim Huangd726d432022-01-25 11:12:07 +080084 case IP_VERSION(7, 5, 0):
85 if (enable)
Aaron Liu011b5142020-08-25 10:27:59 +080086 WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN_YC,
87 BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK |
88 BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK);
89 else
Tim Huangd726d432022-01-25 11:12:07 +080090 WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN_YC, 0);
91 break;
92 default:
93 if (enable)
Aaron Liu011b5142020-08-25 10:27:59 +080094 WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN,
95 BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK |
96 BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK);
Aaron Liu011b5142020-08-25 10:27:59 +080097 else
98 WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0);
Tim Huangd726d432022-01-25 11:12:07 +080099 break;
100 }
Huang Ruia7e91bd2020-08-27 12:02:37 -0400101}
102
/*
 * Return the framebuffer size reported by RCC_CONFIG_MEMSIZE.
 * NOTE(review): unit (presumably MB) is not visible here — confirm
 * against the register spec / callers.
 */
static u32 nbio_v7_2_get_memsize(struct amdgpu_device *adev)
{
	return RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_0_RCC_CONFIG_MEMSIZE);
}
107
108static void nbio_v7_2_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
109 bool use_doorbell, int doorbell_index,
110 int doorbell_size)
111{
112 u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_SDMA0_DOORBELL_RANGE);
113 u32 doorbell_range = RREG32_PCIE_PORT(reg);
114
115 if (use_doorbell) {
116 doorbell_range = REG_SET_FIELD(doorbell_range,
117 GDC0_BIF_SDMA0_DOORBELL_RANGE,
118 OFFSET, doorbell_index);
119 doorbell_range = REG_SET_FIELD(doorbell_range,
120 GDC0_BIF_SDMA0_DOORBELL_RANGE,
121 SIZE, doorbell_size);
122 } else {
123 doorbell_range = REG_SET_FIELD(doorbell_range,
124 GDC0_BIF_SDMA0_DOORBELL_RANGE,
125 SIZE, 0);
126 }
127
128 WREG32_PCIE_PORT(reg, doorbell_range);
129}
130
131static void nbio_v7_2_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
132 int doorbell_index, int instance)
133{
134 u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VCN0_DOORBELL_RANGE);
135 u32 doorbell_range = RREG32_PCIE_PORT(reg);
136
137 if (use_doorbell) {
138 doorbell_range = REG_SET_FIELD(doorbell_range,
Aaron Liu011b5142020-08-25 10:27:59 +0800139 GDC0_BIF_VCN0_DOORBELL_RANGE, OFFSET,
140 doorbell_index);
Huang Ruia7e91bd2020-08-27 12:02:37 -0400141 doorbell_range = REG_SET_FIELD(doorbell_range,
Aaron Liu011b5142020-08-25 10:27:59 +0800142 GDC0_BIF_VCN0_DOORBELL_RANGE, SIZE, 8);
Huang Ruia7e91bd2020-08-27 12:02:37 -0400143 } else {
144 doorbell_range = REG_SET_FIELD(doorbell_range,
Aaron Liu011b5142020-08-25 10:27:59 +0800145 GDC0_BIF_VCN0_DOORBELL_RANGE, SIZE, 0);
Huang Ruia7e91bd2020-08-27 12:02:37 -0400146 }
147
148 WREG32_PCIE_PORT(reg, doorbell_range);
149}
150
151static void nbio_v7_2_enable_doorbell_aperture(struct amdgpu_device *adev,
152 bool enable)
153{
154 u32 reg;
155
156 reg = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_0_RCC_DOORBELL_APER_EN);
157 reg = REG_SET_FIELD(reg, RCC_DEV0_EPF0_0_RCC_DOORBELL_APER_EN,
158 BIF_DOORBELL_APER_EN, enable ? 1 : 0);
159
160 WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_0_RCC_DOORBELL_APER_EN, reg);
161}
162
/*
 * Enable or disable the self-ring doorbell GPA aperture. When enabling,
 * the aperture base is programmed from adev->doorbell.base before the
 * control register is written; when disabling, only CNTL is cleared.
 */
static void nbio_v7_2_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
							bool enable)
{
	u32 tmp = 0;

	if (enable) {
		/* tmp is still 0 in each REG_SET_FIELD() below, so OR-ing
		 * the three results composes the EN/MODE/SIZE fields into
		 * one value. */
		tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
				DOORBELL_SELFRING_GPA_APER_EN, 1) |
			REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
				DOORBELL_SELFRING_GPA_APER_MODE, 1) |
			REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
				DOORBELL_SELFRING_GPA_APER_SIZE, 0);

		WREG32_SOC15(NBIO, 0,
			     regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
			     lower_32_bits(adev->doorbell.base));
		WREG32_SOC15(NBIO, 0,
			     regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
			     upper_32_bits(adev->doorbell.base));
	}

	/* Written unconditionally: tmp == 0 here disables the aperture. */
	WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
		     tmp);
}
187
188
189static void nbio_v7_2_ih_doorbell_range(struct amdgpu_device *adev,
190 bool use_doorbell, int doorbell_index)
191{
192 u32 ih_doorbell_range = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_IH_DOORBELL_RANGE));
193
194 if (use_doorbell) {
195 ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
196 GDC0_BIF_IH_DOORBELL_RANGE, OFFSET,
197 doorbell_index);
198 ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
199 GDC0_BIF_IH_DOORBELL_RANGE, SIZE,
200 2);
201 } else {
202 ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
203 GDC0_BIF_IH_DOORBELL_RANGE, SIZE,
204 0);
205 }
206
207 WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_IH_DOORBELL_RANGE),
208 ih_doorbell_range);
209}
210
/*
 * One-time IH (interrupt handler) BIF setup: program the dummy-read
 * page address and clear the dummy-read override and non-snoop request
 * bits in INTERRUPT_CNTL.
 */
static void nbio_v7_2_ih_control(struct amdgpu_device *adev)
{
	u32 interrupt_cntl;

	/* setup interrupt control */
	WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL2,
		adev->dummy_page_addr >> 8);

	interrupt_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL);
	/*
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL,
				       IH_DUMMY_RD_OVERRIDE, 0);

	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL,
				       IH_REQ_NONSNOOP_EN, 0);

	WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL, interrupt_cntl);
}
233
234static void nbio_v7_2_update_medium_grain_clock_gating(struct amdgpu_device *adev,
235 bool enable)
236{
237 uint32_t def, data;
238
239 def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regCPM_CONTROL));
240 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG)) {
241 data |= (CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
242 CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
243 CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
244 CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
245 CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
246 CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
247 } else {
248 data &= ~(CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
249 CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
250 CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
251 CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
252 CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
253 CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
254 }
255
256 if (def != data)
257 WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regCPM_CONTROL), data);
258}
259
/*
 * Enable/disable BIF medium-grain light sleep (LS).
 * YC-family parts (7.2.1 / 7.3.0 / 7.5.0) split the LS enables between
 * PCIE_CNTL2 (slave memory) and BIF1_PCIE_TX_POWER_CTRL_1 (master and
 * replay memories); other parts keep all three bits in PCIE_CNTL2.
 * Each register is written back only if its value changed.
 */
static void nbio_v7_2_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t def, data;

	switch (adev->ip_versions[NBIO_HWIP][0]) {
	case IP_VERSION(7, 2, 1):
	case IP_VERSION(7, 3, 0):
	case IP_VERSION(7, 5, 0):
		/* Slave memory LS lives in PCIE_CNTL2. */
		def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2));
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
			data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
		else
			data &= ~PCIE_CNTL2__SLV_MEM_LS_EN_MASK;

		if (def != data)
			WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2), data);

		/* Master and replay memory LS live in TX_POWER_CTRL_1. */
		def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0,
			regBIF1_PCIE_TX_POWER_CTRL_1));
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
			data |= (BIF1_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
				BIF1_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
		else
			data &= ~(BIF1_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
				BIF1_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);

		if (def != data)
			WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_TX_POWER_CTRL_1),
					 data);
		break;
	default:
		/* All three LS bits share PCIE_CNTL2 on other parts. */
		def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2));
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
			data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
		else
			data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

		if (def != data)
			WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2), data);
		break;
	}
}
307
308static void nbio_v7_2_get_clockgating_state(struct amdgpu_device *adev,
Evan Quan25faedd2022-03-25 18:00:02 +0800309 u64 *flags)
Huang Ruia7e91bd2020-08-27 12:02:37 -0400310{
311 int data;
312
313 /* AMD_CG_SUPPORT_BIF_MGCG */
314 data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regCPM_CONTROL));
315 if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
316 *flags |= AMD_CG_SUPPORT_BIF_MGCG;
317
318 /* AMD_CG_SUPPORT_BIF_LS */
319 data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2));
320 if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
321 *flags |= AMD_CG_SUPPORT_BIF_LS;
322}
323
/* Register offset of the HDP flush request register, for ring code. */
static u32 nbio_v7_2_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
}
328
/* Register offset of the HDP flush done (ack) register. */
static u32 nbio_v7_2_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
}
333
/* Register offset of the indirect PCIE index register (PCIE_INDEX2). */
static u32 nbio_v7_2_get_pcie_index_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_INDEX2);
}
338
/* Register offset of the indirect PCIE data register (PCIE_DATA2). */
static u32 nbio_v7_2_get_pcie_data_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_DATA2);
}
343
/* Register offset of the PCIE-port (RSMU) indirect index register. */
static u32 nbio_v7_2_get_pcie_port_index_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_INDEX);
}
348
/* Register offset of the PCIE-port (RSMU) indirect data register. */
static u32 nbio_v7_2_get_pcie_port_data_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_DATA);
}
353
/*
 * Reference/mask bits of the GPU_HDP_FLUSH_DONE register, one per
 * client (CP queues 0-9, SDMA 0-1), used with the request/done offset
 * getters above when a ring issues an HDP flush.
 */
const struct nbio_hdp_flush_reg nbio_v7_2_hdp_flush_reg = {
	.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
	.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
	.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
	.ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK,
	.ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK,
	.ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK,
	.ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK,
	.ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK,
	.ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK,
	.ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK,
	.ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
	.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};
368
/*
 * One-time NBIO register init: force the PCIe max-read-request-size
 * MODE/PRIV fields to 1 (the register holding them differs on
 * YC-family parts), and under SR-IOV point the MMIO remap offset at
 * the HDP mem-coherency flush control register directly.
 */
static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
{
	uint32_t def, data;
	switch (adev->ip_versions[NBIO_HWIP][0]) {
	case IP_VERSION(7, 2, 1):
	case IP_VERSION(7, 3, 0):
	case IP_VERSION(7, 5, 0):
		def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3));
		data = REG_SET_FIELD(data, BIF1_PCIE_MST_CTRL_3,
			CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
		data = REG_SET_FIELD(data, BIF1_PCIE_MST_CTRL_3,
			CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);

		/* only write if something changed */
		if (def != data)
			WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3), data);
		break;
	default:
		def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL));
		data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL,
			CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
		data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL,
			CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);

		if (def != data)
			WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL), data);
		break;
	}

	/* << 2 converts a dword register offset to a byte offset;
	 * NOTE(review): presumably the VF cannot use the remap window —
	 * confirm against the SR-IOV host setup. */
	if (amdgpu_sriov_vf(adev))
		adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
			regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
401
/*
 * NBIO 7.2 callback table plugged into the core amdgpu driver
 * (adev->nbio.funcs); each entry maps a generic NBIO operation onto
 * the v7.2 register implementation above.
 */
const struct amdgpu_nbio_funcs nbio_v7_2_funcs = {
	.get_hdp_flush_req_offset = nbio_v7_2_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbio_v7_2_get_hdp_flush_done_offset,
	.get_pcie_index_offset = nbio_v7_2_get_pcie_index_offset,
	.get_pcie_data_offset = nbio_v7_2_get_pcie_data_offset,
	.get_pcie_port_index_offset = nbio_v7_2_get_pcie_port_index_offset,
	.get_pcie_port_data_offset = nbio_v7_2_get_pcie_port_data_offset,
	.get_rev_id = nbio_v7_2_get_rev_id,
	.mc_access_enable = nbio_v7_2_mc_access_enable,
	.get_memsize = nbio_v7_2_get_memsize,
	.sdma_doorbell_range = nbio_v7_2_sdma_doorbell_range,
	.vcn_doorbell_range = nbio_v7_2_vcn_doorbell_range,
	.enable_doorbell_aperture = nbio_v7_2_enable_doorbell_aperture,
	.enable_doorbell_selfring_aperture = nbio_v7_2_enable_doorbell_selfring_aperture,
	.ih_doorbell_range = nbio_v7_2_ih_doorbell_range,
	.update_medium_grain_clock_gating = nbio_v7_2_update_medium_grain_clock_gating,
	.update_medium_grain_light_sleep = nbio_v7_2_update_medium_grain_light_sleep,
	.get_clockgating_state = nbio_v7_2_get_clockgating_state,
	.ih_control = nbio_v7_2_ih_control,
	.init_registers = nbio_v7_2_init_registers,
	.remap_hdp_registers = nbio_v7_2_remap_hdp_registers,
};