// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */

#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_gpu_trace.h"
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"

#include <linux/bitfield.h>
#include <linux/devfreq.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/pm_domain.h>
#include <linux/soc/qcom/llcc-qcom.h>

#define GPU_PAS_ID 13

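/*
 * Idle check helper: the GPU is considered idle only when the GMU (on
 * targets with a full GMU rather than a wrapper) reports idle, the RBBM
 * status shows nothing busy apart from the CX master AHB path, and no
 * hang-detect interrupt is pending.
 */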
static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	/* Check that the GMU is idle */
	if (!adreno_has_gmu_wrapper(adreno_gpu) && !a6xx_gmu_isidle(&a6xx_gpu->gmu))
		return false;

	/* Check that the CX master is idle */
	if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
			~A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER)
		return false;

	return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &
		A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);
}

static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	/* wait for CP to drain ringbuffer: */
	if (!adreno_idle(gpu, ring))
		return false;

	if (spin_until(_a6xx_check_idle(gpu))) {
		DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
			gpu->name, __builtin_return_address(0),
			gpu_read(gpu, REG_A6XX_RBBM_STATUS),
			gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS),
			gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
			gpu_read(gpu, REG_A6XX_CP_RB_WPTR));
		return false;
	}

	return true;
}

static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	/* Expanded APRIV doesn't need to issue the WHERE_AM_I opcode */
	if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) {
		OUT_PKT7(ring, CP_WHERE_AM_I, 2);
		OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring)));
		OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));
	}
}

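/*
 * Kick the CP: publish the new write pointer. ring->next (where the CPU
 * has been writing packets) becomes ring->cur under the preempt lock, and
 * the resulting wptr is written to CP_RB_WPTR so the CP starts fetching.
 */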
static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	uint32_t wptr;
	unsigned long flags;

	update_shadow_rptr(gpu, ring);

	spin_lock_irqsave(&ring->preempt_lock, flags);

	/* Copy the shadow to the actual register */
	ring->cur = ring->next;

	/* Make sure to wrap wptr if we need to */
	wptr = get_wptr(ring);

	spin_unlock_irqrestore(&ring->preempt_lock, flags);

	/* Make sure everything is posted before making a decision */
	mb();

	gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
}

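/*
 * Emit a CP_REG_TO_MEM packet that makes the CP copy a 64-bit (two dword)
 * counter register pair into GPU-visible memory at @iova; used by the
 * submit paths below to snapshot cycle counters around each submission.
 */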
static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
		u64 iova)
{
	OUT_PKT7(ring, CP_REG_TO_MEM, 3);
	OUT_RING(ring, CP_REG_TO_MEM_0_REG(counter) |
		CP_REG_TO_MEM_0_CNT(2) |
		CP_REG_TO_MEM_0_64B);
	OUT_RING(ring, lower_32_bits(iova));
	OUT_RING(ring, upper_32_bits(iova));
}

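/*
 * Switch the GPU to the given context's pagetable. The switch is skipped
 * if the context is already current; otherwise the CP executes a
 * CP_SMMU_TABLE_UPDATE with the new TTBR0/ASID. When system profiling is
 * not active, the perfcounter SRAM is also reinitialized across the
 * switch, presumably so stale counter contents are not observable from
 * the incoming context.
 */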
static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
		struct msm_ringbuffer *ring, struct msm_file_private *ctx)
{
	bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	phys_addr_t ttbr;
	u32 asid;
	u64 memptr = rbmemptr(ring, ttbr0);

	if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno)
		return;

	if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
		return;

	if (!sysprof) {
		if (!adreno_is_a7xx(adreno_gpu)) {
			/* Turn off protected mode to write to special registers */
			OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
			OUT_RING(ring, 0);
		}

		OUT_PKT4(ring, REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1);
		OUT_RING(ring, 1);
	}

	/* Execute the table update */
	OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 4);
	OUT_RING(ring, CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(lower_32_bits(ttbr)));

	OUT_RING(ring,
		CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(upper_32_bits(ttbr)) |
		CP_SMMU_TABLE_UPDATE_1_ASID(asid));
	OUT_RING(ring, CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(0));
	OUT_RING(ring, CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(0));

	/*
	 * Write the new TTBR0 to the memstore. This is good for debugging.
	 */
	OUT_PKT7(ring, CP_MEM_WRITE, 4);
	OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr)));
	OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr)));
	OUT_RING(ring, lower_32_bits(ttbr));
	OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr));

	/*
	 * Sync both threads after switching pagetables and enable BR only
	 * to make sure BV doesn't race ahead while BR is still switching
	 * pagetables.
	 */
	if (adreno_is_a7xx(&a6xx_gpu->base)) {
		OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
		OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR);
	}

	/*
	 * And finally, trigger a uche flush to be sure there isn't anything
	 * lingering in that part of the GPU
	 */

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, CACHE_INVALIDATE);

	if (!sysprof) {
		/*
		 * Wait for SRAM clear after the pgtable update, so the
		 * two can happen in parallel:
		 */
		OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
		OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ));
		OUT_RING(ring, CP_WAIT_REG_MEM_1_POLL_ADDR_LO(
				REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS));
		OUT_RING(ring, CP_WAIT_REG_MEM_2_POLL_ADDR_HI(0));
		OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(0x1));
		OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(0x1));
		OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0));

		if (!adreno_is_a7xx(adreno_gpu)) {
			/* Re-enable protected mode: */
			OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
			OUT_RING(ring, 1);
		}
	}
}

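/*
 * Write one userspace submit into the ringbuffer: switch pagetables if
 * needed, snapshot the CP cycle and always-on counters around the IBs,
 * then emit a CACHE_FLUSH_TS event that writes the fence seqno to memory
 * and raises an IRQ once the submission retires.
 */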
static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct msm_ringbuffer *ring = submit->ring;
	unsigned int i, ibs = 0;

	a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);

	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
		rbmemptr_stats(ring, index, cpcycles_start));

	/*
	 * For PM4 the GMU register offsets are calculated from the base of the
	 * GPU registers so we need to add 0x1a800 to the register value on A630
	 * to get the right value from PM4.
	 */
	get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
		rbmemptr_stats(ring, index, alwayson_start));

	/* Invalidate CCU depth and color */
	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_DEPTH));

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_COLOR));

	/* Submit the commands */
	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
				break;
			fallthrough;
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			ibs++;
			break;
		}

		/*
		 * Periodically update shadow-wptr if needed, so that we
		 * can see partial progress of submits with large # of
		 * cmds.. otherwise we could needlessly stall waiting for
		 * ringbuffer state, simply due to looking at a shadow
		 * rptr value that has not been updated
		 */
		if ((ibs % 32) == 0)
			update_shadow_rptr(gpu, ring);
	}

	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
		rbmemptr_stats(ring, index, cpcycles_end));
	get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
		rbmemptr_stats(ring, index, alwayson_end));

	/* Write the fence to the scratch register */
	OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
	OUT_RING(ring, submit->seqno);

	/*
	 * Execute a CACHE_FLUSH_TS event. This will ensure that the
	 * timestamp is written to the memory and then triggers the interrupt
	 */
	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) |
		CP_EVENT_WRITE_0_IRQ);
	OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
	OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
	OUT_RING(ring, submit->seqno);

	trace_msm_gpu_submit_flush(submit,
		gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER));

	a6xx_flush(gpu, ring);
}

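/*
 * A7xx variant of the submit path. The CP now has two execution threads,
 * BR (render) and BV (binning/visibility), so the packet stream brackets
 * the IBs with CP_THREAD_CONTROL/CP_SET_MARKER packets and uses a
 * separate bv_fence to keep the two pipes from racing on the final
 * timestamp write.
 */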
static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct msm_ringbuffer *ring = submit->ring;
	unsigned int i, ibs = 0;

	/*
	 * Toggle concurrent binning for pagetable switch and set the thread to
	 * BR since only it can execute the pagetable switch packets.
	 */
	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
	OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR);

	a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);

	get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0),
		rbmemptr_stats(ring, index, cpcycles_start));
	get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
		rbmemptr_stats(ring, index, alwayson_start));

	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
	OUT_RING(ring, CP_SET_THREAD_BOTH);

	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, 0x101); /* IFPC disable */

	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, 0x00d); /* IB1LIST start */

	/* Submit the commands */
	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
				break;
			fallthrough;
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			ibs++;
			break;
		}

		/*
		 * Periodically update shadow-wptr if needed, so that we
		 * can see partial progress of submits with large # of
		 * cmds.. otherwise we could needlessly stall waiting for
		 * ringbuffer state, simply due to looking at a shadow
		 * rptr value that has not been updated
		 */
		if ((ibs % 32) == 0)
			update_shadow_rptr(gpu, ring);
	}

	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, 0x00e); /* IB1LIST end */

	get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0),
		rbmemptr_stats(ring, index, cpcycles_end));
	get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
		rbmemptr_stats(ring, index, alwayson_end));

	/* Write the fence to the scratch register */
	OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
	OUT_RING(ring, submit->seqno);

	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
	OUT_RING(ring, CP_SET_THREAD_BR);

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, CCU_INVALIDATE_DEPTH);

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, CCU_INVALIDATE_COLOR);

	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
	OUT_RING(ring, CP_SET_THREAD_BV);

	/*
	 * Make sure the timestamp is committed once BV pipe is
	 * completely done with this submission.
	 */
	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
	OUT_RING(ring, CACHE_CLEAN | BIT(27));
	OUT_RING(ring, lower_32_bits(rbmemptr(ring, bv_fence)));
	OUT_RING(ring, upper_32_bits(rbmemptr(ring, bv_fence)));
	OUT_RING(ring, submit->seqno);

	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
	OUT_RING(ring, CP_SET_THREAD_BR);

	/*
	 * This makes sure that BR doesn't race ahead and commit
	 * timestamp to memstore while BV is still processing
	 * this submission.
	 */
	OUT_PKT7(ring, CP_WAIT_TIMESTAMP, 4);
	OUT_RING(ring, 0);
	OUT_RING(ring, lower_32_bits(rbmemptr(ring, bv_fence)));
	OUT_RING(ring, upper_32_bits(rbmemptr(ring, bv_fence)));
	OUT_RING(ring, submit->seqno);

	/* write the ringbuffer timestamp */
	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
	OUT_RING(ring, CACHE_CLEAN | CP_EVENT_WRITE_0_IRQ | BIT(27));
	OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
	OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
	OUT_RING(ring, submit->seqno);

	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
	OUT_RING(ring, CP_SET_THREAD_BOTH);

	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, 0x100); /* IFPC enable */

	trace_msm_gpu_submit_flush(submit,
		gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER));

	a6xx_flush(gpu, ring);
}

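/*
 * Enable or disable hardware clock gating. The per-SoC register/value
 * list comes from the catalog (adreno_gpu->info->a6xx->hwcg); a7xx
 * targets without such a list use the global CGC load interface instead.
 */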
static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	const struct adreno_reglist *reg;
	unsigned int i;
	u32 cgc_delay, cgc_hyst;
	u32 val, clock_cntl_on;

	if (!(adreno_gpu->info->a6xx->hwcg || adreno_is_a7xx(adreno_gpu)))
		return;

	if (adreno_is_a630(adreno_gpu))
		clock_cntl_on = 0x8aa8aa02;
	else if (adreno_is_a610(adreno_gpu))
		clock_cntl_on = 0xaaa8aa82;
	else if (adreno_is_a702(adreno_gpu))
		clock_cntl_on = 0xaaaaaa82;
	else
		clock_cntl_on = 0x8aa8aa82;

	cgc_delay = adreno_is_a615_family(adreno_gpu) ? 0x111 : 0x10111;
	cgc_hyst = adreno_is_a615_family(adreno_gpu) ? 0x555 : 0x5555;

	gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
		  state ? adreno_gpu->info->a6xx->gmu_cgc_mode : 0);
	gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
		  state ? cgc_delay : 0);
	gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
		  state ? cgc_hyst : 0);

	if (!adreno_gpu->info->a6xx->hwcg) {
		gpu_write(gpu, REG_A7XX_RBBM_CLOCK_CNTL_GLOBAL, 1);
		gpu_write(gpu, REG_A7XX_RBBM_CGC_GLOBAL_LOAD_CMD, state ? 1 : 0);

		if (state) {
			gpu_write(gpu, REG_A7XX_RBBM_CGC_P2S_TRIG_CMD, 1);

			if (gpu_poll_timeout(gpu, REG_A7XX_RBBM_CGC_P2S_STATUS, val,
					     val & A7XX_RBBM_CGC_P2S_STATUS_TXDONE, 1, 10)) {
				dev_err(&gpu->pdev->dev, "RBBM_CGC_P2S_STATUS TXDONE Poll failed\n");
				return;
			}

			gpu_write(gpu, REG_A7XX_RBBM_CLOCK_CNTL_GLOBAL, 0);
		}

		return;
	}

	val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);

	/* Don't re-program the registers if they are already correct */
	if ((!state && !val) || (state && (val == clock_cntl_on)))
		return;

	/* Disable SP clock before programming HWCG registers */
	if (!adreno_is_a610_family(adreno_gpu) && !adreno_is_a7xx(adreno_gpu))
		gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);

	for (i = 0; (reg = &adreno_gpu->info->a6xx->hwcg[i], reg->offset); i++)
		gpu_write(gpu, reg->offset, state ? reg->value : 0);

	/* Enable SP clock */
	if (!adreno_is_a610_family(adreno_gpu) && !adreno_is_a7xx(adreno_gpu))
		gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);

	gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
}

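/*
 * Program the CP_PROTECT registers from the per-SoC protect list so that
 * unprivileged command streams fault when they touch privileged register
 * ranges.
 */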
static void a6xx_set_cp_protect(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	const struct adreno_protect *protect = adreno_gpu->info->a6xx->protect;
	unsigned i;

	/*
	 * Enable access protection to privileged registers, fault on an access
	 * protect violation and select the last span to protect from the start
	 * address all the way to the end of the register address space
	 */
	gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL,
		  A6XX_CP_PROTECT_CNTL_ACCESS_PROT_EN |
		  A6XX_CP_PROTECT_CNTL_ACCESS_FAULT_ON_VIOL_EN |
		  A6XX_CP_PROTECT_CNTL_LAST_SPAN_INF_RANGE);

	for (i = 0; i < protect->count - 1; i++) {
		/* Intentionally skip writing to some registers */
		if (protect->regs[i])
			gpu_write(gpu, REG_A6XX_CP_PROTECT(i), protect->regs[i]);
	}
	/* last CP_PROTECT to have "infinite" length on the last entry */
	gpu_write(gpu, REG_A6XX_CP_PROTECT(protect->count_max - 1), protect->regs[i]);
}

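/*
 * Derive the UBWC (universal bandwidth compression) configuration for
 * this target. highest_bank_bit in particular has to match the DDR
 * configuration, which is why several SoCs override the default below.
 */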
static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
{
	gpu->ubwc_config.rgb565_predicator = 0;
	gpu->ubwc_config.uavflagprd_inv = 0;
	gpu->ubwc_config.min_acc_len = 0;
	gpu->ubwc_config.ubwc_swizzle = 0x6;
	gpu->ubwc_config.macrotile_mode = 0;
	gpu->ubwc_config.highest_bank_bit = 15;

	if (adreno_is_a610(gpu)) {
		gpu->ubwc_config.highest_bank_bit = 13;
		gpu->ubwc_config.min_acc_len = 1;
		gpu->ubwc_config.ubwc_swizzle = 0x7;
	}

	if (adreno_is_a618(gpu))
		gpu->ubwc_config.highest_bank_bit = 14;

	if (adreno_is_a619(gpu))
		/* TODO: Should be 14 but causes corruption at e.g. 1920x1200 on DP */
		gpu->ubwc_config.highest_bank_bit = 13;

	if (adreno_is_a619_holi(gpu))
		gpu->ubwc_config.highest_bank_bit = 13;

	if (adreno_is_a621(gpu)) {
		gpu->ubwc_config.highest_bank_bit = 13;
		gpu->ubwc_config.amsbc = 1;
		gpu->ubwc_config.uavflagprd_inv = 2;
	}

	if (adreno_is_a640_family(gpu))
		gpu->ubwc_config.amsbc = 1;

	if (adreno_is_a680(gpu))
		gpu->ubwc_config.macrotile_mode = 1;

	if (adreno_is_a650(gpu) ||
	    adreno_is_a660(gpu) ||
	    adreno_is_a690(gpu) ||
	    adreno_is_a730(gpu) ||
	    adreno_is_a740_family(gpu)) {
		/* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
		gpu->ubwc_config.highest_bank_bit = 16;
		gpu->ubwc_config.amsbc = 1;
		gpu->ubwc_config.rgb565_predicator = 1;
		gpu->ubwc_config.uavflagprd_inv = 2;
		gpu->ubwc_config.macrotile_mode = 1;
	}

	if (adreno_is_7c3(gpu)) {
		gpu->ubwc_config.highest_bank_bit = 14;
		gpu->ubwc_config.amsbc = 1;
		gpu->ubwc_config.rgb565_predicator = 1;
		gpu->ubwc_config.uavflagprd_inv = 2;
		gpu->ubwc_config.macrotile_mode = 1;
	}

	if (adreno_is_a702(gpu)) {
		gpu->ubwc_config.highest_bank_bit = 14;
		gpu->ubwc_config.min_acc_len = 1;
	}
}

static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	/*
	 * We subtract 13 from the highest bank bit (13 is the minimum value
	 * allowed by hw) and write the lowest two bits of the remaining value
	 * as hbb_lo and the one above it as hbb_hi to the hardware.
	 */
	BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13);
	u32 hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13;
	u32 hbb_hi = hbb >> 2;
	u32 hbb_lo = hbb & 3;
	u32 ubwc_mode = adreno_gpu->ubwc_config.ubwc_swizzle & 1;
	u32 level2_swizzling_dis = !(adreno_gpu->ubwc_config.ubwc_swizzle & 2);

	gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
		  level2_swizzling_dis << 12 |
		  adreno_gpu->ubwc_config.rgb565_predicator << 11 |
		  hbb_hi << 10 | adreno_gpu->ubwc_config.amsbc << 4 |
		  adreno_gpu->ubwc_config.min_acc_len << 3 |
		  hbb_lo << 1 | ubwc_mode);

	gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL,
		  level2_swizzling_dis << 6 | hbb_hi << 4 |
		  adreno_gpu->ubwc_config.min_acc_len << 3 |
		  hbb_lo << 1 | ubwc_mode);

	gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
		  level2_swizzling_dis << 12 | hbb_hi << 10 |
		  adreno_gpu->ubwc_config.uavflagprd_inv << 4 |
		  adreno_gpu->ubwc_config.min_acc_len << 3 |
		  hbb_lo << 1 | ubwc_mode);

	if (adreno_is_a7xx(adreno_gpu))
		gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL,
			  FIELD_PREP(GENMASK(8, 5), hbb_lo));

	gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL,
		  adreno_gpu->ubwc_config.min_acc_len << 23 | hbb_lo << 21);

	gpu_write(gpu, REG_A6XX_RBBM_NC_MODE_CNTL,
		  adreno_gpu->ubwc_config.macrotile_mode);
}

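/*
 * Send CP_ME_INIT to bring up the CP microcode, then wait for the GPU to
 * go idle to confirm that the firmware accepted it.
 */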
static int a6xx_cp_init(struct msm_gpu *gpu)
{
	struct msm_ringbuffer *ring = gpu->rb[0];

	OUT_PKT7(ring, CP_ME_INIT, 8);

	OUT_RING(ring, 0x0000002f);

	/* Enable multiple hardware contexts */
	OUT_RING(ring, 0x00000003);

	/* Enable error detection */
	OUT_RING(ring, 0x20000000);

	/* Don't enable header dump */
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	/* No workarounds enabled */
	OUT_RING(ring, 0x00000000);

	/* Pad rest of the cmds with 0's */
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	a6xx_flush(gpu, ring);
	return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
}

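/*
 * A7xx CP_ME_INIT: concurrent binning is disabled up front since the init
 * packet itself has to run on the BR thread only, and the first payload
 * dword is a feature mask rather than the fixed a6xx word layout.
 */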
static int a7xx_cp_init(struct msm_gpu *gpu)
{
	struct msm_ringbuffer *ring = gpu->rb[0];
	u32 mask;

	/* Disable concurrent binning before sending CP init */
	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
	OUT_RING(ring, BIT(27));

	OUT_PKT7(ring, CP_ME_INIT, 7);

	/* Use multiple HW contexts */
	mask = BIT(0);

	/* Enable error detection */
	mask |= BIT(1);

	/* Set default reset state */
	mask |= BIT(3);

	/* Disable save/restore of performance counters across preemption */
	mask |= BIT(6);

	/* Enable the register init list with the spinlock */
	mask |= BIT(8);

	OUT_RING(ring, mask);

	/* Enable multiple hardware contexts */
	OUT_RING(ring, 0x00000003);

	/* Enable error detection */
	OUT_RING(ring, 0x20000000);

	/* Operation mode mask */
	OUT_RING(ring, 0x00000002);

	/* *Don't* send a power up reg list for concurrent binning (TODO) */
	/* Lo address */
	OUT_RING(ring, 0x00000000);
	/* Hi address */
	OUT_RING(ring, 0x00000000);
	/* BIT(31) set => read the regs from the list */
	OUT_RING(ring, 0x00000000);

	a6xx_flush(gpu, ring);
	return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
}

/*
 * Check that the microcode version is new enough to include several key
 * security fixes. Return true if the ucode is safe.
 */
static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
	struct drm_gem_object *obj)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	const char *sqe_name = adreno_gpu->info->fw[ADRENO_FW_SQE];
	u32 *buf = msm_gem_get_vaddr(obj);
	bool ret = false;

	if (IS_ERR(buf))
		return false;

	/* A7xx is safe! */
	if (adreno_is_a7xx(adreno_gpu) || adreno_is_a702(adreno_gpu)) {
		ret = true;
		goto out;
	}

	/*
	 * Targets up to a640 (a618, a630 and a640) need to check for a
	 * microcode version that is patched to support the whereami opcode or
	 * one that is new enough to include it by default.
	 *
	 * a650 tier targets don't need whereami but still need to be
	 * equal to or newer than 0.95 for other security fixes
	 *
	 * a660 targets have all the critical security fixes from the start
	 */
	if (!strcmp(sqe_name, "a630_sqe.fw")) {
		/*
		 * If the lowest nibble is 0xa that is an indication that this
		 * microcode has been patched. The actual version is in dword
		 * [3] but we only care about the patchlevel which is the lowest
		 * nibble of dword [3]
		 *
		 * Otherwise check that the firmware is greater than or equal
		 * to 1.90 which was the first version that had this fix built
		 * in
		 */
		if ((((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) ||
		    (buf[0] & 0xfff) >= 0x190) {
			a6xx_gpu->has_whereami = true;
			ret = true;
			goto out;
		}

		DRM_DEV_ERROR(&gpu->pdev->dev,
			"a630 SQE ucode is too old. Have version %x need at least %x\n",
			buf[0] & 0xfff, 0x190);
	} else if (!strcmp(sqe_name, "a650_sqe.fw")) {
		if ((buf[0] & 0xfff) >= 0x095) {
			ret = true;
			goto out;
		}

		DRM_DEV_ERROR(&gpu->pdev->dev,
			"a650 SQE ucode is too old. Have version %x need at least %x\n",
			buf[0] & 0xfff, 0x095);
	} else if (!strcmp(sqe_name, "a660_sqe.fw")) {
		ret = true;
	} else {
		DRM_DEV_ERROR(&gpu->pdev->dev,
			"unknown GPU, add it to a6xx_ucode_check_version()!!\n");
	}
out:
	msm_gem_put_vaddr(obj);
	return ret;
}

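/*
 * Pin the SQE microcode into the GPU address space (validating its
 * version first) and, where required, allocate the privileged buffer
 * that backs the RPTR shadow.
 */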
static int a6xx_ucode_load(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	if (!a6xx_gpu->sqe_bo) {
		a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu,
			adreno_gpu->fw[ADRENO_FW_SQE], &a6xx_gpu->sqe_iova);

		if (IS_ERR(a6xx_gpu->sqe_bo)) {
			int ret = PTR_ERR(a6xx_gpu->sqe_bo);

			a6xx_gpu->sqe_bo = NULL;
			DRM_DEV_ERROR(&gpu->pdev->dev,
				"Could not allocate SQE ucode: %d\n", ret);

			return ret;
		}

		msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
		if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) {
			msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
			drm_gem_object_put(a6xx_gpu->sqe_bo);

			a6xx_gpu->sqe_bo = NULL;
			return -EPERM;
		}
	}

	/*
	 * Expanded APRIV and targets that support WHERE_AM_I both need a
	 * privileged buffer to store the RPTR shadow
	 */
	if ((adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) &&
	    !a6xx_gpu->shadow_bo) {
		a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
			sizeof(u32) * gpu->nr_rings,
			MSM_BO_WC | MSM_BO_MAP_PRIV,
			gpu->aspace, &a6xx_gpu->shadow_bo,
			&a6xx_gpu->shadow_iova);

		if (IS_ERR(a6xx_gpu->shadow))
			return PTR_ERR(a6xx_gpu->shadow);

		msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow");
	}

	return 0;
}

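/*
 * Load the zap shader through SCM once per boot; it is what allows the
 * kernel to take the GPU out of secure mode on targets that power up
 * with the GPU secured.
 */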
static int a6xx_zap_shader_init(struct msm_gpu *gpu)
{
	static bool loaded;
	int ret;

	if (loaded)
		return 0;

	ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);

	loaded = !ret;
	return ret;
}

#define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
	A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
	A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
	A6XX_RBBM_INT_0_MASK_CP_IB2 | \
	A6XX_RBBM_INT_0_MASK_CP_IB1 | \
	A6XX_RBBM_INT_0_MASK_CP_RB | \
	A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
	A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
	A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
	A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
	A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)

#define A7XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
	A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
	A6XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR | \
	A6XX_RBBM_INT_0_MASK_CP_SW | \
	A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
	A6XX_RBBM_INT_0_MASK_PM4CPINTERRUPT | \
	A6XX_RBBM_INT_0_MASK_CP_RB_DONE_TS | \
	A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
	A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
	A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
	A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
	A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR | \
	A6XX_RBBM_INT_0_MASK_TSBWRITEERROR | \
	A6XX_RBBM_INT_0_MASK_SWFUSEVIOLATION)

#define A7XX_APRIV_MASK (A6XX_CP_APRIV_CNTL_ICACHE | \
	A6XX_CP_APRIV_CNTL_RBFETCH | \
	A6XX_CP_APRIV_CNTL_RBPRIVLEVEL | \
	A6XX_CP_APRIV_CNTL_RBRPWB)

#define A7XX_BR_APRIVMASK (A7XX_APRIV_MASK | \
	A6XX_CP_APRIV_CNTL_CDREAD | \
	A6XX_CP_APRIV_CNTL_CDWRITE)

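/*
 * One-time (per power-up) hardware initialization: clear GBIF/VBIF halt
 * state, then program addressing, clock gating, UCHE/GMEM ranges and the
 * various per-target tuning registers before the CP is started.
 */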
Rob Clark | f6f5907 | 2021-09-27 11:00:04 -0700 | [diff] [blame] | 851 | static int hw_init(struct msm_gpu *gpu) |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 852 | { |
| 853 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
| 854 | struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); |
Konrad Dybcio | 5a903a4 | 2023-06-16 01:20:53 +0200 | [diff] [blame] | 855 | struct a6xx_gmu *gmu = &a6xx_gpu->gmu; |
Konrad Dybcio | 1f8c29e | 2023-09-25 16:50:38 +0200 | [diff] [blame] | 856 | u64 gmem_range_min; |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 857 | int ret; |
| 858 | |
Konrad Dybcio | 5a903a4 | 2023-06-16 01:20:53 +0200 | [diff] [blame] | 859 | if (!adreno_has_gmu_wrapper(adreno_gpu)) { |
| 860 | /* Make sure the GMU keeps the GPU on while we set it up */ |
Konrad Dybcio | 34b149e | 2023-08-08 23:02:45 +0200 | [diff] [blame] | 861 | ret = a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); |
| 862 | if (ret) |
| 863 | return ret; |
Konrad Dybcio | 5a903a4 | 2023-06-16 01:20:53 +0200 | [diff] [blame] | 864 | } |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 865 | |
Akhil P Oommen | 3a9dd70 | 2022-08-19 01:52:14 +0530 | [diff] [blame] | 866 | /* Clear GBIF halt in case GX domain was not collapsed */ |
Konrad Dybcio | 8296ff0 | 2023-06-16 01:20:55 +0200 | [diff] [blame] | 867 | if (adreno_is_a619_holi(adreno_gpu)) { |
| 868 | gpu_write(gpu, REG_A6XX_GBIF_HALT, 0); |
Konrad Dybcio | 43ec1a202 | 2024-06-25 20:54:41 +0200 | [diff] [blame] | 869 | gpu_read(gpu, REG_A6XX_GBIF_HALT); |
| 870 | |
Konrad Dybcio | 8296ff0 | 2023-06-16 01:20:55 +0200 | [diff] [blame] | 871 | gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, 0); |
Konrad Dybcio | 43ec1a202 | 2024-06-25 20:54:41 +0200 | [diff] [blame] | 872 | gpu_read(gpu, REG_A6XX_RBBM_GPR0_CNTL); |
Konrad Dybcio | 8296ff0 | 2023-06-16 01:20:55 +0200 | [diff] [blame] | 873 | } else if (a6xx_has_gbif(adreno_gpu)) { |
Konrad Dybcio | 05a23a7 | 2023-06-16 01:20:50 +0200 | [diff] [blame] | 874 | gpu_write(gpu, REG_A6XX_GBIF_HALT, 0); |
Konrad Dybcio | 43ec1a202 | 2024-06-25 20:54:41 +0200 | [diff] [blame] | 875 | gpu_read(gpu, REG_A6XX_GBIF_HALT); |
| 876 | |
Akhil P Oommen | 3a9dd70 | 2022-08-19 01:52:14 +0530 | [diff] [blame] | 877 | gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0); |
Konrad Dybcio | 43ec1a202 | 2024-06-25 20:54:41 +0200 | [diff] [blame] | 878 | gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT); |
Konrad Dybcio | 05a23a7 | 2023-06-16 01:20:50 +0200 | [diff] [blame] | 879 | } |
Akhil P Oommen | 3a9dd70 | 2022-08-19 01:52:14 +0530 | [diff] [blame] | 880 | |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 881 | gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0); |
| 882 | |
Konrad Dybcio | 8296ff0 | 2023-06-16 01:20:55 +0200 | [diff] [blame] | 883 | if (adreno_is_a619_holi(adreno_gpu)) |
| 884 | a6xx_sptprac_enable(gmu); |
| 885 | |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 886 | /* |
| 887 | * Disable the trusted memory range - we don't actually supported secure |
| 888 | * memory rendering at this point in time and we don't want to block off |
| 889 | * part of the virtual memory space. |
| 890 | */ |
Rob Clark | f73343f | 2023-03-20 11:54:14 -0700 | [diff] [blame] | 891 | gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE, 0x00000000); |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 892 | gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000); |
| 893 | |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 894 | if (!adreno_is_a7xx(adreno_gpu)) { |
| 895 | /* Turn on 64 bit addressing for all blocks */ |
| 896 | gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1); |
| 897 | gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1); |
| 898 | gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1); |
| 899 | gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1); |
| 900 | gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1); |
| 901 | gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1); |
| 902 | gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1); |
| 903 | gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1); |
| 904 | gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1); |
| 905 | gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1); |
| 906 | gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1); |
| 907 | gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1); |
| 908 | } |
Jordan Crouse | adf151c | 2019-05-07 12:02:05 -0600 | [diff] [blame] | 909 | |
Jonathan Marek | b1c53a2 | 2020-07-10 19:04:09 -0400 | [diff] [blame] | 910 | /* enable hardware clockgating */ |
| 911 | a6xx_set_hwcg(gpu, true); |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 912 | |
Sharat Masetty | e812744 | 2019-12-03 15:16:14 +0000 | [diff] [blame] | 913 | /* VBIF/GBIF start*/ |
Konrad Dybcio | 1839751 | 2024-02-23 22:21:41 +0100 | [diff] [blame] | 914 | if (adreno_is_a610_family(adreno_gpu) || |
Konrad Dybcio | e7fc939 | 2023-06-16 01:20:56 +0200 | [diff] [blame] | 915 | adreno_is_a640_family(adreno_gpu) || |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 916 | adreno_is_a650_family(adreno_gpu) || |
| 917 | adreno_is_a7xx(adreno_gpu)) { |
Jonathan Marek | 24e6938 | 2020-04-23 17:09:21 -0400 | [diff] [blame] | 918 | gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620); |
| 919 | gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620); |
| 920 | gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620); |
| 921 | gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620); |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 922 | gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, |
| 923 | adreno_is_a7xx(adreno_gpu) ? 0x2120212 : 0x3); |
Jonathan Marek | 24e6938 | 2020-04-23 17:09:21 -0400 | [diff] [blame] | 924 | } else { |
| 925 | gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3); |
| 926 | } |
| 927 | |
Sharat Masetty | e812744 | 2019-12-03 15:16:14 +0000 | [diff] [blame] | 928 | if (adreno_is_a630(adreno_gpu)) |
| 929 | gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009); |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 930 | |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 931 | if (adreno_is_a7xx(adreno_gpu)) |
| 932 | gpu_write(gpu, REG_A6XX_UCHE_GBIF_GX_CONFIG, 0x10240e0); |
| 933 | |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 934 | /* Make all blocks contribute to the GPU BUSY perf counter */ |
| 935 | gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff); |
| 936 | |
| 937 | /* Disable L2 bypass in the UCHE */ |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 938 | if (adreno_is_a7xx(adreno_gpu)) { |
| 939 | gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu); |
| 940 | gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu); |
| 941 | } else { |
| 942 | gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, 0x0001ffffffffffc0llu); |
| 943 | gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu); |
| 944 | gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu); |
| 945 | } |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 946 | |
Konrad Dybcio | 9588d2f | 2023-09-25 16:50:37 +0200 | [diff] [blame] | 947 | if (!(adreno_is_a650_family(adreno_gpu) || |
Konrad Dybcio | 1839751 | 2024-02-23 22:21:41 +0100 | [diff] [blame] | 948 | adreno_is_a702(adreno_gpu) || |
Konrad Dybcio | 9588d2f | 2023-09-25 16:50:37 +0200 | [diff] [blame] | 949 | adreno_is_a730(adreno_gpu))) { |
Konrad Dybcio | 1f8c29e | 2023-09-25 16:50:38 +0200 | [diff] [blame] | 950 | gmem_range_min = adreno_is_a740_family(adreno_gpu) ? SZ_16M : SZ_1M; |
| 951 | |
Jonathan Marek | 24e6938 | 2020-04-23 17:09:21 -0400 | [diff] [blame] | 952 | /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */ |
Konrad Dybcio | 1f8c29e | 2023-09-25 16:50:38 +0200 | [diff] [blame] | 953 | gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN, gmem_range_min); |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 954 | |
Rob Clark | f73343f | 2023-03-20 11:54:14 -0700 | [diff] [blame] | 955 | gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX, |
Konrad Dybcio | 1f8c29e | 2023-09-25 16:50:38 +0200 | [diff] [blame] | 956 | gmem_range_min + adreno_gpu->info->gmem - 1); |
Jonathan Marek | 24e6938 | 2020-04-23 17:09:21 -0400 | [diff] [blame] | 957 | } |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 958 | |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 959 | if (adreno_is_a7xx(adreno_gpu)) {
| 960 | gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, BIT(23));
| 961 | } else {
| 962 | gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804); |
| 963 | gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4); |
| 964 | } |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 965 | |
Konrad Dybcio | e7fc939 | 2023-06-16 01:20:56 +0200 | [diff] [blame] | 966 | if (adreno_is_a640_family(adreno_gpu) || adreno_is_a650_family(adreno_gpu)) { |
Jonathan Marek | 24e6938 | 2020-04-23 17:09:21 -0400 | [diff] [blame] | 967 | gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140); |
Konrad Dybcio | e7fc939 | 2023-06-16 01:20:56 +0200 | [diff] [blame] | 968 | gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c); |
Konrad Dybcio | 1839751 | 2024-02-23 22:21:41 +0100 | [diff] [blame] | 969 | } else if (adreno_is_a610_family(adreno_gpu)) { |
Konrad Dybcio | e7fc939 | 2023-06-16 01:20:56 +0200 | [diff] [blame] | 970 | gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x00800060); |
| 971 | gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x40201b16); |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 972 | } else if (!adreno_is_a7xx(adreno_gpu)) { |
Jonathan Marek | 24e6938 | 2020-04-23 17:09:21 -0400 | [diff] [blame] | 973 | gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0); |
Konrad Dybcio | e7fc939 | 2023-06-16 01:20:56 +0200 | [diff] [blame] | 974 | gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c); |
| 975 | } |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 976 | |
Akhil P Oommen | 192f4ee | 2021-07-30 01:21:25 +0530 | [diff] [blame] | 977 | if (adreno_is_a660_family(adreno_gpu)) |
Jonathan Marek | f6d62d0 | 2021-06-08 13:27:48 -0400 | [diff] [blame] | 978 | gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020); |
| 979 | |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 980 | /* Set the mem pool size */
Konrad Dybcio | e7fc939 | 2023-06-16 01:20:56 +0200 | [diff] [blame] | 981 | if (adreno_is_a610(adreno_gpu)) { |
| 982 | gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 48); |
| 983 | gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 47); |
Konrad Dybcio | 1839751 | 2024-02-23 22:21:41 +0100 | [diff] [blame] | 984 | } else if (adreno_is_a702(adreno_gpu)) { |
| 985 | gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 64); |
| 986 | gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 63); |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 987 | } else if (!adreno_is_a7xx(adreno_gpu)) {
Konrad Dybcio | e7fc939 | 2023-06-16 01:20:56 +0200 | [diff] [blame] | 988 | gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
| | }
Konrad Dybcio | 2bbb5fe | 2024-08-28 17:06:55 +0200 | [diff] [blame] | 990 | |
| 991 | /* Set the default primFifo threshold values */ |
| 992 | if (adreno_gpu->info->a6xx->prim_fifo_threshold) |
| 993 | gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, |
| 994 | adreno_gpu->info->a6xx->prim_fifo_threshold); |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 995 | |
| 996 | /* Set the AHB default slave response to "ERROR" */ |
| 997 | gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1); |
| 998 | |
| 999 | /* Turn on performance counters */ |
| 1000 | gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1); |
| 1001 | |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 1002 | if (adreno_is_a7xx(adreno_gpu)) { |
| 1003 | /* Turn on the IFPC counter (countable 4 on XOCLK4) */ |
| 1004 | gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1, |
| 1005 | FIELD_PREP(GENMASK(7, 0), 0x4)); |
| 1006 | } |
| 1007 | |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1008 | /* Select CP0 to always count cycles */ |
Rob Clark | cc4c26d | 2021-05-30 15:44:23 -0700 | [diff] [blame] | 1009 | gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL(0), PERF_CP_ALWAYS_COUNT); |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1010 | |
Jonathan Marek | d0bac4e | 2020-05-25 23:25:13 -0400 | [diff] [blame] | 1011 | a6xx_set_ubwc_config(gpu); |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1012 | |
| 1013 | /* Enable fault detection */ |
Konrad Dybcio | 1f8c29e | 2023-09-25 16:50:38 +0200 | [diff] [blame] | 1014 | if (adreno_is_a730(adreno_gpu) || |
| 1015 | adreno_is_a740_family(adreno_gpu)) |
Konrad Dybcio | 9588d2f | 2023-09-25 16:50:37 +0200 | [diff] [blame] | 1016 | gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0xcfffff); |
Danylo Piliaiev | 07e6de7 | 2023-11-25 11:11:51 -0800 | [diff] [blame] | 1017 | else if (adreno_is_a690(adreno_gpu)) |
| 1018 | gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x4fffff); |
Konrad Dybcio | 9588d2f | 2023-09-25 16:50:37 +0200 | [diff] [blame] | 1019 | else if (adreno_is_a619(adreno_gpu)) |
Konrad Dybcio | 3e90044 | 2023-06-16 01:20:57 +0200 | [diff] [blame] | 1020 | gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3fffff); |
Konrad Dybcio | 1839751 | 2024-02-23 22:21:41 +0100 | [diff] [blame] | 1021 | else if (adreno_is_a610(adreno_gpu) || adreno_is_a702(adreno_gpu)) |
Konrad Dybcio | e7fc939 | 2023-06-16 01:20:56 +0200 | [diff] [blame] | 1022 | gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3ffff); |
| 1023 | else |
| 1024 | gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x1fffff); |
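| | /*
| | * In all of the variants above, bit 30 appears to be the fault-detect
| | * enable, with the low bits presumably setting the hang-detect timeout
| | * threshold; only the threshold value differs per target.
| | */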
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1025 | |
Danylo Piliaiev | cf1aaa7 | 2023-11-25 11:11:50 -0800 | [diff] [blame] | 1026 | gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, BIT(7) | 0x1); |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1027 | |
Jonathan Marek | 24e6938 | 2020-04-23 17:09:21 -0400 | [diff] [blame] | 1028 | /* Set weights for bicubic filtering */ |
Akhil P Oommen | d6225e0 | 2024-06-29 07:19:35 +0530 | [diff] [blame] | 1029 | if (adreno_is_a650_family(adreno_gpu) || adreno_is_x185(adreno_gpu)) { |
Jonathan Marek | 24e6938 | 2020-04-23 17:09:21 -0400 | [diff] [blame] | 1030 | gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0); |
| 1031 | gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1, |
| 1032 | 0x3fe05ff4); |
| 1033 | gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2, |
| 1034 | 0x3fa0ebee); |
| 1035 | gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3, |
| 1036 | 0x3f5193ed); |
| 1037 | gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4, |
| 1038 | 0x3f0243f0); |
| 1039 | } |
| 1040 | |
Konrad Dybcio | 30f55f3 | 2023-06-16 01:20:52 +0200 | [diff] [blame] | 1041 | /* Set up the CX GMU counter 0 to count busy ticks */ |
| 1042 | gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000); |
| 1043 | |
| 1044 | /* Enable the power counter */ |
| 1045 | gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, BIT(5)); |
| 1046 | gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1); |
| 1047 | |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1048 | /* Protect registers from the CP */ |
Jonathan Marek | 4084340 | 2021-05-13 13:13:59 -0400 | [diff] [blame] | 1049 | a6xx_set_cp_protect(gpu); |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1050 | |
Akhil P Oommen | 192f4ee | 2021-07-30 01:21:25 +0530 | [diff] [blame] | 1051 | if (adreno_is_a660_family(adreno_gpu)) { |
Danylo Piliaiev | 07e6de7 | 2023-11-25 11:11:51 -0800 | [diff] [blame] | 1052 | if (adreno_is_a690(adreno_gpu)) |
| 1053 | gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x00028801); |
| 1054 | else |
| 1055 | gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1); |
Jonathan Marek | f6d62d0 | 2021-06-08 13:27:48 -0400 | [diff] [blame] | 1056 | gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0); |
Konrad Dybcio | 1839751 | 2024-02-23 22:21:41 +0100 | [diff] [blame] | 1057 | } else if (adreno_is_a702(adreno_gpu)) { |
| 1058 | /* Something to do with the HLSQ cluster */ |
| 1059 | gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, BIT(24)); |
Jonathan Marek | f6d62d0 | 2021-06-08 13:27:48 -0400 | [diff] [blame] | 1060 | } |
| 1061 | |
Danylo Piliaiev | 07e6de7 | 2023-11-25 11:11:51 -0800 | [diff] [blame] | 1062 | if (adreno_is_a690(adreno_gpu)) |
| 1063 | gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x90); |
Akhil P Oommen | 192f4ee | 2021-07-30 01:21:25 +0530 | [diff] [blame] | 1064 | /* Set dualQ + disable afull for A660 GPU */ |
Danylo Piliaiev | 07e6de7 | 2023-11-25 11:11:51 -0800 | [diff] [blame] | 1065 | else if (adreno_is_a660(adreno_gpu)) |
Akhil P Oommen | 192f4ee | 2021-07-30 01:21:25 +0530 | [diff] [blame] | 1066 | gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906); |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 1067 | else if (adreno_is_a7xx(adreno_gpu)) |
| 1068 | gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, |
| 1069 | FIELD_PREP(GENMASK(19, 16), 6) | |
| 1070 | FIELD_PREP(GENMASK(15, 12), 6) | |
| 1071 | FIELD_PREP(GENMASK(11, 8), 9) | |
| 1072 | BIT(3) | BIT(2) | |
| 1073 | FIELD_PREP(GENMASK(1, 0), 2)); |
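| | /*
| | * For reference, the FIELD_PREP/BIT terms above should evaluate to
| | * 0x6690e ((6 << 16) | (6 << 12) | (9 << 8) | BIT(3) | BIT(2) | 2),
| | * which differs from the A660 value 0x66906 only in bit 3.
| | */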
Akhil P Oommen | 192f4ee | 2021-07-30 01:21:25 +0530 | [diff] [blame] | 1074 | |
Jordan Crouse | 604234f | 2020-09-03 20:03:11 -0600 | [diff] [blame] | 1075 | /* Enable expanded apriv for targets that support it */ |
| 1076 | if (gpu->hw_apriv) { |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 1077 | if (adreno_is_a7xx(adreno_gpu)) { |
| 1078 | gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL, |
| 1079 | A7XX_BR_APRIVMASK); |
| 1080 | gpu_write(gpu, REG_A7XX_CP_BV_APRIV_CNTL, |
| 1081 | A7XX_APRIV_MASK); |
| 1082 | gpu_write(gpu, REG_A7XX_CP_LPAC_APRIV_CNTL, |
| 1083 | A7XX_APRIV_MASK); |
| 1084 | } else {
| 1085 | gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
| 1086 | BIT(6) | BIT(5) | BIT(3) | BIT(2) | BIT(1));
| | }
Jonathan Marek | 24e6938 | 2020-04-23 17:09:21 -0400 | [diff] [blame] | 1087 | } |
| 1088 | |
Connor Abbott | ecbf9b3 | 2024-04-30 11:43:20 +0100 | [diff] [blame] | 1089 | if (adreno_is_a750(adreno_gpu)) { |
| 1090 | /* Disable ubwc merged UFC request feature */ |
| 1091 | gpu_rmw(gpu, REG_A6XX_RB_CMP_DBG_ECO_CNTL, BIT(19), BIT(19)); |
| 1092 | |
| 1093 | /* Enable TP flaghint and other performance settings */ |
| 1094 | gpu_write(gpu, REG_A6XX_TPL1_DBG_ECO_CNTL1, 0xc0700); |
| 1095 | } else if (adreno_is_a7xx(adreno_gpu)) { |
| 1096 | /* Disable non-ubwc read reqs from passing write reqs */ |
| 1097 | gpu_rmw(gpu, REG_A6XX_RB_CMP_DBG_ECO_CNTL, BIT(11), BIT(11)); |
| 1098 | } |
| 1099 | |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1100 | /* Enable interrupts */ |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 1101 | gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, |
| 1102 | adreno_is_a7xx(adreno_gpu) ? A7XX_INT_MASK : A6XX_INT_MASK); |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1103 | |
| 1104 | ret = adreno_hw_init(gpu); |
| 1105 | if (ret) |
| 1106 | goto out; |
| 1107 | |
Rob Clark | 8ead967 | 2023-03-20 07:43:35 -0700 | [diff] [blame] | 1108 | gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova); |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1109 | |
Jordan Crouse | f6828e0 | 2020-09-03 20:03:13 -0600 | [diff] [blame] | 1110 | /* Set the ringbuffer address */ |
Rob Clark | cade05b | 2022-11-14 11:30:40 -0800 | [diff] [blame] | 1111 | gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova); |
Jordan Crouse | f6828e0 | 2020-09-03 20:03:13 -0600 | [diff] [blame] | 1112 | |
Jordan Crouse | d3a569f | 2020-09-14 16:40:22 -0600 | [diff] [blame] | 1113 | /* Targets that support extended APRIV can use the RPTR shadow from
| 1114 | * hardware, but all the other ones need to disable the feature. Targets
| 1115 | * that support the WHERE_AM_I opcode can use that instead.
| 1116 | */ |
| 1117 | if (adreno_gpu->base.hw_apriv) |
| 1118 | gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT); |
| 1119 | else |
| 1120 | gpu_write(gpu, REG_A6XX_CP_RB_CNTL, |
| 1121 | MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); |
| 1122 | |
Rob Clark | 8ead967 | 2023-03-20 07:43:35 -0700 | [diff] [blame] | 1123 | /* Configure the RPTR shadow if needed: */ |
| 1124 | if (a6xx_gpu->shadow_bo) { |
Rob Clark | f73343f | 2023-03-20 11:54:14 -0700 | [diff] [blame] | 1125 | gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR, |
Jordan Crouse | d3a569f | 2020-09-14 16:40:22 -0600 | [diff] [blame] | 1126 | shadowptr(a6xx_gpu, gpu->rb[0])); |
| 1127 | } |
Jordan Crouse | f6828e0 | 2020-09-03 20:03:13 -0600 | [diff] [blame] | 1128 | |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 1129 | /* ..which means "always" on A7xx, also for BV shadow */ |
| 1130 | if (adreno_is_a7xx(adreno_gpu)) { |
| 1131 | gpu_write64(gpu, REG_A7XX_CP_BV_RB_RPTR_ADDR, |
| 1132 | rbmemptr(gpu->rb[0], bv_fence)); |
| 1133 | } |
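| | /*
| | * A rough sketch of the layout assumed here: shadowptr() and rbmemptr()
| | * (defined outside this file) are expected to hand each ring its own
| | * slot in the shadow/memptrs BO, along the lines of
| | * shadow_iova + ring->id * sizeof(u32), so the CP writes every ring's
| | * RPTR back to a distinct dword.
| | */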
| 1134 | |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1135 | /* Always come up on rb 0 */ |
| 1136 | a6xx_gpu->cur_ring = gpu->rb[0]; |
| 1137 | |
Rob Clark | 1d054c9 | 2021-11-09 10:11:02 -0800 | [diff] [blame] | 1138 | gpu->cur_ctx_seqno = 0; |
Jordan Crouse | 84c31ee | 2020-08-17 15:01:41 -0700 | [diff] [blame] | 1139 | |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1140 | /* Enable the SQE to start the CP engine */
| 1141 | gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1); |
| 1142 | |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 1143 | ret = adreno_is_a7xx(adreno_gpu) ? a7xx_cp_init(gpu) : a6xx_cp_init(gpu); |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1144 | if (ret) |
| 1145 | goto out; |
| 1146 | |
Jordan Crouse | abccb9f | 2019-04-19 13:46:15 -0600 | [diff] [blame] | 1147 | /* |
| 1148 | * Try to load a zap shader into the secure world. If successful |
| 1149 | * we can use the CP to switch out of secure mode. If not then we |
| 1150 | * have no recourse but to try to switch ourselves out manually. If we
| 1151 | * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will |
| 1152 | * be blocked and a permissions violation will soon follow. |
| 1153 | */ |
| 1154 | ret = a6xx_zap_shader_init(gpu); |
| 1155 | if (!ret) { |
| 1156 | OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1); |
| 1157 | OUT_RING(gpu->rb[0], 0x00000000); |
| 1158 | |
| 1159 | a6xx_flush(gpu, gpu->rb[0]); |
| 1160 | if (!a6xx_idle(gpu, gpu->rb[0])) |
| 1161 | return -EINVAL; |
Rob Clark | 15ab987 | 2019-11-24 14:23:38 -0800 | [diff] [blame] | 1162 | } else if (ret == -ENODEV) { |
| 1163 | /* |
| 1164 | * This device does not use a zap shader (but print a warning
| 1165 | * just in case someone got their dt wrong.. hopefully they |
| 1166 | * have a debug UART to realize the error of their ways... |
| 1167 | * if you mess this up you are about to crash horribly) |
| 1168 | */ |
Jordan Crouse | abccb9f | 2019-04-19 13:46:15 -0600 | [diff] [blame] | 1169 | dev_warn_once(gpu->dev->dev, |
| 1170 | "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n"); |
| 1171 | gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0); |
Rob Clark | 15273ff | 2019-05-08 06:06:52 -0700 | [diff] [blame] | 1172 | ret = 0; |
Rob Clark | 15ab987 | 2019-11-24 14:23:38 -0800 | [diff] [blame] | 1173 | } else { |
| 1174 | return ret; |
Jordan Crouse | abccb9f | 2019-04-19 13:46:15 -0600 | [diff] [blame] | 1175 | } |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1176 | |
| 1177 | out: |
Konrad Dybcio | 5a903a4 | 2023-06-16 01:20:53 +0200 | [diff] [blame] | 1178 | if (adreno_has_gmu_wrapper(adreno_gpu)) |
| 1179 | return ret; |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1180 | /* |
| 1181 | * Tell the GMU that we are done touching the GPU and it can start power |
| 1182 | * management |
| 1183 | */ |
| 1184 | a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); |
| 1185 | |
Jonathan Marek | 8167e6f | 2020-04-23 17:09:17 -0400 | [diff] [blame] | 1186 | if (a6xx_gpu->gmu.legacy) { |
| 1187 | /* Take the GMU out of its special boot mode */ |
| 1188 | a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER); |
| 1189 | } |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1190 | |
| 1191 | return ret; |
| 1192 | } |
| 1193 | |
Rob Clark | f6f5907 | 2021-09-27 11:00:04 -0700 | [diff] [blame] | 1194 | static int a6xx_hw_init(struct msm_gpu *gpu) |
| 1195 | { |
| 1196 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
| 1197 | struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); |
| 1198 | int ret; |
| 1199 | |
| 1200 | mutex_lock(&a6xx_gpu->gmu.lock); |
| 1201 | ret = hw_init(gpu); |
| 1202 | mutex_unlock(&a6xx_gpu->gmu.lock); |
| 1203 | |
| 1204 | return ret; |
| 1205 | } |
| 1206 | |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1207 | static void a6xx_dump(struct msm_gpu *gpu) |
| 1208 | { |
Mamta Shukla | 6a41da1 | 2018-10-20 23:19:26 +0530 | [diff] [blame] | 1209 | DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n", |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1210 | gpu_read(gpu, REG_A6XX_RBBM_STATUS)); |
| 1211 | adreno_dump(gpu); |
| 1212 | } |
| 1213 | |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1214 | static void a6xx_recover(struct msm_gpu *gpu) |
| 1215 | { |
| 1216 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
| 1217 | struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); |
Akhil P Oommen | c11fa12 | 2023-01-02 16:18:31 +0530 | [diff] [blame] | 1218 | struct a6xx_gmu *gmu = &a6xx_gpu->gmu; |
Akhil P Oommen | f350bfb | 2022-08-19 01:52:12 +0530 | [diff] [blame] | 1219 | int i, active_submits; |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1220 | |
| 1221 | adreno_dump_info(gpu); |
| 1222 | |
| 1223 | for (i = 0; i < 8; i++) |
Mamta Shukla | 6a41da1 | 2018-10-20 23:19:26 +0530 | [diff] [blame] | 1224 | DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i, |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1225 | gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i))); |
| 1226 | |
| 1227 | if (hang_debug) |
| 1228 | a6xx_dump(gpu); |
| 1229 | |
Akhil P Oommen | f4a75b5 | 2022-12-16 22:33:14 +0530 | [diff] [blame] | 1230 | /* |
| 1231 | * Set the hung flag so that recovery-specific sequences can be handled
| 1232 | * during the rpm suspend we are about to trigger
| 1233 | */ |
| 1234 | a6xx_gpu->hung = true; |
| 1235 | |
Akhil P Oommen | 3a9dd70 | 2022-08-19 01:52:14 +0530 | [diff] [blame] | 1236 | /* Halt SQE first */ |
| 1237 | gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3); |
| 1238 | |
Akhil P Oommen | f350bfb | 2022-08-19 01:52:12 +0530 | [diff] [blame] | 1239 | pm_runtime_dont_use_autosuspend(&gpu->pdev->dev); |
| 1240 | |
| 1241 | /* active_submits won't change until we make a submission */
| 1242 | mutex_lock(&gpu->active_lock); |
| 1243 | active_submits = gpu->active_submits; |
| 1244 | |
| 1245 | /* |
| 1246 | * Temporarily clear active_submits count to silence a WARN() in the |
| 1247 | * runtime suspend cb |
| 1248 | */ |
| 1249 | gpu->active_submits = 0; |
| 1250 | |
Konrad Dybcio | 5a903a4 | 2023-06-16 01:20:53 +0200 | [diff] [blame] | 1251 | if (adreno_has_gmu_wrapper(adreno_gpu)) { |
| 1252 | /* Drain the outstanding traffic on memory buses */ |
| 1253 | a6xx_bus_clear_pending_transactions(adreno_gpu, true); |
| 1254 | |
| 1255 | /* Reset the GPU to a clean state */ |
| 1256 | a6xx_gpu_sw_reset(gpu, true); |
| 1257 | a6xx_gpu_sw_reset(gpu, false); |
| 1258 | } |
| 1259 | |
Akhil P Oommen | c11fa12 | 2023-01-02 16:18:31 +0530 | [diff] [blame] | 1260 | reinit_completion(&gmu->pd_gate); |
| 1261 | dev_pm_genpd_add_notifier(gmu->cxpd, &gmu->pd_nb); |
| 1262 | dev_pm_genpd_synced_poweroff(gmu->cxpd); |
| 1263 | |
Akhil P Oommen | f350bfb | 2022-08-19 01:52:12 +0530 | [diff] [blame] | 1264 | /* Drop the rpm refcount from active submits */ |
| 1265 | if (active_submits) |
| 1266 | pm_runtime_put(&gpu->pdev->dev); |
| 1267 | |
| 1268 | /* And the final one from recover worker */ |
| 1269 | pm_runtime_put_sync(&gpu->pdev->dev); |
| 1270 | |
Akhil P Oommen | c11fa12 | 2023-01-02 16:18:31 +0530 | [diff] [blame] | 1271 | if (!wait_for_completion_timeout(&gmu->pd_gate, msecs_to_jiffies(1000))) |
| 1272 | DRM_DEV_ERROR(&gpu->pdev->dev, "cx gdsc didn't collapse\n"); |
| 1273 | |
| 1274 | dev_pm_genpd_remove_notifier(gmu->cxpd); |
| 1275 | |
Akhil P Oommen | f350bfb | 2022-08-19 01:52:12 +0530 | [diff] [blame] | 1276 | pm_runtime_use_autosuspend(&gpu->pdev->dev); |
| 1277 | |
| 1278 | if (active_submits) |
| 1279 | pm_runtime_get(&gpu->pdev->dev); |
| 1280 | |
| 1281 | pm_runtime_get_sync(&gpu->pdev->dev); |
| 1282 | |
| 1283 | gpu->active_submits = active_submits; |
| 1284 | mutex_unlock(&gpu->active_lock); |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1285 | |
| 1286 | msm_gpu_hw_init(gpu); |
Akhil P Oommen | f4a75b5 | 2022-12-16 22:33:14 +0530 | [diff] [blame] | 1287 | a6xx_gpu->hung = false; |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1288 | } |
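| | /*
| | * The pd_gate/pd_nb pairing above assumes a genpd notifier on the CX
| | * domain that completes pd_gate once the domain actually powers off.
| | * A minimal sketch of such a callback (the real one is expected to
| | * live in the GMU code) might look like:
| | *
| | *	static int cxpd_notifier_cb(struct notifier_block *nb,
| | *				    unsigned long action, void *data)
| | *	{
| | *		struct a6xx_gmu *gmu = container_of(nb, struct a6xx_gmu, pd_nb);
| | *
| | *		if (action == GENPD_NOTIFY_OFF)
| | *			complete_all(&gmu->pd_gate);
| | *
| | *		return 0;
| | *	}
| | */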
| 1289 | |
Jordan Crouse | 2a574cc | 2021-06-10 14:44:11 -0700 | [diff] [blame] | 1290 | static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid) |
| 1291 | { |
Connor Abbott | 77beba3 | 2024-01-25 13:10:58 +0000 | [diff] [blame] | 1292 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
Jordan Crouse | 2a574cc | 2021-06-10 14:44:11 -0700 | [diff] [blame] | 1293 | static const char *uche_clients[7] = { |
| 1294 | "VFD", "SP", "VSC", "VPC", "HLSQ", "PC", "LRZ", |
| 1295 | }; |
| 1296 | u32 val; |
| 1297 | |
Connor Abbott | 77beba3 | 2024-01-25 13:10:58 +0000 | [diff] [blame] | 1298 | if (adreno_is_a7xx(adreno_gpu)) { |
| 1299 | if (mid != 1 && mid != 2 && mid != 3 && mid != 8) |
| 1300 | return "UNKNOWN"; |
| 1301 | } else { |
| 1302 | if (mid < 1 || mid > 3) |
| 1303 | return "UNKNOWN"; |
| 1304 | } |
Jordan Crouse | 2a574cc | 2021-06-10 14:44:11 -0700 | [diff] [blame] | 1305 | |
| 1306 | /* |
| 1307 | * The source of the data depends on the mid ID read from FSYNR1
| 1308 | * and the client ID read from the UCHE block
| 1309 | */ |
| 1310 | val = gpu_read(gpu, REG_A6XX_UCHE_CLIENT_PF); |
| 1311 | |
Connor Abbott | 77beba3 | 2024-01-25 13:10:58 +0000 | [diff] [blame] | 1312 | if (adreno_is_a7xx(adreno_gpu)) { |
| 1313 | /* Bit 3 for mid=3 indicates BR or BV */ |
| 1314 | static const char *uche_clients_a7xx[16] = { |
| 1315 | "BR_VFD", "BR_SP", "BR_VSC", "BR_VPC", |
| 1316 | "BR_HLSQ", "BR_PC", "BR_LRZ", "BR_TP", |
| 1317 | "BV_VFD", "BV_SP", "BV_VSC", "BV_VPC", |
| 1318 | "BV_HLSQ", "BV_PC", "BV_LRZ", "BV_TP", |
| 1319 | }; |
Jordan Crouse | 2a574cc | 2021-06-10 14:44:11 -0700 | [diff] [blame] | 1320 | |
Connor Abbott | 77beba3 | 2024-01-25 13:10:58 +0000 | [diff] [blame] | 1321 | /* LPAC has the same clients as BR and BV, but because it is |
| 1322 | * compute-only some of them do not exist and there are holes |
| 1323 | * in the array. |
| 1324 | */ |
| 1325 | static const char *uche_clients_lpac_a7xx[8] = { |
| 1326 | "-", "LPAC_SP", "-", "-", |
| 1327 | "LPAC_HLSQ", "-", "-", "LPAC_TP", |
| 1328 | }; |
Jordan Crouse | 2a574cc | 2021-06-10 14:44:11 -0700 | [diff] [blame] | 1329 | |
Connor Abbott | 77beba3 | 2024-01-25 13:10:58 +0000 | [diff] [blame] | 1330 | val &= GENMASK(6, 0); |
| 1331 | |
| 1332 | /* mid=3 refers to BR or BV */ |
| 1333 | if (mid == 3) { |
| 1334 | if (val < ARRAY_SIZE(uche_clients_a7xx)) |
| 1335 | return uche_clients_a7xx[val]; |
| 1336 | else |
| 1337 | return "UCHE"; |
| 1338 | } |
| 1339 | |
| 1340 | /* mid=8 refers to LPAC */ |
| 1341 | if (mid == 8) { |
| 1342 | if (val < ARRAY_SIZE(uche_clients_lpac_a7xx)) |
| 1343 | return uche_clients_lpac_a7xx[val]; |
| 1344 | else |
| 1345 | return "UCHE_LPAC"; |
| 1346 | } |
| 1347 | |
| 1348 | /* mid=2 is a catchall for everything else in LPAC */ |
| 1349 | if (mid == 2) |
| 1350 | return "UCHE_LPAC"; |
| 1351 | |
| 1352 | /* mid=1 is a catchall for everything else in BR/BV */ |
| 1353 | return "UCHE"; |
| 1354 | } else if (adreno_is_a660_family(adreno_gpu)) { |
| 1355 | static const char *uche_clients_a660[8] = { |
| 1356 | "VFD", "SP", "VSC", "VPC", "HLSQ", "PC", "LRZ", "TP", |
| 1357 | }; |
| 1358 | |
| 1359 | static const char *uche_clients_a660_not[8] = { |
| 1360 | "not VFD", "not SP", "not VSC", "not VPC", |
| 1361 | "not HLSQ", "not PC", "not LRZ", "not TP", |
| 1362 | }; |
| 1363 | |
| 1364 | val &= GENMASK(6, 0); |
| 1365 | |
| 1366 | if (mid == 3 && val < ARRAY_SIZE(uche_clients_a660)) |
| 1367 | return uche_clients_a660[val]; |
| 1368 | |
| 1369 | if (mid == 1 && val < ARRAY_SIZE(uche_clients_a660_not)) |
| 1370 | return uche_clients_a660_not[val]; |
| 1371 | |
| 1372 | return "UCHE"; |
| 1373 | } else { |
| 1374 | /* mid = 3 is most precise and refers to only one block per client */ |
| 1375 | if (mid == 3) |
| 1376 | return uche_clients[val & 7]; |
| 1377 | |
| 1378 | /* For mid=2 the source is TP or VFD except when the client id is 0 */ |
| 1379 | if (mid == 2) |
| 1380 | return ((val & 7) == 0) ? "TP" : "TP|VFD"; |
| 1381 | |
| 1382 | /* For mid=1 just return "UCHE" as a catchall for everything else */ |
| 1383 | return "UCHE"; |
| 1384 | } |
Jordan Crouse | 2a574cc | 2021-06-10 14:44:11 -0700 | [diff] [blame] | 1385 | } |
| 1386 | |
| 1387 | static const char *a6xx_fault_block(struct msm_gpu *gpu, u32 id) |
| 1388 | { |
Connor Abbott | 77beba3 | 2024-01-25 13:10:58 +0000 | [diff] [blame] | 1389 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
| 1390 | |
Jordan Crouse | 2a574cc | 2021-06-10 14:44:11 -0700 | [diff] [blame] | 1391 | if (id == 0) |
| 1392 | return "CP"; |
| 1393 | else if (id == 4) |
| 1394 | return "CCU"; |
| 1395 | else if (id == 6) |
| 1396 | return "CDP Prefetch"; |
Connor Abbott | 77beba3 | 2024-01-25 13:10:58 +0000 | [diff] [blame] | 1397 | else if (id == 7) |
| 1398 | return "GMU"; |
| 1399 | else if (id == 5 && adreno_is_a7xx(adreno_gpu)) |
| 1400 | return "Flag cache"; |
Jordan Crouse | 2a574cc | 2021-06-10 14:44:11 -0700 | [diff] [blame] | 1401 | |
| 1402 | return a6xx_uche_fault_block(gpu, id); |
| 1403 | } |
| 1404 | |
Jordan Crouse | 2a574cc | 2021-06-10 14:44:11 -0700 | [diff] [blame] | 1405 | static int a6xx_fault_handler(void *arg, unsigned long iova, int flags, void *data) |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1406 | { |
| 1407 | struct msm_gpu *gpu = arg; |
Jordan Crouse | 2a574cc | 2021-06-10 14:44:11 -0700 | [diff] [blame] | 1408 | struct adreno_smmu_fault_info *info = data; |
Dmitry Baryshkov | f62ad0f | 2023-02-14 15:35:03 +0300 | [diff] [blame] | 1409 | const char *block = "unknown"; |
Rob Clark | e25e92e | 2021-06-10 14:44:13 -0700 | [diff] [blame] | 1410 | |
Dmitry Baryshkov | f62ad0f | 2023-02-14 15:35:03 +0300 | [diff] [blame] | 1411 | u32 scratch[] = { |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1412 | gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)), |
| 1413 | gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)), |
| 1414 | gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)), |
Dmitry Baryshkov | f62ad0f | 2023-02-14 15:35:03 +0300 | [diff] [blame] | 1415 | gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)), |
| 1416 | }; |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1417 | |
Dmitry Baryshkov | f62ad0f | 2023-02-14 15:35:03 +0300 | [diff] [blame] | 1418 | if (info) |
| 1419 | block = a6xx_fault_block(gpu, info->fsynr1 & 0xff); |
Jordan Crouse | 2a574cc | 2021-06-10 14:44:11 -0700 | [diff] [blame] | 1420 | |
Dmitry Baryshkov | f62ad0f | 2023-02-14 15:35:03 +0300 | [diff] [blame] | 1421 | return adreno_fault_handler(gpu, iova, flags, info, block, scratch); |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1422 | } |
| 1423 | |
| 1424 | static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu) |
| 1425 | { |
| 1426 | u32 status = gpu_read(gpu, REG_A6XX_CP_INTERRUPT_STATUS); |
| 1427 | |
| 1428 | if (status & A6XX_CP_INT_CP_OPCODE_ERROR) { |
| 1429 | u32 val; |
| 1430 | |
| 1431 | gpu_write(gpu, REG_A6XX_CP_SQE_STAT_ADDR, 1); |
| 1432 | val = gpu_read(gpu, REG_A6XX_CP_SQE_STAT_DATA); |
| 1433 | dev_err_ratelimited(&gpu->pdev->dev, |
| 1434 | "CP | opcode error | possible opcode=0x%8.8X\n", |
| 1435 | val); |
| 1436 | } |
| 1437 | |
| 1438 | if (status & A6XX_CP_INT_CP_UCODE_ERROR) |
| 1439 | dev_err_ratelimited(&gpu->pdev->dev, |
| 1440 | "CP ucode error interrupt\n"); |
| 1441 | |
| 1442 | if (status & A6XX_CP_INT_CP_HW_FAULT_ERROR) |
| 1443 | dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n", |
| 1444 | gpu_read(gpu, REG_A6XX_CP_HW_FAULT)); |
| 1445 | |
| 1446 | if (status & A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) { |
| 1447 | u32 val = gpu_read(gpu, REG_A6XX_CP_PROTECT_STATUS); |
| 1448 | |
| 1449 | dev_err_ratelimited(&gpu->pdev->dev, |
| 1450 | "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n", |
| 1451 | val & (1 << 20) ? "READ" : "WRITE", |
| 1452 | (val & 0x3ffff), val); |
| 1453 | } |
| 1454 | |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 1455 | if (status & A6XX_CP_INT_CP_AHB_ERROR && !adreno_is_a7xx(to_adreno_gpu(gpu))) |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1456 | dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n"); |
| 1457 | |
| 1458 | if (status & A6XX_CP_INT_CP_VSD_PARITY_ERROR) |
| 1459 | dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n"); |
| 1460 | |
| 1461 | if (status & A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR) |
| 1462 | dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n"); |
| 1464 | } |
| 1465 | |
| 1466 | static void a6xx_fault_detect_irq(struct msm_gpu *gpu) |
| 1467 | { |
| 1468 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
| 1469 | struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1470 | struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); |
| 1471 | |
| 1472 | /* |
Rob Clark | e25e92e | 2021-06-10 14:44:13 -0700 | [diff] [blame] | 1473 | * If stalled on SMMU fault, we could trip the GPU's hang detection, |
| 1474 | * but the fault handler will trigger the devcore dump, and we want |
| 1475 | * to otherwise resume normally rather than killing the submit, so |
| 1476 | * just bail. |
| 1477 | */ |
| 1478 | if (gpu_read(gpu, REG_A6XX_RBBM_STATUS3) & A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT) |
| 1479 | return; |
| 1480 | |
| 1481 | /* |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1482 | * Force the GPU to stay on until after we finish |
| 1483 | * collecting information |
| 1484 | */ |
Konrad Dybcio | 5a903a4 | 2023-06-16 01:20:53 +0200 | [diff] [blame] | 1485 | if (!adreno_has_gmu_wrapper(adreno_gpu)) |
| 1486 | gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1); |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1487 | |
| 1488 | DRM_DEV_ERROR(&gpu->pdev->dev, |
| 1489 | "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n", |
Rob Clark | f9d5355 | 2022-04-11 14:58:31 -0700 | [diff] [blame] | 1490 | ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0, |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1491 | gpu_read(gpu, REG_A6XX_RBBM_STATUS), |
| 1492 | gpu_read(gpu, REG_A6XX_CP_RB_RPTR), |
| 1493 | gpu_read(gpu, REG_A6XX_CP_RB_WPTR), |
Rob Clark | cade05b | 2022-11-14 11:30:40 -0800 | [diff] [blame] | 1494 | gpu_read64(gpu, REG_A6XX_CP_IB1_BASE), |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1495 | gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE), |
Rob Clark | cade05b | 2022-11-14 11:30:40 -0800 | [diff] [blame] | 1496 | gpu_read64(gpu, REG_A6XX_CP_IB2_BASE), |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1497 | gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE)); |
| 1498 | |
| 1499 | /* Turn off the hangcheck timer to keep it from bothering us */ |
| 1500 | del_timer(&gpu->hangcheck_timer); |
| 1501 | |
Rob Clark | 7e68829 | 2020-10-19 14:10:51 -0700 | [diff] [blame] | 1502 | kthread_queue_work(gpu->worker, &gpu->recover_work); |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1503 | } |
| 1504 | |
Connor Abbott | 14b27d5 | 2024-04-30 11:43:18 +0100 | [diff] [blame] | 1505 | static void a7xx_sw_fuse_violation_irq(struct msm_gpu *gpu) |
| 1506 | { |
| 1507 | u32 status; |
| 1508 | |
| 1509 | status = gpu_read(gpu, REG_A7XX_RBBM_SW_FUSE_INT_STATUS); |
| 1510 | gpu_write(gpu, REG_A7XX_RBBM_SW_FUSE_INT_MASK, 0); |
| 1511 | |
| 1512 | dev_err_ratelimited(&gpu->pdev->dev, "SW fuse violation status=%8.8x\n", status); |
| 1513 | |
| 1514 | /* |
| 1515 | * Ignore FASTBLEND violations, because the HW will silently fall back |
| 1516 | * to legacy blending. |
| 1517 | */ |
| 1518 | if (status & (A7XX_CX_MISC_SW_FUSE_VALUE_RAYTRACING | |
| 1519 | A7XX_CX_MISC_SW_FUSE_VALUE_LPAC)) { |
| 1520 | del_timer(&gpu->hangcheck_timer); |
| 1521 | |
| 1522 | kthread_queue_work(gpu->worker, &gpu->recover_work); |
| 1523 | } |
| 1524 | } |
| 1525 | |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1526 | static irqreturn_t a6xx_irq(struct msm_gpu *gpu) |
| 1527 | { |
Rob Clark | 5edf275 | 2021-11-09 10:11:05 -0800 | [diff] [blame] | 1528 | struct msm_drm_private *priv = gpu->dev->dev_private; |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1529 | u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS); |
| 1530 | |
| 1531 | gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status); |
| 1532 | |
Rob Clark | 5edf275 | 2021-11-09 10:11:05 -0800 | [diff] [blame] | 1533 | if (priv->disable_err_irq) |
| 1534 | status &= A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS; |
| 1535 | |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1536 | if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT) |
| 1537 | a6xx_fault_detect_irq(gpu); |
| 1538 | |
| 1539 | if (status & A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR) |
| 1540 | dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n"); |
| 1541 | |
| 1542 | if (status & A6XX_RBBM_INT_0_MASK_CP_HW_ERROR) |
| 1543 | a6xx_cp_hw_err_irq(gpu); |
| 1544 | |
| 1545 | if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW) |
| 1546 | dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n"); |
| 1547 | |
| 1548 | if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW) |
| 1549 | dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n"); |
| 1550 | |
| 1551 | if (status & A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS) |
| 1552 | dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n"); |
| 1553 | |
Connor Abbott | 14b27d5 | 2024-04-30 11:43:18 +0100 | [diff] [blame] | 1554 | if (status & A6XX_RBBM_INT_0_MASK_SWFUSEVIOLATION) |
| 1555 | a7xx_sw_fuse_violation_irq(gpu); |
| 1556 | |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1557 | if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) |
| 1558 | msm_gpu_retire(gpu); |
| 1559 | |
| 1560 | return IRQ_HANDLED; |
| 1561 | } |
| 1562 | |
Sharat Masetty | 474dadb | 2020-11-25 12:30:15 +0530 | [diff] [blame] | 1563 | static void a6xx_llc_deactivate(struct a6xx_gpu *a6xx_gpu) |
| 1564 | { |
| 1565 | llcc_slice_deactivate(a6xx_gpu->llc_slice); |
| 1566 | llcc_slice_deactivate(a6xx_gpu->htw_llc_slice); |
| 1567 | } |
| 1568 | |
| 1569 | static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu) |
| 1570 | { |
Jordan Crouse | 3d24712 | 2020-11-25 12:30:16 +0530 | [diff] [blame] | 1571 | struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; |
| 1572 | struct msm_gpu *gpu = &adreno_gpu->base; |
Akhil P Oommen | 9ba873e | 2021-11-18 15:50:31 +0530 | [diff] [blame] | 1573 | u32 cntl1_regval = 0; |
Sharat Masetty | 474dadb | 2020-11-25 12:30:15 +0530 | [diff] [blame] | 1574 | |
| 1575 | if (IS_ERR(a6xx_gpu->llc_mmio)) |
| 1576 | return; |
| 1577 | |
| 1578 | if (!llcc_slice_activate(a6xx_gpu->llc_slice)) { |
Akhil P Oommen | 9ba873e | 2021-11-18 15:50:31 +0530 | [diff] [blame] | 1579 | u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice); |
Sharat Masetty | 474dadb | 2020-11-25 12:30:15 +0530 | [diff] [blame] | 1580 | |
| 1581 | gpu_scid &= 0x1f; |
| 1582 | cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) | |
| 1583 | (gpu_scid << 15) | (gpu_scid << 20); |
Akhil P Oommen | 9ba873e | 2021-11-18 15:50:31 +0530 | [diff] [blame] | 1584 | |
| 1585 | /* On A660, the SCID programming for UCHE traffic is done in |
| 1586 | * A6XX_GBIF_SCACHE_CNTL0[14:10] |
| 1587 | */ |
| 1588 | if (adreno_is_a660_family(adreno_gpu)) |
| 1589 | gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) | |
| 1590 | (1 << 8), (gpu_scid << 10) | (1 << 8)); |
Sharat Masetty | 474dadb | 2020-11-25 12:30:15 +0530 | [diff] [blame] | 1591 | } |
| 1592 | |
Jordan Crouse | 3d24712 | 2020-11-25 12:30:16 +0530 | [diff] [blame] | 1593 | /* |
| 1594 | * For targets with an MMU500, activate the slice but don't program the
| 1595 | * register. The XBL will take care of that. |
| 1596 | */ |
Sharat Masetty | 474dadb | 2020-11-25 12:30:15 +0530 | [diff] [blame] | 1597 | if (!llcc_slice_activate(a6xx_gpu->htw_llc_slice)) { |
Jordan Crouse | 3d24712 | 2020-11-25 12:30:16 +0530 | [diff] [blame] | 1598 | if (!a6xx_gpu->have_mmu500) { |
| 1599 | u32 gpuhtw_scid = llcc_get_slice_id(a6xx_gpu->htw_llc_slice); |
Sharat Masetty | 474dadb | 2020-11-25 12:30:15 +0530 | [diff] [blame] | 1600 | |
Jordan Crouse | 3d24712 | 2020-11-25 12:30:16 +0530 | [diff] [blame] | 1601 | gpuhtw_scid &= 0x1f; |
| 1602 | cntl1_regval |= FIELD_PREP(GENMASK(29, 25), gpuhtw_scid); |
| 1603 | } |
Sharat Masetty | 474dadb | 2020-11-25 12:30:15 +0530 | [diff] [blame] | 1604 | } |
| 1605 | |
Akhil P Oommen | a6f2438 | 2021-07-30 01:21:23 +0530 | [diff] [blame] | 1606 | if (!cntl1_regval) |
| 1607 | return; |
Sharat Masetty | 474dadb | 2020-11-25 12:30:15 +0530 | [diff] [blame] | 1608 | |
Akhil P Oommen | a6f2438 | 2021-07-30 01:21:23 +0530 | [diff] [blame] | 1609 | /* |
| 1610 | * Program the slice IDs for the various GPU blocks and GPU MMU |
| 1611 | * pagetables |
| 1612 | */ |
| 1613 | if (!a6xx_gpu->have_mmu500) { |
| 1614 | a6xx_llc_write(a6xx_gpu, |
| 1615 | REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1, cntl1_regval); |
| 1616 | |
| 1617 | /* |
| 1618 | * Program cacheability overrides to not allocate cache |
| 1619 | * lines on a write miss |
| 1620 | */ |
| 1621 | a6xx_llc_rmw(a6xx_gpu, |
| 1622 | REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF, 0x03); |
| 1623 | return; |
Sharat Masetty | 474dadb | 2020-11-25 12:30:15 +0530 | [diff] [blame] | 1624 | } |
Akhil P Oommen | a6f2438 | 2021-07-30 01:21:23 +0530 | [diff] [blame] | 1625 | |
| 1626 | gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval); |
Sharat Masetty | 474dadb | 2020-11-25 12:30:15 +0530 | [diff] [blame] | 1627 | } |
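| | /*
| | * Worked example: with gpu_scid == 2, the replication above yields
| | * cntl1_regval == 2 | (2 << 5) | (2 << 10) | (2 << 15) | (2 << 20)
| | * == 0x210842, i.e. the same 5-bit slice ID repeated for each
| | * requester field of SYSTEM_CACHE_CNTL_1.
| | */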
| 1628 | |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 1629 | static void a7xx_llc_activate(struct a6xx_gpu *a6xx_gpu) |
| 1630 | { |
| 1631 | struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; |
| 1632 | struct msm_gpu *gpu = &adreno_gpu->base; |
| 1633 | |
| 1634 | if (IS_ERR(a6xx_gpu->llc_mmio)) |
| 1635 | return; |
| 1636 | |
| 1637 | if (!llcc_slice_activate(a6xx_gpu->llc_slice)) { |
| 1638 | u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice); |
| 1639 | |
| 1640 | gpu_scid &= GENMASK(4, 0); |
| 1641 | |
| 1642 | gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, |
| 1643 | FIELD_PREP(GENMASK(29, 25), gpu_scid) | |
| 1644 | FIELD_PREP(GENMASK(24, 20), gpu_scid) | |
| 1645 | FIELD_PREP(GENMASK(19, 15), gpu_scid) | |
| 1646 | FIELD_PREP(GENMASK(14, 10), gpu_scid) | |
| 1647 | FIELD_PREP(GENMASK(9, 5), gpu_scid) | |
| 1648 | FIELD_PREP(GENMASK(4, 0), gpu_scid)); |
| 1649 | |
| 1650 | gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, |
| 1651 | FIELD_PREP(GENMASK(14, 10), gpu_scid) | |
| 1652 | BIT(8)); |
| 1653 | } |
| 1654 | |
| 1655 | llcc_slice_activate(a6xx_gpu->htw_llc_slice); |
| 1656 | } |
| 1657 | |
Sharat Masetty | 474dadb | 2020-11-25 12:30:15 +0530 | [diff] [blame] | 1658 | static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu) |
| 1659 | { |
Konrad Dybcio | 5a903a4 | 2023-06-16 01:20:53 +0200 | [diff] [blame] | 1660 | /* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */ |
| 1661 | if (adreno_has_gmu_wrapper(&a6xx_gpu->base)) |
| 1662 | return; |
| 1663 | |
Sharat Masetty | 474dadb | 2020-11-25 12:30:15 +0530 | [diff] [blame] | 1664 | llcc_slice_putd(a6xx_gpu->llc_slice); |
| 1665 | llcc_slice_putd(a6xx_gpu->htw_llc_slice); |
| 1666 | } |
| 1667 | |
| 1668 | static void a6xx_llc_slices_init(struct platform_device *pdev, |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 1669 | struct a6xx_gpu *a6xx_gpu, bool is_a7xx) |
Sharat Masetty | 474dadb | 2020-11-25 12:30:15 +0530 | [diff] [blame] | 1670 | { |
Jordan Crouse | 3d24712 | 2020-11-25 12:30:16 +0530 | [diff] [blame] | 1671 | struct device_node *phandle; |
| 1672 | |
Konrad Dybcio | 5a903a4 | 2023-06-16 01:20:53 +0200 | [diff] [blame] | 1673 | /* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */ |
| 1674 | if (adreno_has_gmu_wrapper(&a6xx_gpu->base)) |
| 1675 | return; |
| 1676 | |
Jordan Crouse | 3d24712 | 2020-11-25 12:30:16 +0530 | [diff] [blame] | 1677 | /* |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 1678 | * There is a different programming path for A6xx targets with an |
| 1679 | * mmu500 attached, so detect if that is the case |
Jordan Crouse | 3d24712 | 2020-11-25 12:30:16 +0530 | [diff] [blame] | 1680 | */ |
| 1681 | phandle = of_parse_phandle(pdev->dev.of_node, "iommus", 0); |
| 1682 | a6xx_gpu->have_mmu500 = (phandle && |
| 1683 | of_device_is_compatible(phandle, "arm,mmu-500")); |
| 1684 | of_node_put(phandle); |
| 1685 | |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 1686 | if (is_a7xx || !a6xx_gpu->have_mmu500) |
Dmitry Baryshkov | c0e745d | 2022-01-06 02:26:59 +0300 | [diff] [blame] | 1687 | a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem"); |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 1688 | else |
| 1689 | a6xx_gpu->llc_mmio = NULL; |
Jonathan Marek | 4b95d37 | 2021-04-23 21:49:26 -0400 | [diff] [blame] | 1690 | |
Sharat Masetty | 474dadb | 2020-11-25 12:30:15 +0530 | [diff] [blame] | 1691 | a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU); |
| 1692 | a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW); |
| 1693 | |
Sai Prakash Ranjan | 276619c | 2021-01-11 17:34:08 +0530 | [diff] [blame] | 1694 | if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice)) |
Sharat Masetty | 474dadb | 2020-11-25 12:30:15 +0530 | [diff] [blame] | 1695 | a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL); |
| 1696 | } |
| 1697 | |
Connor Abbott | 14b27d5 | 2024-04-30 11:43:18 +0100 | [diff] [blame] | 1698 | static int a7xx_cx_mem_init(struct a6xx_gpu *a6xx_gpu) |
| 1699 | { |
| 1700 | struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; |
| 1701 | struct msm_gpu *gpu = &adreno_gpu->base; |
| 1702 | u32 fuse_val; |
| 1703 | int ret; |
| 1704 | |
| 1705 | if (adreno_is_a750(adreno_gpu)) { |
| 1706 | /* |
| 1707 | * Assume that if qcom scm isn't available, whatever replaces it
| 1708 | * allows writing the fuse register ourselves.
| 1709 | * Users of alternative firmware need to make sure this
| 1710 | * register is writeable, or somehow indicate that it's not.
| 1711 | * Print a warning because if you mess this up you're about to |
| 1712 | * crash horribly. |
| 1713 | */ |
| 1714 | if (!qcom_scm_is_available()) { |
| 1715 | dev_warn_once(gpu->dev->dev, |
| 1716 | "SCM is not available, poking fuse register\n"); |
| 1717 | a6xx_llc_write(a6xx_gpu, REG_A7XX_CX_MISC_SW_FUSE_VALUE, |
| 1718 | A7XX_CX_MISC_SW_FUSE_VALUE_RAYTRACING | |
| 1719 | A7XX_CX_MISC_SW_FUSE_VALUE_FASTBLEND | |
| 1720 | A7XX_CX_MISC_SW_FUSE_VALUE_LPAC); |
| 1721 | adreno_gpu->has_ray_tracing = true; |
| 1722 | return 0; |
| 1723 | } |
| 1724 | |
| 1725 | ret = qcom_scm_gpu_init_regs(QCOM_SCM_GPU_ALWAYS_EN_REQ | |
| 1726 | QCOM_SCM_GPU_TSENSE_EN_REQ); |
| 1727 | if (ret) |
| 1728 | return ret; |
| 1729 | |
| 1730 | /* |
| 1731 | * On a750 raytracing may be disabled by the firmware, find out |
| 1732 | * whether that's the case. The scm call above sets the fuse |
| 1733 | * register. |
| 1734 | */ |
| 1735 | fuse_val = a6xx_llc_read(a6xx_gpu, |
| 1736 | REG_A7XX_CX_MISC_SW_FUSE_VALUE); |
| 1737 | adreno_gpu->has_ray_tracing = |
| 1738 | !!(fuse_val & A7XX_CX_MISC_SW_FUSE_VALUE_RAYTRACING); |
Neil Armstrong | cc2ccd1 | 2024-06-26 14:04:21 +0200 | [diff] [blame] | 1739 | } else if (adreno_is_a740(adreno_gpu)) { |
| 1740 | /* Raytracing is always enabled on a740 */ |
| 1741 | adreno_gpu->has_ray_tracing = true; |
Connor Abbott | 14b27d5 | 2024-04-30 11:43:18 +0100 | [diff] [blame] | 1742 | } |
| 1743 | |
| 1744 | return 0; |
| 1745 | } |
| 1746 | |
Konrad Dybcio | 3773a57 | 2023-06-16 01:20:48 +0200 | [diff] [blame] | 1748 | #define GBIF_CLIENT_HALT_MASK BIT(0) |
| 1749 | #define GBIF_ARB_HALT_MASK BIT(1) |
| 1750 | #define VBIF_XIN_HALT_CTRL0_MASK GENMASK(3, 0) |
Konrad Dybcio | 8296ff0 | 2023-06-16 01:20:55 +0200 | [diff] [blame] | 1751 | #define VBIF_RESET_ACK_MASK 0xF0 |
| 1752 | #define GPR0_GBIF_HALT_REQUEST 0x1E0 |
Konrad Dybcio | 6e332c9 | 2023-06-16 01:20:47 +0200 | [diff] [blame] | 1753 | |
| 1754 | void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off) |
| 1755 | { |
| 1756 | struct msm_gpu *gpu = &adreno_gpu->base; |
| 1757 | |
Konrad Dybcio | 8296ff0 | 2023-06-16 01:20:55 +0200 | [diff] [blame] | 1758 | if (adreno_is_a619_holi(adreno_gpu)) { |
| 1759 | gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, GPR0_GBIF_HALT_REQUEST); |
| 1760 | spin_until((gpu_read(gpu, REG_A6XX_RBBM_VBIF_GX_RESET_STATUS) & |
| 1761 | (VBIF_RESET_ACK_MASK)) == VBIF_RESET_ACK_MASK); |
| 1762 | } else if (!a6xx_has_gbif(adreno_gpu)) { |
Konrad Dybcio | 3773a57 | 2023-06-16 01:20:48 +0200 | [diff] [blame] | 1763 | gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, VBIF_XIN_HALT_CTRL0_MASK); |
Konrad Dybcio | 6e332c9 | 2023-06-16 01:20:47 +0200 | [diff] [blame] | 1764 | spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & |
Konrad Dybcio | 3773a57 | 2023-06-16 01:20:48 +0200 | [diff] [blame] | 1765 | (VBIF_XIN_HALT_CTRL0_MASK)) == VBIF_XIN_HALT_CTRL0_MASK); |
Konrad Dybcio | 6e332c9 | 2023-06-16 01:20:47 +0200 | [diff] [blame] | 1766 | gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); |
| 1767 | |
| 1768 | return; |
| 1769 | } |
| 1770 | |
| 1771 | if (gx_off) { |
| 1772 | /* Halt the gx side of GBIF */ |
| 1773 | gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1); |
| 1774 | spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1); |
| 1775 | } |
| 1776 | |
| 1777 | /* Halt new client requests on GBIF */ |
| 1778 | gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK); |
| 1779 | spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & |
| 1780 | (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK); |
| 1781 | |
| 1782 | /* Halt all AXI requests on GBIF */ |
| 1783 | gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK); |
| 1784 | spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & |
| 1785 | (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK); |
| 1786 | |
| 1787 | /* The GBIF halt needs to be explicitly cleared */ |
| 1788 | gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); |
| 1789 | } |
| 1790 | |
Konrad Dybcio | 277b967 | 2023-06-16 01:20:49 +0200 | [diff] [blame] | 1791 | void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert) |
| 1792 | { |
Konrad Dybcio | e7fc939 | 2023-06-16 01:20:56 +0200 | [diff] [blame] | 1793 | /* 11nm chips (e.g. ones with A610) have hw issues with the reset line! */ |
| 1794 | if (adreno_is_a610(to_adreno_gpu(gpu))) |
| 1795 | return; |
| 1796 | |
Konrad Dybcio | 277b967 | 2023-06-16 01:20:49 +0200 | [diff] [blame] | 1797 | gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, assert); |
| 1798 | /* Perform a bogus read and add a brief delay to ensure ordering. */ |
| 1799 | gpu_read(gpu, REG_A6XX_RBBM_SW_RESET_CMD); |
| 1800 | udelay(1); |
| 1801 | |
| 1802 | /* The reset line needs to be asserted for at least 100 us */ |
| 1803 | if (assert) |
| 1804 | udelay(100); |
| 1805 | } |
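| | /*
| | * Callers pair the assert/deassert, as in the GMU-wrapper recovery
| | * path above:
| | *
| | *	a6xx_gpu_sw_reset(gpu, true);	(assert, held for >= 100 us)
| | *	a6xx_gpu_sw_reset(gpu, false);	(deassert)
| | */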
| 1806 | |
Konrad Dybcio | 5a903a4 | 2023-06-16 01:20:53 +0200 | [diff] [blame] | 1807 | static int a6xx_gmu_pm_resume(struct msm_gpu *gpu) |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1808 | { |
| 1809 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
| 1810 | struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); |
| 1811 | int ret; |
| 1812 | |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1813 | gpu->needs_hw_init = true; |
| 1814 | |
Rob Clark | ec1cb6e | 2020-09-01 08:41:56 -0700 | [diff] [blame] | 1815 | trace_msm_gpu_resume(0); |
| 1816 | |
Rob Clark | f6f5907 | 2021-09-27 11:00:04 -0700 | [diff] [blame] | 1817 | mutex_lock(&a6xx_gpu->gmu.lock); |
Jordan Crouse | 41570b7 | 2019-02-04 09:15:43 -0700 | [diff] [blame] | 1818 | ret = a6xx_gmu_resume(a6xx_gpu); |
Rob Clark | f6f5907 | 2021-09-27 11:00:04 -0700 | [diff] [blame] | 1819 | mutex_unlock(&a6xx_gpu->gmu.lock); |
Jordan Crouse | 41570b7 | 2019-02-04 09:15:43 -0700 | [diff] [blame] | 1820 | if (ret) |
| 1821 | return ret; |
| 1822 | |
Rob Clark | af5b4ff | 2021-07-26 07:46:48 -0700 | [diff] [blame] | 1823 | msm_devfreq_resume(gpu); |
Sharat Masetty | a2c3c0a | 2018-10-04 15:11:43 +0530 | [diff] [blame] | 1824 | |
Rob Clark | 0776ad9 | 2024-01-02 11:33:45 -0800 | [diff] [blame] | 1825 | if (adreno_is_a7xx(adreno_gpu))
| | a7xx_llc_activate(a6xx_gpu);
| | else
| | a6xx_llc_activate(a6xx_gpu);
Sharat Masetty | 474dadb | 2020-11-25 12:30:15 +0530 | [diff] [blame] | 1826 | |
Konrad Dybcio | 5a903a4 | 2023-06-16 01:20:53 +0200 | [diff] [blame] | 1827 | return ret; |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1828 | } |
| 1829 | |
Konrad Dybcio | 5a903a4 | 2023-06-16 01:20:53 +0200 | [diff] [blame] | 1830 | static int a6xx_pm_resume(struct msm_gpu *gpu) |
| 1831 | { |
| 1832 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
| 1833 | struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); |
| 1834 | struct a6xx_gmu *gmu = &a6xx_gpu->gmu; |
| 1835 | unsigned long freq = gpu->fast_rate; |
| 1836 | struct dev_pm_opp *opp; |
| 1837 | int ret; |
| 1838 | |
| 1839 | gpu->needs_hw_init = true; |
| 1840 | |
| 1841 | trace_msm_gpu_resume(0); |
| 1842 | |
| 1843 | mutex_lock(&a6xx_gpu->gmu.lock); |
| 1844 | |
| 1845 | opp = dev_pm_opp_find_freq_ceil(&gpu->pdev->dev, &freq); |
| 1846 | if (IS_ERR(opp)) { |
| 1847 | ret = PTR_ERR(opp); |
| 1848 | goto err_set_opp; |
| 1849 | } |
| 1850 | dev_pm_opp_put(opp); |
| 1851 | |
| 1852 | /* Set the core clock and bus bw, having VDD scaling in mind */ |
| 1853 | dev_pm_opp_set_opp(&gpu->pdev->dev, opp); |
| 1854 | |
| 1855 | pm_runtime_resume_and_get(gmu->dev); |
| 1856 | pm_runtime_resume_and_get(gmu->gxpd); |
| 1857 | |
| 1858 | ret = clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks); |
| 1859 | if (ret) |
| 1860 | goto err_bulk_clk; |
| 1861 | |
Konrad Dybcio | 8296ff0 | 2023-06-16 01:20:55 +0200 | [diff] [blame] | 1862 | if (adreno_is_a619_holi(adreno_gpu)) |
| 1863 | a6xx_sptprac_enable(gmu); |
| 1864 | |
Konrad Dybcio | 5a903a4 | 2023-06-16 01:20:53 +0200 | [diff] [blame] | 1865 | /* If anything goes south, tear the GPU down piece by piece.. */ |
| 1866 | if (ret) { |
| 1867 | err_bulk_clk: |
| 1868 | pm_runtime_put(gmu->gxpd); |
| 1869 | pm_runtime_put(gmu->dev); |
| 1870 | dev_pm_opp_set_opp(&gpu->pdev->dev, NULL); |
| 1871 | } |
| 1872 | err_set_opp: |
| 1873 | mutex_unlock(&a6xx_gpu->gmu.lock); |
| 1874 | |
| 1875 | if (!ret) |
| 1876 | msm_devfreq_resume(gpu); |
| 1877 | |
| 1878 | return ret; |
| 1879 | } |
| 1880 | |
| 1881 | static int a6xx_gmu_pm_suspend(struct msm_gpu *gpu) |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1882 | { |
| 1883 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
| 1884 | struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); |
Rob Clark | e8b0b99 | 2020-11-10 10:23:06 -0800 | [diff] [blame] | 1885 | int i, ret; |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1886 | |
Rob Clark | ec1cb6e | 2020-09-01 08:41:56 -0700 | [diff] [blame] | 1887 | trace_msm_gpu_suspend(0); |
| 1888 | |
Sharat Masetty | 474dadb | 2020-11-25 12:30:15 +0530 | [diff] [blame] | 1889 | a6xx_llc_deactivate(a6xx_gpu); |
| 1890 | |
Rob Clark | af5b4ff | 2021-07-26 07:46:48 -0700 | [diff] [blame] | 1891 | msm_devfreq_suspend(gpu); |
Sharat Masetty | a2c3c0a | 2018-10-04 15:11:43 +0530 | [diff] [blame] | 1892 | |
Rob Clark | f6f5907 | 2021-09-27 11:00:04 -0700 | [diff] [blame] | 1893 | mutex_lock(&a6xx_gpu->gmu.lock); |
Rob Clark | e8b0b99 | 2020-11-10 10:23:06 -0800 | [diff] [blame] | 1894 | ret = a6xx_gmu_stop(a6xx_gpu); |
Rob Clark | f6f5907 | 2021-09-27 11:00:04 -0700 | [diff] [blame] | 1895 | mutex_unlock(&a6xx_gpu->gmu.lock); |
Rob Clark | e8b0b99 | 2020-11-10 10:23:06 -0800 | [diff] [blame] | 1896 | if (ret) |
| 1897 | return ret; |
| 1898 | |
Jonathan Marek | ce86c23 | 2021-05-13 13:14:00 -0400 | [diff] [blame] | 1899 | if (a6xx_gpu->shadow_bo) |
Rob Clark | e8b0b99 | 2020-11-10 10:23:06 -0800 | [diff] [blame] | 1900 | for (i = 0; i < gpu->nr_rings; i++) |
| 1901 | a6xx_gpu->shadow[i] = 0; |
| 1902 | |
Rob Clark | 860a7b2 | 2022-01-13 08:32:13 -0800 | [diff] [blame] | 1903 | gpu->suspend_count++; |
| 1904 | |
Rob Clark | e8b0b99 | 2020-11-10 10:23:06 -0800 | [diff] [blame] | 1905 | return 0; |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1906 | } |
| 1907 | |
Konrad Dybcio | 5a903a4 | 2023-06-16 01:20:53 +0200 | [diff] [blame] | 1908 | static int a6xx_pm_suspend(struct msm_gpu *gpu) |
| 1909 | { |
| 1910 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
| 1911 | struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); |
| 1912 | struct a6xx_gmu *gmu = &a6xx_gpu->gmu; |
| 1913 | int i; |
| 1914 | |
| 1915 | trace_msm_gpu_suspend(0); |
| 1916 | |
| 1917 | msm_devfreq_suspend(gpu); |
| 1918 | |
| 1919 | mutex_lock(&a6xx_gpu->gmu.lock); |
| 1920 | |
| 1921 | /* Drain the outstanding traffic on memory buses */ |
| 1922 | a6xx_bus_clear_pending_transactions(adreno_gpu, true); |
| 1923 | |
Konrad Dybcio | 8296ff0 | 2023-06-16 01:20:55 +0200 | [diff] [blame] | 1924 | if (adreno_is_a619_holi(adreno_gpu)) |
| 1925 | a6xx_sptprac_disable(gmu); |
| 1926 | |
Konrad Dybcio | 5a903a4 | 2023-06-16 01:20:53 +0200 | [diff] [blame] | 1927 | clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks); |
| 1928 | |
| 1929 | pm_runtime_put_sync(gmu->gxpd); |
| 1930 | dev_pm_opp_set_opp(&gpu->pdev->dev, NULL); |
| 1931 | pm_runtime_put_sync(gmu->dev); |
| 1932 | |
| 1933 | mutex_unlock(&a6xx_gpu->gmu.lock); |
| 1934 | |
| 1935 | if (a6xx_gpu->shadow_bo) |
| 1936 | for (i = 0; i < gpu->nr_rings; i++) |
| 1937 | a6xx_gpu->shadow[i] = 0; |
| 1938 | |
| 1939 | gpu->suspend_count++; |
| 1940 | |
| 1941 | return 0; |
| 1942 | } |
| 1943 | |
| 1944 | static int a6xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value) |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1945 | { |
| 1946 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
| 1947 | struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); |
Eric Anholt | 5f98b33 | 2021-01-28 13:03:31 -0800 | [diff] [blame] | 1948 | |
Rob Clark | f6f5907 | 2021-09-27 11:00:04 -0700 | [diff] [blame] | 1949 | mutex_lock(&a6xx_gpu->gmu.lock); |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1950 | |
| 1951 | /* Force the GPU power on so we can read this register */ |
Eric Anholt | 7a7cbf2 | 2021-01-28 13:03:30 -0800 | [diff] [blame] | 1952 | a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1953 | |
Rob Clark | f73343f | 2023-03-20 11:54:14 -0700 | [diff] [blame] | 1954 | *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER); |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1955 | |
Eric Anholt | 7a7cbf2 | 2021-01-28 13:03:30 -0800 | [diff] [blame] | 1956 | a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); |
Rob Clark | f6f5907 | 2021-09-27 11:00:04 -0700 | [diff] [blame] | 1957 | |
| 1958 | mutex_unlock(&a6xx_gpu->gmu.lock); |
| 1959 | |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1960 | return 0; |
| 1961 | } |
| 1962 | |
Konrad Dybcio | 5a903a4 | 2023-06-16 01:20:53 +0200 | [diff] [blame] | 1963 | static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) |
| 1964 | { |
| 1965 | *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER); |
| 1966 | return 0; |
| 1967 | } |
| 1968 | |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1969 | static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu) |
| 1970 | { |
| 1971 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
| 1972 | struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); |
| 1973 | |
| 1974 | return a6xx_gpu->cur_ring; |
| 1975 | } |
| 1976 | |
| 1977 | static void a6xx_destroy(struct msm_gpu *gpu) |
| 1978 | { |
| 1979 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
| 1980 | struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); |
| 1981 | |
| 1982 | if (a6xx_gpu->sqe_bo) { |
Jordan Crouse | 7ad0e8c | 2018-11-07 15:35:51 -0700 | [diff] [blame] | 1983 | msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace); |
Emil Velikov | f7d3395 | 2020-05-15 10:51:04 +0100 | [diff] [blame] | 1984 | drm_gem_object_put(a6xx_gpu->sqe_bo); |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1985 | } |
| 1986 | |
Jordan Crouse | d3a569f | 2020-09-14 16:40:22 -0600 | [diff] [blame] | 1987 | if (a6xx_gpu->shadow_bo) { |
| 1988 | msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace); |
| 1989 | drm_gem_object_put(a6xx_gpu->shadow_bo); |
| 1990 | } |
| 1991 | |
Sharat Masetty | 474dadb | 2020-11-25 12:30:15 +0530 | [diff] [blame] | 1992 | a6xx_llc_slices_destroy(a6xx_gpu); |
| 1993 | |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1994 | a6xx_gmu_remove(a6xx_gpu); |
| 1995 | |
| 1996 | adreno_gpu_cleanup(adreno_gpu); |
Akhil P Oommen | fe7952c | 2021-01-08 23:45:30 +0530 | [diff] [blame] | 1997 | |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 1998 | kfree(a6xx_gpu); |
| 1999 | } |
| 2000 | |
Chia-I Wu | 15c4119 | 2022-04-15 17:33:13 -0700 | [diff] [blame] | 2001 | static u64 a6xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate) |
Sharat Masetty | a2c3c0a | 2018-10-04 15:11:43 +0530 | [diff] [blame] | 2002 | { |
| 2003 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
| 2004 | struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); |
Chia-I Wu | 15c4119 | 2022-04-15 17:33:13 -0700 | [diff] [blame] | 2005 | u64 busy_cycles; |
Sharat Masetty | a2c3c0a | 2018-10-04 15:11:43 +0530 | [diff] [blame] | 2006 | |
Chia-I Wu | 15c4119 | 2022-04-15 17:33:13 -0700 | [diff] [blame] | 2007 | /* The GMU busy counter accumulates at the 19.2 MHz XO clock rate */
| 2008 | *out_sample_rate = 19200000; |
Jordan Crouse | eadf792 | 2020-05-01 13:43:26 -0600 | [diff] [blame] | 2009 | |
Sharat Masetty | a2c3c0a | 2018-10-04 15:11:43 +0530 | [diff] [blame] | 2010 | busy_cycles = gmu_read64(&a6xx_gpu->gmu, |
| 2011 | REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L, |
| 2012 | REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H); |
| 2013 | |
Chia-I Wu | 15c4119 | 2022-04-15 17:33:13 -0700 | [diff] [blame] | 2014 | return busy_cycles; |
Sharat Masetty | a2c3c0a | 2018-10-04 15:11:43 +0530 | [diff] [blame] | 2015 | } |
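/*
 * A minimal sketch (not part of this driver) of how a consumer such as
 * the devfreq glue can turn gpu_busy() into a utilisation estimate.  The
 * helper name and the caller-owned bookkeeping are hypothetical; only the
 * callback contract above (monotonic busy cycles plus a sample rate in
 * Hz) is taken from the code.
 */
static unsigned int example_busy_percent(struct msm_gpu *gpu,
					 u64 *last_cycles, ktime_t *last_time)
{
	unsigned long sample_rate;
	u64 cycles = gpu->funcs->gpu_busy(gpu, &sample_rate);
	ktime_t now = ktime_get();
	u64 busy_us = div64_ul((cycles - *last_cycles) * USEC_PER_SEC,
			       sample_rate);
	u64 total_us = ktime_us_delta(now, *last_time);

	*last_cycles = cycles;
	*last_time = now;

	/* Clamp: rounding can nudge the busy time past the wall time */
	return total_us ? min_t(u64, busy_us * 100 / total_us, 100) : 0;
}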
| 2016 | |
Douglas Anderson | 6694482 | 2022-06-10 12:47:31 -0700 | [diff] [blame] | 2017 | static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp, |
| 2018 | bool suspended) |
Rob Clark | f6f5907 | 2021-09-27 11:00:04 -0700 | [diff] [blame] | 2019 | { |
| 2020 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
| 2021 | struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); |
| 2022 | |
| 2023 | mutex_lock(&a6xx_gpu->gmu.lock); |
Douglas Anderson | 6694482 | 2022-06-10 12:47:31 -0700 | [diff] [blame] | 2024 | a6xx_gmu_set_freq(gpu, opp, suspended); |
Rob Clark | f6f5907 | 2021-09-27 11:00:04 -0700 | [diff] [blame] | 2025 | mutex_unlock(&a6xx_gpu->gmu.lock); |
| 2026 | } |
| 2027 | |
Jordan Crouse | 84c31ee | 2020-08-17 15:01:41 -0700 | [diff] [blame] | 2028 | static struct msm_gem_address_space * |
Sai Prakash Ranjan | 45596f2 | 2021-01-11 17:34:09 +0530 | [diff] [blame] | 2029 | a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev) |
| 2030 | { |
| 2031 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
| 2032 | struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); |
Dmitry Baryshkov | 3236130 | 2022-11-02 20:54:48 +0300 | [diff] [blame] | 2033 | unsigned long quirks = 0; |
Sai Prakash Ranjan | 45596f2 | 2021-01-11 17:34:09 +0530 | [diff] [blame] | 2034 | |
| 2035 | /* |
| 2036 | * This allows the GPU to set the bus attributes required to use the
| 2037 | * system cache on behalf of the IOMMU page table walker.
| 2038 | */ |
Dmitry Baryshkov | 38e27a6f | 2023-04-10 21:52:26 +0300 | [diff] [blame] | 2039 | if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice) && |
| 2040 | !device_iommu_capable(&pdev->dev, IOMMU_CAP_CACHE_COHERENCY)) |
Dmitry Baryshkov | 3236130 | 2022-11-02 20:54:48 +0300 | [diff] [blame] | 2041 | quirks |= IO_PGTABLE_QUIRK_ARM_OUTER_WBWA; |
Sai Prakash Ranjan | 45596f2 | 2021-01-11 17:34:09 +0530 | [diff] [blame] | 2042 | |
Dmitry Baryshkov | 822ff99 | 2022-11-02 20:54:49 +0300 | [diff] [blame] | 2043 | return adreno_iommu_create_address_space(gpu, pdev, quirks); |
Sai Prakash Ranjan | 45596f2 | 2021-01-11 17:34:09 +0530 | [diff] [blame] | 2044 | } |
| 2045 | |
| 2046 | static struct msm_gem_address_space * |
Jordan Crouse | 84c31ee | 2020-08-17 15:01:41 -0700 | [diff] [blame] | 2047 | a6xx_create_private_address_space(struct msm_gpu *gpu) |
| 2048 | { |
| 2049 | struct msm_mmu *mmu; |
| 2050 | |
| 2051 | mmu = msm_iommu_pagetable_create(gpu->aspace->mmu); |
| 2052 | |
| 2053 | if (IS_ERR(mmu)) |
| 2054 | return ERR_CAST(mmu); |
| 2055 | |
| 2056 | return msm_gem_address_space_create(mmu, |
Rob Clark | 36bbfdb | 2022-05-29 11:04:23 -0700 | [diff] [blame] | 2057 | "gpu", 0x100000000ULL, |
| 2058 | adreno_private_address_space_size(gpu)); |
Jordan Crouse | 84c31ee | 2020-08-17 15:01:41 -0700 | [diff] [blame] | 2059 | } |
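/*
 * A hedged sketch of how a private address space is consumed; the actual
 * submit-time pagetable switch lives elsewhere in this file and in the
 * msm core.  Each userspace context can run under its own TTBR0 pagetable
 * while sharing the single GPU SMMU context bank:
 *
 *	struct msm_gem_address_space *aspace;
 *	phys_addr_t ttbr;
 *	int asid;
 *
 *	aspace = gpu->funcs->create_private_address_space(gpu);
 *	msm_iommu_pagetable_params(aspace->mmu, &ttbr, &asid);
 *	// the ring then switches pagetables via CP_SMMU_TABLE_UPDATE
 */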
| 2060 | |
Jordan Crouse | d3a569f | 2020-09-14 16:40:22 -0600 | [diff] [blame] | 2061 | static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) |
| 2062 | { |
| 2063 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
| 2064 | struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); |
| 2065 | |
| 2066 | if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) |
| 2067 | return a6xx_gpu->shadow[ring->id]; |
| 2068 | |
| 2069 | return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR); |
| 2070 | } |
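/*
 * A sketch of the shadow mechanism this helper depends on (assumption:
 * it mirrors the WHERE_AM_I handling earlier in this file).  The CP is
 * periodically asked to publish its read pointer to shadow memory:
 *
 *	OUT_PKT7(ring, CP_WHERE_AM_I, 2);
 *	OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring)));
 *	OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));
 *
 * so the CPU can read a6xx_gpu->shadow[ring->id] without waking a
 * possibly power-collapsed GPU to access REG_A6XX_CP_RB_RPTR.
 */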
| 2071 | |
Rob Clark | d73b1d0 | 2022-11-14 11:30:41 -0800 | [diff] [blame] | 2072 | static bool a6xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring) |
| 2073 | { |
| 2074 | struct msm_cp_state cp_state = { |
| 2075 | .ib1_base = gpu_read64(gpu, REG_A6XX_CP_IB1_BASE), |
| 2076 | .ib2_base = gpu_read64(gpu, REG_A6XX_CP_IB2_BASE), |
| 2077 | .ib1_rem = gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE), |
| 2078 | .ib2_rem = gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE), |
| 2079 | }; |
| 2080 | bool progress; |
| 2081 | |
| 2082 | /* |
| 2083 | * Adjust the remaining data to account for what has already been |
| 2084 | * fetched from memory, but not yet consumed by the SQE. |
| 2085 | * |
| 2086 | * This is not *technically* correct: the amount buffered could
| 2087 | * exceed the IB size due to hw prefetching ahead, but: |
| 2088 | * |
| 2089 | * (1) We aren't trying to find the exact position, just whether |
| 2090 | * progress has been made |
| 2091 | * (2) The CP_REG_TO_MEM at the end of a submit should be enough |
| 2092 | * to prevent prefetching into an unrelated submit. (And |
| 2093 | * either way, at some point the ROQ will be full.) |
| 2094 | */ |
Rob Clark | f73343f | 2023-03-20 11:54:14 -0700 | [diff] [blame] | 2095 | cp_state.ib1_rem += gpu_read(gpu, REG_A6XX_CP_ROQ_AVAIL_IB1) >> 16; |
| 2096 | cp_state.ib2_rem += gpu_read(gpu, REG_A6XX_CP_ROQ_AVAIL_IB2) >> 16; |
Rob Clark | d73b1d0 | 2022-11-14 11:30:41 -0800 | [diff] [blame] | 2097 | |
| 2098 | progress = !!memcmp(&cp_state, &ring->last_cp_state, sizeof(cp_state)); |
| 2099 | |
| 2100 | ring->last_cp_state = cp_state; |
| 2101 | |
| 2102 | return progress; |
| 2103 | } |
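/*
 * A hedged sketch of the intended caller (the hangcheck timer in the msm
 * core), showing why ->progress() exists: a ring whose fence is stuck but
 * whose CP state keeps changing is slow, not hung, so recovery can be
 * deferred.  The timeout constant and flow here are illustrative only.
 */
static void example_hangcheck_tick(struct msm_gpu *gpu,
				   struct msm_ringbuffer *ring)
{
	if (gpu->funcs->progress && gpu->funcs->progress(gpu, ring)) {
		/* The CP fetched or consumed data since the last tick */
		mod_timer(&gpu->hangcheck_timer,
			  jiffies + msecs_to_jiffies(500));
		return;
	}

	/* No observable progress: hand the GPU to the recovery worker */
	kthread_queue_work(gpu->worker, &gpu->recover_work);
}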
| 2104 | |
Rob Clark | c928a05 | 2023-07-27 14:20:12 -0700 | [diff] [blame] | 2105 | static u32 fuse_to_supp_hw(const struct adreno_info *info, u32 fuse) |
Konrad Dybcio | cd036d5 | 2023-06-16 01:21:01 +0200 | [diff] [blame] | 2106 | { |
Rob Clark | c928a05 | 2023-07-27 14:20:12 -0700 | [diff] [blame] | 2107 | if (!info->speedbins) |
Akhil P Oommen | fe7952c | 2021-01-08 23:45:30 +0530 | [diff] [blame] | 2108 | return UINT_MAX; |
Akhil P Oommen | fe7952c | 2021-01-08 23:45:30 +0530 | [diff] [blame] | 2109 | |
Rob Clark | c928a05 | 2023-07-27 14:20:12 -0700 | [diff] [blame] | 2110 | for (int i = 0; info->speedbins[i].fuse != SHRT_MAX; i++) |
| 2111 | if (info->speedbins[i].fuse == fuse) |
| 2112 | return BIT(info->speedbins[i].speedbin); |
| 2113 | |
| 2114 | return UINT_MAX; |
Akhil P Oommen | fe7952c | 2021-01-08 23:45:30 +0530 | [diff] [blame] | 2115 | } |
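/*
 * A worked example with a hypothetical speedbin table, the same shape as
 * the SHRT_MAX-terminated arrays this function walks:
 *
 *	static const struct adreno_speedbin example_speedbins[] = {
 *		{ 0,   0 },
 *		{ 169, 1 },
 *		{ 174, 2 },
 *		{ SHRT_MAX, 0 },	// sentinel, never matched
 *	};
 *
 * A fuse value of 169 maps to BIT(1) == 0x2; a fuse with no entry yields
 * UINT_MAX, which the caller below reports and downgrades to BIT(0).
 */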
| 2116 | |
Rob Clark | c928a05 | 2023-07-27 14:20:12 -0700 | [diff] [blame] | 2117 | static int a6xx_set_supported_hw(struct device *dev, const struct adreno_info *info) |
Akhil P Oommen | fe7952c | 2021-01-08 23:45:30 +0530 | [diff] [blame] | 2118 | { |
Rob Clark | f6d1918 | 2022-11-15 07:46:34 -0800 | [diff] [blame] | 2119 | u32 supp_hw; |
Douglas Anderson | c9f737c | 2021-05-21 13:45:50 -0700 | [diff] [blame] | 2120 | u32 speedbin; |
Douglas Anderson | 7bf168c | 2021-02-26 16:26:01 -0800 | [diff] [blame] | 2121 | int ret; |
Akhil P Oommen | fe7952c | 2021-01-08 23:45:30 +0530 | [diff] [blame] | 2122 | |
Akhil P Oommen | afab9d9 | 2022-02-26 00:51:31 +0530 | [diff] [blame] | 2123 | ret = adreno_read_speedbin(dev, &speedbin); |
John Stultz | 2b0b219 | 2021-03-30 01:34:08 +0000 | [diff] [blame] | 2124 | /* |
| 2125 | * -ENOENT means that the platform doesn't support speedbin, which
| 2126 | * is fine
| 2127 | */ |
| 2128 | if (ret == -ENOENT) { |
| 2129 | return 0; |
| 2130 | } else if (ret) { |
Rob Clark | f6d1918 | 2022-11-15 07:46:34 -0800 | [diff] [blame] | 2131 | dev_err_probe(dev, ret, |
| 2132 | "failed to read speed-bin. Some OPPs may not be supported by hardware\n"); |
| 2133 | return ret; |
Akhil P Oommen | fe7952c | 2021-01-08 23:45:30 +0530 | [diff] [blame] | 2134 | } |
| 2135 | |
Rob Clark | c928a05 | 2023-07-27 14:20:12 -0700 | [diff] [blame] | 2136 | supp_hw = fuse_to_supp_hw(info, speedbin); |
| 2137 | |
| 2138 | if (supp_hw == UINT_MAX) { |
| 2139 | DRM_DEV_ERROR(dev, |
| 2140 | "missing support for speed-bin: %u. Some OPPs may not be supported by hardware\n", |
| 2141 | speedbin); |
Konrad Dybcio | 75cb60d | 2023-09-26 20:24:36 +0200 | [diff] [blame] | 2142 | supp_hw = BIT(0); /* Default */ |
Rob Clark | c928a05 | 2023-07-27 14:20:12 -0700 | [diff] [blame] | 2143 | } |
Akhil P Oommen | fe7952c | 2021-01-08 23:45:30 +0530 | [diff] [blame] | 2144 | |
Yangtao Li | 11120e9 | 2021-03-14 19:34:04 +0300 | [diff] [blame] | 2145 | ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1); |
| 2146 | if (ret) |
| 2147 | return ret; |
Akhil P Oommen | fe7952c | 2021-01-08 23:45:30 +0530 | [diff] [blame] | 2148 | |
Akhil P Oommen | fe7952c | 2021-01-08 23:45:30 +0530 | [diff] [blame] | 2149 | return 0; |
| 2150 | } |
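/*
 * An illustrative OPP-table fragment (values invented) showing what the
 * computed mask is matched against: devm_pm_opp_set_supported_hw() keeps
 * only the OPPs whose opp-supported-hw bits intersect supp_hw.  With
 * supp_hw == BIT(1), the first entry below survives, the second does not:
 *
 *	opp-800000000 {
 *		opp-hz = /bits/ 64 <800000000>;
 *		opp-supported-hw = <0x3>;	// speedbins 0 and 1
 *	};
 *	opp-900000000 {
 *		opp-hz = /bits/ 64 <900000000>;
 *		opp-supported-hw = <0x1>;	// speedbin 0 only
 *	};
 */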
| 2151 | |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 2152 | static const struct adreno_gpu_funcs funcs = { |
| 2153 | .base = { |
| 2154 | .get_param = adreno_get_param, |
Rob Clark | f7ddbf5 | 2022-03-03 16:52:15 -0800 | [diff] [blame] | 2155 | .set_param = adreno_set_param, |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 2156 | .hw_init = a6xx_hw_init, |
Rob Clark | 8ead967 | 2023-03-20 07:43:35 -0700 | [diff] [blame] | 2157 | .ucode_load = a6xx_ucode_load, |
Konrad Dybcio | 5a903a4 | 2023-06-16 01:20:53 +0200 | [diff] [blame] | 2158 | .pm_suspend = a6xx_gmu_pm_suspend, |
| 2159 | .pm_resume = a6xx_gmu_pm_resume, |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 2160 | .recover = a6xx_recover, |
| 2161 | .submit = a6xx_submit, |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 2162 | .active_ring = a6xx_active_ring, |
| 2163 | .irq = a6xx_irq, |
| 2164 | .destroy = a6xx_destroy, |
Jordan Crouse | b02872d | 2019-04-10 10:58:16 -0600 | [diff] [blame] | 2165 | #if defined(CONFIG_DRM_MSM_GPU_STATE) |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 2166 | .show = a6xx_show, |
| 2167 | #endif |
Sharat Masetty | a2c3c0a | 2018-10-04 15:11:43 +0530 | [diff] [blame] | 2168 | .gpu_busy = a6xx_gpu_busy, |
| 2169 | .gpu_get_freq = a6xx_gmu_get_freq, |
Rob Clark | f6f5907 | 2021-09-27 11:00:04 -0700 | [diff] [blame] | 2170 | .gpu_set_freq = a6xx_gpu_set_freq, |
Jordan Crouse | b02872d | 2019-04-10 10:58:16 -0600 | [diff] [blame] | 2171 | #if defined(CONFIG_DRM_MSM_GPU_STATE) |
Jordan Crouse | 1707add | 2018-11-02 09:25:25 -0600 | [diff] [blame] | 2172 | .gpu_state_get = a6xx_gpu_state_get, |
| 2173 | .gpu_state_put = a6xx_gpu_state_put, |
Jordan Crouse | b02872d | 2019-04-10 10:58:16 -0600 | [diff] [blame] | 2174 | #endif |
Sai Prakash Ranjan | 45596f2 | 2021-01-11 17:34:09 +0530 | [diff] [blame] | 2175 | .create_address_space = a6xx_create_address_space, |
Jordan Crouse | 84c31ee | 2020-08-17 15:01:41 -0700 | [diff] [blame] | 2176 | .create_private_address_space = a6xx_create_private_address_space, |
Jordan Crouse | d3a569f | 2020-09-14 16:40:22 -0600 | [diff] [blame] | 2177 | .get_rptr = a6xx_get_rptr, |
Rob Clark | d73b1d0 | 2022-11-14 11:30:41 -0800 | [diff] [blame] | 2178 | .progress = a6xx_progress, |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 2179 | }, |
Konrad Dybcio | 5a903a4 | 2023-06-16 01:20:53 +0200 | [diff] [blame] | 2180 | .get_timestamp = a6xx_gmu_get_timestamp, |
| 2181 | }; |
| 2182 | |
| 2183 | static const struct adreno_gpu_funcs funcs_gmuwrapper = { |
| 2184 | .base = { |
| 2185 | .get_param = adreno_get_param, |
| 2186 | .set_param = adreno_set_param, |
| 2187 | .hw_init = a6xx_hw_init, |
| 2188 | .ucode_load = a6xx_ucode_load, |
| 2189 | .pm_suspend = a6xx_pm_suspend, |
| 2190 | .pm_resume = a6xx_pm_resume, |
| 2191 | .recover = a6xx_recover, |
| 2192 | .submit = a6xx_submit, |
| 2193 | .active_ring = a6xx_active_ring, |
| 2194 | .irq = a6xx_irq, |
| 2195 | .destroy = a6xx_destroy, |
| 2196 | #if defined(CONFIG_DRM_MSM_GPU_STATE) |
| 2197 | .show = a6xx_show, |
| 2198 | #endif |
| 2199 | .gpu_busy = a6xx_gpu_busy, |
| 2200 | #if defined(CONFIG_DRM_MSM_GPU_STATE) |
| 2201 | .gpu_state_get = a6xx_gpu_state_get, |
| 2202 | .gpu_state_put = a6xx_gpu_state_put, |
| 2203 | #endif |
| 2204 | .create_address_space = a6xx_create_address_space, |
| 2205 | .create_private_address_space = a6xx_create_private_address_space, |
| 2206 | .get_rptr = a6xx_get_rptr, |
| 2207 | .progress = a6xx_progress, |
| 2208 | }, |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 2209 | .get_timestamp = a6xx_get_timestamp, |
| 2210 | }; |
| 2211 | |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 2212 | static const struct adreno_gpu_funcs funcs_a7xx = { |
| 2213 | .base = { |
| 2214 | .get_param = adreno_get_param, |
| 2215 | .set_param = adreno_set_param, |
| 2216 | .hw_init = a6xx_hw_init, |
| 2217 | .ucode_load = a6xx_ucode_load, |
| 2218 | .pm_suspend = a6xx_gmu_pm_suspend, |
| 2219 | .pm_resume = a6xx_gmu_pm_resume, |
| 2220 | .recover = a6xx_recover, |
| 2221 | .submit = a7xx_submit, |
| 2222 | .active_ring = a6xx_active_ring, |
| 2223 | .irq = a6xx_irq, |
| 2224 | .destroy = a6xx_destroy, |
| 2225 | #if defined(CONFIG_DRM_MSM_GPU_STATE) |
| 2226 | .show = a6xx_show, |
| 2227 | #endif |
| 2228 | .gpu_busy = a6xx_gpu_busy, |
| 2229 | .gpu_get_freq = a6xx_gmu_get_freq, |
| 2230 | .gpu_set_freq = a6xx_gpu_set_freq, |
| 2231 | #if defined(CONFIG_DRM_MSM_GPU_STATE) |
| 2232 | .gpu_state_get = a6xx_gpu_state_get, |
| 2233 | .gpu_state_put = a6xx_gpu_state_put, |
| 2234 | #endif |
| 2235 | .create_address_space = a6xx_create_address_space, |
| 2236 | .create_private_address_space = a6xx_create_private_address_space, |
| 2237 | .get_rptr = a6xx_get_rptr, |
| 2238 | .progress = a6xx_progress, |
| 2239 | }, |
| 2240 | .get_timestamp = a6xx_gmu_get_timestamp, |
| 2241 | }; |
| 2242 | |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 2243 | struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) |
| 2244 | { |
| 2245 | struct msm_drm_private *priv = dev->dev_private; |
| 2246 | struct platform_device *pdev = priv->gpu_pdev; |
Jordan Crouse | e9ba8d5 | 2020-09-15 10:35:51 -0600 | [diff] [blame] | 2247 | struct adreno_platform_config *config = pdev->dev.platform_data; |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 2248 | struct device_node *node; |
| 2249 | struct a6xx_gpu *a6xx_gpu; |
| 2250 | struct adreno_gpu *adreno_gpu; |
| 2251 | struct msm_gpu *gpu; |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 2252 | bool is_a7xx; |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 2253 | int ret; |
| 2254 | |
| 2255 | a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL); |
| 2256 | if (!a6xx_gpu) |
| 2257 | return ERR_PTR(-ENOMEM); |
| 2258 | |
| 2259 | adreno_gpu = &a6xx_gpu->base; |
| 2260 | gpu = &adreno_gpu->base; |
| 2261 | |
Dmitry Baryshkov | 12abd735 | 2023-04-10 19:59:08 +0300 | [diff] [blame] | 2262 | mutex_init(&a6xx_gpu->gmu.lock); |
| 2263 | |
Jordan Crouse | 1707add | 2018-11-02 09:25:25 -0600 | [diff] [blame] | 2264 | adreno_gpu->registers = NULL; |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 2265 | |
Konrad Dybcio | 5a903a4 | 2023-06-16 01:20:53 +0200 | [diff] [blame] | 2266 | /* Check if there is a GMU phandle and set it up */ |
| 2267 | node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0); |
| 2268 | /* FIXME: How do we gracefully handle this? */ |
| 2269 | BUG_ON(!node); |
| 2270 | |
| 2271 | adreno_gpu->gmu_is_wrapper = of_device_is_compatible(node, "qcom,adreno-gmu-wrapper"); |
| 2272 | |
Rob Clark | 47bd37f | 2023-07-27 14:20:16 -0700 | [diff] [blame] | 2273 | adreno_gpu->base.hw_apriv = |
| 2274 | !!(config->info->quirks & ADRENO_QUIRK_HAS_HW_APRIV); |
Jordan Crouse | 604234f | 2020-09-03 20:03:11 -0600 | [diff] [blame] | 2275 | |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 2276 | /* gpu->info only gets assigned in adreno_gpu_init() */ |
Konrad Dybcio | 1f8c29e | 2023-09-25 16:50:38 +0200 | [diff] [blame] | 2277 | is_a7xx = config->info->family == ADRENO_7XX_GEN1 || |
Neil Armstrong | d2bcca0 | 2024-02-16 12:03:52 +0100 | [diff] [blame] | 2278 | config->info->family == ADRENO_7XX_GEN2 || |
| 2279 | config->info->family == ADRENO_7XX_GEN3; |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 2280 | |
| 2281 | a6xx_llc_slices_init(pdev, a6xx_gpu, is_a7xx); |
Sharat Masetty | 474dadb | 2020-11-25 12:30:15 +0530 | [diff] [blame] | 2282 | |
Rob Clark | 47bd37f | 2023-07-27 14:20:16 -0700 | [diff] [blame] | 2283 | ret = a6xx_set_supported_hw(&pdev->dev, config->info); |
Akhil P Oommen | fe7952c | 2021-01-08 23:45:30 +0530 | [diff] [blame] | 2284 | if (ret) { |
Konrad Dybcio | 46d4efc | 2024-04-12 10:53:25 +0200 | [diff] [blame] | 2285 | a6xx_llc_slices_destroy(a6xx_gpu); |
| 2286 | kfree(a6xx_gpu); |
Akhil P Oommen | fe7952c | 2021-01-08 23:45:30 +0530 | [diff] [blame] | 2287 | return ERR_PTR(ret); |
| 2288 | } |
| 2289 | |
Konrad Dybcio | af66706 | 2023-09-25 16:50:34 +0200 | [diff] [blame] | 2290 | if (is_a7xx) |
| 2291 | ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 1); |
| 2292 | else if (adreno_has_gmu_wrapper(adreno_gpu)) |
Konrad Dybcio | 5a903a4 | 2023-06-16 01:20:53 +0200 | [diff] [blame] | 2293 | ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_gmuwrapper, 1); |
| 2294 | else |
| 2295 | ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 2296 | if (ret) { |
| 2297 | a6xx_destroy(&(a6xx_gpu->base.base)); |
| 2298 | return ERR_PTR(ret); |
| 2299 | } |
| 2300 | |
Rob Clark | 2c1b774 | 2022-11-15 07:55:33 -0800 | [diff] [blame] | 2301 | /* |
| 2302 | * For now only clamp to idle freq for devices where this is known not |
| 2303 | * to cause power supply issues: |
| 2304 | */ |
| 2305 | if (adreno_is_a618(adreno_gpu) || adreno_is_7c3(adreno_gpu)) |
Rob Clark | 6563f60 | 2023-01-10 15:14:42 -0800 | [diff] [blame] | 2306 | priv->gpu_clamp_to_idle = true; |
Rob Clark | 2c1b774 | 2022-11-15 07:55:33 -0800 | [diff] [blame] | 2307 | |
Konrad Dybcio | 5a903a4 | 2023-06-16 01:20:53 +0200 | [diff] [blame] | 2308 | if (adreno_has_gmu_wrapper(adreno_gpu)) |
| 2309 | ret = a6xx_gmu_wrapper_init(a6xx_gpu, node); |
| 2310 | else |
| 2311 | ret = a6xx_gmu_init(a6xx_gpu, node); |
Miaoqian Lin | c56de48 | 2022-05-12 16:19:50 +0400 | [diff] [blame] | 2312 | of_node_put(node); |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 2313 | if (ret) { |
| 2314 | a6xx_destroy(&(a6xx_gpu->base.base)); |
| 2315 | return ERR_PTR(ret); |
| 2316 | } |
| 2317 | |
Connor Abbott | 14b27d5 | 2024-04-30 11:43:18 +0100 | [diff] [blame] | 2318 | if (adreno_is_a7xx(adreno_gpu)) { |
| 2319 | ret = a7xx_cx_mem_init(a6xx_gpu); |
| 2320 | if (ret) { |
| 2321 | a6xx_destroy(&(a6xx_gpu->base.base)); |
| 2322 | return ERR_PTR(ret); |
| 2323 | } |
| 2324 | } |
| 2325 | |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 2326 | if (gpu->aspace) |
| 2327 | msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, |
| 2328 | a6xx_fault_handler); |
| 2329 | |
Connor Abbott | 8814455 | 2023-12-07 21:30:47 +0000 | [diff] [blame] | 2330 | a6xx_calc_ubwc_config(adreno_gpu); |
| 2331 | |
Jordan Crouse | 4b565ca | 2018-08-06 11:33:24 -0600 | [diff] [blame] | 2332 | return gpu; |
| 2333 | } |
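/*
 * A hedged sketch of the call site, which lives in the adreno probe code
 * rather than this file: the chip-id match selects an adreno_info whose
 * ->init hook points here, roughly:
 *
 *	gpu = info->init(drm);		// i.e. a6xx_gpu_init(drm)
 *	if (IS_ERR(gpu))
 *		return PTR_ERR(gpu);
 *
 * after which the msm core owns scheduling, devfreq and hangcheck for
 * the device.
 */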