// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */

#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_gpu_trace.h"
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"

#include <linux/bitfield.h>
#include <linux/devfreq.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/pm_domain.h>
#include <linux/soc/qcom/llcc-qcom.h>

#define GPU_PAS_ID 13

static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	/* Check that the GMU is idle */
	if (!adreno_has_gmu_wrapper(adreno_gpu) && !a6xx_gmu_isidle(&a6xx_gpu->gmu))
		return false;

	/* Check that the CX master is idle */
	if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
			~A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER)
		return false;

	return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &
		A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);
}

static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	/* wait for CP to drain ringbuffer: */
	if (!adreno_idle(gpu, ring))
		return false;

	if (spin_until(_a6xx_check_idle(gpu))) {
		DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
			gpu->name, __builtin_return_address(0),
			gpu_read(gpu, REG_A6XX_RBBM_STATUS),
			gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS),
			gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
			gpu_read(gpu, REG_A6XX_CP_RB_WPTR));
		return false;
	}

	return true;
}

static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	/* Expanded APRIV doesn't need to issue the WHERE_AM_I opcode */
	if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) {
		OUT_PKT7(ring, CP_WHERE_AM_I, 2);
		OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring)));
		OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));
	}
}

static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	uint32_t wptr;
	unsigned long flags;

	update_shadow_rptr(gpu, ring);

	spin_lock_irqsave(&ring->preempt_lock, flags);

	/* Copy the shadow to the actual register */
	ring->cur = ring->next;

	/* Make sure to wrap wptr if we need to */
	wptr = get_wptr(ring);

	spin_unlock_irqrestore(&ring->preempt_lock, flags);

	/* Make sure everything is posted before making a decision */
	mb();

	gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
}

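/*
 * Snapshot a 64-bit counter register pair into GPU memory with a
 * CP_REG_TO_MEM packet, used to record per-submit statistics in the
 * ringbuffer memptrs.
 */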
static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
		u64 iova)
{
	OUT_PKT7(ring, CP_REG_TO_MEM, 3);
	OUT_RING(ring, CP_REG_TO_MEM_0_REG(counter) |
		CP_REG_TO_MEM_0_CNT(2) |
		CP_REG_TO_MEM_0_64B);
	OUT_RING(ring, lower_32_bits(iova));
	OUT_RING(ring, upper_32_bits(iova));
}

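/*
 * Emit the packet sequence that switches the GPU to the submitting
 * context's pagetable (per-process pagetables). Skipped when that
 * context is already current.
 */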
static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
		struct msm_ringbuffer *ring, struct msm_file_private *ctx)
{
	bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	phys_addr_t ttbr;
	u32 asid;
	u64 memptr = rbmemptr(ring, ttbr0);

	if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno)
		return;

	if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
		return;

	if (!sysprof) {
		if (!adreno_is_a7xx(adreno_gpu)) {
			/* Turn off protected mode to write to special registers */
			OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
			OUT_RING(ring, 0);
		}

		OUT_PKT4(ring, REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1);
		OUT_RING(ring, 1);
	}

	/* Execute the table update */
	OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 4);
	OUT_RING(ring, CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(lower_32_bits(ttbr)));

	OUT_RING(ring,
		CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(upper_32_bits(ttbr)) |
		CP_SMMU_TABLE_UPDATE_1_ASID(asid));
	OUT_RING(ring, CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(0));
	OUT_RING(ring, CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(0));

	/*
	 * Write the new TTBR0 to the memstore. This is good for debugging.
	 */
	OUT_PKT7(ring, CP_MEM_WRITE, 4);
	OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr)));
	OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr)));
	OUT_RING(ring, lower_32_bits(ttbr));
	OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr));

	/*
	 * Sync both threads after switching pagetables and enable BR only
	 * to make sure BV doesn't race ahead while BR is still switching
	 * pagetables.
	 */
	if (adreno_is_a7xx(&a6xx_gpu->base)) {
		OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
		OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR);
	}

	/*
	 * And finally, trigger a uche flush to be sure there isn't anything
	 * lingering in that part of the GPU
	 */

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, CACHE_INVALIDATE);

	if (!sysprof) {
		/*
		 * Wait for SRAM clear after the pgtable update, so the
		 * two can happen in parallel:
		 */
		OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
		OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ));
		OUT_RING(ring, CP_WAIT_REG_MEM_1_POLL_ADDR_LO(
				REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS));
		OUT_RING(ring, CP_WAIT_REG_MEM_2_POLL_ADDR_HI(0));
		OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(0x1));
		OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(0x1));
		OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0));

		if (!adreno_is_a7xx(adreno_gpu)) {
			/* Re-enable protected mode: */
			OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
			OUT_RING(ring, 1);
		}
	}
}

static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct msm_ringbuffer *ring = submit->ring;
	unsigned int i, ibs = 0;

	a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);

	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
		rbmemptr_stats(ring, index, cpcycles_start));

	/*
	 * For PM4 the GMU register offsets are calculated from the base of the
	 * GPU registers so we need to add 0x1a800 to the register value on A630
	 * to get the right value from PM4.
	 */
	get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
		rbmemptr_stats(ring, index, alwayson_start));

	/* Invalidate CCU depth and color */
	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_DEPTH));

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_COLOR));

	/* Submit the commands */
	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
				break;
			fallthrough;
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			ibs++;
			break;
		}

		/*
		 * Periodically update shadow-wptr if needed, so that we
		 * can see partial progress of submits with large # of
		 * cmds.. otherwise we could needlessly stall waiting for
		 * ringbuffer state, simply due to looking at a shadow
		 * rptr value that has not been updated
		 */
		if ((ibs % 32) == 0)
			update_shadow_rptr(gpu, ring);
	}

	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
		rbmemptr_stats(ring, index, cpcycles_end));
	get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
		rbmemptr_stats(ring, index, alwayson_end));

	/* Write the fence to the scratch register */
	OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
	OUT_RING(ring, submit->seqno);

	/*
	 * Execute a CACHE_FLUSH_TS event. This will ensure that the
	 * timestamp is written to the memory and then triggers the interrupt
	 */
	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) |
		CP_EVENT_WRITE_0_IRQ);
	OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
	OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
	OUT_RING(ring, submit->seqno);

	trace_msm_gpu_submit_flush(submit,
		gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER));

	a6xx_flush(gpu, ring);
}

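/*
 * A7xx submission path: the same flow as a6xx_submit() plus management
 * of the BR/BV concurrent-binning CP threads and a BV-side fence so the
 * final timestamp is only written once both pipes are done.
 */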
static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct msm_ringbuffer *ring = submit->ring;
	unsigned int i, ibs = 0;

	/*
	 * Toggle concurrent binning for pagetable switch and set the thread to
	 * BR since only it can execute the pagetable switch packets.
	 */
	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
	OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR);

	a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);

	get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0),
		rbmemptr_stats(ring, index, cpcycles_start));
	get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
		rbmemptr_stats(ring, index, alwayson_start));

	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
	OUT_RING(ring, CP_SET_THREAD_BOTH);

	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, 0x101); /* IFPC disable */

	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, 0x00d); /* IB1LIST start */

	/* Submit the commands */
	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
				break;
			fallthrough;
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			ibs++;
			break;
		}

		/*
		 * Periodically update shadow-wptr if needed, so that we
		 * can see partial progress of submits with large # of
		 * cmds.. otherwise we could needlessly stall waiting for
		 * ringbuffer state, simply due to looking at a shadow
		 * rptr value that has not been updated
		 */
		if ((ibs % 32) == 0)
			update_shadow_rptr(gpu, ring);
	}

	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, 0x00e); /* IB1LIST end */

	get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0),
		rbmemptr_stats(ring, index, cpcycles_end));
	get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
		rbmemptr_stats(ring, index, alwayson_end));

	/* Write the fence to the scratch register */
	OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
	OUT_RING(ring, submit->seqno);

	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
	OUT_RING(ring, CP_SET_THREAD_BR);

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, CCU_INVALIDATE_DEPTH);

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, CCU_INVALIDATE_COLOR);

	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
	OUT_RING(ring, CP_SET_THREAD_BV);

	/*
	 * Make sure the timestamp is committed once BV pipe is
	 * completely done with this submission.
	 */
	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
	OUT_RING(ring, CACHE_CLEAN | BIT(27));
	OUT_RING(ring, lower_32_bits(rbmemptr(ring, bv_fence)));
	OUT_RING(ring, upper_32_bits(rbmemptr(ring, bv_fence)));
	OUT_RING(ring, submit->seqno);

	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
	OUT_RING(ring, CP_SET_THREAD_BR);

	/*
	 * This makes sure that BR doesn't race ahead and commit
	 * timestamp to memstore while BV is still processing
	 * this submission.
	 */
	OUT_PKT7(ring, CP_WAIT_TIMESTAMP, 4);
	OUT_RING(ring, 0);
	OUT_RING(ring, lower_32_bits(rbmemptr(ring, bv_fence)));
	OUT_RING(ring, upper_32_bits(rbmemptr(ring, bv_fence)));
	OUT_RING(ring, submit->seqno);

	/* write the ringbuffer timestamp */
	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
	OUT_RING(ring, CACHE_CLEAN | CP_EVENT_WRITE_0_IRQ | BIT(27));
	OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
	OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
	OUT_RING(ring, submit->seqno);

	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
	OUT_RING(ring, CP_SET_THREAD_BOTH);

	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, 0x100); /* IFPC enable */

	trace_msm_gpu_submit_flush(submit,
		gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER));

	a6xx_flush(gpu, ring);
}

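/* Enable or disable hardware clock gating for the GPU and GMU blocks */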
static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	const struct adreno_reglist *reg;
	unsigned int i;
	u32 cgc_delay, cgc_hyst;
	u32 val, clock_cntl_on;

	if (!(adreno_gpu->info->a6xx->hwcg || adreno_is_a7xx(adreno_gpu)))
		return;

	if (adreno_is_a630(adreno_gpu))
		clock_cntl_on = 0x8aa8aa02;
	else if (adreno_is_a610(adreno_gpu))
		clock_cntl_on = 0xaaa8aa82;
	else if (adreno_is_a702(adreno_gpu))
		clock_cntl_on = 0xaaaaaa82;
	else
		clock_cntl_on = 0x8aa8aa82;

	cgc_delay = adreno_is_a615_family(adreno_gpu) ? 0x111 : 0x10111;
	cgc_hyst = adreno_is_a615_family(adreno_gpu) ? 0x555 : 0x5555;

	gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
		  state ? adreno_gpu->info->a6xx->gmu_cgc_mode : 0);
	gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
		  state ? cgc_delay : 0);
	gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
		  state ? cgc_hyst : 0);

	if (!adreno_gpu->info->a6xx->hwcg) {
		gpu_write(gpu, REG_A7XX_RBBM_CLOCK_CNTL_GLOBAL, 1);
		gpu_write(gpu, REG_A7XX_RBBM_CGC_GLOBAL_LOAD_CMD, state ? 1 : 0);

		if (state) {
			gpu_write(gpu, REG_A7XX_RBBM_CGC_P2S_TRIG_CMD, 1);

			if (gpu_poll_timeout(gpu, REG_A7XX_RBBM_CGC_P2S_STATUS, val,
					     val & A7XX_RBBM_CGC_P2S_STATUS_TXDONE, 1, 10)) {
				dev_err(&gpu->pdev->dev, "RBBM_CGC_P2S_STATUS TXDONE Poll failed\n");
				return;
			}

			gpu_write(gpu, REG_A7XX_RBBM_CLOCK_CNTL_GLOBAL, 0);
		}

		return;
	}

	val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);

	/* Don't re-program the registers if they are already correct */
	if ((!state && !val) || (state && (val == clock_cntl_on)))
		return;

	/* Disable SP clock before programming HWCG registers */
	if (!adreno_is_a610_family(adreno_gpu) && !adreno_is_a7xx(adreno_gpu))
		gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);

	for (i = 0; (reg = &adreno_gpu->info->a6xx->hwcg[i], reg->offset); i++)
		gpu_write(gpu, reg->offset, state ? reg->value : 0);

	/* Enable SP clock */
	if (!adreno_is_a610_family(adreno_gpu) && !adreno_is_a7xx(adreno_gpu))
		gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);

	gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
}

static void a6xx_set_cp_protect(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	const struct adreno_protect *protect = adreno_gpu->info->a6xx->protect;
	unsigned i;

	/*
	 * Enable access protection to privileged registers, fault on an access
	 * protect violation and select the last span to protect from the start
	 * address all the way to the end of the register address space
	 */
	gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL,
		  A6XX_CP_PROTECT_CNTL_ACCESS_PROT_EN |
		  A6XX_CP_PROTECT_CNTL_ACCESS_FAULT_ON_VIOL_EN |
		  A6XX_CP_PROTECT_CNTL_LAST_SPAN_INF_RANGE);

	for (i = 0; i < protect->count - 1; i++) {
		/* Intentionally skip writing to some registers */
		if (protect->regs[i])
			gpu_write(gpu, REG_A6XX_CP_PROTECT(i), protect->regs[i]);
	}
	/* last CP_PROTECT to have "infinite" length on the last entry */
	gpu_write(gpu, REG_A6XX_CP_PROTECT(protect->count_max - 1), protect->regs[i]);
}

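/*
 * Derive the per-GPU UBWC (bandwidth compression) parameters - highest
 * bank bit, swizzle mode, macrotile mode and friends - that
 * a6xx_set_ubwc_config() later programs into the hardware.
 */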
static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
{
	gpu->ubwc_config.rgb565_predicator = 0;
	gpu->ubwc_config.uavflagprd_inv = 0;
	gpu->ubwc_config.min_acc_len = 0;
	gpu->ubwc_config.ubwc_swizzle = 0x6;
	gpu->ubwc_config.macrotile_mode = 0;
	gpu->ubwc_config.highest_bank_bit = 15;

	if (adreno_is_a610(gpu)) {
		gpu->ubwc_config.highest_bank_bit = 13;
		gpu->ubwc_config.min_acc_len = 1;
		gpu->ubwc_config.ubwc_swizzle = 0x7;
	}

	if (adreno_is_a618(gpu))
		gpu->ubwc_config.highest_bank_bit = 14;

	if (adreno_is_a619(gpu))
		/* TODO: Should be 14 but causes corruption at e.g. 1920x1200 on DP */
		gpu->ubwc_config.highest_bank_bit = 13;

	if (adreno_is_a619_holi(gpu))
		gpu->ubwc_config.highest_bank_bit = 13;

	if (adreno_is_a621(gpu)) {
		gpu->ubwc_config.highest_bank_bit = 13;
		gpu->ubwc_config.amsbc = 1;
		gpu->ubwc_config.uavflagprd_inv = 2;
	}

	if (adreno_is_a640_family(gpu))
		gpu->ubwc_config.amsbc = 1;

	if (adreno_is_a680(gpu))
		gpu->ubwc_config.macrotile_mode = 1;

	if (adreno_is_a650(gpu) ||
	    adreno_is_a660(gpu) ||
	    adreno_is_a690(gpu) ||
	    adreno_is_a730(gpu) ||
	    adreno_is_a740_family(gpu)) {
		/* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
		gpu->ubwc_config.highest_bank_bit = 16;
		gpu->ubwc_config.amsbc = 1;
		gpu->ubwc_config.rgb565_predicator = 1;
		gpu->ubwc_config.uavflagprd_inv = 2;
		gpu->ubwc_config.macrotile_mode = 1;
	}

	if (adreno_is_7c3(gpu)) {
		gpu->ubwc_config.highest_bank_bit = 14;
		gpu->ubwc_config.amsbc = 1;
		gpu->ubwc_config.rgb565_predicator = 1;
		gpu->ubwc_config.uavflagprd_inv = 2;
		gpu->ubwc_config.macrotile_mode = 1;
	}

	if (adreno_is_a702(gpu)) {
		gpu->ubwc_config.highest_bank_bit = 14;
		gpu->ubwc_config.min_acc_len = 1;
	}
}

static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	/*
	 * We subtract 13 from the highest bank bit (13 is the minimum value
	 * allowed by hw) and write the lowest two bits of the remaining value
	 * as hbb_lo and the one above it as hbb_hi to the hardware.
	 */
	BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13);
	u32 hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13;
	u32 hbb_hi = hbb >> 2;
	u32 hbb_lo = hbb & 3;
	u32 ubwc_mode = adreno_gpu->ubwc_config.ubwc_swizzle & 1;
	u32 level2_swizzling_dis = !(adreno_gpu->ubwc_config.ubwc_swizzle & 2);

	gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
		  level2_swizzling_dis << 12 |
		  adreno_gpu->ubwc_config.rgb565_predicator << 11 |
		  hbb_hi << 10 | adreno_gpu->ubwc_config.amsbc << 4 |
		  adreno_gpu->ubwc_config.min_acc_len << 3 |
		  hbb_lo << 1 | ubwc_mode);

	gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL,
		  level2_swizzling_dis << 6 | hbb_hi << 4 |
		  adreno_gpu->ubwc_config.min_acc_len << 3 |
		  hbb_lo << 1 | ubwc_mode);

	gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
		  level2_swizzling_dis << 12 | hbb_hi << 10 |
		  adreno_gpu->ubwc_config.uavflagprd_inv << 4 |
		  adreno_gpu->ubwc_config.min_acc_len << 3 |
		  hbb_lo << 1 | ubwc_mode);

	if (adreno_is_a7xx(adreno_gpu))
		gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL,
			  FIELD_PREP(GENMASK(8, 5), hbb_lo));

	gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL,
		  adreno_gpu->ubwc_config.min_acc_len << 23 | hbb_lo << 21);

	gpu_write(gpu, REG_A6XX_RBBM_NC_MODE_CNTL,
		  adreno_gpu->ubwc_config.macrotile_mode);
}

603{
604 struct msm_ringbuffer *ring = gpu->rb[0];
605
606 OUT_PKT7(ring, CP_ME_INIT, 8);
607
608 OUT_RING(ring, 0x0000002f);
609
610 /* Enable multiple hardware contexts */
611 OUT_RING(ring, 0x00000003);
612
613 /* Enable error detection */
614 OUT_RING(ring, 0x20000000);
615
616 /* Don't enable header dump */
617 OUT_RING(ring, 0x00000000);
618 OUT_RING(ring, 0x00000000);
619
620 /* No workarounds enabled */
621 OUT_RING(ring, 0x00000000);
622
623 /* Pad rest of the cmds with 0's */
624 OUT_RING(ring, 0x00000000);
625 OUT_RING(ring, 0x00000000);
626
627 a6xx_flush(gpu, ring);
628 return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
629}
630
static int a7xx_cp_init(struct msm_gpu *gpu)
{
	struct msm_ringbuffer *ring = gpu->rb[0];
	u32 mask;

	/* Disable concurrent binning before sending CP init */
	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
	OUT_RING(ring, BIT(27));

	OUT_PKT7(ring, CP_ME_INIT, 7);

	/* Use multiple HW contexts */
	mask = BIT(0);

	/* Enable error detection */
	mask |= BIT(1);

	/* Set default reset state */
	mask |= BIT(3);

	/* Disable save/restore of performance counters across preemption */
	mask |= BIT(6);

	/* Enable the register init list with the spinlock */
	mask |= BIT(8);

	OUT_RING(ring, mask);

	/* Enable multiple hardware contexts */
	OUT_RING(ring, 0x00000003);

	/* Enable error detection */
	OUT_RING(ring, 0x20000000);

	/* Operation mode mask */
	OUT_RING(ring, 0x00000002);

	/* *Don't* send a power up reg list for concurrent binning (TODO) */
	/* Lo address */
	OUT_RING(ring, 0x00000000);
	/* Hi address */
	OUT_RING(ring, 0x00000000);
	/* BIT(31) set => read the regs from the list */
	OUT_RING(ring, 0x00000000);

	a6xx_flush(gpu, ring);
	return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
}

/*
 * Check that the microcode version is new enough to include several key
 * security fixes. Return true if the ucode is safe.
 */
static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
		struct drm_gem_object *obj)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	const char *sqe_name = adreno_gpu->info->fw[ADRENO_FW_SQE];
	u32 *buf = msm_gem_get_vaddr(obj);
	bool ret = false;

	if (IS_ERR(buf))
		return false;

	/* A7xx is safe! */
	if (adreno_is_a7xx(adreno_gpu) || adreno_is_a702(adreno_gpu))
		return true;

	/*
	 * Targets up to a640 (a618, a630 and a640) need to check for a
	 * microcode version that is patched to support the whereami opcode or
	 * one that is new enough to include it by default.
	 *
	 * a650 tier targets don't need whereami but still need to be
	 * equal to or newer than 0.95 for other security fixes
	 *
	 * a660 targets have all the critical security fixes from the start
	 */
	if (!strcmp(sqe_name, "a630_sqe.fw")) {
		/*
		 * If the lowest nibble is 0xa that is an indication that this
		 * microcode has been patched. The actual version is in dword
		 * [3] but we only care about the patchlevel which is the lowest
		 * nibble of dword [3]
		 *
		 * Otherwise check that the firmware is greater than or equal
		 * to 1.90 which was the first version that had this fix built
		 * in
		 */
		if ((((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) ||
			(buf[0] & 0xfff) >= 0x190) {
			a6xx_gpu->has_whereami = true;
			ret = true;
			goto out;
		}

		DRM_DEV_ERROR(&gpu->pdev->dev,
			"a630 SQE ucode is too old. Have version %x need at least %x\n",
			buf[0] & 0xfff, 0x190);
	} else if (!strcmp(sqe_name, "a650_sqe.fw")) {
		if ((buf[0] & 0xfff) >= 0x095) {
			ret = true;
			goto out;
		}

		DRM_DEV_ERROR(&gpu->pdev->dev,
			"a650 SQE ucode is too old. Have version %x need at least %x\n",
			buf[0] & 0xfff, 0x095);
	} else if (!strcmp(sqe_name, "a660_sqe.fw")) {
		ret = true;
	} else {
		DRM_DEV_ERROR(&gpu->pdev->dev,
			"unknown GPU, add it to a6xx_ucode_check_version()!!\n");
	}
out:
	msm_gem_put_vaddr(obj);
	return ret;
}

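/* Create the SQE firmware BO (and the privileged RPTR shadow, if needed) */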
static int a6xx_ucode_load(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	if (!a6xx_gpu->sqe_bo) {
		a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu,
			adreno_gpu->fw[ADRENO_FW_SQE], &a6xx_gpu->sqe_iova);

		if (IS_ERR(a6xx_gpu->sqe_bo)) {
			int ret = PTR_ERR(a6xx_gpu->sqe_bo);

			a6xx_gpu->sqe_bo = NULL;
			DRM_DEV_ERROR(&gpu->pdev->dev,
				"Could not allocate SQE ucode: %d\n", ret);

			return ret;
		}

		msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
		if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) {
			msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
			drm_gem_object_put(a6xx_gpu->sqe_bo);

			a6xx_gpu->sqe_bo = NULL;
			return -EPERM;
		}
	}

	/*
	 * Expanded APRIV and targets that support WHERE_AM_I both need a
	 * privileged buffer to store the RPTR shadow
	 */
	if ((adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) &&
	    !a6xx_gpu->shadow_bo) {
		a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
			sizeof(u32) * gpu->nr_rings,
			MSM_BO_WC | MSM_BO_MAP_PRIV,
			gpu->aspace, &a6xx_gpu->shadow_bo,
			&a6xx_gpu->shadow_iova);

		if (IS_ERR(a6xx_gpu->shadow))
			return PTR_ERR(a6xx_gpu->shadow);

		msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow");
	}

	return 0;
}

static int a6xx_zap_shader_init(struct msm_gpu *gpu)
{
	static bool loaded;
	int ret;

	if (loaded)
		return 0;

	ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);

	loaded = !ret;
	return ret;
}

#define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
	A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
	A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
	A6XX_RBBM_INT_0_MASK_CP_IB2 | \
	A6XX_RBBM_INT_0_MASK_CP_IB1 | \
	A6XX_RBBM_INT_0_MASK_CP_RB | \
	A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
	A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
	A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
	A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
	A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)

#define A7XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
	A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
	A6XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR | \
	A6XX_RBBM_INT_0_MASK_CP_SW | \
	A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
	A6XX_RBBM_INT_0_MASK_PM4CPINTERRUPT | \
	A6XX_RBBM_INT_0_MASK_CP_RB_DONE_TS | \
	A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
	A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
	A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
	A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
	A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR | \
	A6XX_RBBM_INT_0_MASK_TSBWRITEERROR | \
	A6XX_RBBM_INT_0_MASK_SWFUSEVIOLATION)

#define A7XX_APRIV_MASK (A6XX_CP_APRIV_CNTL_ICACHE | \
	A6XX_CP_APRIV_CNTL_RBFETCH | \
	A6XX_CP_APRIV_CNTL_RBPRIVLEVEL | \
	A6XX_CP_APRIV_CNTL_RBRPWB)

#define A7XX_BR_APRIVMASK (A7XX_APRIV_MASK | \
	A6XX_CP_APRIV_CNTL_CDREAD | \
	A6XX_CP_APRIV_CNTL_CDWRITE)

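/*
 * One-time-per-power-up hardware setup, called via a6xx_hw_init() below
 * with the GMU lock held.
 */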
static int hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u64 gmem_range_min;
	int ret;

	if (!adreno_has_gmu_wrapper(adreno_gpu)) {
		/* Make sure the GMU keeps the GPU on while we set it up */
		ret = a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
		if (ret)
			return ret;
	}

	/* Clear GBIF halt in case GX domain was not collapsed */
	if (adreno_is_a619_holi(adreno_gpu)) {
		gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
		gpu_read(gpu, REG_A6XX_GBIF_HALT);

		gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, 0);
		gpu_read(gpu, REG_A6XX_RBBM_GPR0_CNTL);
	} else if (a6xx_has_gbif(adreno_gpu)) {
		gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
		gpu_read(gpu, REG_A6XX_GBIF_HALT);

		gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0);
		gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT);
	}

	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);

	if (adreno_is_a619_holi(adreno_gpu))
		a6xx_sptprac_enable(gmu);

	/*
	 * Disable the trusted memory range - we don't actually support secure
	 * memory rendering at this point in time and we don't want to block off
	 * part of the virtual memory space.
	 */
	gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE, 0x00000000);
	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);

	if (!adreno_is_a7xx(adreno_gpu)) {
		/* Turn on 64 bit addressing for all blocks */
		gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
		gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
		gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
		gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
		gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
		gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
		gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
		gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
		gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
		gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
		gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
		gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
	}

	/* enable hardware clockgating */
	a6xx_set_hwcg(gpu, true);

	/* VBIF/GBIF start */
	if (adreno_is_a610_family(adreno_gpu) ||
	    adreno_is_a640_family(adreno_gpu) ||
	    adreno_is_a650_family(adreno_gpu) ||
	    adreno_is_a7xx(adreno_gpu)) {
		gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
		gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
		gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
		gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
		gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL,
			  adreno_is_a7xx(adreno_gpu) ? 0x2120212 : 0x3);
	} else {
		gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
	}

	if (adreno_is_a630(adreno_gpu))
		gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);

	if (adreno_is_a7xx(adreno_gpu))
		gpu_write(gpu, REG_A6XX_UCHE_GBIF_GX_CONFIG, 0x10240e0);

	/* Make all blocks contribute to the GPU BUSY perf counter */
	gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);

	/* Disable L2 bypass in the UCHE */
	if (adreno_is_a7xx(adreno_gpu)) {
		gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
		gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
	} else {
		gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, 0x0001ffffffffffc0llu);
		gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
		gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
	}

	if (!(adreno_is_a650_family(adreno_gpu) ||
	      adreno_is_a702(adreno_gpu) ||
	      adreno_is_a730(adreno_gpu))) {
		gmem_range_min = adreno_is_a740_family(adreno_gpu) ? SZ_16M : SZ_1M;

		/* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
		gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN, gmem_range_min);

		gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX,
			gmem_range_min + adreno_gpu->info->gmem - 1);
	}

	if (adreno_is_a7xx(adreno_gpu))
		gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, BIT(23));
	else {
		gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
		gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
	}

	if (adreno_is_a640_family(adreno_gpu) || adreno_is_a650_family(adreno_gpu)) {
		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
	} else if (adreno_is_a610_family(adreno_gpu)) {
		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x00800060);
		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x40201b16);
	} else if (!adreno_is_a7xx(adreno_gpu)) {
		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
		gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
	}

	if (adreno_is_a660_family(adreno_gpu))
		gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020);

	/* Setting the mem pool size */
	if (adreno_is_a610(adreno_gpu)) {
		gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 48);
		gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 47);
	} else if (adreno_is_a702(adreno_gpu)) {
		gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 64);
		gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 63);
	} else if (!adreno_is_a7xx(adreno_gpu))
		gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);

	/* Set the default primFifo threshold values */
	if (adreno_gpu->info->a6xx->prim_fifo_threshold)
		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL,
			  adreno_gpu->info->a6xx->prim_fifo_threshold);

	/* Set the AHB default slave response to "ERROR" */
	gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);

	/* Turn on performance counters */
	gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);

	if (adreno_is_a7xx(adreno_gpu)) {
		/* Turn on the IFPC counter (countable 4 on XOCLK4) */
		gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1,
			  FIELD_PREP(GENMASK(7, 0), 0x4));
	}

	/* Select CP0 to always count cycles */
	gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL(0), PERF_CP_ALWAYS_COUNT);

	a6xx_set_ubwc_config(gpu);

	/* Enable fault detection */
	if (adreno_is_a730(adreno_gpu) ||
	    adreno_is_a740_family(adreno_gpu))
		gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0xcfffff);
	else if (adreno_is_a690(adreno_gpu))
		gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x4fffff);
	else if (adreno_is_a619(adreno_gpu))
		gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3fffff);
	else if (adreno_is_a610(adreno_gpu) || adreno_is_a702(adreno_gpu))
		gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3ffff);
	else
		gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x1fffff);

	gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, BIT(7) | 0x1);

	/* Set weights for bicubic filtering */
	if (adreno_is_a650_family(adreno_gpu) || adreno_is_x185(adreno_gpu)) {
		gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0);
		gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1,
			0x3fe05ff4);
		gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2,
			0x3fa0ebee);
		gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3,
			0x3f5193ed);
		gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4,
			0x3f0243f0);
	}

	/* Set up the CX GMU counter 0 to count busy ticks */
	gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);

	/* Enable the power counter */
	gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, BIT(5));
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);

	/* Protect registers from the CP */
	a6xx_set_cp_protect(gpu);

	if (adreno_is_a660_family(adreno_gpu)) {
		if (adreno_is_a690(adreno_gpu))
			gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x00028801);
		else
			gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1);
		gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0);
	} else if (adreno_is_a702(adreno_gpu)) {
		/* Something to do with the HLSQ cluster */
		gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, BIT(24));
	}

	if (adreno_is_a690(adreno_gpu))
		gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x90);
	/* Set dualQ + disable afull for A660 GPU */
	else if (adreno_is_a660(adreno_gpu))
		gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
	else if (adreno_is_a7xx(adreno_gpu))
		gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG,
			  FIELD_PREP(GENMASK(19, 16), 6) |
			  FIELD_PREP(GENMASK(15, 12), 6) |
			  FIELD_PREP(GENMASK(11, 8), 9) |
			  BIT(3) | BIT(2) |
			  FIELD_PREP(GENMASK(1, 0), 2));

	/* Enable expanded apriv for targets that support it */
	if (gpu->hw_apriv) {
		if (adreno_is_a7xx(adreno_gpu)) {
			gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
				  A7XX_BR_APRIVMASK);
			gpu_write(gpu, REG_A7XX_CP_BV_APRIV_CNTL,
				  A7XX_APRIV_MASK);
			gpu_write(gpu, REG_A7XX_CP_LPAC_APRIV_CNTL,
				  A7XX_APRIV_MASK);
		} else
			gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
				  BIT(6) | BIT(5) | BIT(3) | BIT(2) | BIT(1));
	}

	if (adreno_is_a750(adreno_gpu)) {
		/* Disable ubwc merged UFC request feature */
		gpu_rmw(gpu, REG_A6XX_RB_CMP_DBG_ECO_CNTL, BIT(19), BIT(19));

		/* Enable TP flaghint and other performance settings */
		gpu_write(gpu, REG_A6XX_TPL1_DBG_ECO_CNTL1, 0xc0700);
	} else if (adreno_is_a7xx(adreno_gpu)) {
		/* Disable non-ubwc read reqs from passing write reqs */
		gpu_rmw(gpu, REG_A6XX_RB_CMP_DBG_ECO_CNTL, BIT(11), BIT(11));
	}

	/* Enable interrupts */
	gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK,
		  adreno_is_a7xx(adreno_gpu) ? A7XX_INT_MASK : A6XX_INT_MASK);

	ret = adreno_hw_init(gpu);
	if (ret)
		goto out;

	gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova);

	/* Set the ringbuffer address */
	gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova);

	/* Targets that support extended APRIV can use the RPTR shadow from
	 * hardware but all the other ones need to disable the feature. Targets
	 * that support the WHERE_AM_I opcode can use that instead
	 */
	if (adreno_gpu->base.hw_apriv)
		gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT);
	else
		gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
			MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);

	/* Configure the RPTR shadow if needed: */
	if (a6xx_gpu->shadow_bo) {
		gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR,
			shadowptr(a6xx_gpu, gpu->rb[0]));
	}

	/* ..which means "always" on A7xx, also for BV shadow */
	if (adreno_is_a7xx(adreno_gpu)) {
		gpu_write64(gpu, REG_A7XX_CP_BV_RB_RPTR_ADDR,
			rbmemptr(gpu->rb[0], bv_fence));
	}

	/* Always come up on rb 0 */
	a6xx_gpu->cur_ring = gpu->rb[0];

	gpu->cur_ctx_seqno = 0;

Jordan Crouse4b565ca2018-08-06 11:33:24 -06001140 /* Enable the SQE_to start the CP engine */
	gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);

	ret = adreno_is_a7xx(adreno_gpu) ? a7xx_cp_init(gpu) : a6xx_cp_init(gpu);
	if (ret)
		goto out;

	/*
	 * Try to load a zap shader into the secure world. If successful
	 * we can use the CP to switch out of secure mode. If not then we
	 * have no recourse but to try to switch ourselves out manually. If we
	 * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
	 * be blocked and a permissions violation will soon follow.
	 */
1154 ret = a6xx_zap_shader_init(gpu);
1155 if (!ret) {
1156 OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
1157 OUT_RING(gpu->rb[0], 0x00000000);
1158
1159 a6xx_flush(gpu, gpu->rb[0]);
1160 if (!a6xx_idle(gpu, gpu->rb[0]))
1161 return -EINVAL;
Rob Clark15ab9872019-11-24 14:23:38 -08001162 } else if (ret == -ENODEV) {
1163 /*
1164 * This device does not use zap shader (but print a warning
1165 * just in case someone got their dt wrong.. hopefully they
1166 * have a debug UART to realize the error of their ways...
1167 * if you mess this up you are about to crash horribly)
1168 */
Jordan Crouseabccb9f2019-04-19 13:46:15 -06001169 dev_warn_once(gpu->dev->dev,
1170 "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
1171 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
Rob Clark15273ff2019-05-08 06:06:52 -07001172 ret = 0;
Rob Clark15ab9872019-11-24 14:23:38 -08001173 } else {
1174 return ret;
Jordan Crouseabccb9f2019-04-19 13:46:15 -06001175 }
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001176
1177out:
Konrad Dybcio5a903a42023-06-16 01:20:53 +02001178 if (adreno_has_gmu_wrapper(adreno_gpu))
1179 return ret;
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001180 /*
1181 * Tell the GMU that we are done touching the GPU and it can start power
1182 * management
1183 */
1184 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
1185
Jonathan Marek8167e6f2020-04-23 17:09:17 -04001186 if (a6xx_gpu->gmu.legacy) {
1187 /* Take the GMU out of its special boot mode */
1188 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
1189 }
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001190
1191 return ret;
1192}
1193
Rob Clarkf6f59072021-09-27 11:00:04 -07001194static int a6xx_hw_init(struct msm_gpu *gpu)
1195{
1196 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1197 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
1198 int ret;
1199
1200 mutex_lock(&a6xx_gpu->gmu.lock);
1201 ret = hw_init(gpu);
1202 mutex_unlock(&a6xx_gpu->gmu.lock);
1203
1204 return ret;
1205}
1206
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001207static void a6xx_dump(struct msm_gpu *gpu)
1208{
Mamta Shukla6a41da12018-10-20 23:19:26 +05301209 DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n",
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001210 gpu_read(gpu, REG_A6XX_RBBM_STATUS));
1211 adreno_dump(gpu);
1212}
1213
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001214static void a6xx_recover(struct msm_gpu *gpu)
1215{
1216 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1217 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
Akhil P Oommenc11fa122023-01-02 16:18:31 +05301218 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
Akhil P Oommenf350bfb2022-08-19 01:52:12 +05301219 int i, active_submits;
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001220
1221 adreno_dump_info(gpu);
1222
1223 for (i = 0; i < 8; i++)
Mamta Shukla6a41da12018-10-20 23:19:26 +05301224 DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001225 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
1226
1227 if (hang_debug)
1228 a6xx_dump(gpu);
1229
Akhil P Oommenf4a75b52022-12-16 22:33:14 +05301230 /*
1231 * Set the hung flag so that recovery-specific sequences can run during
1232 * the runtime PM suspend we are about to trigger
1233 */
1234 a6xx_gpu->hung = true;
1235
Akhil P Oommen3a9dd702022-08-19 01:52:14 +05301236 /* Halt SQE first */
1237 gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
1238
Akhil P Oommenf350bfb2022-08-19 01:52:12 +05301239 pm_runtime_dont_use_autosuspend(&gpu->pdev->dev);
1240
1241 /* active_submits won't change until we make a submission */
1242 mutex_lock(&gpu->active_lock);
1243 active_submits = gpu->active_submits;
1244
1245 /*
1246 * Temporarily clear active_submits count to silence a WARN() in the
1247 * runtime suspend cb
1248 */
1249 gpu->active_submits = 0;
1250
Konrad Dybcio5a903a42023-06-16 01:20:53 +02001251 if (adreno_has_gmu_wrapper(adreno_gpu)) {
1252 /* Drain the outstanding traffic on memory buses */
1253 a6xx_bus_clear_pending_transactions(adreno_gpu, true);
1254
1255 /* Reset the GPU to a clean state */
1256 a6xx_gpu_sw_reset(gpu, true);
1257 a6xx_gpu_sw_reset(gpu, false);
1258 }
1259
Akhil P Oommenc11fa122023-01-02 16:18:31 +05301260 reinit_completion(&gmu->pd_gate);
1261 dev_pm_genpd_add_notifier(gmu->cxpd, &gmu->pd_nb);
1262 dev_pm_genpd_synced_poweroff(gmu->cxpd);
1263
Akhil P Oommenf350bfb2022-08-19 01:52:12 +05301264 /* Drop the rpm refcount from active submits */
1265 if (active_submits)
1266 pm_runtime_put(&gpu->pdev->dev);
1267
1268 /* And the final one from recover worker */
1269 pm_runtime_put_sync(&gpu->pdev->dev);
1270
Akhil P Oommenc11fa122023-01-02 16:18:31 +05301271 if (!wait_for_completion_timeout(&gmu->pd_gate, msecs_to_jiffies(1000)))
1272 DRM_DEV_ERROR(&gpu->pdev->dev, "cx gdsc didn't collapse\n");
1273
1274 dev_pm_genpd_remove_notifier(gmu->cxpd);
1275
Akhil P Oommenf350bfb2022-08-19 01:52:12 +05301276 pm_runtime_use_autosuspend(&gpu->pdev->dev);
1277
1278 if (active_submits)
1279 pm_runtime_get(&gpu->pdev->dev);
1280
1281 pm_runtime_get_sync(&gpu->pdev->dev);
1282
1283 gpu->active_submits = active_submits;
1284 mutex_unlock(&gpu->active_lock);
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001285
1286 msm_gpu_hw_init(gpu);
Akhil P Oommenf4a75b52022-12-16 22:33:14 +05301287 a6xx_gpu->hung = false;
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001288}
1289
Jordan Crouse2a574cc2021-06-10 14:44:11 -07001290static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid)
1291{
Connor Abbott77beba32024-01-25 13:10:58 +00001292 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
Jordan Crouse2a574cc2021-06-10 14:44:11 -07001293 static const char *uche_clients[7] = {
1294 "VFD", "SP", "VSC", "VPC", "HLSQ", "PC", "LRZ",
1295 };
1296 u32 val;
1297
Connor Abbott77beba32024-01-25 13:10:58 +00001298 if (adreno_is_a7xx(adreno_gpu)) {
1299 if (mid != 1 && mid != 2 && mid != 3 && mid != 8)
1300 return "UNKNOWN";
1301 } else {
1302 if (mid < 1 || mid > 3)
1303 return "UNKNOWN";
1304 }
Jordan Crouse2a574cc2021-06-10 14:44:11 -07001305
1306 /*
1307 * The source of the data depends on the mid ID read from FSYNR1
1308 * and the client ID read from the UCHE block.
1309 */
1310 val = gpu_read(gpu, REG_A6XX_UCHE_CLIENT_PF);
1311
Connor Abbott77beba32024-01-25 13:10:58 +00001312 if (adreno_is_a7xx(adreno_gpu)) {
1313 /* Bit 3 for mid=3 indicates BR or BV */
1314 static const char *uche_clients_a7xx[16] = {
1315 "BR_VFD", "BR_SP", "BR_VSC", "BR_VPC",
1316 "BR_HLSQ", "BR_PC", "BR_LRZ", "BR_TP",
1317 "BV_VFD", "BV_SP", "BV_VSC", "BV_VPC",
1318 "BV_HLSQ", "BV_PC", "BV_LRZ", "BV_TP",
1319 };
Jordan Crouse2a574cc2021-06-10 14:44:11 -07001320
Connor Abbott77beba32024-01-25 13:10:58 +00001321 /* LPAC has the same clients as BR and BV, but because it is
1322 * compute-only, some of them do not exist and there are holes
1323 * in the array.
1324 */
1325 static const char *uche_clients_lpac_a7xx[8] = {
1326 "-", "LPAC_SP", "-", "-",
1327 "LPAC_HLSQ", "-", "-", "LPAC_TP",
1328 };
Jordan Crouse2a574cc2021-06-10 14:44:11 -07001329
Connor Abbott77beba32024-01-25 13:10:58 +00001330 val &= GENMASK(6, 0);
1331
1332 /* mid=3 refers to BR or BV */
1333 if (mid == 3) {
1334 if (val < ARRAY_SIZE(uche_clients_a7xx))
1335 return uche_clients_a7xx[val];
1336 else
1337 return "UCHE";
1338 }
1339
1340 /* mid=8 refers to LPAC */
1341 if (mid == 8) {
1342 if (val < ARRAY_SIZE(uche_clients_lpac_a7xx))
1343 return uche_clients_lpac_a7xx[val];
1344 else
1345 return "UCHE_LPAC";
1346 }
1347
1348 /* mid=2 is a catchall for everything else in LPAC */
1349 if (mid == 2)
1350 return "UCHE_LPAC";
1351
1352 /* mid=1 is a catchall for everything else in BR/BV */
1353 return "UCHE";
1354 } else if (adreno_is_a660_family(adreno_gpu)) {
1355 static const char *uche_clients_a660[8] = {
1356 "VFD", "SP", "VSC", "VPC", "HLSQ", "PC", "LRZ", "TP",
1357 };
1358
1359 static const char *uche_clients_a660_not[8] = {
1360 "not VFD", "not SP", "not VSC", "not VPC",
1361 "not HLSQ", "not PC", "not LRZ", "not TP",
1362 };
1363
1364 val &= GENMASK(6, 0);
1365
1366 if (mid == 3 && val < ARRAY_SIZE(uche_clients_a660))
1367 return uche_clients_a660[val];
1368
1369 if (mid == 1 && val < ARRAY_SIZE(uche_clients_a660_not))
1370 return uche_clients_a660_not[val];
1371
1372 return "UCHE";
1373 } else {
1374 /* mid = 3 is most precise and refers to only one block per client */
1375 if (mid == 3)
1376 return uche_clients[val & 7];
1377
1378 /* For mid=2 the source is TP or VFD except when the client id is 0 */
1379 if (mid == 2)
1380 return ((val & 7) == 0) ? "TP" : "TP|VFD";
1381
1382 /* For mid=1 just return "UCHE" as a catchall for everything else */
1383 return "UCHE";
1384 }
Jordan Crouse2a574cc2021-06-10 14:44:11 -07001385}
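/*
 * Worked example (illustrative only, values are hypothetical): on an A7xx
 * part, a fault with FSYNR1 mid == 3 and UCHE_CLIENT_PF[6:0] == 1 decodes
 * to "BR_SP", while mid == 8 with client 1 decodes to "LPAC_SP"; on a
 * pre-A660 part the same mid == 3 / client == 1 fault would report "SP".
 */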
1386
1387static const char *a6xx_fault_block(struct msm_gpu *gpu, u32 id)
1388{
Connor Abbott77beba32024-01-25 13:10:58 +00001389 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1390
Jordan Crouse2a574cc2021-06-10 14:44:11 -07001391 if (id == 0)
1392 return "CP";
1393 else if (id == 4)
1394 return "CCU";
1395 else if (id == 6)
1396 return "CDP Prefetch";
Connor Abbott77beba32024-01-25 13:10:58 +00001397 else if (id == 7)
1398 return "GMU";
1399 else if (id == 5 && adreno_is_a7xx(adreno_gpu))
1400 return "Flag cache";
Jordan Crouse2a574cc2021-06-10 14:44:11 -07001401
1402 return a6xx_uche_fault_block(gpu, id);
1403}
1404
Jordan Crouse2a574cc2021-06-10 14:44:11 -07001405static int a6xx_fault_handler(void *arg, unsigned long iova, int flags, void *data)
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001406{
1407 struct msm_gpu *gpu = arg;
Jordan Crouse2a574cc2021-06-10 14:44:11 -07001408 struct adreno_smmu_fault_info *info = data;
Dmitry Baryshkovf62ad0f2023-02-14 15:35:03 +03001409 const char *block = "unknown";
Rob Clarke25e92e2021-06-10 14:44:13 -07001410
Dmitry Baryshkovf62ad0f2023-02-14 15:35:03 +03001411 u32 scratch[] = {
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001412 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
1413 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
1414 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
Dmitry Baryshkovf62ad0f2023-02-14 15:35:03 +03001415 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)),
1416 };
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001417
Dmitry Baryshkovf62ad0f2023-02-14 15:35:03 +03001418 if (info)
1419 block = a6xx_fault_block(gpu, info->fsynr1 & 0xff);
Jordan Crouse2a574cc2021-06-10 14:44:11 -07001420
Dmitry Baryshkovf62ad0f2023-02-14 15:35:03 +03001421 return adreno_fault_handler(gpu, iova, flags, info, block, scratch);
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001422}
1423
1424static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
1425{
1426 u32 status = gpu_read(gpu, REG_A6XX_CP_INTERRUPT_STATUS);
1427
1428 if (status & A6XX_CP_INT_CP_OPCODE_ERROR) {
1429 u32 val;
1430
1431 gpu_write(gpu, REG_A6XX_CP_SQE_STAT_ADDR, 1);
1432 val = gpu_read(gpu, REG_A6XX_CP_SQE_STAT_DATA);
1433 dev_err_ratelimited(&gpu->pdev->dev,
1434 "CP | opcode error | possible opcode=0x%8.8X\n",
1435 val);
1436 }
1437
1438 if (status & A6XX_CP_INT_CP_UCODE_ERROR)
1439 dev_err_ratelimited(&gpu->pdev->dev,
1440 "CP ucode error interrupt\n");
1441
1442 if (status & A6XX_CP_INT_CP_HW_FAULT_ERROR)
1443 dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n",
1444 gpu_read(gpu, REG_A6XX_CP_HW_FAULT));
1445
1446 if (status & A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
1447 u32 val = gpu_read(gpu, REG_A6XX_CP_PROTECT_STATUS);
1448
1449 dev_err_ratelimited(&gpu->pdev->dev,
1450 "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
1451 val & (1 << 20) ? "READ" : "WRITE",
1452 (val & 0x3ffff), val);
1453 }
1454
Konrad Dybcioaf667062023-09-25 16:50:34 +02001455 if (status & A6XX_CP_INT_CP_AHB_ERROR && !adreno_is_a7xx(to_adreno_gpu(gpu)))
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001456 dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n");
1457
1458 if (status & A6XX_CP_INT_CP_VSD_PARITY_ERROR)
1459 dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n");
1460
1461 if (status & A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR)
1462 dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n");
1463
1464}
1465
1466static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
1467{
1468 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1469 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001470 struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
1471
1472 /*
Rob Clarke25e92e2021-06-10 14:44:13 -07001473 * If stalled on SMMU fault, we could trip the GPU's hang detection,
1474 * but the fault handler will trigger the devcore dump, and we want
1475 * to otherwise resume normally rather than killing the submit, so
1476 * just bail.
1477 */
1478 if (gpu_read(gpu, REG_A6XX_RBBM_STATUS3) & A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT)
1479 return;
1480
1481 /*
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001482 * Force the GPU to stay on until after we finish
1483 * collecting information
1484 */
Konrad Dybcio5a903a42023-06-16 01:20:53 +02001485 if (!adreno_has_gmu_wrapper(adreno_gpu))
1486 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001487
1488 DRM_DEV_ERROR(&gpu->pdev->dev,
1489 "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
Rob Clarkf9d53552022-04-11 14:58:31 -07001490 ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001491 gpu_read(gpu, REG_A6XX_RBBM_STATUS),
1492 gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
1493 gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
Rob Clarkcade05b2022-11-14 11:30:40 -08001494 gpu_read64(gpu, REG_A6XX_CP_IB1_BASE),
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001495 gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
Rob Clarkcade05b2022-11-14 11:30:40 -08001496 gpu_read64(gpu, REG_A6XX_CP_IB2_BASE),
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001497 gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE));
1498
1499 /* Turn off the hangcheck timer to keep it from bothering us */
1500 del_timer(&gpu->hangcheck_timer);
1501
Rob Clark7e688292020-10-19 14:10:51 -07001502 kthread_queue_work(gpu->worker, &gpu->recover_work);
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001503}
1504
Connor Abbott14b27d52024-04-30 11:43:18 +01001505static void a7xx_sw_fuse_violation_irq(struct msm_gpu *gpu)
1506{
1507 u32 status;
1508
1509 status = gpu_read(gpu, REG_A7XX_RBBM_SW_FUSE_INT_STATUS);
1510 gpu_write(gpu, REG_A7XX_RBBM_SW_FUSE_INT_MASK, 0);
1511
1512 dev_err_ratelimited(&gpu->pdev->dev, "SW fuse violation status=%8.8x\n", status);
1513
1514 /*
1515 * Ignore FASTBLEND violations, because the HW will silently fall back
1516 * to legacy blending.
1517 */
1518 if (status & (A7XX_CX_MISC_SW_FUSE_VALUE_RAYTRACING |
1519 A7XX_CX_MISC_SW_FUSE_VALUE_LPAC)) {
1520 del_timer(&gpu->hangcheck_timer);
1521
1522 kthread_queue_work(gpu->worker, &gpu->recover_work);
1523 }
1524}
1525
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001526static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
1527{
Rob Clark5edf2752021-11-09 10:11:05 -08001528 struct msm_drm_private *priv = gpu->dev->dev_private;
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001529 u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);
1530
1531 gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);
1532
Rob Clark5edf2752021-11-09 10:11:05 -08001533 if (priv->disable_err_irq)
1534 status &= A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS;
1535
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001536 if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT)
1537 a6xx_fault_detect_irq(gpu);
1538
1539 if (status & A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR)
1540 dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n");
1541
1542 if (status & A6XX_RBBM_INT_0_MASK_CP_HW_ERROR)
1543 a6xx_cp_hw_err_irq(gpu);
1544
1545 if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW)
1546 dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n");
1547
1548 if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
1549 dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n");
1550
1551 if (status & A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
1552 dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n");
1553
Connor Abbott14b27d52024-04-30 11:43:18 +01001554 if (status & A6XX_RBBM_INT_0_MASK_SWFUSEVIOLATION)
1555 a7xx_sw_fuse_violation_irq(gpu);
1556
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001557 if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
1558 msm_gpu_retire(gpu);
1559
1560 return IRQ_HANDLED;
1561}
1562
Sharat Masetty474dadb2020-11-25 12:30:15 +05301563static void a6xx_llc_deactivate(struct a6xx_gpu *a6xx_gpu)
1564{
1565 llcc_slice_deactivate(a6xx_gpu->llc_slice);
1566 llcc_slice_deactivate(a6xx_gpu->htw_llc_slice);
1567}
1568
1569static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
1570{
Jordan Crouse3d247122020-11-25 12:30:16 +05301571 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1572 struct msm_gpu *gpu = &adreno_gpu->base;
Akhil P Oommen9ba873e2021-11-18 15:50:31 +05301573 u32 cntl1_regval = 0;
Sharat Masetty474dadb2020-11-25 12:30:15 +05301574
1575 if (IS_ERR(a6xx_gpu->llc_mmio))
1576 return;
1577
1578 if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
Akhil P Oommen9ba873e2021-11-18 15:50:31 +05301579 u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
Sharat Masetty474dadb2020-11-25 12:30:15 +05301580
1581 gpu_scid &= 0x1f;
1582 cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
1583 (gpu_scid << 15) | (gpu_scid << 20);
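		/*
		 * Illustrative note (hypothetical value, not from the code
		 * above): a gpu_scid of 5 (0b00101) replicated into the five
		 * 5-bit fields gives (5 << 0) | (5 << 5) | (5 << 10) |
		 * (5 << 15) | (5 << 20) == 0x005294a5, i.e. the same slice ID
		 * for every GPU block programmed via CNTL_1.
		 */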
Akhil P Oommen9ba873e2021-11-18 15:50:31 +05301584
1585 /* On A660, the SCID programming for UCHE traffic is done in
1586 * A6XX_GBIF_SCACHE_CNTL0[14:10]
1587 */
1588 if (adreno_is_a660_family(adreno_gpu))
1589 gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
1590 (1 << 8), (gpu_scid << 10) | (1 << 8));
Sharat Masetty474dadb2020-11-25 12:30:15 +05301591 }
1592
Jordan Crouse3d247122020-11-25 12:30:16 +05301593 /*
1594 * For targets with a MMU500, activate the slice but don't program the
1595 * register. The XBL will take care of that.
1596 */
Sharat Masetty474dadb2020-11-25 12:30:15 +05301597 if (!llcc_slice_activate(a6xx_gpu->htw_llc_slice)) {
Jordan Crouse3d247122020-11-25 12:30:16 +05301598 if (!a6xx_gpu->have_mmu500) {
1599 u32 gpuhtw_scid = llcc_get_slice_id(a6xx_gpu->htw_llc_slice);
Sharat Masetty474dadb2020-11-25 12:30:15 +05301600
Jordan Crouse3d247122020-11-25 12:30:16 +05301601 gpuhtw_scid &= 0x1f;
1602 cntl1_regval |= FIELD_PREP(GENMASK(29, 25), gpuhtw_scid);
1603 }
Sharat Masetty474dadb2020-11-25 12:30:15 +05301604 }
1605
Akhil P Oommena6f24382021-07-30 01:21:23 +05301606 if (!cntl1_regval)
1607 return;
Sharat Masetty474dadb2020-11-25 12:30:15 +05301608
Akhil P Oommena6f24382021-07-30 01:21:23 +05301609 /*
1610 * Program the slice IDs for the various GPU blocks and GPU MMU
1611 * pagetables
1612 */
1613 if (!a6xx_gpu->have_mmu500) {
1614 a6xx_llc_write(a6xx_gpu,
1615 REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1, cntl1_regval);
1616
1617 /*
1618 * Program cacheability overrides to not allocate cache
1619 * lines on a write miss
1620 */
1621 a6xx_llc_rmw(a6xx_gpu,
1622 REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF, 0x03);
1623 return;
Sharat Masetty474dadb2020-11-25 12:30:15 +05301624 }
Akhil P Oommena6f24382021-07-30 01:21:23 +05301625
1626 gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
Sharat Masetty474dadb2020-11-25 12:30:15 +05301627}
1628
Konrad Dybcioaf667062023-09-25 16:50:34 +02001629static void a7xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
1630{
1631 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1632 struct msm_gpu *gpu = &adreno_gpu->base;
1633
1634 if (IS_ERR(a6xx_gpu->llc_mmio))
1635 return;
1636
1637 if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
1638 u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
1639
1640 gpu_scid &= GENMASK(4, 0);
1641
1642 gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL1,
1643 FIELD_PREP(GENMASK(29, 25), gpu_scid) |
1644 FIELD_PREP(GENMASK(24, 20), gpu_scid) |
1645 FIELD_PREP(GENMASK(19, 15), gpu_scid) |
1646 FIELD_PREP(GENMASK(14, 10), gpu_scid) |
1647 FIELD_PREP(GENMASK(9, 5), gpu_scid) |
1648 FIELD_PREP(GENMASK(4, 0), gpu_scid));
1649
1650 gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL0,
1651 FIELD_PREP(GENMASK(14, 10), gpu_scid) |
1652 BIT(8));
1653 }
1654
1655 llcc_slice_activate(a6xx_gpu->htw_llc_slice);
1656}
1657
Sharat Masetty474dadb2020-11-25 12:30:15 +05301658static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
1659{
Konrad Dybcio5a903a42023-06-16 01:20:53 +02001660 /* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */
1661 if (adreno_has_gmu_wrapper(&a6xx_gpu->base))
1662 return;
1663
Sharat Masetty474dadb2020-11-25 12:30:15 +05301664 llcc_slice_putd(a6xx_gpu->llc_slice);
1665 llcc_slice_putd(a6xx_gpu->htw_llc_slice);
1666}
1667
1668static void a6xx_llc_slices_init(struct platform_device *pdev,
Konrad Dybcioaf667062023-09-25 16:50:34 +02001669 struct a6xx_gpu *a6xx_gpu, bool is_a7xx)
Sharat Masetty474dadb2020-11-25 12:30:15 +05301670{
Jordan Crouse3d247122020-11-25 12:30:16 +05301671 struct device_node *phandle;
1672
Konrad Dybcio5a903a42023-06-16 01:20:53 +02001673 /* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */
1674 if (adreno_has_gmu_wrapper(&a6xx_gpu->base))
1675 return;
1676
Jordan Crouse3d247122020-11-25 12:30:16 +05301677 /*
Konrad Dybcioaf667062023-09-25 16:50:34 +02001678 * There is a different programming path for A6xx targets with an
1679 * mmu500 attached, so detect if that is the case
Jordan Crouse3d247122020-11-25 12:30:16 +05301680 */
1681 phandle = of_parse_phandle(pdev->dev.of_node, "iommus", 0);
1682 a6xx_gpu->have_mmu500 = (phandle &&
1683 of_device_is_compatible(phandle, "arm,mmu-500"));
1684 of_node_put(phandle);
1685
Konrad Dybcioaf667062023-09-25 16:50:34 +02001686 if (is_a7xx || !a6xx_gpu->have_mmu500)
Dmitry Baryshkovc0e745d2022-01-06 02:26:59 +03001687 a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem");
Konrad Dybcioaf667062023-09-25 16:50:34 +02001688 else
1689 a6xx_gpu->llc_mmio = NULL;
Jonathan Marek4b95d372021-04-23 21:49:26 -04001690
Sharat Masetty474dadb2020-11-25 12:30:15 +05301691 a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
1692 a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
1693
Sai Prakash Ranjan276619c2021-01-11 17:34:08 +05301694 if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
Sharat Masetty474dadb2020-11-25 12:30:15 +05301695 a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL);
1696}
1697
Connor Abbott14b27d52024-04-30 11:43:18 +01001698static int a7xx_cx_mem_init(struct a6xx_gpu *a6xx_gpu)
1699{
1700 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1701 struct msm_gpu *gpu = &adreno_gpu->base;
1702 u32 fuse_val;
1703 int ret;
1704
1705 if (adreno_is_a750(adreno_gpu)) {
1706 /*
1707 * Assume that if qcom_scm isn't available, whatever replaces it
1708 * allows us to write the fuse register ourselves.
1709 * Users of alternative firmware need to make sure this register
1710 * is writeable, or somehow indicate that it is not.
1711 * Print a warning because if you mess this up you're about to
1712 * crash horribly.
1713 */
1714 if (!qcom_scm_is_available()) {
1715 dev_warn_once(gpu->dev->dev,
1716 "SCM is not available, poking fuse register\n");
1717 a6xx_llc_write(a6xx_gpu, REG_A7XX_CX_MISC_SW_FUSE_VALUE,
1718 A7XX_CX_MISC_SW_FUSE_VALUE_RAYTRACING |
1719 A7XX_CX_MISC_SW_FUSE_VALUE_FASTBLEND |
1720 A7XX_CX_MISC_SW_FUSE_VALUE_LPAC);
1721 adreno_gpu->has_ray_tracing = true;
1722 return 0;
1723 }
1724
1725 ret = qcom_scm_gpu_init_regs(QCOM_SCM_GPU_ALWAYS_EN_REQ |
1726 QCOM_SCM_GPU_TSENSE_EN_REQ);
1727 if (ret)
1728 return ret;
1729
1730 /*
1731 * On a750 raytracing may be disabled by the firmware, find out
1732 * whether that's the case. The scm call above sets the fuse
1733 * register.
1734 */
1735 fuse_val = a6xx_llc_read(a6xx_gpu,
1736 REG_A7XX_CX_MISC_SW_FUSE_VALUE);
1737 adreno_gpu->has_ray_tracing =
1738 !!(fuse_val & A7XX_CX_MISC_SW_FUSE_VALUE_RAYTRACING);
Neil Armstrongcc2ccd12024-06-26 14:04:21 +02001739 } else if (adreno_is_a740(adreno_gpu)) {
1740 /* Raytracing is always enabled on a740 */
1741 adreno_gpu->has_ray_tracing = true;
Connor Abbott14b27d52024-04-30 11:43:18 +01001742 }
1743
1744 return 0;
1745}
1746
1747
Konrad Dybcio3773a572023-06-16 01:20:48 +02001748#define GBIF_CLIENT_HALT_MASK BIT(0)
1749#define GBIF_ARB_HALT_MASK BIT(1)
1750#define VBIF_XIN_HALT_CTRL0_MASK GENMASK(3, 0)
Konrad Dybcio8296ff02023-06-16 01:20:55 +02001751#define VBIF_RESET_ACK_MASK 0xF0
1752#define GPR0_GBIF_HALT_REQUEST 0x1E0
Konrad Dybcio6e332c92023-06-16 01:20:47 +02001753
1754void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off)
1755{
1756 struct msm_gpu *gpu = &adreno_gpu->base;
1757
Konrad Dybcio8296ff02023-06-16 01:20:55 +02001758 if (adreno_is_a619_holi(adreno_gpu)) {
1759 gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, GPR0_GBIF_HALT_REQUEST);
1760 spin_until((gpu_read(gpu, REG_A6XX_RBBM_VBIF_GX_RESET_STATUS) &
1761 (VBIF_RESET_ACK_MASK)) == VBIF_RESET_ACK_MASK);
1762 } else if (!a6xx_has_gbif(adreno_gpu)) {
Konrad Dybcio3773a572023-06-16 01:20:48 +02001763 gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, VBIF_XIN_HALT_CTRL0_MASK);
Konrad Dybcio6e332c92023-06-16 01:20:47 +02001764 spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
Konrad Dybcio3773a572023-06-16 01:20:48 +02001765 (VBIF_XIN_HALT_CTRL0_MASK)) == VBIF_XIN_HALT_CTRL0_MASK);
Konrad Dybcio6e332c92023-06-16 01:20:47 +02001766 gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
1767
1768 return;
1769 }
1770
1771 if (gx_off) {
1772 /* Halt the gx side of GBIF */
1773 gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
1774 spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
1775 }
1776
1777 /* Halt new client requests on GBIF */
1778 gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
1779 spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
1780 (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
1781
1782 /* Halt all AXI requests on GBIF */
1783 gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
1784 spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
1785 (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
1786
1787 /* The GBIF halt needs to be explicitly cleared */
1788 gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
1789}
1790
Konrad Dybcio277b9672023-06-16 01:20:49 +02001791void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert)
1792{
Konrad Dybcioe7fc9392023-06-16 01:20:56 +02001793 /* 11nm chips (e.g. ones with A610) have hw issues with the reset line! */
1794 if (adreno_is_a610(to_adreno_gpu(gpu)))
1795 return;
1796
Konrad Dybcio277b9672023-06-16 01:20:49 +02001797 gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, assert);
1798 /* Perform a bogus read and add a brief delay to ensure ordering. */
1799 gpu_read(gpu, REG_A6XX_RBBM_SW_RESET_CMD);
1800 udelay(1);
1801
1802 /* The reset line needs to be asserted for at least 100 us */
1803 if (assert)
1804 udelay(100);
1805}
1806
Konrad Dybcio5a903a42023-06-16 01:20:53 +02001807static int a6xx_gmu_pm_resume(struct msm_gpu *gpu)
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001808{
1809 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1810 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
1811 int ret;
1812
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001813 gpu->needs_hw_init = true;
1814
Rob Clarkec1cb6e2020-09-01 08:41:56 -07001815 trace_msm_gpu_resume(0);
1816
Rob Clarkf6f59072021-09-27 11:00:04 -07001817 mutex_lock(&a6xx_gpu->gmu.lock);
Jordan Crouse41570b72019-02-04 09:15:43 -07001818 ret = a6xx_gmu_resume(a6xx_gpu);
Rob Clarkf6f59072021-09-27 11:00:04 -07001819 mutex_unlock(&a6xx_gpu->gmu.lock);
Jordan Crouse41570b72019-02-04 09:15:43 -07001820 if (ret)
1821 return ret;
1822
Rob Clarkaf5b4ff2021-07-26 07:46:48 -07001823 msm_devfreq_resume(gpu);
Sharat Masettya2c3c0a2018-10-04 15:11:43 +05301824
Rob Clark0776ad92024-01-02 11:33:45 -08001825 adreno_is_a7xx(adreno_gpu) ? a7xx_llc_activate(a6xx_gpu) : a6xx_llc_activate(a6xx_gpu);
Sharat Masetty474dadb2020-11-25 12:30:15 +05301826
Konrad Dybcio5a903a42023-06-16 01:20:53 +02001827 return ret;
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001828}
1829
Konrad Dybcio5a903a42023-06-16 01:20:53 +02001830static int a6xx_pm_resume(struct msm_gpu *gpu)
1831{
1832 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1833 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
1834 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1835 unsigned long freq = gpu->fast_rate;
1836 struct dev_pm_opp *opp;
1837 int ret;
1838
1839 gpu->needs_hw_init = true;
1840
1841 trace_msm_gpu_resume(0);
1842
1843 mutex_lock(&a6xx_gpu->gmu.lock);
1844
1845 opp = dev_pm_opp_find_freq_ceil(&gpu->pdev->dev, &freq);
1846 if (IS_ERR(opp)) {
1847 ret = PTR_ERR(opp);
1848 goto err_set_opp;
1849 }
1850 dev_pm_opp_put(opp);
1851
1852 /* Set the core clock and bus bandwidth, keeping VDD scaling in mind */
1853 dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
1854
1855 pm_runtime_resume_and_get(gmu->dev);
1856 pm_runtime_resume_and_get(gmu->gxpd);
1857
1858 ret = clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
1859 if (ret)
1860 goto err_bulk_clk;
1861
Konrad Dybcio8296ff02023-06-16 01:20:55 +02001862 if (adreno_is_a619_holi(adreno_gpu))
1863 a6xx_sptprac_enable(gmu);
1864
Konrad Dybcio5a903a42023-06-16 01:20:53 +02001865 /* If anything goes south, tear the GPU down piece by piece.. */
1866 if (ret) {
1867err_bulk_clk:
1868 pm_runtime_put(gmu->gxpd);
1869 pm_runtime_put(gmu->dev);
1870 dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
1871 }
1872err_set_opp:
1873 mutex_unlock(&a6xx_gpu->gmu.lock);
1874
1875 if (!ret)
1876 msm_devfreq_resume(gpu);
1877
1878 return ret;
1879}
1880
1881static int a6xx_gmu_pm_suspend(struct msm_gpu *gpu)
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001882{
1883 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1884 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
Rob Clarke8b0b992020-11-10 10:23:06 -08001885 int i, ret;
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001886
Rob Clarkec1cb6e2020-09-01 08:41:56 -07001887 trace_msm_gpu_suspend(0);
1888
Sharat Masetty474dadb2020-11-25 12:30:15 +05301889 a6xx_llc_deactivate(a6xx_gpu);
1890
Rob Clarkaf5b4ff2021-07-26 07:46:48 -07001891 msm_devfreq_suspend(gpu);
Sharat Masettya2c3c0a2018-10-04 15:11:43 +05301892
Rob Clarkf6f59072021-09-27 11:00:04 -07001893 mutex_lock(&a6xx_gpu->gmu.lock);
Rob Clarke8b0b992020-11-10 10:23:06 -08001894 ret = a6xx_gmu_stop(a6xx_gpu);
Rob Clarkf6f59072021-09-27 11:00:04 -07001895 mutex_unlock(&a6xx_gpu->gmu.lock);
Rob Clarke8b0b992020-11-10 10:23:06 -08001896 if (ret)
1897 return ret;
1898
Jonathan Marekce86c232021-05-13 13:14:00 -04001899 if (a6xx_gpu->shadow_bo)
Rob Clarke8b0b992020-11-10 10:23:06 -08001900 for (i = 0; i < gpu->nr_rings; i++)
1901 a6xx_gpu->shadow[i] = 0;
1902
Rob Clark860a7b22022-01-13 08:32:13 -08001903 gpu->suspend_count++;
1904
Rob Clarke8b0b992020-11-10 10:23:06 -08001905 return 0;
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001906}
1907
Konrad Dybcio5a903a42023-06-16 01:20:53 +02001908static int a6xx_pm_suspend(struct msm_gpu *gpu)
1909{
1910 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1911 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
1912 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1913 int i;
1914
1915 trace_msm_gpu_suspend(0);
1916
1917 msm_devfreq_suspend(gpu);
1918
1919 mutex_lock(&a6xx_gpu->gmu.lock);
1920
1921 /* Drain the outstanding traffic on memory buses */
1922 a6xx_bus_clear_pending_transactions(adreno_gpu, true);
1923
Konrad Dybcio8296ff02023-06-16 01:20:55 +02001924 if (adreno_is_a619_holi(adreno_gpu))
1925 a6xx_sptprac_disable(gmu);
1926
Konrad Dybcio5a903a42023-06-16 01:20:53 +02001927 clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
1928
1929 pm_runtime_put_sync(gmu->gxpd);
1930 dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
1931 pm_runtime_put_sync(gmu->dev);
1932
1933 mutex_unlock(&a6xx_gpu->gmu.lock);
1934
1935 if (a6xx_gpu->shadow_bo)
1936 for (i = 0; i < gpu->nr_rings; i++)
1937 a6xx_gpu->shadow[i] = 0;
1938
1939 gpu->suspend_count++;
1940
1941 return 0;
1942}
1943
1944static int a6xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001945{
1946 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1947 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
Eric Anholt5f98b332021-01-28 13:03:31 -08001948
Rob Clarkf6f59072021-09-27 11:00:04 -07001949 mutex_lock(&a6xx_gpu->gmu.lock);
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001950
1951 /* Force the GPU power on so we can read this register */
Eric Anholt7a7cbf22021-01-28 13:03:30 -08001952 a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001953
Rob Clarkf73343f2023-03-20 11:54:14 -07001954 *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER);
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001955
Eric Anholt7a7cbf22021-01-28 13:03:30 -08001956 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
Rob Clarkf6f59072021-09-27 11:00:04 -07001957
1958 mutex_unlock(&a6xx_gpu->gmu.lock);
1959
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001960 return 0;
1961}
1962
Konrad Dybcio5a903a42023-06-16 01:20:53 +02001963static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
1964{
1965 *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER);
1966 return 0;
1967}
1968
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001969static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
1970{
1971 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1972 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
1973
1974 return a6xx_gpu->cur_ring;
1975}
1976
1977static void a6xx_destroy(struct msm_gpu *gpu)
1978{
1979 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1980 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
1981
1982 if (a6xx_gpu->sqe_bo) {
Jordan Crouse7ad0e8c2018-11-07 15:35:51 -07001983 msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
Emil Velikovf7d33952020-05-15 10:51:04 +01001984 drm_gem_object_put(a6xx_gpu->sqe_bo);
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001985 }
1986
Jordan Croused3a569f2020-09-14 16:40:22 -06001987 if (a6xx_gpu->shadow_bo) {
1988 msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace);
1989 drm_gem_object_put(a6xx_gpu->shadow_bo);
1990 }
1991
Sharat Masetty474dadb2020-11-25 12:30:15 +05301992 a6xx_llc_slices_destroy(a6xx_gpu);
1993
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001994 a6xx_gmu_remove(a6xx_gpu);
1995
1996 adreno_gpu_cleanup(adreno_gpu);
Akhil P Oommenfe7952c2021-01-08 23:45:30 +05301997
Jordan Crouse4b565ca2018-08-06 11:33:24 -06001998 kfree(a6xx_gpu);
1999}
2000
Chia-I Wu15c41192022-04-15 17:33:13 -07002001static u64 a6xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
Sharat Masettya2c3c0a2018-10-04 15:11:43 +05302002{
2003 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
2004 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
Chia-I Wu15c41192022-04-15 17:33:13 -07002005 u64 busy_cycles;
Sharat Masettya2c3c0a2018-10-04 15:11:43 +05302006
Chia-I Wu15c41192022-04-15 17:33:13 -07002007 /* 19.2MHz */
2008 *out_sample_rate = 19200000;
Jordan Crouseeadf7922020-05-01 13:43:26 -06002009
Sharat Masettya2c3c0a2018-10-04 15:11:43 +05302010 busy_cycles = gmu_read64(&a6xx_gpu->gmu,
2011 REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
2012 REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);
2013
Chia-I Wu15c41192022-04-15 17:33:13 -07002014 return busy_cycles;
Sharat Masettya2c3c0a2018-10-04 15:11:43 +05302015}
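/*
 * Sketch of how these values are expected to be consumed (an assumption
 * for illustration, not something this file implements): the devfreq code
 * samples gpu_busy() periodically and derives utilization as
 *
 *   busy_fraction = (cycles_now - cycles_prev) /
 *                   (sample_rate * elapsed_seconds)
 *
 * so with the 19.2 MHz always-on counter, accumulating 9.6M busy cycles
 * over one second reads as roughly 50% busy.
 */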
2016
Douglas Anderson66944822022-06-10 12:47:31 -07002017static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
2018 bool suspended)
Rob Clarkf6f59072021-09-27 11:00:04 -07002019{
2020 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
2021 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
2022
2023 mutex_lock(&a6xx_gpu->gmu.lock);
Douglas Anderson66944822022-06-10 12:47:31 -07002024 a6xx_gmu_set_freq(gpu, opp, suspended);
Rob Clarkf6f59072021-09-27 11:00:04 -07002025 mutex_unlock(&a6xx_gpu->gmu.lock);
2026}
2027
Jordan Crouse84c31ee2020-08-17 15:01:41 -07002028static struct msm_gem_address_space *
Sai Prakash Ranjan45596f22021-01-11 17:34:09 +05302029a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
2030{
2031 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
2032 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
Dmitry Baryshkov32361302022-11-02 20:54:48 +03002033 unsigned long quirks = 0;
Sai Prakash Ranjan45596f22021-01-11 17:34:09 +05302034
2035 /*
2036 * This allows the GPU to set the bus attributes required to use the
2037 * system cache on behalf of the IOMMU page table walker.
2038 */
Dmitry Baryshkov38e27a6f2023-04-10 21:52:26 +03002039 if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice) &&
2040 !device_iommu_capable(&pdev->dev, IOMMU_CAP_CACHE_COHERENCY))
Dmitry Baryshkov32361302022-11-02 20:54:48 +03002041 quirks |= IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
Sai Prakash Ranjan45596f22021-01-11 17:34:09 +05302042
Dmitry Baryshkov822ff992022-11-02 20:54:49 +03002043 return adreno_iommu_create_address_space(gpu, pdev, quirks);
Sai Prakash Ranjan45596f22021-01-11 17:34:09 +05302044}
2045
2046static struct msm_gem_address_space *
Jordan Crouse84c31ee2020-08-17 15:01:41 -07002047a6xx_create_private_address_space(struct msm_gpu *gpu)
2048{
2049 struct msm_mmu *mmu;
2050
2051 mmu = msm_iommu_pagetable_create(gpu->aspace->mmu);
2052
2053 if (IS_ERR(mmu))
2054 return ERR_CAST(mmu);
2055
2056 return msm_gem_address_space_create(mmu,
Rob Clark36bbfdb2022-05-29 11:04:23 -07002057 "gpu", 0x100000000ULL,
2058 adreno_private_address_space_size(gpu));
Jordan Crouse84c31ee2020-08-17 15:01:41 -07002059}
2060
Jordan Croused3a569f2020-09-14 16:40:22 -06002061static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
2062{
2063 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
2064 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
2065
2066 if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
2067 return a6xx_gpu->shadow[ring->id];
2068
2069 return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
2070}
2071
Rob Clarkd73b1d02022-11-14 11:30:41 -08002072static bool a6xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
2073{
2074 struct msm_cp_state cp_state = {
2075 .ib1_base = gpu_read64(gpu, REG_A6XX_CP_IB1_BASE),
2076 .ib2_base = gpu_read64(gpu, REG_A6XX_CP_IB2_BASE),
2077 .ib1_rem = gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
2078 .ib2_rem = gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE),
2079 };
2080 bool progress;
2081
2082 /*
2083 * Adjust the remaining data to account for what has already been
2084 * fetched from memory, but not yet consumed by the SQE.
2085 *
2086 * This is not *technically* correct, the amount buffered could
2087 * exceed the IB size due to hw prefetching ahead, but:
2088 *
2089 * (1) We aren't trying to find the exact position, just whether
2090 * progress has been made
2091 * (2) The CP_REG_TO_MEM at the end of a submit should be enough
2092 * to prevent prefetching into an unrelated submit. (And
2093 * either way, at some point the ROQ will be full.)
2094 */
Rob Clarkf73343f2023-03-20 11:54:14 -07002095 cp_state.ib1_rem += gpu_read(gpu, REG_A6XX_CP_ROQ_AVAIL_IB1) >> 16;
2096 cp_state.ib2_rem += gpu_read(gpu, REG_A6XX_CP_ROQ_AVAIL_IB2) >> 16;
Rob Clarkd73b1d02022-11-14 11:30:41 -08002097
2098 progress = !!memcmp(&cp_state, &ring->last_cp_state, sizeof(cp_state));
2099
2100 ring->last_cp_state = cp_state;
2101
2102 return progress;
2103}
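/*
 * Note (based on how the core driver is assumed to use this hook): the
 * hangcheck timer calls progress() when a ring appears stuck; a "true"
 * return means the CP is still consuming commands, so the hang deadline
 * is extended instead of scheduling recovery.
 */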
2104
Rob Clarkc928a052023-07-27 14:20:12 -07002105static u32 fuse_to_supp_hw(const struct adreno_info *info, u32 fuse)
Konrad Dybciocd036d52023-06-16 01:21:01 +02002106{
Rob Clarkc928a052023-07-27 14:20:12 -07002107 if (!info->speedbins)
Akhil P Oommenfe7952c2021-01-08 23:45:30 +05302108 return UINT_MAX;
Akhil P Oommenfe7952c2021-01-08 23:45:30 +05302109
Rob Clarkc928a052023-07-27 14:20:12 -07002110 for (int i = 0; info->speedbins[i].fuse != SHRT_MAX; i++)
2111 if (info->speedbins[i].fuse == fuse)
2112 return BIT(info->speedbins[i].speedbin);
2113
2114 return UINT_MAX;
Akhil P Oommenfe7952c2021-01-08 23:45:30 +05302115}
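/*
 * Illustrative example with hypothetical table values (not taken from the
 * GPU catalog): if info->speedbins were
 *
 *   { { .fuse = 0, .speedbin = 0 }, { .fuse = 169, .speedbin = 1 },
 *     { .fuse = SHRT_MAX } }
 *
 * then a fuse value of 169 read from hardware would map to BIT(1) == 0x2.
 * a6xx_set_supported_hw() below passes that mask to
 * devm_pm_opp_set_supported_hw(), so only OPP entries whose
 * opp-supported-hw property includes bit 1 stay enabled.
 */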
2116
Rob Clarkc928a052023-07-27 14:20:12 -07002117static int a6xx_set_supported_hw(struct device *dev, const struct adreno_info *info)
Akhil P Oommenfe7952c2021-01-08 23:45:30 +05302118{
Rob Clarkf6d19182022-11-15 07:46:34 -08002119 u32 supp_hw;
Douglas Andersonc9f737c2021-05-21 13:45:50 -07002120 u32 speedbin;
Douglas Anderson7bf168c2021-02-26 16:26:01 -08002121 int ret;
Akhil P Oommenfe7952c2021-01-08 23:45:30 +05302122
Akhil P Oommenafab9d92022-02-26 00:51:31 +05302123 ret = adreno_read_speedbin(dev, &speedbin);
John Stultz2b0b2192021-03-30 01:34:08 +00002124 /*
2125 * -ENOENT means that the platform doesn't support speedbin,
2126 * which is fine
2127 */
2128 if (ret == -ENOENT) {
2129 return 0;
2130 } else if (ret) {
Rob Clarkf6d19182022-11-15 07:46:34 -08002131 dev_err_probe(dev, ret,
2132 "failed to read speed-bin. Some OPPs may not be supported by hardware\n");
2133 return ret;
Akhil P Oommenfe7952c2021-01-08 23:45:30 +05302134 }
2135
Rob Clarkc928a052023-07-27 14:20:12 -07002136 supp_hw = fuse_to_supp_hw(info, speedbin);
2137
2138 if (supp_hw == UINT_MAX) {
2139 DRM_DEV_ERROR(dev,
2140 "missing support for speed-bin: %u. Some OPPs may not be supported by hardware\n",
2141 speedbin);
Konrad Dybcio75cb60d2023-09-26 20:24:36 +02002142 supp_hw = BIT(0); /* Default */
Rob Clarkc928a052023-07-27 14:20:12 -07002143 }
Akhil P Oommenfe7952c2021-01-08 23:45:30 +05302144
Yangtao Li11120e92021-03-14 19:34:04 +03002145 ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1);
2146 if (ret)
2147 return ret;
Akhil P Oommenfe7952c2021-01-08 23:45:30 +05302148
Akhil P Oommenfe7952c2021-01-08 23:45:30 +05302149 return 0;
2150}
2151
Jordan Crouse4b565ca2018-08-06 11:33:24 -06002152static const struct adreno_gpu_funcs funcs = {
2153 .base = {
2154 .get_param = adreno_get_param,
Rob Clarkf7ddbf52022-03-03 16:52:15 -08002155 .set_param = adreno_set_param,
Jordan Crouse4b565ca2018-08-06 11:33:24 -06002156 .hw_init = a6xx_hw_init,
Rob Clark8ead9672023-03-20 07:43:35 -07002157 .ucode_load = a6xx_ucode_load,
Konrad Dybcio5a903a42023-06-16 01:20:53 +02002158 .pm_suspend = a6xx_gmu_pm_suspend,
2159 .pm_resume = a6xx_gmu_pm_resume,
Jordan Crouse4b565ca2018-08-06 11:33:24 -06002160 .recover = a6xx_recover,
2161 .submit = a6xx_submit,
Jordan Crouse4b565ca2018-08-06 11:33:24 -06002162 .active_ring = a6xx_active_ring,
2163 .irq = a6xx_irq,
2164 .destroy = a6xx_destroy,
Jordan Crouseb02872d2019-04-10 10:58:16 -06002165#if defined(CONFIG_DRM_MSM_GPU_STATE)
Jordan Crouse4b565ca2018-08-06 11:33:24 -06002166 .show = a6xx_show,
2167#endif
Sharat Masettya2c3c0a2018-10-04 15:11:43 +05302168 .gpu_busy = a6xx_gpu_busy,
2169 .gpu_get_freq = a6xx_gmu_get_freq,
Rob Clarkf6f59072021-09-27 11:00:04 -07002170 .gpu_set_freq = a6xx_gpu_set_freq,
Jordan Crouseb02872d2019-04-10 10:58:16 -06002171#if defined(CONFIG_DRM_MSM_GPU_STATE)
Jordan Crouse1707add2018-11-02 09:25:25 -06002172 .gpu_state_get = a6xx_gpu_state_get,
2173 .gpu_state_put = a6xx_gpu_state_put,
Jordan Crouseb02872d2019-04-10 10:58:16 -06002174#endif
Sai Prakash Ranjan45596f22021-01-11 17:34:09 +05302175 .create_address_space = a6xx_create_address_space,
Jordan Crouse84c31ee2020-08-17 15:01:41 -07002176 .create_private_address_space = a6xx_create_private_address_space,
Jordan Croused3a569f2020-09-14 16:40:22 -06002177 .get_rptr = a6xx_get_rptr,
Rob Clarkd73b1d02022-11-14 11:30:41 -08002178 .progress = a6xx_progress,
Jordan Crouse4b565ca2018-08-06 11:33:24 -06002179 },
Konrad Dybcio5a903a42023-06-16 01:20:53 +02002180 .get_timestamp = a6xx_gmu_get_timestamp,
2181};
2182
2183static const struct adreno_gpu_funcs funcs_gmuwrapper = {
2184 .base = {
2185 .get_param = adreno_get_param,
2186 .set_param = adreno_set_param,
2187 .hw_init = a6xx_hw_init,
2188 .ucode_load = a6xx_ucode_load,
2189 .pm_suspend = a6xx_pm_suspend,
2190 .pm_resume = a6xx_pm_resume,
2191 .recover = a6xx_recover,
2192 .submit = a6xx_submit,
2193 .active_ring = a6xx_active_ring,
2194 .irq = a6xx_irq,
2195 .destroy = a6xx_destroy,
2196#if defined(CONFIG_DRM_MSM_GPU_STATE)
2197 .show = a6xx_show,
2198#endif
2199 .gpu_busy = a6xx_gpu_busy,
2200#if defined(CONFIG_DRM_MSM_GPU_STATE)
2201 .gpu_state_get = a6xx_gpu_state_get,
2202 .gpu_state_put = a6xx_gpu_state_put,
2203#endif
2204 .create_address_space = a6xx_create_address_space,
2205 .create_private_address_space = a6xx_create_private_address_space,
2206 .get_rptr = a6xx_get_rptr,
2207 .progress = a6xx_progress,
2208 },
Jordan Crouse4b565ca2018-08-06 11:33:24 -06002209 .get_timestamp = a6xx_get_timestamp,
2210};
2211
Konrad Dybcioaf667062023-09-25 16:50:34 +02002212static const struct adreno_gpu_funcs funcs_a7xx = {
2213 .base = {
2214 .get_param = adreno_get_param,
2215 .set_param = adreno_set_param,
2216 .hw_init = a6xx_hw_init,
2217 .ucode_load = a6xx_ucode_load,
2218 .pm_suspend = a6xx_gmu_pm_suspend,
2219 .pm_resume = a6xx_gmu_pm_resume,
2220 .recover = a6xx_recover,
2221 .submit = a7xx_submit,
2222 .active_ring = a6xx_active_ring,
2223 .irq = a6xx_irq,
2224 .destroy = a6xx_destroy,
2225#if defined(CONFIG_DRM_MSM_GPU_STATE)
2226 .show = a6xx_show,
2227#endif
2228 .gpu_busy = a6xx_gpu_busy,
2229 .gpu_get_freq = a6xx_gmu_get_freq,
2230 .gpu_set_freq = a6xx_gpu_set_freq,
2231#if defined(CONFIG_DRM_MSM_GPU_STATE)
2232 .gpu_state_get = a6xx_gpu_state_get,
2233 .gpu_state_put = a6xx_gpu_state_put,
2234#endif
2235 .create_address_space = a6xx_create_address_space,
2236 .create_private_address_space = a6xx_create_private_address_space,
2237 .get_rptr = a6xx_get_rptr,
2238 .progress = a6xx_progress,
2239 },
2240 .get_timestamp = a6xx_gmu_get_timestamp,
2241};
2242
Jordan Crouse4b565ca2018-08-06 11:33:24 -06002243struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
2244{
2245 struct msm_drm_private *priv = dev->dev_private;
2246 struct platform_device *pdev = priv->gpu_pdev;
Jordan Crousee9ba8d52020-09-15 10:35:51 -06002247 struct adreno_platform_config *config = pdev->dev.platform_data;
Jordan Crouse4b565ca2018-08-06 11:33:24 -06002248 struct device_node *node;
2249 struct a6xx_gpu *a6xx_gpu;
2250 struct adreno_gpu *adreno_gpu;
2251 struct msm_gpu *gpu;
Konrad Dybcioaf667062023-09-25 16:50:34 +02002252 bool is_a7xx;
Jordan Crouse4b565ca2018-08-06 11:33:24 -06002253 int ret;
2254
2255 a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
2256 if (!a6xx_gpu)
2257 return ERR_PTR(-ENOMEM);
2258
2259 adreno_gpu = &a6xx_gpu->base;
2260 gpu = &adreno_gpu->base;
2261
Dmitry Baryshkov12abd7352023-04-10 19:59:08 +03002262 mutex_init(&a6xx_gpu->gmu.lock);
2263
Jordan Crouse1707add2018-11-02 09:25:25 -06002264 adreno_gpu->registers = NULL;
Jordan Crouse4b565ca2018-08-06 11:33:24 -06002265
Konrad Dybcio5a903a42023-06-16 01:20:53 +02002266 /* Check if there is a GMU phandle and set it up */
2267 node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
2268 /* FIXME: How do we gracefully handle this? */
2269 BUG_ON(!node);
2270
2271 adreno_gpu->gmu_is_wrapper = of_device_is_compatible(node, "qcom,adreno-gmu-wrapper");
2272
Rob Clark47bd37f2023-07-27 14:20:16 -07002273 adreno_gpu->base.hw_apriv =
2274 !!(config->info->quirks & ADRENO_QUIRK_HAS_HW_APRIV);
Jordan Crouse604234f2020-09-03 20:03:11 -06002275
Konrad Dybcioaf667062023-09-25 16:50:34 +02002276 /* gpu->info only gets assigned in adreno_gpu_init() */
Konrad Dybcio1f8c29e2023-09-25 16:50:38 +02002277 is_a7xx = config->info->family == ADRENO_7XX_GEN1 ||
Neil Armstrongd2bcca02024-02-16 12:03:52 +01002278 config->info->family == ADRENO_7XX_GEN2 ||
2279 config->info->family == ADRENO_7XX_GEN3;
Konrad Dybcioaf667062023-09-25 16:50:34 +02002280
2281 a6xx_llc_slices_init(pdev, a6xx_gpu, is_a7xx);
Sharat Masetty474dadb2020-11-25 12:30:15 +05302282
Rob Clark47bd37f2023-07-27 14:20:16 -07002283 ret = a6xx_set_supported_hw(&pdev->dev, config->info);
Akhil P Oommenfe7952c2021-01-08 23:45:30 +05302284 if (ret) {
Konrad Dybcio46d4efc2024-04-12 10:53:25 +02002285 a6xx_llc_slices_destroy(a6xx_gpu);
2286 kfree(a6xx_gpu);
Akhil P Oommenfe7952c2021-01-08 23:45:30 +05302287 return ERR_PTR(ret);
2288 }
2289
Konrad Dybcioaf667062023-09-25 16:50:34 +02002290 if (is_a7xx)
2291 ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 1);
2292 else if (adreno_has_gmu_wrapper(adreno_gpu))
Konrad Dybcio5a903a42023-06-16 01:20:53 +02002293 ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_gmuwrapper, 1);
2294 else
2295 ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
Jordan Crouse4b565ca2018-08-06 11:33:24 -06002296 if (ret) {
2297 a6xx_destroy(&(a6xx_gpu->base.base));
2298 return ERR_PTR(ret);
2299 }
2300
Rob Clark2c1b7742022-11-15 07:55:33 -08002301 /*
2302 * For now only clamp to idle freq for devices where this is known not
2303 * to cause power supply issues:
2304 */
2305 if (adreno_is_a618(adreno_gpu) || adreno_is_7c3(adreno_gpu))
Rob Clark6563f602023-01-10 15:14:42 -08002306 priv->gpu_clamp_to_idle = true;
Rob Clark2c1b7742022-11-15 07:55:33 -08002307
Konrad Dybcio5a903a42023-06-16 01:20:53 +02002308 if (adreno_has_gmu_wrapper(adreno_gpu))
2309 ret = a6xx_gmu_wrapper_init(a6xx_gpu, node);
2310 else
2311 ret = a6xx_gmu_init(a6xx_gpu, node);
Miaoqian Linc56de482022-05-12 16:19:50 +04002312 of_node_put(node);
Jordan Crouse4b565ca2018-08-06 11:33:24 -06002313 if (ret) {
2314 a6xx_destroy(&(a6xx_gpu->base.base));
2315 return ERR_PTR(ret);
2316 }
2317
Connor Abbott14b27d52024-04-30 11:43:18 +01002318 if (adreno_is_a7xx(adreno_gpu)) {
2319 ret = a7xx_cx_mem_init(a6xx_gpu);
2320 if (ret) {
2321 a6xx_destroy(&(a6xx_gpu->base.base));
2322 return ERR_PTR(ret);
2323 }
2324 }
2325
Jordan Crouse4b565ca2018-08-06 11:33:24 -06002326 if (gpu->aspace)
2327 msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
2328 a6xx_fault_handler);
2329
Connor Abbott88144552023-12-07 21:30:47 +00002330 a6xx_calc_ubwc_config(adreno_gpu);
2331
Jordan Crouse4b565ca2018-08-06 11:33:24 -06002332 return gpu;
2333}