blob: 9cd6690c6a3faece01132b757d613a0a09a9430e [file] [log] [blame]
Leo Liu88b5af72016-12-28 11:57:38 -05001/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include <drm/drmP.h>
26#include "amdgpu.h"
27#include "amdgpu_vcn.h"
28#include "soc15d.h"
29#include "soc15_common.h"
30
31#include "vega10/soc15ip.h"
32#include "raven1/VCN/vcn_1_0_offset.h"
33#include "raven1/VCN/vcn_1_0_sh_mask.h"
34#include "vega10/HDP/hdp_4_0_offset.h"
35#include "raven1/MMHUB/mmhub_9_1_offset.h"
36#include "raven1/MMHUB/mmhub_9_1_sh_mask.h"
37
38static int vcn_v1_0_start(struct amdgpu_device *adev);
39static int vcn_v1_0_stop(struct amdgpu_device *adev);
Leo Liucca69fe2017-05-05 11:40:59 -040040static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
Leo Liua319f442016-12-28 13:22:18 -050041static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
Leo Liu88b5af72016-12-28 11:57:38 -050042
/**
 * vcn_v1_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer (passed as void * by the IP framework)
 *
 * Install the decode ring and interrupt function pointer tables.
 */
static int vcn_v1_0_early_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	vcn_v1_0_set_dec_ring_funcs(adev);
	vcn_v1_0_set_irq_funcs(adev);

	return 0;
}
59
60/**
61 * vcn_v1_0_sw_init - sw init for VCN block
62 *
63 * @handle: amdgpu_device pointer
64 *
65 * Load firmware and sw initialization
66 */
67static int vcn_v1_0_sw_init(void *handle)
68{
Leo Liua4bf608b2016-12-28 12:16:48 -050069 struct amdgpu_ring *ring;
Leo Liu88b5af72016-12-28 11:57:38 -050070 int r;
71 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
72
73 /* VCN TRAP */
74 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
75 if (r)
76 return r;
77
78 r = amdgpu_vcn_sw_init(adev);
79 if (r)
80 return r;
81
82 r = amdgpu_vcn_resume(adev);
83 if (r)
84 return r;
85
Leo Liua4bf608b2016-12-28 12:16:48 -050086 ring = &adev->vcn.ring_dec;
87 sprintf(ring->name, "vcn_dec");
88 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
89
Leo Liu88b5af72016-12-28 11:57:38 -050090 return r;
91}
92
/**
 * vcn_v1_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Suspend the VCN block and free up its software allocations.
 */
static int vcn_v1_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = amdgpu_vcn_suspend(adev);

	if (r)
		return r;

	return amdgpu_vcn_sw_fini(adev);
}
113
114/**
115 * vcn_v1_0_hw_init - start and test VCN block
116 *
117 * @handle: amdgpu_device pointer
118 *
119 * Initialize the hardware, boot up the VCPU and do some testing
120 */
121static int vcn_v1_0_hw_init(void *handle)
122{
123 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
124 struct amdgpu_ring *ring = &adev->vcn.ring_dec;
125 int r;
126
127 r = vcn_v1_0_start(adev);
128 if (r)
129 goto done;
130
131 ring->ready = true;
132 r = amdgpu_ring_test_ring(ring);
133 if (r) {
134 ring->ready = false;
135 goto done;
136 }
137
138done:
139 if (!r)
140 DRM_INFO("VCN decode initialized successfully.\n");
141
142 return r;
143}
144
145/**
146 * vcn_v1_0_hw_fini - stop the hardware block
147 *
148 * @handle: amdgpu_device pointer
149 *
150 * Stop the VCN block, mark ring as not ready any more
151 */
152static int vcn_v1_0_hw_fini(void *handle)
153{
154 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
155 struct amdgpu_ring *ring = &adev->vcn.ring_dec;
156 int r;
157
158 r = vcn_v1_0_stop(adev);
159 if (r)
160 return r;
161
162 ring->ready = false;
163
164 return 0;
165}
166
/**
 * vcn_v1_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini, then suspend the common VCN state (firmware, BOs).
 */
static int vcn_v1_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = vcn_v1_0_hw_fini(adev);

	if (r)
		return r;

	return amdgpu_vcn_suspend(adev);
}
187
/**
 * vcn_v1_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware/common state, then hw init the VCN block.
 */
static int vcn_v1_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = amdgpu_vcn_resume(adev);

	if (r)
		return r;

	return vcn_v1_0_hw_init(adev);
}
208
/**
 * vcn_v1_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets: the firmware image,
 * heap and stack regions inside the VCN BO, plus the tiling config.
 */
static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 (64-bit base of the VCN BO) */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.gpu_addr));
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.gpu_addr));

	/* Current FW has no signed header, but will be added later on */
	/* offset = AMDGPU_VCN_FIRMWARE_OFFSET; */
	offset = 0;
	/* cache window 0: the firmware image (+4 for the size dword) */
	size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	/* offsets are programmed in units of 8 bytes, hence >> 3 */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), offset >> 3);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size);

	/* cache window 1: the VCN heap */
	offset += size;
	size = AMDGPU_VCN_HEAP_SIZE;
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), offset >> 3);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), size);

	/* cache window 2: stack plus per-session state (up to 40 sessions) */
	offset += size;
	size = AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), offset >> 3);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2), size);

	/* mirror the GFX tiling/address config into the UVD decoder blocks */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_ADDR_CONFIG),
			adev->gfx.config.gb_addr_config);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG),
			adev->gfx.config.gb_addr_config);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG),
			adev->gfx.config.gb_addr_config);
}
251
/**
 * vcn_v1_0_start - start VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCN block: program the memory controller, reset and
 * boot the VCPU, wait for it to report ready, then set up the RBC ring
 * buffer registers. The register write sequence and delays below follow
 * the hardware bring-up order and must not be reordered.
 *
 * Returns 0 on success, negative value if the VCPU never came up.
 */
static int vcn_v1_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	int i, j, r;

	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_v1_0_mc_resume(adev);

	/* disable clock gating */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL), 0,
			~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);

	/* disable interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
			UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* initialize VCN memory controller */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL),
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		0x00100000L);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_SWAP_CNTL), lmi_swap_cntl);

	/* MPC (macro pipe controller) mux setup; magic values are the
	 * hardware-recommended defaults, same as the UVD code uses */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA0), 0x40c2040);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA1), 0x0);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB0), 0x40c2040);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB1), 0x0);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_ALU), 0);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUX), 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	/* boot up the VCPU */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
	mdelay(10);

	/* wait for the VCPU to report ready (UVD_STATUS bit 1); retry up to
	 * 10 times, resetting the VCPU between attempts */
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS));
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp);

	/* set the write pointer delay */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL), 0);

	/* set the wb address */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR),
			(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(ring->gpu_addr));
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR), 0);

	ring->wptr = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR));
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR),
			lower_32_bits(ring->wptr));

	/* clear RB_NO_FETCH so the RBC starts fetching from the ring */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
			~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	return 0;
}
408
/**
 * vcn_v1_0_stop - stop VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the VCN block: idle the RBC, then stall the memory controller,
 * reset the VCPU and gate its clock. Sequence mirrors vcn_v1_0_start()
 * in reverse and must not be reordered.
 */
static int vcn_v1_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
			UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	mdelay(1);

	/* put VCPU into reset */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	return 0;
}
441
/* Clockgating control is not implemented yet; the stub is needed so the
 * IP framework can call it during driver unload. */
static int vcn_v1_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	/* needed for driver unload*/
	return 0;
}
448
/**
 * vcn_v1_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer of the decode ring,
 * read directly from the RBC read-pointer register.
 */
static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR));
}
462
/**
 * vcn_v1_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer of the decode ring,
 * read directly from the RBC write-pointer register.
 */
static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR));
}
476
/**
 * vcn_v1_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the software write pointer to the hardware RBC write-pointer
 * register (low 32 bits only; this ring does not support 64-bit pointers).
 */
static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR), lower_32_bits(ring->wptr));
}
490
/**
 * vcn_v1_0_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence value to
 * @seq: fence sequence number to write
 * @flags: fence flags (64-bit fences are not supported here)
 *
 * Write a fence command (context id + address + value, GPCOM cmd 0)
 * followed by a trap command (GPCOM cmd 2) to the ring.
 */
static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* fence: write seq to addr */
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	/* only 40-bit addresses: upper 8 bits of the high dword */
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 0);

	/* trap: raise an interrupt after the fence write */
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 2);
}
527
/**
 * vcn_v1_0_dec_ring_emit_hdp_invalidate - emit an hdp invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp invalidate by writing 1 to HDP_DEBUG0 through the ring.
 */
static void vcn_v1_0_dec_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 0));
	amdgpu_ring_write(ring, 1);
}
540
/**
 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 * @vm_id: VM id the IB runs under
 * @ctx_switch: unused by this ring type
 *
 * Write ring commands to execute the indirect buffer: VMID, IB base
 * address (64-bit) and IB size in dwords.
 */
static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vm_id);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
567
/* Emit a GPCOM register-write command (cmd 8): the VCPU writes @data1
 * to the register byte-offset given in @data0. */
static void vcn_v1_0_dec_vm_reg_write(struct amdgpu_ring *ring,
				uint32_t data0, uint32_t data1)
{
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, data0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, data1);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 8);
}
581
/* Emit a GPCOM register-wait command (cmd 12): the VCPU polls the
 * register at byte-offset @data0 until (value & @mask) == @data1. */
static void vcn_v1_0_dec_vm_reg_wait(struct amdgpu_ring *ring,
				uint32_t data0, uint32_t data1, uint32_t mask)
{
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, data0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, data1);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 12);
}
598
/* Emit the per-VMID page-directory update and TLB flush for this ring:
 * program the PD base into the hub's ctx0 PTB registers, wait for the
 * low dword to land, then request and wait for the TLB invalidation. */
static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vm_id, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
	uint32_t data0, data1, mask;
	unsigned eng = ring->vm_inv_eng;

	pd_addr = pd_addr | 0x1; /* valid bit */
	/* now only use physical base address of PDE and valid */
	BUG_ON(pd_addr & 0xFFFF00000000003EULL);

	/* write the PD address, high dword first */
	data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
	data1 = upper_32_bits(pd_addr);
	vcn_v1_0_dec_vm_reg_write(ring, data0, data1);

	data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
	data1 = lower_32_bits(pd_addr);
	vcn_v1_0_dec_vm_reg_write(ring, data0, data1);

	/* wait until the low dword reads back what we wrote */
	data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask);

	/* flush TLB */
	data0 = (hub->vm_inv_eng0_req + eng) << 2;
	data1 = req;
	vcn_v1_0_dec_vm_reg_write(ring, data0, data1);

	/* wait for flush */
	data0 = (hub->vm_inv_eng0_ack + eng) << 2;
	data1 = 1 << vm_id;
	mask = 1 << vm_id;
	vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask);
}
635
/* Interrupt enable/disable hook; VCN interrupts need no per-state
 * programming here, so this is a no-op that always succeeds. */
static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}
643
/* VCN TRAP interrupt handler: processes completed fences on the
 * decode ring. */
static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	amdgpu_fence_process(&adev->vcn.ring_dec);

	return 0;
}
654
/* IP block lifecycle callbacks for VCN 1.0; NULL entries are hooks not
 * implemented for this block yet. */
static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
	.name = "vcn_v1_0",
	.early_init = vcn_v1_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v1_0_sw_init,
	.sw_fini = vcn_v1_0_sw_fini,
	.hw_init = vcn_v1_0_hw_init,
	.hw_fini = vcn_v1_0_hw_fini,
	.suspend = vcn_v1_0_suspend,
	.resume = vcn_v1_0_resume,
	.is_idle = NULL /* vcn_v1_0_is_idle */,
	.wait_for_idle = NULL /* vcn_v1_0_wait_for_idle */,
	.check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
	.pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
	.soft_reset = NULL /* vcn_v1_0_soft_reset */,
	.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
	.set_clockgating_state = vcn_v1_0_set_clockgating_state,
	.set_powergating_state = NULL /* vcn_v1_0_set_powergating_state */,
};
Leo Liucca69fe2017-05-05 11:40:59 -0400674
/* Ring callbacks for the VCN decode ring (VM mode). */
static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.nop = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0),
	.support_64bit_ptrs = false,
	.get_rptr = vcn_v1_0_dec_ring_get_rptr,
	.get_wptr = vcn_v1_0_dec_ring_get_wptr,
	.set_wptr = vcn_v1_0_dec_ring_set_wptr,
	.emit_frame_size =
		2 + /* vcn_v1_0_dec_ring_emit_hdp_invalidate */
		34 * AMDGPU_MAX_VMHUBS + /* vcn_v1_0_dec_ring_emit_vm_flush */
		14 + 14, /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
	.emit_ib = vcn_v1_0_dec_ring_emit_ib,
	.emit_fence = vcn_v1_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
	.emit_hdp_invalidate = vcn_v1_0_dec_ring_emit_hdp_invalidate,
	.test_ring = amdgpu_vcn_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
};
699
/* Install the decode-ring callback table on the device. */
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
	DRM_INFO("VCN decode is enabled in VM mode\n");
}
Leo Liua319f442016-12-28 13:22:18 -0500705
/* Interrupt source callbacks for the VCN TRAP interrupt. */
static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
	.set = vcn_v1_0_set_interrupt_state,
	.process = vcn_v1_0_process_interrupt,
};
710
/* Install the interrupt callback table; VCN has a single interrupt type. */
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vcn.irq.num_types = 1;
	adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
}
Leo Liu3ea975e2016-12-28 13:04:16 -0500716
/* Exported IP block descriptor: VCN 1.0.0 with the callbacks above. */
const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_VCN,
		.major = 1,
		.minor = 0,
		.rev = 0,
		.funcs = &vcn_v1_0_ip_funcs,
};