/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <linux/ascii85.h>
#include <linux/highmem.h>
#include <linux/nmi.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/utsname.h>
#include <linux/zlib.h>

#include <drm/drm_cache.h>
#include <drm/drm_print.h>

#include "display/intel_dmc.h"
#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_regs.h"
#include "gt/uc/intel_guc_capture.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_memcpy.h"
#include "i915_reg.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"

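/*
 * Allocations during error capture must be able to fail quietly: the GPU
 * may be wedged and memory scarce, so allow kswapd reclaim and "retry may
 * fail" semantics without triggering the OOM warning. ATOMIC_MAYFAIL is
 * the equivalent for atomic context.
 */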
#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)

static void __sg_set_buf(struct scatterlist *sg,
			 void *addr, unsigned int len, loff_t it)
{
	sg->page_link = (unsigned long)virt_to_page(addr);
	sg->offset = offset_in_page(addr);
	sg->length = len;
	sg->dma_address = it;
}

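/*
 * Grow the error string buffer: completed kmalloc'd chunks are recorded as
 * scatterlist entries (tables chained one page at a time via SG_CHAIN), so
 * a multi-megabyte dump never needs a single large contiguous allocation.
 * The running byte offset of each chunk is stashed in sg->dma_address for
 * seeking when the dump is later read back.
 */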
static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
{
	if (!len)
		return false;

	if (e->bytes + len + 1 <= e->size)
		return true;

	if (e->bytes) {
		__sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
		e->iter += e->bytes;
		e->buf = NULL;
		e->bytes = 0;
	}

	if (e->cur == e->end) {
		struct scatterlist *sgl;

		sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL);
		if (!sgl) {
			e->err = -ENOMEM;
			return false;
		}

		if (e->cur) {
			e->cur->offset = 0;
			e->cur->length = 0;
			e->cur->page_link =
				(unsigned long)sgl | SG_CHAIN;
		} else {
			e->sgl = sgl;
		}

		e->cur = sgl;
		e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
	}

	e->size = ALIGN(len + 1, SZ_64K);
	e->buf = kmalloc(e->size, ALLOW_FAIL);
	if (!e->buf) {
		e->size = PAGE_ALIGN(len + 1);
		e->buf = kmalloc(e->size, GFP_KERNEL);
	}
	if (!e->buf) {
		e->err = -ENOMEM;
		return false;
	}

	return true;
}

__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *fmt, va_list args)
{
	va_list ap;
	int len;

	if (e->err)
		return;

	va_copy(ap, args);
	len = vsnprintf(NULL, 0, fmt, ap);
	va_end(ap);
	if (len <= 0) {
		e->err = len;
		return;
	}

	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes >= e->size);
	len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
	if (len < 0) {
		e->err = len;
		return;
	}
	e->bytes += len;
}

static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
{
	unsigned int len;

	if (e->err || !str)
		return;

	len = strlen(str);
	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes + len > e->size);
	memcpy(e->buf + e->bytes, str, len);
	e->bytes += len;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)

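/*
 * Adapt the error state buffer to the drm_printer interface, so that shared
 * dump helpers (intel_device_info_print(), i915_params_dump(), ...) can
 * write straight into the coredump.
 */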
static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
{
	i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
}

static inline struct drm_printer
i915_error_printer(struct drm_i915_error_state_buf *e)
{
	struct drm_printer p = {
		.printfn = __i915_printfn_error,
		.arg = e,
	};
	return p;
}

/* single threaded page allocator with a reserved stash for emergencies */
static void pool_fini(struct pagevec *pv)
{
	pagevec_release(pv);
}

static int pool_refill(struct pagevec *pv, gfp_t gfp)
{
	while (pagevec_space(pv)) {
		struct page *p;

		p = alloc_page(gfp);
		if (!p)
			return -ENOMEM;

		pagevec_add(pv, p);
	}

	return 0;
}

static int pool_init(struct pagevec *pv, gfp_t gfp)
{
	int err;

	pagevec_init(pv);

	err = pool_refill(pv, gfp);
	if (err)
		pool_fini(pv);

	return err;
}

static void *pool_alloc(struct pagevec *pv, gfp_t gfp)
{
	struct page *p;

	p = alloc_page(gfp);
	if (!p && pagevec_count(pv))
		p = pv->pages[--pv->nr];

	return p ? page_address(p) : NULL;
}

static void pool_free(struct pagevec *pv, void *addr)
{
	struct page *p = virt_to_page(addr);

	if (pagevec_space(pv))
		pagevec_add(pv, p);
	else
		__free_page(p);
}

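/*
 * Captured buffer-object pages are staged below either zlib-deflated
 * (CONFIG_DRM_I915_COMPRESS_ERROR) or as raw copies; both variants share
 * the compress_*() interface, and err_compression_marker() prefixes each
 * object dump with ':' (deflate) or '~' (plain) so decoders can tell the
 * encodings apart. Sketch of the lifecycle as used in this file:
 *
 *	compress_init(c);		// once per capture: pool (+ zlib workspace)
 *	compress_start(c);		// per object: reset the stream
 *	compress_page(c, src, dst, wc);	// per page
 *	compress_flush(c, dst);		// per object: drain into dst
 *	compress_finish(c);		// per object: end the stream
 *	compress_fini(c);		// once per capture
 */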
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR

struct i915_vma_compress {
	struct pagevec pool;
	struct z_stream_s zstream;
	void *tmp;
};

static bool compress_init(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;

	if (pool_init(&c->pool, ALLOW_FAIL))
		return false;

	zstream->workspace =
		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			ALLOW_FAIL);
	if (!zstream->workspace) {
		pool_fini(&c->pool);
		return false;
	}

	c->tmp = NULL;
	if (i915_has_memcpy_from_wc())
		c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);

	return true;
}

static bool compress_start(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;
	void *workspace = zstream->workspace;

	memset(zstream, 0, sizeof(*zstream));
	zstream->workspace = workspace;

	return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
}

static void *compress_next_page(struct i915_vma_compress *c,
				struct i915_vma_coredump *dst)
{
	void *page_addr;
	struct page *page;

	page_addr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!page_addr)
		return ERR_PTR(-ENOMEM);

	page = virt_to_page(page_addr);
	list_add_tail(&page->lru, &dst->page_list);
	return page_addr;
}

static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	struct z_stream_s *zstream = &c->zstream;

	zstream->next_in = src;
	if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
		zstream->next_in = c->tmp;
	zstream->avail_in = PAGE_SIZE;

	do {
		if (zstream->avail_out == 0) {
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
		}

		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
			return -EIO;

		cond_resched();
	} while (zstream->avail_in);

	/* Fallback to uncompressed if we increase size? */
	if (0 && zstream->total_out > zstream->total_in)
		return -E2BIG;

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	struct z_stream_s *zstream = &c->zstream;

	do {
		switch (zlib_deflate(zstream, Z_FINISH)) {
		case Z_OK: /* more space requested */
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
			break;

		case Z_STREAM_END:
			goto end;

		default: /* any error */
			return -EIO;
		}
	} while (1);

end:
	memset(zstream->next_out, 0, zstream->avail_out);
	dst->unused = zstream->avail_out;
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
	zlib_deflateEnd(&c->zstream);
}

static void compress_fini(struct i915_vma_compress *c)
{
	kfree(c->zstream.workspace);
	if (c->tmp)
		pool_free(&c->pool, c->tmp);
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, ":");
}

#else

struct i915_vma_compress {
	struct pagevec pool;
};

static bool compress_init(struct i915_vma_compress *c)
{
	return pool_init(&c->pool, ALLOW_FAIL) == 0;
}

static bool compress_start(struct i915_vma_compress *c)
{
	return true;
}

static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	void *ptr;

	ptr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!ptr)
		return -ENOMEM;

	if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
		memcpy(ptr, src, PAGE_SIZE);
	list_add_tail(&virt_to_page(ptr)->lru, &dst->page_list);
	cond_resched();

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
}

static void compress_fini(struct i915_vma_compress *c)
{
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, "~");
}

#endif

static void error_print_instdone(struct drm_i915_error_state_buf *m,
				 const struct intel_engine_coredump *ee)
{
	int slice;
	int subslice;
	int iter;

	err_printf(m, "  INSTDONE: 0x%08x\n",
		   ee->instdone.instdone);

	if (ee->engine->class != RENDER_CLASS || GRAPHICS_VER(m->i915) <= 3)
		return;

	err_printf(m, "  SC_INSTDONE: 0x%08x\n",
		   ee->instdone.slice_common);

	if (GRAPHICS_VER(m->i915) <= 6)
		return;

	for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
		err_printf(m, "  SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.sampler[slice][subslice]);

	for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
		err_printf(m, "  ROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.row[slice][subslice]);

	if (GRAPHICS_VER(m->i915) < 12)
		return;

	if (GRAPHICS_VER_FULL(m->i915) >= IP_VER(12, 55)) {
		for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
			err_printf(m, "  GEOM_SVGUNIT_INSTDONE[%d][%d]: 0x%08x\n",
				   slice, subslice,
				   ee->instdone.geom_svg[slice][subslice]);
	}

	err_printf(m, "  SC_INSTDONE_EXTRA: 0x%08x\n",
		   ee->instdone.slice_common_extra[0]);
	err_printf(m, "  SC_INSTDONE_EXTRA2: 0x%08x\n",
		   ee->instdone.slice_common_extra[1]);
}

static void error_print_request(struct drm_i915_error_state_buf *m,
				const char *prefix,
				const struct i915_request_coredump *erq)
{
	if (!erq->seqno)
		return;

	err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n",
		   prefix, erq->pid, erq->context, erq->seqno,
		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			    &erq->flags) ? "!" : "",
		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			    &erq->flags) ? "+" : "",
		   erq->sched_attr.priority,
		   erq->head, erq->tail);
}

static void error_print_context(struct drm_i915_error_state_buf *m,
				const char *header,
				const struct i915_gem_context_coredump *ctx)
{
	err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
		   header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
		   ctx->guilty, ctx->active,
		   ctx->total_runtime, ctx->avg_runtime);
}

static struct i915_vma_coredump *
__find_vma(struct i915_vma_coredump *vma, const char *name)
{
	while (vma) {
		if (strcmp(vma->name, name) == 0)
			return vma;
		vma = vma->next;
	}

	return NULL;
}

struct i915_vma_coredump *
intel_gpu_error_find_batch(const struct intel_engine_coredump *ee)
{
	return __find_vma(ee->vma, "batch");
}

static void error_print_engine(struct drm_i915_error_state_buf *m,
			       const struct intel_engine_coredump *ee)
{
	struct i915_vma_coredump *batch;
	int n;

	err_printf(m, "%s command stream:\n", ee->engine->name);
	err_printf(m, "  CCID:  0x%08x\n", ee->ccid);
	err_printf(m, "  START: 0x%08x\n", ee->start);
	err_printf(m, "  HEAD:  0x%08x [0x%08x]\n", ee->head, ee->rq_head);
	err_printf(m, "  TAIL:  0x%08x [0x%08x, 0x%08x]\n",
		   ee->tail, ee->rq_post, ee->rq_tail);
	err_printf(m, "  CTL:   0x%08x\n", ee->ctl);
	err_printf(m, "  MODE:  0x%08x\n", ee->mode);
	err_printf(m, "  HWS:   0x%08x\n", ee->hws);
	err_printf(m, "  ACTHD: 0x%08x %08x\n",
		   (u32)(ee->acthd>>32), (u32)ee->acthd);
	err_printf(m, "  IPEIR: 0x%08x\n", ee->ipeir);
	err_printf(m, "  IPEHR: 0x%08x\n", ee->ipehr);
	err_printf(m, "  ESR:   0x%08x\n", ee->esr);

	error_print_instdone(m, ee);

	batch = intel_gpu_error_find_batch(ee);
	if (batch) {
		u64 start = batch->gtt_offset;
		u64 end = start + batch->gtt_size;

		err_printf(m, "  batch: [0x%08x_%08x, 0x%08x_%08x]\n",
			   upper_32_bits(start), lower_32_bits(start),
			   upper_32_bits(end), lower_32_bits(end));
	}
	if (GRAPHICS_VER(m->i915) >= 4) {
		err_printf(m, "  BBADDR: 0x%08x_%08x\n",
			   (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
		err_printf(m, "  BB_STATE: 0x%08x\n", ee->bbstate);
		err_printf(m, "  INSTPS: 0x%08x\n", ee->instps);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", ee->instpm);
	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
		   lower_32_bits(ee->faddr));
	if (GRAPHICS_VER(m->i915) >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", ee->rc_psmi);
		err_printf(m, "  FAULT_REG: 0x%08x\n", ee->fault_reg);
	}
	if (GRAPHICS_VER(m->i915) >= 11) {
		err_printf(m, "  NOPID: 0x%08x\n", ee->nopid);
		err_printf(m, "  EXCC: 0x%08x\n", ee->excc);
		err_printf(m, "  CMD_CCTL: 0x%08x\n", ee->cmd_cctl);
		err_printf(m, "  CSCMDOP: 0x%08x\n", ee->cscmdop);
		err_printf(m, "  CTX_SR_CTL: 0x%08x\n", ee->ctx_sr_ctl);
		err_printf(m, "  DMA_FADDR_HI: 0x%08x\n", ee->dma_faddr_hi);
		err_printf(m, "  DMA_FADDR_LO: 0x%08x\n", ee->dma_faddr_lo);
	}
	if (HAS_PPGTT(m->i915)) {
		err_printf(m, "  GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);

		if (GRAPHICS_VER(m->i915) >= 8) {
			int i;

			for (i = 0; i < 4; i++)
				err_printf(m, "  PDP%d: 0x%016llx\n",
					   i, ee->vm_info.pdp[i]);
		} else {
			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
				   ee->vm_info.pp_dir_base);
		}
	}

	for (n = 0; n < ee->num_ports; n++) {
		err_printf(m, "  ELSP[%d]:", n);
		error_print_request(m, " ", &ee->execlist[n]);
	}
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

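/*
 * Write one captured object to the dump: a header line naming the vma and
 * its GGTT address, then the page contents ascii85-encoded. Only the used
 * portion of the final page (PAGE_SIZE - vma->unused) is emitted.
 */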
void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
			       const struct intel_engine_cs *engine,
			       const struct i915_vma_coredump *vma)
{
	char out[ASCII85_BUFSZ];
	struct page *page;

	if (!vma)
		return;

	err_printf(m, "%s --- %s = 0x%08x %08x\n",
		   engine ? engine->name : "global", vma->name,
		   upper_32_bits(vma->gtt_offset),
		   lower_32_bits(vma->gtt_offset));

	if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
		err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes);

	err_compression_marker(m);
	list_for_each_entry(page, &vma->page_list, lru) {
		int i, len;
		const u32 *addr = page_address(page);

		len = PAGE_SIZE;
		if (page == list_last_entry(&vma->page_list, typeof(*page), lru))
			len -= vma->unused;
		len = ascii85_encode_len(len);

		for (i = 0; i < len; i++)
			err_puts(m, ascii85_encode(addr[i], out));
	}
	err_puts(m, "\n");
}

static void err_print_capabilities(struct drm_i915_error_state_buf *m,
				   struct i915_gpu_coredump *error)
{
	struct drm_printer p = i915_error_printer(m);

	intel_device_info_print(&error->device_info, &error->runtime_info, &p);
	intel_driver_caps_print(&error->driver_caps, &p);
}

static void err_print_params(struct drm_i915_error_state_buf *m,
			     const struct i915_params *params)
{
	struct drm_printer p = i915_error_printer(m);

	i915_params_dump(params, &p);
}

static void err_print_pciid(struct drm_i915_error_state_buf *m,
			    struct drm_i915_private *i915)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);

	err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   pdev->subsystem_vendor,
		   pdev->subsystem_device);
}

static void err_print_guc_ctb(struct drm_i915_error_state_buf *m,
			      const char *name,
			      const struct intel_ctb_coredump *ctb)
{
	if (!ctb->size)
		return;

	err_printf(m, "GuC %s CTB: raw: 0x%08X, 0x%08X/%08X, cached: 0x%08X/%08X, desc = 0x%08X, buf = 0x%08X x 0x%08X\n",
		   name, ctb->raw_status, ctb->raw_head, ctb->raw_tail,
		   ctb->head, ctb->tail, ctb->desc_offset, ctb->cmds_offset, ctb->size);
}

static void err_print_uc(struct drm_i915_error_state_buf *m,
			 const struct intel_uc_coredump *error_uc)
{
	struct drm_printer p = i915_error_printer(m);

	intel_uc_fw_dump(&error_uc->guc_fw, &p);
	intel_uc_fw_dump(&error_uc->huc_fw, &p);
	err_printf(m, "GuC timestamp: 0x%08x\n", error_uc->guc.timestamp);
	intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_log);
	err_printf(m, "GuC CTB fence: %d\n", error_uc->guc.last_fence);
	err_print_guc_ctb(m, "Send", error_uc->guc.ctb + 0);
	err_print_guc_ctb(m, "Recv", error_uc->guc.ctb + 1);
	intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_ctb);
}

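/*
 * Free a formatted-dump scatterlist: each entry owns a kmalloc'd text
 * chunk, and every chained table is itself one page allocated in
 * __i915_error_grow().
 */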
static void err_free_sgl(struct scatterlist *sgl)
{
	while (sgl) {
		struct scatterlist *sg;

		for (sg = sgl; !sg_is_chain(sg); sg++) {
			kfree(sg_virt(sg));
			if (sg_is_last(sg))
				break;
		}

		sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
		free_page((unsigned long)sgl);
		sgl = sg;
	}
}

static void err_print_gt_info(struct drm_i915_error_state_buf *m,
			      struct intel_gt_coredump *gt)
{
	struct drm_printer p = i915_error_printer(m);

	intel_gt_info_print(&gt->info, &p);
	intel_sseu_print_topology(gt->_gt->i915, &gt->info.sseu, &p);
}

static void err_print_gt_display(struct drm_i915_error_state_buf *m,
				 struct intel_gt_coredump *gt)
{
	err_printf(m, "IER: 0x%08x\n", gt->ier);
	err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
}

static void err_print_gt_global_nonguc(struct drm_i915_error_state_buf *m,
				       struct intel_gt_coredump *gt)
{
	int i;

	err_printf(m, "GT awake: %s\n", str_yes_no(gt->awake));
	err_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
		   gt->clock_frequency, gt->clock_period_ns);
	err_printf(m, "EIR: 0x%08x\n", gt->eir);
	err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);

	for (i = 0; i < gt->ngtier; i++)
		err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]);
}

static void err_print_gt_global(struct drm_i915_error_state_buf *m,
				struct intel_gt_coredump *gt)
{
	err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);

	if (IS_GRAPHICS_VER(m->i915, 6, 11)) {
		err_printf(m, "ERROR: 0x%08x\n", gt->error);
		err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg);
	}

	if (GRAPHICS_VER(m->i915) >= 8)
		err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
			   gt->fault_data1, gt->fault_data0);

	if (GRAPHICS_VER(m->i915) == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);

	if (IS_GRAPHICS_VER(m->i915, 8, 11))
		err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache);

	if (GRAPHICS_VER(m->i915) == 12)
		err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err);

	if (GRAPHICS_VER(m->i915) >= 12) {
		int i;

		for (i = 0; i < I915_MAX_SFC; i++) {
			/*
			 * SFC_DONE resides in the VD forcewake domain, so it
			 * only exists if the corresponding VCS engine is
			 * present.
			 */
			if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
			    !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
				continue;

			err_printf(m, "  SFC_DONE[%d]: 0x%08x\n", i,
				   gt->sfc_done[i]);
		}

		err_printf(m, "  GAM_DONE: 0x%08x\n", gt->gam_done);
	}
}

static void err_print_gt_fences(struct drm_i915_error_state_buf *m,
				struct intel_gt_coredump *gt)
{
	int i;

	for (i = 0; i < gt->nfence; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, gt->fence[i]);
}

static void err_print_gt_engines(struct drm_i915_error_state_buf *m,
				 struct intel_gt_coredump *gt)
{
	const struct intel_engine_coredump *ee;

	for (ee = gt->engine; ee; ee = ee->next) {
		const struct i915_vma_coredump *vma;

		if (ee->guc_capture_node)
			intel_guc_capture_print_engine_node(m, ee);
		else
			error_print_engine(m, ee);

		err_printf(m, "  hung: %u\n", ee->hung);
		err_printf(m, "  engine reset count: %u\n", ee->reset_count);
		error_print_context(m, "  Active context: ", &ee->context);

		for (vma = ee->vma; vma; vma = vma->next)
			intel_gpu_error_print_vma(m, ee->engine, vma);
	}
}

static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
			       struct i915_gpu_coredump *error)
{
	const struct intel_engine_coredump *ee;
	struct timespec64 ts;

	if (*error->error_msg)
		err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Kernel: %s %s\n",
		   init_utsname()->release,
		   init_utsname()->machine);
	err_printf(m, "Driver: %s\n", DRIVER_DATE);
	ts = ktime_to_timespec64(error->time);
	err_printf(m, "Time: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->boottime);
	err_printf(m, "Boottime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->uptime);
	err_printf(m, "Uptime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
		   error->capture, jiffies_to_msecs(jiffies - error->capture));

	for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next)
		err_printf(m, "Active process (on ring %s): %s [%d]\n",
			   ee->engine->name,
			   ee->context.comm,
			   ee->context.pid);

	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
	err_printf(m, "Subplatform: 0x%x\n",
		   intel_subplatform(&error->runtime_info,
				     error->device_info.platform));
	err_print_pciid(m, m->i915);

	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	intel_dmc_print_error_state(m, m->i915);

	err_printf(m, "RPM wakelock: %s\n", str_yes_no(error->wakelock));
	err_printf(m, "PM suspended: %s\n", str_yes_no(error->suspended));

	if (error->gt) {
		bool print_guc_capture = false;

		if (error->gt->uc && error->gt->uc->guc.is_guc_capture)
			print_guc_capture = true;

		err_print_gt_display(m, error->gt);
		err_print_gt_global_nonguc(m, error->gt);
		err_print_gt_fences(m, error->gt);

		/*
		 * GuC dumped global, eng-class and eng-instance registers together
		 * as part of engine state dump so we print in err_print_gt_engines
		 */
		if (!print_guc_capture)
			err_print_gt_global(m, error->gt);

		err_print_gt_engines(m, error->gt);

		if (error->gt->uc)
			err_print_uc(m, error->gt->uc);

		err_print_gt_info(m, error->gt);
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	err_print_capabilities(m, error);
	err_print_params(m, &error->params);
}

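/*
 * Render the coredump into its scatterlist form exactly once; concurrent
 * callers race via the cmpxchg() on error->sgl and the loser frees its copy.
 */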
static int err_print_to_sgl(struct i915_gpu_coredump *error)
{
	struct drm_i915_error_state_buf m;

	if (IS_ERR(error))
		return PTR_ERR(error);

	if (READ_ONCE(error->sgl))
		return 0;

	memset(&m, 0, sizeof(m));
	m.i915 = error->i915;

	__err_print_to_sgl(&m, error);

	if (m.buf) {
		__sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
		m.bytes = 0;
		m.buf = NULL;
	}
	if (m.cur) {
		GEM_BUG_ON(m.end < m.cur);
		sg_mark_end(m.cur - 1);
	}
	GEM_BUG_ON(m.sgl && !m.cur);

	if (m.err) {
		err_free_sgl(m.sgl);
		return m.err;
	}

	if (cmpxchg(&error->sgl, NULL, m.sgl))
		err_free_sgl(m.sgl);

	return 0;
}

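/*
 * Copy the window [off, off + rem) of the formatted dump into a caller
 * buffer, e.g. on behalf of a sysfs or debugfs read. The scatterlist entry
 * that satisfied the previous read is cached in error->fit, so a sequential
 * reader does not rescan the chain from the start on every call.
 */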
ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
					 char *buf, loff_t off, size_t rem)
{
	struct scatterlist *sg;
	size_t count;
	loff_t pos;
	int err;

	if (!error || !rem)
		return 0;

	err = err_print_to_sgl(error);
	if (err)
		return err;

	sg = READ_ONCE(error->fit);
	if (!sg || off < sg->dma_address)
		sg = error->sgl;
	if (!sg)
		return 0;

	pos = sg->dma_address;
	count = 0;
	do {
		size_t len, start;

		if (sg_is_chain(sg)) {
			sg = sg_chain_ptr(sg);
			GEM_BUG_ON(sg_is_chain(sg));
		}

		len = sg->length;
		if (pos + len <= off) {
			pos += len;
			continue;
		}

		start = sg->offset;
		if (pos < off) {
			GEM_BUG_ON(off - pos > len);
			len -= off - pos;
			start += off - pos;
			pos = off;
		}

		len = min(len, rem);
		GEM_BUG_ON(!len || len > sg->length);

		memcpy(buf, page_address(sg_page(sg)) + start, len);

		count += len;
		pos += len;

		buf += len;
		rem -= len;
		if (!rem) {
			WRITE_ONCE(error->fit, sg);
			break;
		}
	} while (!sg_is_last(sg++));

	return count;
}

static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
{
	while (vma) {
		struct i915_vma_coredump *next = vma->next;
		struct page *page, *n;

		list_for_each_entry_safe(page, n, &vma->page_list, lru) {
			list_del_init(&page->lru);
			__free_page(page);
		}

		kfree(vma);
		vma = next;
	}
}

static void cleanup_params(struct i915_gpu_coredump *error)
{
	i915_params_free(&error->params);
}

static void cleanup_uc(struct intel_uc_coredump *uc)
{
	kfree(uc->guc_fw.file_selected.path);
	kfree(uc->huc_fw.file_selected.path);
	kfree(uc->guc_fw.file_wanted.path);
	kfree(uc->huc_fw.file_wanted.path);
	i915_vma_coredump_free(uc->guc.vma_log);
	i915_vma_coredump_free(uc->guc.vma_ctb);

	kfree(uc);
}

static void cleanup_gt(struct intel_gt_coredump *gt)
{
	while (gt->engine) {
		struct intel_engine_coredump *ee = gt->engine;

		gt->engine = ee->next;

		i915_vma_coredump_free(ee->vma);
		intel_guc_capture_free_node(ee);
		kfree(ee);
	}

	if (gt->uc)
		cleanup_uc(gt->uc);

	kfree(gt);
}

void __i915_gpu_coredump_free(struct kref *error_ref)
{
	struct i915_gpu_coredump *error =
		container_of(error_ref, typeof(*error), ref);

	while (error->gt) {
		struct intel_gt_coredump *gt = error->gt;

		error->gt = gt->next;
		cleanup_gt(gt);
	}

	kfree(error->overlay);

	cleanup_params(error);

	err_free_sgl(error->sgl);
	kfree(error);
}

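/*
 * Snapshot the contents of a vma as a list of (compressed) pages. There are
 * three read paths, matching the if/else chain below: bounce each page
 * through the reserved GGTT error-capture slot (which works even for
 * objects the CPU cannot otherwise reach), read local memory through the
 * region's io mapping, or kmap() ordinary system pages directly.
 */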
static struct i915_vma_coredump *
i915_vma_coredump_create(const struct intel_gt *gt,
			 const struct i915_vma_resource *vma_res,
			 struct i915_vma_compress *compress,
			 const char *name)
{
	struct i915_ggtt *ggtt = gt->ggtt;
	const u64 slot = ggtt->error_capture.start;
	struct i915_vma_coredump *dst;
	struct sgt_iter iter;
	int ret;

	might_sleep();

	if (!vma_res || !vma_res->bi.pages || !compress)
		return NULL;

	dst = kmalloc(sizeof(*dst), ALLOW_FAIL);
	if (!dst)
		return NULL;

	if (!compress_start(compress)) {
		kfree(dst);
		return NULL;
	}

	INIT_LIST_HEAD(&dst->page_list);
	strcpy(dst->name, name);
	dst->next = NULL;

	dst->gtt_offset = vma_res->start;
	dst->gtt_size = vma_res->node_size;
	dst->gtt_page_sizes = vma_res->page_sizes_gtt;
	dst->unused = 0;

	ret = -EINVAL;
	if (drm_mm_node_allocated(&ggtt->error_capture)) {
		void __iomem *s;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
			mutex_lock(&ggtt->error_mutex);
			if (ggtt->vm.raw_insert_page)
				ggtt->vm.raw_insert_page(&ggtt->vm, dma, slot,
							 I915_CACHE_NONE, 0);
			else
				ggtt->vm.insert_page(&ggtt->vm, dma, slot,
						     I915_CACHE_NONE, 0);
			mb();

			s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);

			mb();
			ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
			mutex_unlock(&ggtt->error_mutex);
			if (ret)
				break;
		}
	} else if (vma_res->bi.lmem) {
		struct intel_memory_region *mem = vma_res->mr;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
			dma_addr_t offset = dma - mem->region.start;
			void __iomem *s;

			if (offset + PAGE_SIZE > mem->io_size) {
				ret = -EINVAL;
				break;
			}

			s = io_mapping_map_wc(&mem->iomap, offset, PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);
			if (ret)
				break;
		}
	} else {
		struct page *page;

		for_each_sgt_page(page, iter, vma_res->bi.pages) {
			void *s;

			drm_clflush_pages(&page, 1);

			s = kmap(page);
			ret = compress_page(compress, s, dst, false);
			kunmap(page);

			drm_clflush_pages(&page, 1);

			if (ret)
				break;
		}
	}

	if (ret || compress_flush(compress, dst)) {
		struct page *page, *n;

		list_for_each_entry_safe_reverse(page, n, &dst->page_list, lru) {
			list_del_init(&page->lru);
			pool_free(&compress->pool, page_address(page));
		}

		kfree(dst);
		dst = NULL;
	}
	compress_finish(compress);

	return dst;
}

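/*
 * Fence register layout varies with generation: 64-bit registers on gen4+
 * (read via intel_uncore_read64()), a single 32-bit register per fence
 * before that.
 */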
Chris Wilson742379c2020-01-10 12:30:56 +00001193static void gt_record_fences(struct intel_gt_coredump *gt)
Ben Widawsky011cf572014-02-04 12:18:55 +00001194{
Chris Wilson742379c2020-01-10 12:30:56 +00001195 struct i915_ggtt *ggtt = gt->_gt->ggtt;
1196 struct intel_uncore *uncore = gt->_gt->uncore;
Mika Kuoppala84734a02013-07-12 16:50:57 +03001197 int i;
1198
Lucas De Marchi651e7d42021-06-05 21:50:49 -07001199 if (GRAPHICS_VER(uncore->i915) >= 6) {
Chris Wilson742379c2020-01-10 12:30:56 +00001200 for (i = 0; i < ggtt->num_fences; i++)
1201 gt->fence[i] =
Tvrtko Ursulin7f1502d2019-06-10 13:06:06 +01001202 intel_uncore_read64(uncore,
1203 FENCE_REG_GEN6_LO(i));
Lucas De Marchi651e7d42021-06-05 21:50:49 -07001204 } else if (GRAPHICS_VER(uncore->i915) >= 4) {
Chris Wilson742379c2020-01-10 12:30:56 +00001205 for (i = 0; i < ggtt->num_fences; i++)
1206 gt->fence[i] =
Tvrtko Ursulin7f1502d2019-06-10 13:06:06 +01001207 intel_uncore_read64(uncore,
1208 FENCE_REG_965_LO(i));
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001209 } else {
Chris Wilson742379c2020-01-10 12:30:56 +00001210 for (i = 0; i < ggtt->num_fences; i++)
1211 gt->fence[i] =
Tvrtko Ursulin7f1502d2019-06-10 13:06:06 +01001212 intel_uncore_read(uncore, FENCE_REG(i));
Ville Syrjäläeecf613a42015-09-21 18:05:14 +03001213 }
Chris Wilson742379c2020-01-10 12:30:56 +00001214 gt->nfence = i;
Mika Kuoppala84734a02013-07-12 16:50:57 +03001215}
1216
Chris Wilson742379c2020-01-10 12:30:56 +00001217static void engine_record_registers(struct intel_engine_coredump *ee)
Mika Kuoppala84734a02013-07-12 16:50:57 +03001218{
Chris Wilson742379c2020-01-10 12:30:56 +00001219 const struct intel_engine_cs *engine = ee->engine;
1220 struct drm_i915_private *i915 = engine->i915;
Chris Wilson6361f4b2016-07-27 09:07:28 +01001221
Lucas De Marchi651e7d42021-06-05 21:50:49 -07001222 if (GRAPHICS_VER(i915) >= 6) {
Daniele Ceraolo Spuriobaba6e52019-03-25 14:49:40 -07001223 ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);
Lucas De Marchi91b59cd2019-07-30 11:04:03 -07001224
Matt Roperab1b2d42022-10-14 16:02:31 -07001225 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
1226 ee->fault_reg = intel_gt_mcr_read_any(engine->gt,
1227 XEHP_RING_FAULT_REG);
1228 else if (GRAPHICS_VER(i915) >= 12)
Chris Wilson742379c2020-01-10 12:30:56 +00001229 ee->fault_reg = intel_uncore_read(engine->uncore,
1230 GEN12_RING_FAULT_REG);
Lucas De Marchi651e7d42021-06-05 21:50:49 -07001231 else if (GRAPHICS_VER(i915) >= 8)
Chris Wilson742379c2020-01-10 12:30:56 +00001232 ee->fault_reg = intel_uncore_read(engine->uncore,
1233 GEN8_RING_FAULT_REG);
Chris Wilson62acc7e2019-03-05 15:09:14 +00001234 else
Tvrtko Ursulin77a302e2019-06-07 11:15:35 +01001235 ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
Ben Widawsky4e5aabf2013-08-12 16:53:04 -07001236 }
1237
Lucas De Marchi651e7d42021-06-05 21:50:49 -07001238 if (GRAPHICS_VER(i915) >= 4) {
Chris Wilson70a76a92020-01-28 20:43:15 +00001239 ee->esr = ENGINE_READ(engine, RING_ESR);
Daniele Ceraolo Spuriobaba6e52019-03-25 14:49:40 -07001240 ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
1241 ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
1242 ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
1243 ee->instps = ENGINE_READ(engine, RING_INSTPS);
1244 ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
Chris Wilson742379c2020-01-10 12:30:56 +00001245 ee->ccid = ENGINE_READ(engine, CCID);
Lucas De Marchi651e7d42021-06-05 21:50:49 -07001246 if (GRAPHICS_VER(i915) >= 8) {
Daniele Ceraolo Spuriobaba6e52019-03-25 14:49:40 -07001247 ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
1248 ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
Ben Widawsky13ffadd2014-04-01 16:31:07 -07001249 }
Daniele Ceraolo Spuriobaba6e52019-03-25 14:49:40 -07001250 ee->bbstate = ENGINE_READ(engine, RING_BBSTATE);
Mika Kuoppala84734a02013-07-12 16:50:57 +03001251 } else {
Daniele Ceraolo Spuriobaba6e52019-03-25 14:49:40 -07001252 ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX);
1253 ee->ipeir = ENGINE_READ(engine, IPEIR);
1254 ee->ipehr = ENGINE_READ(engine, IPEHR);
Mika Kuoppala84734a02013-07-12 16:50:57 +03001255 }
1256
Stuart Summersb729cfe2022-06-01 14:06:46 -07001257 if (GRAPHICS_VER(i915) >= 11) {
1258 ee->cmd_cctl = ENGINE_READ(engine, RING_CMD_CCTL);
1259 ee->cscmdop = ENGINE_READ(engine, RING_CSCMDOP);
1260 ee->ctx_sr_ctl = ENGINE_READ(engine, RING_CTX_SR_CTL);
1261 ee->dma_faddr_hi = ENGINE_READ(engine, RING_DMA_FADD_UDW);
1262 ee->dma_faddr_lo = ENGINE_READ(engine, RING_DMA_FADD);
1263 ee->nopid = ENGINE_READ(engine, RING_NOPID);
1264 ee->excc = ENGINE_READ(engine, RING_EXCC);
1265 }
1266
Chris Wilson0e704472016-10-12 10:05:17 +01001267 intel_engine_get_instdone(engine, &ee->instdone);
Ben Widawskyd6369512016-09-20 16:54:32 +03001268
Daniele Ceraolo Spuriobaba6e52019-03-25 14:49:40 -07001269 ee->instpm = ENGINE_READ(engine, RING_INSTPM);
Chris Wilson7e37f882016-08-02 22:50:21 +01001270 ee->acthd = intel_engine_get_active_head(engine);
Daniele Ceraolo Spuriobaba6e52019-03-25 14:49:40 -07001271 ee->start = ENGINE_READ(engine, RING_START);
1272 ee->head = ENGINE_READ(engine, RING_HEAD);
1273 ee->tail = ENGINE_READ(engine, RING_TAIL);
1274 ee->ctl = ENGINE_READ(engine, RING_CTL);
Lucas De Marchi651e7d42021-06-05 21:50:49 -07001275 if (GRAPHICS_VER(i915) > 2)
Daniele Ceraolo Spuriobaba6e52019-03-25 14:49:40 -07001276 ee->mode = ENGINE_READ(engine, RING_MI_MODE);
Mika Kuoppala84734a02013-07-12 16:50:57 +03001277
Chris Wilson742379c2020-01-10 12:30:56 +00001278 if (!HWS_NEEDS_PHYSICAL(i915)) {
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001279 i915_reg_t mmio;
Chris Wilsonf3ce3822014-01-23 22:40:36 +00001280
Lucas De Marchi651e7d42021-06-05 21:50:49 -07001281 if (GRAPHICS_VER(i915) == 7) {
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001282 switch (engine->id) {
Chris Wilsonf3ce3822014-01-23 22:40:36 +00001283 default:
Chris Wilson8a68d462019-03-05 18:03:30 +00001284 MISSING_CASE(engine->id);
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05001285 fallthrough;
Chris Wilson8a68d462019-03-05 18:03:30 +00001286 case RCS0:
Chris Wilsonf3ce3822014-01-23 22:40:36 +00001287 mmio = RENDER_HWS_PGA_GEN7;
1288 break;
Chris Wilson8a68d462019-03-05 18:03:30 +00001289 case BCS0:
Chris Wilsonf3ce3822014-01-23 22:40:36 +00001290 mmio = BLT_HWS_PGA_GEN7;
1291 break;
Chris Wilson8a68d462019-03-05 18:03:30 +00001292 case VCS0:
Chris Wilsonf3ce3822014-01-23 22:40:36 +00001293 mmio = BSD_HWS_PGA_GEN7;
1294 break;
Chris Wilson8a68d462019-03-05 18:03:30 +00001295 case VECS0:
Chris Wilsonf3ce3822014-01-23 22:40:36 +00001296 mmio = VEBOX_HWS_PGA_GEN7;
1297 break;
1298 }
Lucas De Marchi651e7d42021-06-05 21:50:49 -07001299 } else if (GRAPHICS_VER(engine->i915) == 6) {
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001300 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
Chris Wilsonf3ce3822014-01-23 22:40:36 +00001301 } else {
1302 /* XXX: gen8 returns to sanity */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001303 mmio = RING_HWS_PGA(engine->mmio_base);
Chris Wilsonf3ce3822014-01-23 22:40:36 +00001304 }
1305
Chris Wilson742379c2020-01-10 12:30:56 +00001306 ee->hws = intel_uncore_read(engine->uncore, mmio);
Chris Wilsonf3ce3822014-01-23 22:40:36 +00001307 }
1308
Chris Wilson742379c2020-01-10 12:30:56 +00001309 ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine);
Ben Widawsky6c7a01e2014-01-30 00:19:40 -08001310
Chris Wilson742379c2020-01-10 12:30:56 +00001311 if (HAS_PPGTT(i915)) {
Ben Widawsky6c7a01e2014-01-30 00:19:40 -08001312 int i;
1313
Tvrtko Ursulindbc65182019-06-07 09:45:20 +01001314 ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);
Ben Widawsky6c7a01e2014-01-30 00:19:40 -08001315
Lucas De Marchi651e7d42021-06-05 21:50:49 -07001316 if (GRAPHICS_VER(i915) == 6) {
Chris Wilson6361f4b2016-07-27 09:07:28 +01001317 ee->vm_info.pp_dir_base =
Daniele Ceraolo Spuriobaba6e52019-03-25 14:49:40 -07001318 ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
Lucas De Marchi651e7d42021-06-05 21:50:49 -07001319 } else if (GRAPHICS_VER(i915) == 7) {
Chris Wilson6361f4b2016-07-27 09:07:28 +01001320 ee->vm_info.pp_dir_base =
Chris Wilson6d425722019-04-05 13:38:31 +01001321 ENGINE_READ(engine, RING_PP_DIR_BASE);
Lucas De Marchi651e7d42021-06-05 21:50:49 -07001322 } else if (GRAPHICS_VER(i915) >= 8) {
Chris Wilson6d425722019-04-05 13:38:31 +01001323 u32 base = engine->mmio_base;
1324
Ben Widawsky6c7a01e2014-01-30 00:19:40 -08001325 for (i = 0; i < 4; i++) {
Chris Wilson6361f4b2016-07-27 09:07:28 +01001326 ee->vm_info.pdp[i] =
Chris Wilson742379c2020-01-10 12:30:56 +00001327 intel_uncore_read(engine->uncore,
1328 GEN8_RING_PDP_UDW(base, i));
Chris Wilson6361f4b2016-07-27 09:07:28 +01001329 ee->vm_info.pdp[i] <<= 32;
1330 ee->vm_info.pdp[i] |=
Chris Wilson742379c2020-01-10 12:30:56 +00001331 intel_uncore_read(engine->uncore,
1332 GEN8_RING_PDP_LDW(base, i));
Ben Widawsky6c7a01e2014-01-30 00:19:40 -08001333 }
Chris Wilson6d425722019-04-05 13:38:31 +01001334 }
Ben Widawsky6c7a01e2014-01-30 00:19:40 -08001335 }
Mika Kuoppala84734a02013-07-12 16:50:57 +03001336}
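/*
 * Editor's sketch (assumption: simplified from the real engine register
 * macros): the ENGINE_READ() used throughout the function above resolves to
 * an uncore read at an offset relative to the engine's mmio base, roughly:
 *
 *	#define ENGINE_READ(engine, reg) \
 *		intel_uncore_read((engine)->uncore, reg((engine)->mmio_base))
 */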
1337
Chris Wilson22b7a422019-06-20 15:20:51 +01001338static void record_request(const struct i915_request *request,
Chris Wilson742379c2020-01-10 12:30:56 +00001339 struct i915_request_coredump *erq)
Chris Wilson35ca0392016-10-13 11:18:14 +01001340{
Chris Wilson52c0fdb2019-01-29 20:52:29 +00001341 erq->flags = request->fence.flags;
Chris Wilsonb300fde2019-02-26 09:49:21 +00001342 erq->context = request->fence.context;
1343 erq->seqno = request->fence.seqno;
Chris Wilsonb7268c52018-04-18 19:40:52 +01001344 erq->sched_attr = request->sched.attr;
Chris Wilson35ca0392016-10-13 11:18:14 +01001345 erq->head = request->head;
1346 erq->tail = request->tail;
Chris Wilson6a8679c2019-12-22 23:35:58 +00001347
1348 erq->pid = 0;
1349 rcu_read_lock();
Chris Wilson24aac332020-04-28 10:02:55 +01001350 if (!intel_context_is_closed(request->context)) {
1351 const struct i915_gem_context *ctx;
1352
1353 ctx = rcu_dereference(request->context->gem_context);
1354 if (ctx)
1355 erq->pid = pid_nr(ctx->pid);
1356 }
Chris Wilson6a8679c2019-12-22 23:35:58 +00001357 rcu_read_unlock();
Chris Wilson35ca0392016-10-13 11:18:14 +01001358}
1359
Chris Wilson742379c2020-01-10 12:30:56 +00001360static void engine_record_execlists(struct intel_engine_coredump *ee)
Chris Wilson35ca0392016-10-13 11:18:14 +01001361{
Chris Wilson742379c2020-01-10 12:30:56 +00001362 const struct intel_engine_execlists * const el = &ee->engine->execlists;
1363 struct i915_request * const *port = el->active;
Chris Wilson22b7a422019-06-20 15:20:51 +01001364 unsigned int n = 0;
Chris Wilson35ca0392016-10-13 11:18:14 +01001365
Chris Wilson22b7a422019-06-20 15:20:51 +01001366 while (*port)
1367 record_request(*port++, &ee->execlist[n++]);
Mika Kuoppala76e70082017-09-22 15:43:07 +03001368
1369 ee->num_ports = n;
Chris Wilson35ca0392016-10-13 11:18:14 +01001370}
1371
Chris Wilson742379c2020-01-10 12:30:56 +00001372static bool record_context(struct i915_gem_context_coredump *e,
John Harrisone8a33192023-01-26 16:28:38 -08001373 struct intel_context *ce)
Chris Wilson4fa60532017-01-29 09:24:33 +00001374{
Chris Wilson6a8679c2019-12-22 23:35:58 +00001375 struct i915_gem_context *ctx;
1376 struct task_struct *task;
Chris Wilson03d0ed82020-01-28 11:34:26 +00001377 bool simulated;
Chris Wilson9f3ccd42019-12-20 10:12:29 +00001378
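	/*
	 * The GEM context may be freed while we capture; take a reference
	 * under RCU only if it is still alive (kref not yet zero).
	 */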
Chris Wilson6a8679c2019-12-22 23:35:58 +00001379 rcu_read_lock();
John Harrisone8a33192023-01-26 16:28:38 -08001380 ctx = rcu_dereference(ce->gem_context);
Chris Wilson6a8679c2019-12-22 23:35:58 +00001381 if (ctx && !kref_get_unless_zero(&ctx->ref))
1382 ctx = NULL;
1383 rcu_read_unlock();
Chris Wilson9f3ccd42019-12-20 10:12:29 +00001384 if (!ctx)
Chris Wilson03d0ed82020-01-28 11:34:26 +00001385 return true;
Chris Wilsonc990b4c2019-08-08 15:45:11 +01001386
Chris Wilson6a8679c2019-12-22 23:35:58 +00001387 rcu_read_lock();
1388 task = pid_task(ctx->pid, PIDTYPE_PID);
1389 if (task) {
1390 strcpy(e->comm, task->comm);
1391 e->pid = task->pid;
Chris Wilson4fa60532017-01-29 09:24:33 +00001392 }
Chris Wilson6a8679c2019-12-22 23:35:58 +00001393 rcu_read_unlock();
Chris Wilson4fa60532017-01-29 09:24:33 +00001394
Chris Wilsonb7268c52018-04-18 19:40:52 +01001395 e->sched_attr = ctx->sched;
Chris Wilson77b25a92017-07-21 13:32:30 +01001396 e->guilty = atomic_read(&ctx->guilty_count);
1397 e->active = atomic_read(&ctx->active_count);
Chris Wilsonc990b4c2019-08-08 15:45:11 +01001398
John Harrisone8a33192023-01-26 16:28:38 -08001399 e->total_runtime = intel_context_get_total_runtime_ns(ce);
1400 e->avg_runtime = intel_context_get_avg_runtime_ns(ce);
Tvrtko Ursulin1883a0a2020-02-16 13:36:20 +00001401
Chris Wilson03d0ed82020-01-28 11:34:26 +00001402 simulated = i915_gem_context_no_error_capture(ctx);
Chris Wilson6a8679c2019-12-22 23:35:58 +00001403
1404 i915_gem_context_put(ctx);
Chris Wilson03d0ed82020-01-28 11:34:26 +00001405 return simulated;
Chris Wilson4fa60532017-01-29 09:24:33 +00001406}
1407
Chris Wilson742379c2020-01-10 12:30:56 +00001408struct intel_engine_capture_vma {
1409 struct intel_engine_capture_vma *next;
Thomas Hellström60dc43d2022-01-10 18:22:19 +01001410 struct i915_vma_resource *vma_res;
Chris Wilson742379c2020-01-10 12:30:56 +00001411 char name[16];
Thomas Hellströmff20afc42021-11-29 21:22:45 +01001412 bool lockdep_cookie;
Chris Wilson79c7a282019-07-25 23:38:43 +01001413};
1414
Chris Wilson742379c2020-01-10 12:30:56 +00001415static struct intel_engine_capture_vma *
Thomas Hellströmff20afc42021-11-29 21:22:45 +01001416capture_vma_snapshot(struct intel_engine_capture_vma *next,
Thomas Hellström60dc43d2022-01-10 18:22:19 +01001417 struct i915_vma_resource *vma_res,
1418 gfp_t gfp, const char *name)
Thomas Hellströmff20afc42021-11-29 21:22:45 +01001419{
1420 struct intel_engine_capture_vma *c;
1421
Thomas Hellström60dc43d2022-01-10 18:22:19 +01001422 if (!vma_res)
Thomas Hellströmff20afc42021-11-29 21:22:45 +01001423 return next;
1424
1425 c = kmalloc(sizeof(*c), gfp);
1426 if (!c)
1427 return next;
1428
Thomas Hellström60dc43d2022-01-10 18:22:19 +01001429 if (!i915_vma_resource_hold(vma_res, &c->lockdep_cookie)) {
Thomas Hellströmff20afc42021-11-29 21:22:45 +01001430 kfree(c);
1431 return next;
1432 }
1433
Thomas Hellström60dc43d2022-01-10 18:22:19 +01001434	strscpy(c->name, name, sizeof(c->name)); /* bounded copy into name[16] */
1435 c->vma_res = i915_vma_resource_get(vma_res);
Thomas Hellströmff20afc42021-11-29 21:22:45 +01001436
1437 c->next = next;
1438 return c;
1439}
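/*
 * Editor's note: the vma resource hold taken above is paired with the
 * i915_vma_resource_unhold()/i915_vma_resource_put() calls in
 * intel_engine_coredump_add_vma(), once the contents have been copied out.
 */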
1440
1441static struct intel_engine_capture_vma *
Chris Wilson742379c2020-01-10 12:30:56 +00001442capture_vma(struct intel_engine_capture_vma *next,
Chris Wilson79c7a282019-07-25 23:38:43 +01001443 struct i915_vma *vma,
Chris Wilson742379c2020-01-10 12:30:56 +00001444 const char *name,
1445 gfp_t gfp)
Chris Wilson79c7a282019-07-25 23:38:43 +01001446{
Chris Wilson79c7a282019-07-25 23:38:43 +01001447 if (!vma)
1448 return next;
1449
Thomas Hellströmff20afc42021-11-29 21:22:45 +01001450 /*
1451 * If the vma isn't pinned, then the vma should be snapshotted
	1452	 * to a struct i915_vma_resource at command submission time.
1453 * Not here.
1454 */
Thomas Hellström60dc43d2022-01-10 18:22:19 +01001455 if (GEM_WARN_ON(!i915_vma_is_pinned(vma)))
Chris Wilson79c7a282019-07-25 23:38:43 +01001456 return next;
1457
Thomas Hellström60dc43d2022-01-10 18:22:19 +01001458 next = capture_vma_snapshot(next, vma->resource, gfp, name);
Thomas Hellströmff20afc42021-11-29 21:22:45 +01001459
1460 return next;
Chris Wilson79c7a282019-07-25 23:38:43 +01001461}
1462
Chris Wilson742379c2020-01-10 12:30:56 +00001463static struct intel_engine_capture_vma *
1464capture_user(struct intel_engine_capture_vma *capture,
1465 const struct i915_request *rq,
1466 gfp_t gfp)
Chris Wilsonb0fd47a2017-04-15 10:39:02 +01001467{
Chris Wilsone61e0f52018-02-21 09:56:36 +00001468 struct i915_capture_list *c;
Chris Wilsonb0fd47a2017-04-15 10:39:02 +01001469
Chris Wilson742379c2020-01-10 12:30:56 +00001470 for (c = rq->capture_list; c; c = c->next)
Thomas Hellström60dc43d2022-01-10 18:22:19 +01001471 capture = capture_vma_snapshot(capture, c->vma_res, gfp,
1472 "user");
Chris Wilson79c7a282019-07-25 23:38:43 +01001473
1474 return capture;
Chris Wilsonb0fd47a2017-04-15 10:39:02 +01001475}
1476
Chris Wilson742379c2020-01-10 12:30:56 +00001477static void add_vma(struct intel_engine_coredump *ee,
1478 struct i915_vma_coredump *vma)
Mika Kuoppala84734a02013-07-12 16:50:57 +03001479{
Chris Wilson742379c2020-01-10 12:30:56 +00001480 if (vma) {
1481 vma->next = ee->vma;
1482 ee->vma = vma;
1483 }
1484}
Mika Kuoppala84734a02013-07-12 16:50:57 +03001485
Thomas Hellströmff20afc42021-11-29 21:22:45 +01001486static struct i915_vma_coredump *
1487create_vma_coredump(const struct intel_gt *gt, struct i915_vma *vma,
1488 const char *name, struct i915_vma_compress *compress)
1489{
Thomas Hellström60dc43d2022-01-10 18:22:19 +01001490 struct i915_vma_coredump *ret = NULL;
1491 struct i915_vma_resource *vma_res;
1492 bool lockdep_cookie;
Thomas Hellströmff20afc42021-11-29 21:22:45 +01001493
1494 if (!vma)
1495 return NULL;
1496
Thomas Hellström60dc43d2022-01-10 18:22:19 +01001497 vma_res = vma->resource;
1498
1499 if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) {
1500 ret = i915_vma_coredump_create(gt, vma_res, compress, name);
1501 i915_vma_resource_unhold(vma_res, lockdep_cookie);
1502 }
Thomas Hellströmff20afc42021-11-29 21:22:45 +01001503
1504 return ret;
1505}
1506
1507static void add_vma_coredump(struct intel_engine_coredump *ee,
1508 const struct intel_gt *gt,
1509 struct i915_vma *vma,
1510 const char *name,
1511 struct i915_vma_compress *compress)
1512{
1513 add_vma(ee, create_vma_coredump(gt, vma, name, compress));
1514}
1515
Chris Wilson742379c2020-01-10 12:30:56 +00001516struct intel_engine_coredump *
Alan Previna6f0f9c2022-03-21 09:45:26 -07001517intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp, u32 dump_flags)
Chris Wilson742379c2020-01-10 12:30:56 +00001518{
1519 struct intel_engine_coredump *ee;
1520
1521 ee = kzalloc(sizeof(*ee), gfp);
Chris Wilsonc990b4c2019-08-08 15:45:11 +01001522 if (!ee)
Chris Wilson742379c2020-01-10 12:30:56 +00001523 return NULL;
Chris Wilsonc990b4c2019-08-08 15:45:11 +01001524
Chris Wilson742379c2020-01-10 12:30:56 +00001525 ee->engine = engine;
1526
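	/*
	 * With a GuC-triggered reset, the engine register state is pulled
	 * from the GuC error-capture lists later instead; see capture_engine().
	 */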
Alan Previna6f0f9c2022-03-21 09:45:26 -07001527 if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)) {
1528 engine_record_registers(ee);
1529 engine_record_execlists(ee);
1530 }
Chris Wilson742379c2020-01-10 12:30:56 +00001531
1532 return ee;
1533}
1534
John Harrisone8a33192023-01-26 16:28:38 -08001535static struct intel_engine_capture_vma *
1536engine_coredump_add_context(struct intel_engine_coredump *ee,
1537 struct intel_context *ce,
1538 gfp_t gfp)
1539{
1540 struct intel_engine_capture_vma *vma = NULL;
1541
1542 ee->simulated |= record_context(&ee->context, ce);
1543 if (ee->simulated)
1544 return NULL;
1545
1546 /*
1547 * We need to copy these to an anonymous buffer
1548 * as the simplest method to avoid being overwritten
1549 * by userspace.
1550 */
1551 vma = capture_vma(vma, ce->ring->vma, "ring", gfp);
1552 vma = capture_vma(vma, ce->state, "HW context", gfp);
1553
1554 return vma;
1555}
1556
Chris Wilson742379c2020-01-10 12:30:56 +00001557struct intel_engine_capture_vma *
1558intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
1559 struct i915_request *rq,
1560 gfp_t gfp)
1561{
John Harrisone8a33192023-01-26 16:28:38 -08001562 struct intel_engine_capture_vma *vma;
Chris Wilson742379c2020-01-10 12:30:56 +00001563
John Harrisone8a33192023-01-26 16:28:38 -08001564 vma = engine_coredump_add_context(ee, rq->context, gfp);
1565 if (!vma)
Chris Wilson742379c2020-01-10 12:30:56 +00001566 return NULL;
1567
1568 /*
1569 * We need to copy these to an anonymous buffer
1570 * as the simplest method to avoid being overwritten
1571 * by userspace.
1572 */
Thomas Hellström60dc43d2022-01-10 18:22:19 +01001573 vma = capture_vma_snapshot(vma, rq->batch_res, gfp, "batch");
Chris Wilson742379c2020-01-10 12:30:56 +00001574 vma = capture_user(vma, rq, gfp);
Chris Wilson742379c2020-01-10 12:30:56 +00001575
Chris Wilson742379c2020-01-10 12:30:56 +00001576 ee->rq_head = rq->head;
1577 ee->rq_post = rq->postfix;
1578 ee->rq_tail = rq->tail;
1579
1580 return vma;
1581}
1582
1583void
1584intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
1585 struct intel_engine_capture_vma *capture,
1586 struct i915_vma_compress *compress)
1587{
1588 const struct intel_engine_cs *engine = ee->engine;
1589
1590 while (capture) {
1591 struct intel_engine_capture_vma *this = capture;
Thomas Hellström60dc43d2022-01-10 18:22:19 +01001592 struct i915_vma_resource *vma_res = this->vma_res;
Chris Wilson742379c2020-01-10 12:30:56 +00001593
1594 add_vma(ee,
Thomas Hellström60dc43d2022-01-10 18:22:19 +01001595 i915_vma_coredump_create(engine->gt, vma_res,
1596 compress, this->name));
Chris Wilson742379c2020-01-10 12:30:56 +00001597
Thomas Hellström60dc43d2022-01-10 18:22:19 +01001598 i915_vma_resource_unhold(vma_res, this->lockdep_cookie);
1599 i915_vma_resource_put(vma_res);
Chris Wilson742379c2020-01-10 12:30:56 +00001600
1601 capture = this->next;
1602 kfree(this);
1603 }
1604
Thomas Hellströmff20afc42021-11-29 21:22:45 +01001605 add_vma_coredump(ee, engine->gt, engine->status_page.vma,
1606 "HW Status", compress);
Chris Wilson742379c2020-01-10 12:30:56 +00001607
Thomas Hellströmff20afc42021-11-29 21:22:45 +01001608 add_vma_coredump(ee, engine->gt, engine->wa_ctx.vma,
1609 "WA context", compress);
Chris Wilson742379c2020-01-10 12:30:56 +00001610}
1611
1612static struct intel_engine_coredump *
1613capture_engine(struct intel_engine_cs *engine,
Alan Previna6f0f9c2022-03-21 09:45:26 -07001614 struct i915_vma_compress *compress,
1615 u32 dump_flags)
Chris Wilson742379c2020-01-10 12:30:56 +00001616{
Chris Wilson1a8585b2020-01-10 12:30:59 +00001617 struct intel_engine_capture_vma *capture = NULL;
Chris Wilson742379c2020-01-10 12:30:56 +00001618 struct intel_engine_coredump *ee;
John Harrisona4be3dca2023-01-26 16:28:37 -08001619 struct intel_context *ce = NULL;
Matthew Brost573ba122021-07-26 17:23:33 -07001620 struct i915_request *rq = NULL;
Chris Wilson742379c2020-01-10 12:30:56 +00001621
Alan Previna6f0f9c2022-03-21 09:45:26 -07001622 ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL, dump_flags);
Chris Wilson742379c2020-01-10 12:30:56 +00001623 if (!ee)
1624 return NULL;
1625
John Harrisona4be3dca2023-01-26 16:28:37 -08001626 intel_engine_get_hung_entity(engine, &ce, &rq);
John Harrisone7696d62023-01-26 16:28:39 -08001627	if (rq && !i915_request_started(rq))
John Harrisone8a33192023-01-26 16:28:38 -08001628		drm_info(&engine->gt->i915->drm, "Got hung context on %s with active request %lld:%lld [0x%04X] not yet started\n",
	1629			 engine->name, rq->fence.context, rq->fence.seqno, ce ? ce->guc_id.id : 0);
Thomas Hellströmff20afc42021-11-29 21:22:45 +01001630
John Harrisone8a33192023-01-26 16:28:38 -08001631 if (rq) {
1632 capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
1633 i915_request_put(rq);
1634 } else if (ce) {
1635 capture = engine_coredump_add_context(ee, ce, ATOMIC_MAYFAIL);
1636 }
Chris Wilson742379c2020-01-10 12:30:56 +00001637
John Harrisone8a33192023-01-26 16:28:38 -08001638 if (capture) {
1639 intel_engine_coredump_add_vma(ee, capture, compress);
1640
1641 if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
1642 intel_guc_capture_get_matching_node(engine->gt, ee, ce);
1643 } else {
1644 kfree(ee);
1645 ee = NULL;
1646 }
Chris Wilson742379c2020-01-10 12:30:56 +00001647
1648 return ee;
1649}
1650
1651static void
1652gt_record_engines(struct intel_gt_coredump *gt,
Tvrtko Ursulinbda30022020-11-04 13:47:42 +00001653 intel_engine_mask_t engine_mask,
Alan Previna6f0f9c2022-03-21 09:45:26 -07001654 struct i915_vma_compress *compress,
1655 u32 dump_flags)
Chris Wilson742379c2020-01-10 12:30:56 +00001656{
1657 struct intel_engine_cs *engine;
1658 enum intel_engine_id id;
1659
1660 for_each_engine(engine, gt->_gt, id) {
1661 struct intel_engine_coredump *ee;
Chris Wilson372fbb82014-01-27 13:52:34 +00001662
Chris Wilson3bdd4f82019-07-22 23:28:47 +01001663 /* Refill our page pool before entering atomic section */
1664 pool_refill(&compress->pool, ALLOW_FAIL);
1665
Alan Previna6f0f9c2022-03-21 09:45:26 -07001666 ee = capture_engine(engine, compress, dump_flags);
Chris Wilson742379c2020-01-10 12:30:56 +00001667 if (!ee)
1668 continue;
1669
Tvrtko Ursulinbda30022020-11-04 13:47:42 +00001670 ee->hung = engine->mask & engine_mask;
1671
Chris Wilson742379c2020-01-10 12:30:56 +00001672 gt->simulated |= ee->simulated;
1673 if (ee->simulated) {
Alan Previna6f0f9c2022-03-21 09:45:26 -07001674 if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
1675 intel_guc_capture_free_node(ee);
Chris Wilson742379c2020-01-10 12:30:56 +00001676 kfree(ee);
Chris Wilsonc990b4c2019-08-08 15:45:11 +01001677 continue;
Chris Wilsonab0e7ff2014-02-25 17:11:24 +02001678 }
Chris Wilsonc990b4c2019-08-08 15:45:11 +01001679
Chris Wilson742379c2020-01-10 12:30:56 +00001680 ee->next = gt->engine;
1681 gt->engine = ee;
Mika Kuoppala84734a02013-07-12 16:50:57 +03001682 }
1683}
1684
John Harrisonc5de70f2022-07-27 19:20:25 -07001685static void gt_record_guc_ctb(struct intel_ctb_coredump *saved,
1686 const struct intel_guc_ct_buffer *ctb,
1687 const void *blob_ptr, struct intel_guc *guc)
1688{
1689 if (!ctb || !ctb->desc)
1690 return;
1691
1692 saved->raw_status = ctb->desc->status;
1693 saved->raw_head = ctb->desc->head;
1694 saved->raw_tail = ctb->desc->tail;
1695 saved->head = ctb->head;
1696 saved->tail = ctb->tail;
1697 saved->size = ctb->size;
1698 saved->desc_offset = ((void *)ctb->desc) - blob_ptr;
1699 saved->cmds_offset = ((void *)ctb->cmds) - blob_ptr;
1700}
1701
Chris Wilson742379c2020-01-10 12:30:56 +00001702static struct intel_uc_coredump *
1703gt_record_uc(struct intel_gt_coredump *gt,
1704 struct i915_vma_compress *compress)
Michal Wajdeczko7d41ef32017-10-26 17:36:55 +00001705{
Chris Wilson742379c2020-01-10 12:30:56 +00001706 const struct intel_uc *uc = &gt->_gt->uc;
1707 struct intel_uc_coredump *error_uc;
Michal Wajdeczko7d41ef32017-10-26 17:36:55 +00001708
Chris Wilson742379c2020-01-10 12:30:56 +00001709 error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL);
1710 if (!error_uc)
1711 return NULL;
Michal Wajdeczko7d41ef32017-10-26 17:36:55 +00001712
Michal Wajdeczkoabb042f2019-08-13 08:15:59 +00001713 memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
1714 memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));
Michal Wajdeczko7d41ef32017-10-26 17:36:55 +00001715
John Harrison665ae9c2022-09-06 16:01:46 -07001716 error_uc->guc_fw.file_selected.path = kstrdup(uc->guc.fw.file_selected.path, ALLOW_FAIL);
1717 error_uc->huc_fw.file_selected.path = kstrdup(uc->huc.fw.file_selected.path, ALLOW_FAIL);
1718 error_uc->guc_fw.file_wanted.path = kstrdup(uc->guc.fw.file_wanted.path, ALLOW_FAIL);
1719 error_uc->huc_fw.file_wanted.path = kstrdup(uc->huc.fw.file_wanted.path, ALLOW_FAIL);
John Harrison368d1792022-07-27 19:20:24 -07001720
1721 /*
1722 * Save the GuC log and include a timestamp reference for converting the
1723 * log times to system times (in conjunction with the error->boottime and
1724 * gt->clock_frequency fields saved elsewhere).
1725 */
John Harrisonc5de70f2022-07-27 19:20:25 -07001726 error_uc->guc.timestamp = intel_uncore_read(gt->_gt->uncore, GUCPMTIMESTAMP);
1727 error_uc->guc.vma_log = create_vma_coredump(gt->_gt, uc->guc.log.vma,
1728 "GuC log buffer", compress);
1729 error_uc->guc.vma_ctb = create_vma_coredump(gt->_gt, uc->guc.ct.vma,
1730 "GuC CT buffer", compress);
1731 error_uc->guc.last_fence = uc->guc.ct.requests.last_fence;
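	/*
	 * Editor's note: both CTBs are carved out of one shared blob, with the
	 * send descriptor at its base, so send.desc doubles as the offset
	 * origin for the recv descriptor and commands as well.
	 */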
1732 gt_record_guc_ctb(error_uc->guc.ctb + 0, &uc->guc.ct.ctbs.send,
1733 uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
1734 gt_record_guc_ctb(error_uc->guc.ctb + 1, &uc->guc.ct.ctbs.recv,
1735 uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
Chris Wilson742379c2020-01-10 12:30:56 +00001736
1737 return error_uc;
1738}
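/*
 * Editor's sketch (not part of the driver): a post-processing tool could use
 * the snapshot above to place a raw GuC log timestamp on the system clock.
 * Only guc.timestamp, gt->clock_frequency and error->boottime come from the
 * capture itself; the helper name and shape are illustrative, and div_u64()
 * is assumed from linux/math64.h.
 */
static u64 __maybe_unused guc_log_ts_to_ns_before_capture(u32 log_ts,
							   u32 capture_ts,
							   u32 clock_frequency)
{
	/* Ticks elapsed between the log entry and the GUCPMTIMESTAMP read. */
	u64 ticks = capture_ts - log_ts;

	/* Scale GT clock ticks to nanoseconds. */
	return div_u64(ticks * NSEC_PER_SEC, clock_frequency);
}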
1739
Alan Previna6f0f9c2022-03-21 09:45:26 -07001740/* Capture display registers. */
1741static void gt_record_display_regs(struct intel_gt_coredump *gt)
1742{
1743 struct intel_uncore *uncore = gt->_gt->uncore;
1744 struct drm_i915_private *i915 = uncore->i915;
1745
1746 if (GRAPHICS_VER(i915) >= 6)
1747 gt->derrmr = intel_uncore_read(uncore, DERRMR);
1748
1749 if (GRAPHICS_VER(i915) >= 8)
1750 gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
1751 else if (IS_VALLEYVIEW(i915))
1752 gt->ier = intel_uncore_read(uncore, VLV_IER);
1753 else if (HAS_PCH_SPLIT(i915))
1754 gt->ier = intel_uncore_read(uncore, DEIER);
1755 else if (GRAPHICS_VER(i915) == 2)
1756 gt->ier = intel_uncore_read16(uncore, GEN2_IER);
1757 else
1758 gt->ier = intel_uncore_read(uncore, GEN2_IER);
1759}
1760
1761/* Capture all other registers that GuC doesn't capture. */
1762static void gt_record_global_nonguc_regs(struct intel_gt_coredump *gt)
1763{
1764 struct intel_uncore *uncore = gt->_gt->uncore;
1765 struct drm_i915_private *i915 = uncore->i915;
1766 int i;
1767
1768 if (IS_VALLEYVIEW(i915)) {
1769 gt->gtier[0] = intel_uncore_read(uncore, GTIER);
1770 gt->ngtier = 1;
1771 } else if (GRAPHICS_VER(i915) >= 11) {
1772 gt->gtier[0] =
1773 intel_uncore_read(uncore,
1774 GEN11_RENDER_COPY_INTR_ENABLE);
1775 gt->gtier[1] =
1776 intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
1777 gt->gtier[2] =
1778 intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
1779 gt->gtier[3] =
1780 intel_uncore_read(uncore,
1781 GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1782 gt->gtier[4] =
1783 intel_uncore_read(uncore,
1784 GEN11_CRYPTO_RSVD_INTR_ENABLE);
1785 gt->gtier[5] =
1786 intel_uncore_read(uncore,
1787 GEN11_GUNIT_CSME_INTR_ENABLE);
1788 gt->ngtier = 6;
1789 } else if (GRAPHICS_VER(i915) >= 8) {
1790 for (i = 0; i < 4; i++)
1791 gt->gtier[i] =
1792 intel_uncore_read(uncore, GEN8_GT_IER(i));
1793 gt->ngtier = 4;
1794 } else if (HAS_PCH_SPLIT(i915)) {
1795 gt->gtier[0] = intel_uncore_read(uncore, GTIER);
1796 gt->ngtier = 1;
1797 }
1798
1799 gt->eir = intel_uncore_read(uncore, EIR);
1800 gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
1801}
1802
1803/*
1804 * Capture all registers that relate to workload submission.
	1805	 * NOTE: with GuC submission, the GuC itself dumps these registers when it resets an engine.
1806 */
1807static void gt_record_global_regs(struct intel_gt_coredump *gt)
Mika Kuoppala84734a02013-07-12 16:50:57 +03001808{
Chris Wilson742379c2020-01-10 12:30:56 +00001809 struct intel_uncore *uncore = gt->_gt->uncore;
1810 struct drm_i915_private *i915 = uncore->i915;
Rodrigo Vivi885ea5a2014-08-05 10:07:13 -07001811 int i;
Mika Kuoppala84734a02013-07-12 16:50:57 +03001812
Chris Wilson742379c2020-01-10 12:30:56 +00001813 /*
1814 * General organization
Ben Widawsky654c90c2014-01-30 00:19:36 -08001815 * 1. Registers specific to a single generation
1816 * 2. Registers which belong to multiple generations
1817 * 3. Feature specific registers.
1818 * 4. Everything else
1819 * Please try to follow the order.
1820 */
1821
1822 /* 1: Registers specific to a single generation */
Alan Previna6f0f9c2022-03-21 09:45:26 -07001823 if (IS_VALLEYVIEW(i915))
Chris Wilson742379c2020-01-10 12:30:56 +00001824 gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
Ben Widawsky654c90c2014-01-30 00:19:36 -08001825
Lucas De Marchi651e7d42021-06-05 21:50:49 -07001826 if (GRAPHICS_VER(i915) == 7)
Chris Wilson742379c2020-01-10 12:30:56 +00001827 gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
Ben Widawsky654c90c2014-01-30 00:19:36 -08001828
Matt Roperab1b2d42022-10-14 16:02:31 -07001829 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
1830 gt->fault_data0 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
1831 XEHP_FAULT_TLB_DATA0);
1832 gt->fault_data1 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
1833 XEHP_FAULT_TLB_DATA1);
1834 } else if (GRAPHICS_VER(i915) >= 12) {
Chris Wilson742379c2020-01-10 12:30:56 +00001835 gt->fault_data0 = intel_uncore_read(uncore,
1836 GEN12_FAULT_TLB_DATA0);
1837 gt->fault_data1 = intel_uncore_read(uncore,
1838 GEN12_FAULT_TLB_DATA1);
Lucas De Marchi651e7d42021-06-05 21:50:49 -07001839 } else if (GRAPHICS_VER(i915) >= 8) {
Chris Wilson742379c2020-01-10 12:30:56 +00001840 gt->fault_data0 = intel_uncore_read(uncore,
1841 GEN8_FAULT_TLB_DATA0);
1842 gt->fault_data1 = intel_uncore_read(uncore,
1843 GEN8_FAULT_TLB_DATA1);
Mika Kuoppala6c826f32015-03-24 14:54:19 +02001844 }
1845
Lucas De Marchi651e7d42021-06-05 21:50:49 -07001846 if (GRAPHICS_VER(i915) == 6) {
Chris Wilson742379c2020-01-10 12:30:56 +00001847 gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
1848 gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
1849 gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
Ben Widawsky91ec5d12014-01-30 00:19:39 -08001850 }
Ben Widawsky654c90c2014-01-30 00:19:36 -08001851
Ben Widawsky654c90c2014-01-30 00:19:36 -08001852 /* 2: Registers which belong to multiple generations */
Lucas De Marchi651e7d42021-06-05 21:50:49 -07001853 if (GRAPHICS_VER(i915) >= 7)
Chris Wilson742379c2020-01-10 12:30:56 +00001854 gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
Ben Widawsky654c90c2014-01-30 00:19:36 -08001855
Lucas De Marchi651e7d42021-06-05 21:50:49 -07001856	if (IS_GRAPHICS_VER(i915, 6, 11)) {
Chris Wilson742379c2020-01-10 12:30:56 +00001857		gt->error = intel_uncore_read(uncore, ERROR_GEN6);
	1858		gt->done_reg = intel_uncore_read(uncore, DONE_REG);
	1859	}
1862
1863 /* 3: Feature specific registers */
Lucas De Marchi651e7d42021-06-05 21:50:49 -07001864 if (IS_GRAPHICS_VER(i915, 6, 7)) {
Chris Wilson742379c2020-01-10 12:30:56 +00001865 gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
1866 gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
Ben Widawsky91ec5d12014-01-30 00:19:39 -08001867 }
1868
Lucas De Marchi651e7d42021-06-05 21:50:49 -07001869 if (IS_GRAPHICS_VER(i915, 8, 11))
Chris Wilson742379c2020-01-10 12:30:56 +00001870 gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);
Matthew Auldfd521d32019-09-09 18:16:46 +01001871
Lucas De Marchi651e7d42021-06-05 21:50:49 -07001872 if (GRAPHICS_VER(i915) == 12)
Chris Wilson742379c2020-01-10 12:30:56 +00001873 gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);
Lionel Landwerlinba1d18e2019-10-25 15:17:18 +03001874
Lucas De Marchi651e7d42021-06-05 21:50:49 -07001875 if (GRAPHICS_VER(i915) >= 12) {
Matt Roper239bbb22022-03-10 22:28:35 -08001876 for (i = 0; i < I915_MAX_SFC; i++) {
Matt Roper24d032e2021-08-06 10:41:30 -07001877 /*
1878 * SFC_DONE resides in the VD forcewake domain, so it
1879 * only exists if the corresponding VCS engine is
1880 * present.
1881 */
Matt Roper45f63792021-09-17 09:12:03 -07001882 if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
1883 !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
Matt Roper24d032e2021-08-06 10:41:30 -07001884 continue;
1885
Chris Wilson742379c2020-01-10 12:30:56 +00001886 gt->sfc_done[i] =
Mika Kuoppalae50dbdb2019-10-29 18:38:40 +02001887 intel_uncore_read(uncore, GEN12_SFC_DONE(i));
1888 }
Mika Kuoppala811bb3d2019-10-29 18:38:41 +02001889
Chris Wilson742379c2020-01-10 12:30:56 +00001890 gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
Mika Kuoppalae50dbdb2019-10-29 18:38:40 +02001891 }
Ben Widawsky1d762aa2014-01-30 00:19:35 -08001892}
Mika Kuoppala84734a02013-07-12 16:50:57 +03001893
Daniele Ceraolo Spurio792592e2020-07-07 17:39:47 -07001894static void gt_record_info(struct intel_gt_coredump *gt)
1895{
1896 memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info));
John Harrison368d1792022-07-27 19:20:24 -07001897 gt->clock_frequency = gt->_gt->clock_frequency;
1898 gt->clock_period_ns = gt->_gt->clock_period_ns;
Daniele Ceraolo Spurio792592e2020-07-07 17:39:47 -07001899}
1900
Chris Wilson742379c2020-01-10 12:30:56 +00001901/*
1902 * Generate a semi-unique error code. The code is not meant to have meaning, The
1903 * code's only purpose is to try to prevent false duplicated bug reports by
1904 * grossly estimating a GPU error state.
1905 *
1906 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
1907 * the hang if we could strip the GTT offset information from it.
1908 *
1909 * It's only a small step better than a random number in its current form.
1910 */
1911static u32 generate_ecode(const struct intel_engine_coredump *ee)
Mika Kuoppalacb383002014-02-25 17:11:25 +02001912{
Chris Wilson742379c2020-01-10 12:30:56 +00001913 /*
1914 * IPEHR would be an ideal way to detect errors, as it's the gross
	1915	 * measure of "the command that hung". However, it has some very common
	1916	 * synchronization commands which almost always appear in cases that are
	1917	 * strictly a client bug. Use instdone to differentiate some of those.
1918 */
1919 return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
1920}
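/*
 * Editor's example with illustrative values: ipehr == 0x0b160001 and
 * instdone.instdone == 0xffffffff would yield ecode
 * 0x0b160001 ^ 0xffffffff == 0xf4e9fffe.
 */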
1921
1922static const char *error_msg(struct i915_gpu_coredump *error)
1923{
1924 struct intel_engine_coredump *first = NULL;
Tvrtko Ursulin2dae0c82020-11-05 11:38:42 +00001925 unsigned int hung_classes = 0;
Chris Wilson742379c2020-01-10 12:30:56 +00001926 struct intel_gt_coredump *gt;
Chris Wilsoneb8d0f52019-01-25 13:22:28 +00001927 int len;
Mika Kuoppalacb383002014-02-25 17:11:25 +02001928
Chris Wilson742379c2020-01-10 12:30:56 +00001929 for (gt = error->gt; gt; gt = gt->next) {
1930 struct intel_engine_coredump *cs;
1931
Tvrtko Ursulinbda30022020-11-04 13:47:42 +00001932 for (cs = gt->engine; cs; cs = cs->next) {
1933 if (cs->hung) {
Tvrtko Ursulin2dae0c82020-11-05 11:38:42 +00001934 hung_classes |= BIT(cs->engine->uabi_class);
Tvrtko Ursulinbda30022020-11-04 13:47:42 +00001935 if (!first)
1936 first = cs;
1937 }
1938 }
Chris Wilson742379c2020-01-10 12:30:56 +00001939 }
1940
Mika Kuoppala58174462014-02-25 17:11:26 +02001941 len = scnprintf(error->error_msg, sizeof(error->error_msg),
Chris Wilson742379c2020-01-10 12:30:56 +00001942 "GPU HANG: ecode %d:%x:%08x",
Lucas De Marchi651e7d42021-06-05 21:50:49 -07001943 GRAPHICS_VER(error->i915), hung_classes,
Chris Wilson742379c2020-01-10 12:30:56 +00001944 generate_ecode(first));
Chris Wilson29baf3a2020-01-21 13:21:07 +00001945 if (first && first->context.pid) {
Chris Wilsoneb8d0f52019-01-25 13:22:28 +00001946		/* Just show the first executing process; more is confusing */
Mika Kuoppala58174462014-02-25 17:11:26 +02001947 len += scnprintf(error->error_msg + len,
1948 sizeof(error->error_msg) - len,
1949 ", in %s [%d]",
Chris Wilson742379c2020-01-10 12:30:56 +00001950 first->context.comm, first->context.pid);
Chris Wilsoneb8d0f52019-01-25 13:22:28 +00001951 }
Mika Kuoppala58174462014-02-25 17:11:26 +02001952
Chris Wilsoneb8d0f52019-01-25 13:22:28 +00001953 return error->error_msg;
Mika Kuoppalacb383002014-02-25 17:11:25 +02001954}
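/*
 * Editor's example of the resulting message (values illustrative):
 * "GPU HANG: ecode 12:1:f4e9fffe, in glxgears [1234]"
 * i.e. GRAPHICS_VER : hung-class bitmask : ecode, plus the first process.
 */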
1955
Chris Wilson742379c2020-01-10 12:30:56 +00001956static void capture_gen(struct i915_gpu_coredump *error)
Mika Kuoppala48b031e2014-02-25 17:11:27 +02001957{
Daniele Ceraolo Spurio53b725c2018-03-05 14:21:21 -08001958 struct drm_i915_private *i915 = error->i915;
1959
Daniele Ceraolo Spurio53b725c2018-03-05 14:21:21 -08001960 error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
1961 error->suspended = i915->runtime_pm.suspended;
Chris Wilsonf73b5672017-03-02 15:03:56 +00001962
Tvrtko Ursulina7f46d52022-03-29 10:02:04 +01001963 error->iommu = i915_vtd_active(i915);
Daniele Ceraolo Spurio53b725c2018-03-05 14:21:21 -08001964 error->reset_count = i915_reset_count(&i915->gpu_error);
1965 error->suspend_count = i915->suspend_count;
Chris Wilson2bd160a2016-08-15 10:48:45 +01001966
Jani Nikula8a25c4b2020-06-18 18:04:02 +03001967 i915_params_copy(&error->params, &i915->params);
Chris Wilson2bd160a2016-08-15 10:48:45 +01001968 memcpy(&error->device_info,
Daniele Ceraolo Spurio53b725c2018-03-05 14:21:21 -08001969 INTEL_INFO(i915),
Chris Wilson2bd160a2016-08-15 10:48:45 +01001970 sizeof(error->device_info));
Jani Nikula02584042018-12-31 16:56:41 +02001971 memcpy(&error->runtime_info,
1972 RUNTIME_INFO(i915),
1973 sizeof(error->runtime_info));
Daniele Ceraolo Spurio53b725c2018-03-05 14:21:21 -08001974 error->driver_caps = i915->caps;
Mika Kuoppala48b031e2014-02-25 17:11:27 +02001975}
1976
Chris Wilson742379c2020-01-10 12:30:56 +00001977struct i915_gpu_coredump *
1978i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
Michal Wajdeczko84a20a82017-10-26 17:36:57 +00001979{
Chris Wilson742379c2020-01-10 12:30:56 +00001980 struct i915_gpu_coredump *error;
Michal Wajdeczko84a20a82017-10-26 17:36:57 +00001981
Jani Nikula8a25c4b2020-06-18 18:04:02 +03001982 if (!i915->params.error_capture)
Chris Wilson742379c2020-01-10 12:30:56 +00001983 return NULL;
Chris Wilson8f5c6fe2018-10-01 20:44:46 +01001984
Chris Wilson742379c2020-01-10 12:30:56 +00001985 error = kzalloc(sizeof(*error), gfp);
1986 if (!error)
1987 return NULL;
Chris Wilson3bdd4f82019-07-22 23:28:47 +01001988
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001989 kref_init(&error->ref);
1990 error->i915 = i915;
1991
Chris Wilson3bdd4f82019-07-22 23:28:47 +01001992 error->time = ktime_get_real();
1993 error->boottime = ktime_get_boottime();
Michał Winiarski2cbc8762021-12-14 21:33:39 +02001994 error->uptime = ktime_sub(ktime_get(), to_gt(i915)->last_init_time);
Chris Wilson3bdd4f82019-07-22 23:28:47 +01001995 error->capture = jiffies;
1996
Chris Wilson742379c2020-01-10 12:30:56 +00001997 capture_gen(error);
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001998
1999 return error;
2000}
2001
Chris Wilson742379c2020-01-10 12:30:56 +00002002#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
2003
2004struct intel_gt_coredump *
Alan Previna6f0f9c2022-03-21 09:45:26 -07002005intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags)
Ben Widawsky1d762aa2014-01-30 00:19:35 -08002006{
Chris Wilson742379c2020-01-10 12:30:56 +00002007 struct intel_gt_coredump *gc;
Ben Widawsky1d762aa2014-01-30 00:19:35 -08002008
Chris Wilson742379c2020-01-10 12:30:56 +00002009 gc = kzalloc(sizeof(*gc), gfp);
2010 if (!gc)
2011 return NULL;
2012
2013 gc->_gt = gt;
2014 gc->awake = intel_gt_pm_is_awake(gt);
2015
Alan Previna6f0f9c2022-03-21 09:45:26 -07002016 gt_record_display_regs(gc);
2017 gt_record_global_nonguc_regs(gc);
2018
2019 /*
2020 * GuC dumps global, eng-class and eng-instance registers
2021 * (that can change as part of engine state during execution)
2022 * before an engine is reset due to a hung context.
2023 * GuC captures and reports all three groups of registers
2024 * together as a single set before the engine is reset.
2025 * Thus, if GuC triggered the context reset we retrieve
2026 * the register values as part of gt_record_engines.
2027 */
2028 if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE))
2029 gt_record_global_regs(gc);
2030
Chris Wilson742379c2020-01-10 12:30:56 +00002031 gt_record_fences(gc);
2032
2033 return gc;
2034}
2035
2036struct i915_vma_compress *
2037i915_vma_capture_prepare(struct intel_gt_coredump *gt)
2038{
2039 struct i915_vma_compress *compress;
2040
2041 compress = kmalloc(sizeof(*compress), ALLOW_FAIL);
2042 if (!compress)
2043 return NULL;
2044
2045 if (!compress_init(compress)) {
2046 kfree(compress);
2047 return NULL;
2048 }
2049
Chris Wilson742379c2020-01-10 12:30:56 +00002050 return compress;
2051}
2052
2053void i915_vma_capture_finish(struct intel_gt_coredump *gt,
2054 struct i915_vma_compress *compress)
2055{
2056 if (!compress)
Chris Wilson98a2f412016-10-12 10:05:18 +01002057 return;
2058
Chris Wilson742379c2020-01-10 12:30:56 +00002059 compress_fini(compress);
2060 kfree(compress);
2061}
2062
Thomas Hellströmff20afc42021-11-29 21:22:45 +01002063static struct i915_gpu_coredump *
Alan Previna6f0f9c2022-03-21 09:45:26 -07002064__i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
Chris Wilson742379c2020-01-10 12:30:56 +00002065{
Tvrtko Ursulinbda30022020-11-04 13:47:42 +00002066 struct drm_i915_private *i915 = gt->i915;
Chris Wilson742379c2020-01-10 12:30:56 +00002067 struct i915_gpu_coredump *error;
2068
2069 /* Check if GPU capture has been disabled */
2070 error = READ_ONCE(i915->gpu_error.first_error);
Chris Wilsone6154e42018-12-07 11:05:54 +00002071 if (IS_ERR(error))
Chris Wilson742379c2020-01-10 12:30:56 +00002072 return error;
Ben Widawsky1d762aa2014-01-30 00:19:35 -08002073
Chris Wilson742379c2020-01-10 12:30:56 +00002074 error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL);
2075 if (!error)
2076 return ERR_PTR(-ENOMEM);
Mika Kuoppalacb383002014-02-25 17:11:25 +02002077
Alan Previna6f0f9c2022-03-21 09:45:26 -07002078 error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL, dump_flags);
Chris Wilson742379c2020-01-10 12:30:56 +00002079 if (error->gt) {
2080 struct i915_vma_compress *compress;
2081
2082 compress = i915_vma_capture_prepare(error->gt);
2083 if (!compress) {
2084 kfree(error->gt);
2085 kfree(error);
2086 return ERR_PTR(-ENOMEM);
Chris Wilsonbc3d6742016-07-04 08:08:39 +01002087 }
Chris Wilson742379c2020-01-10 12:30:56 +00002088
Tvrtko Ursulin39921e52022-05-19 10:08:02 +01002089 if (INTEL_INFO(i915)->has_gt_uc) {
Chris Wilson742379c2020-01-10 12:30:56 +00002090 error->gt->uc = gt_record_uc(error->gt, compress);
Alan Previna6f0f9c2022-03-21 09:45:26 -07002091 if (error->gt->uc) {
2092 if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
John Harrisonc5de70f2022-07-27 19:20:25 -07002093 error->gt->uc->guc.is_guc_capture = true;
Alan Previna6f0f9c2022-03-21 09:45:26 -07002094 else
John Harrisonc5de70f2022-07-27 19:20:25 -07002095 GEM_BUG_ON(error->gt->uc->guc.is_guc_capture);
Alan Previna6f0f9c2022-03-21 09:45:26 -07002096 }
2097 }
2098
2099 gt_record_info(error->gt);
2100 gt_record_engines(error->gt, engine_mask, compress, dump_flags);
	2101
2103 i915_vma_capture_finish(error->gt, compress);
2104
2105 error->simulated |= error->gt->simulated;
Mika Kuoppala84734a02013-07-12 16:50:57 +03002106 }
Mika Kuoppala84734a02013-07-12 16:50:57 +03002107
Chris Wilson742379c2020-01-10 12:30:56 +00002108 error->overlay = intel_overlay_capture_error_state(i915);
Chris Wilson742379c2020-01-10 12:30:56 +00002109
2110 return error;
2111}
2112
Thomas Hellströmff20afc42021-11-29 21:22:45 +01002113struct i915_gpu_coredump *
Alan Previna6f0f9c2022-03-21 09:45:26 -07002114i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
Thomas Hellströmff20afc42021-11-29 21:22:45 +01002115{
2116 static DEFINE_MUTEX(capture_mutex);
2117 int ret = mutex_lock_interruptible(&capture_mutex);
2118 struct i915_gpu_coredump *dump;
2119
2120 if (ret)
2121 return ERR_PTR(ret);
2122
Alan Previna6f0f9c2022-03-21 09:45:26 -07002123 dump = __i915_gpu_coredump(gt, engine_mask, dump_flags);
Thomas Hellströmff20afc42021-11-29 21:22:45 +01002124 mutex_unlock(&capture_mutex);
2125
2126 return dump;
2127}
2128
Chris Wilson742379c2020-01-10 12:30:56 +00002129void i915_error_state_store(struct i915_gpu_coredump *error)
2130{
2131 struct drm_i915_private *i915;
2132 static bool warned;
2133
2134 if (IS_ERR_OR_NULL(error))
Mika Kuoppalacb383002014-02-25 17:11:25 +02002135 return;
Chris Wilson742379c2020-01-10 12:30:56 +00002136
2137 i915 = error->i915;
Jani Nikula58f44aa2020-04-02 14:48:13 +03002138 drm_info(&i915->drm, "%s\n", error_msg(error));
Chris Wilson742379c2020-01-10 12:30:56 +00002139
2140 if (error->simulated ||
2141 cmpxchg(&i915->gpu_error.first_error, NULL, error))
2142 return;
2143
2144 i915_gpu_coredump_get(error);
Mika Kuoppalacb383002014-02-25 17:11:25 +02002145
Chris Wilsona1e37b02019-08-19 08:58:21 +01002146 if (!xchg(&warned, true) &&
Chris Wilsoneafc4892016-10-14 14:44:28 +01002147 ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
Chris Wilson88f80652019-08-15 10:36:04 +01002148 pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
Jani Nikuladdae4d72020-02-12 18:04:34 +02002149 pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
2150 pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
Chris Wilson88f80652019-08-15 10:36:04 +01002151 pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
2152 pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
2153 pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
2154 i915->drm.primary->index);
Mika Kuoppalacb383002014-02-25 17:11:25 +02002155 }
Mika Kuoppala84734a02013-07-12 16:50:57 +03002156}
2157
Chris Wilson742379c2020-01-10 12:30:56 +00002158/**
2159 * i915_capture_error_state - capture an error record for later analysis
Tvrtko Ursulinbda30022020-11-04 13:47:42 +00002160 * @gt: intel_gt which originated the hang
2161 * @engine_mask: hung engines
	2162 * @dump_flags: CORE_DUMP_FLAG_* flags (e.g. CORE_DUMP_FLAG_IS_GUC_CAPTURE)
	2163 *
2164 * Should be called when an error is detected (either a hang or an error
2165 * interrupt) to capture error state from the time of the error. Fills
2166 * out a structure which becomes available in debugfs for user level tools
2167 * to pick up.
2168 */
Tvrtko Ursulinbda30022020-11-04 13:47:42 +00002169void i915_capture_error_state(struct intel_gt *gt,
Alan Previna6f0f9c2022-03-21 09:45:26 -07002170 intel_engine_mask_t engine_mask, u32 dump_flags)
Chris Wilson742379c2020-01-10 12:30:56 +00002171{
2172 struct i915_gpu_coredump *error;
2173
Alan Previna6f0f9c2022-03-21 09:45:26 -07002174 error = i915_gpu_coredump(gt, engine_mask, dump_flags);
Chris Wilson742379c2020-01-10 12:30:56 +00002175 if (IS_ERR(error)) {
Tvrtko Ursulinbda30022020-11-04 13:47:42 +00002176 cmpxchg(&gt->i915->gpu_error.first_error, NULL, error);
Chris Wilson742379c2020-01-10 12:30:56 +00002177 return;
2178 }
2179
2180 i915_error_state_store(error);
2181 i915_gpu_coredump_put(error);
2182}
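/*
 * Editor's sketch (not from the driver): a hang handler would typically
 * invoke the capture before resetting the hung engines, e.g.:
 *
 *	i915_capture_error_state(gt, hung_engine_mask, CORE_DUMP_FLAG_NONE);
 *
 * with CORE_DUMP_FLAG_IS_GUC_CAPTURE passed instead when the register state
 * was already dumped by the GuC as part of a GuC-initiated engine reset.
 */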
2183
2184struct i915_gpu_coredump *
Chris Wilson5a4c6f12017-02-14 16:46:11 +00002185i915_first_error_state(struct drm_i915_private *i915)
Mika Kuoppala84734a02013-07-12 16:50:57 +03002186{
Chris Wilson742379c2020-01-10 12:30:56 +00002187 struct i915_gpu_coredump *error;
Mika Kuoppala84734a02013-07-12 16:50:57 +03002188
Chris Wilson5a4c6f12017-02-14 16:46:11 +00002189 spin_lock_irq(&i915->gpu_error.lock);
2190 error = i915->gpu_error.first_error;
Chris Wilsone6154e42018-12-07 11:05:54 +00002191 if (!IS_ERR_OR_NULL(error))
Chris Wilson742379c2020-01-10 12:30:56 +00002192 i915_gpu_coredump_get(error);
Chris Wilson5a4c6f12017-02-14 16:46:11 +00002193 spin_unlock_irq(&i915->gpu_error.lock);
2194
2195 return error;
2196}
2197
2198void i915_reset_error_state(struct drm_i915_private *i915)
2199{
Chris Wilson742379c2020-01-10 12:30:56 +00002200 struct i915_gpu_coredump *error;
Chris Wilson5a4c6f12017-02-14 16:46:11 +00002201
2202 spin_lock_irq(&i915->gpu_error.lock);
2203 error = i915->gpu_error.first_error;
Chris Wilsone6154e42018-12-07 11:05:54 +00002204 if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
2205 i915->gpu_error.first_error = NULL;
Chris Wilson5a4c6f12017-02-14 16:46:11 +00002206 spin_unlock_irq(&i915->gpu_error.lock);
2207
Chris Wilsone6154e42018-12-07 11:05:54 +00002208 if (!IS_ERR_OR_NULL(error))
Chris Wilson742379c2020-01-10 12:30:56 +00002209 i915_gpu_coredump_put(error);
Chris Wilsonfb6f0b62018-11-02 16:12:12 +00002210}
2211
2212void i915_disable_error_state(struct drm_i915_private *i915, int err)
2213{
2214 spin_lock_irq(&i915->gpu_error.lock);
2215 if (!i915->gpu_error.first_error)
2216 i915->gpu_error.first_error = ERR_PTR(err);
2217 spin_unlock_irq(&i915->gpu_error.lock);
Mika Kuoppala84734a02013-07-12 16:50:57 +03002218}