1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2021-2022 Intel Corporation 4 */ 5 6 #include <linux/types.h> 7 8 #include <drm/drm_print.h> 9 10 #include "gt/intel_engine_regs.h" 11 #include "gt/intel_gt.h" 12 #include "gt/intel_gt_mcr.h" 13 #include "gt/intel_gt_regs.h" 14 #include "gt/intel_lrc.h" 15 #include "guc_capture_fwif.h" 16 #include "intel_guc_capture.h" 17 #include "intel_guc_fwif.h" 18 #include "intel_guc_print.h" 19 #include "i915_drv.h" 20 #include "i915_gpu_error.h" 21 #include "i915_irq.h" 22 #include "i915_memcpy.h" 23 #include "i915_reg.h" 24 25 /* 26 * Define all device tables of GuC error capture register lists 27 * NOTE: For engine-registers, GuC only needs the register offsets 28 * from the engine-mmio-base 29 */ 30 #define COMMON_BASE_GLOBAL \ 31 { FORCEWAKE_MT, 0, 0, "FORCEWAKE" } 32 33 #define COMMON_GEN8BASE_GLOBAL \ 34 { ERROR_GEN6, 0, 0, "ERROR_GEN6" }, \ 35 { DONE_REG, 0, 0, "DONE_REG" }, \ 36 { HSW_GTT_CACHE_EN, 0, 0, "HSW_GTT_CACHE_EN" } 37 38 #define GEN8_GLOBAL \ 39 { GEN8_FAULT_TLB_DATA0, 0, 0, "GEN8_FAULT_TLB_DATA0" }, \ 40 { GEN8_FAULT_TLB_DATA1, 0, 0, "GEN8_FAULT_TLB_DATA1" } 41 42 #define COMMON_GEN12BASE_GLOBAL \ 43 { GEN12_FAULT_TLB_DATA0, 0, 0, "GEN12_FAULT_TLB_DATA0" }, \ 44 { GEN12_FAULT_TLB_DATA1, 0, 0, "GEN12_FAULT_TLB_DATA1" }, \ 45 { GEN12_AUX_ERR_DBG, 0, 0, "AUX_ERR_DBG" }, \ 46 { GEN12_GAM_DONE, 0, 0, "GAM_DONE" }, \ 47 { GEN12_RING_FAULT_REG, 0, 0, "FAULT_REG" } 48 49 #define COMMON_BASE_ENGINE_INSTANCE \ 50 { RING_PSMI_CTL(0), 0, 0, "RC PSMI" }, \ 51 { RING_ESR(0), 0, 0, "ESR" }, \ 52 { RING_DMA_FADD(0), 0, 0, "RING_DMA_FADD_LDW" }, \ 53 { RING_DMA_FADD_UDW(0), 0, 0, "RING_DMA_FADD_UDW" }, \ 54 { RING_IPEIR(0), 0, 0, "IPEIR" }, \ 55 { RING_IPEHR(0), 0, 0, "IPEHR" }, \ 56 { RING_INSTPS(0), 0, 0, "INSTPS" }, \ 57 { RING_BBADDR(0), 0, 0, "RING_BBADDR_LOW32" }, \ 58 { RING_BBADDR_UDW(0), 0, 0, "RING_BBADDR_UP32" }, \ 59 { RING_BBSTATE(0), 0, 0, "BB_STATE" }, \ 60 { CCID(0), 0, 0, "CCID" }, \ 61 { RING_ACTHD(0), 0, 0, "ACTHD_LDW" }, \ 62 { RING_ACTHD_UDW(0), 0, 0, "ACTHD_UDW" }, \ 63 { RING_INSTPM(0), 0, 0, "INSTPM" }, \ 64 { RING_INSTDONE(0), 0, 0, "INSTDONE" }, \ 65 { RING_NOPID(0), 0, 0, "RING_NOPID" }, \ 66 { RING_START(0), 0, 0, "START" }, \ 67 { RING_HEAD(0), 0, 0, "HEAD" }, \ 68 { RING_TAIL(0), 0, 0, "TAIL" }, \ 69 { RING_CTL(0), 0, 0, "CTL" }, \ 70 { RING_MI_MODE(0), 0, 0, "MODE" }, \ 71 { RING_CONTEXT_CONTROL(0), 0, 0, "RING_CONTEXT_CONTROL" }, \ 72 { RING_HWS_PGA(0), 0, 0, "HWS" }, \ 73 { RING_MODE_GEN7(0), 0, 0, "GFX_MODE" }, \ 74 { GEN8_RING_PDP_LDW(0, 0), 0, 0, "PDP0_LDW" }, \ 75 { GEN8_RING_PDP_UDW(0, 0), 0, 0, "PDP0_UDW" }, \ 76 { GEN8_RING_PDP_LDW(0, 1), 0, 0, "PDP1_LDW" }, \ 77 { GEN8_RING_PDP_UDW(0, 1), 0, 0, "PDP1_UDW" }, \ 78 { GEN8_RING_PDP_LDW(0, 2), 0, 0, "PDP2_LDW" }, \ 79 { GEN8_RING_PDP_UDW(0, 2), 0, 0, "PDP2_UDW" }, \ 80 { GEN8_RING_PDP_LDW(0, 3), 0, 0, "PDP3_LDW" }, \ 81 { GEN8_RING_PDP_UDW(0, 3), 0, 0, "PDP3_UDW" } 82 83 #define COMMON_BASE_HAS_EU \ 84 { EIR, 0, 0, "EIR" } 85 86 #define COMMON_BASE_RENDER \ 87 { GEN7_SC_INSTDONE, 0, 0, "GEN7_SC_INSTDONE" } 88 89 #define COMMON_GEN12BASE_RENDER \ 90 { GEN12_SC_INSTDONE_EXTRA, 0, 0, "GEN12_SC_INSTDONE_EXTRA" }, \ 91 { GEN12_SC_INSTDONE_EXTRA2, 0, 0, "GEN12_SC_INSTDONE_EXTRA2" } 92 93 #define COMMON_GEN12BASE_VEC \ 94 { GEN12_SFC_DONE(0), 0, 0, "SFC_DONE[0]" }, \ 95 { GEN12_SFC_DONE(1), 0, 0, "SFC_DONE[1]" }, \ 96 { GEN12_SFC_DONE(2), 0, 0, "SFC_DONE[2]" }, \ 97 { GEN12_SFC_DONE(3), 0, 0, "SFC_DONE[3]" } 98 99 /* XE_LP Global */ 100 static const struct __guc_mmio_reg_descr xe_lp_global_regs[] = { 101 COMMON_BASE_GLOBAL, 102 COMMON_GEN8BASE_GLOBAL, 103 COMMON_GEN12BASE_GLOBAL, 104 }; 105 106 /* XE_LP Render / Compute Per-Class */ 107 static const struct __guc_mmio_reg_descr xe_lp_rc_class_regs[] = { 108 COMMON_BASE_HAS_EU, 109 COMMON_BASE_RENDER, 110 COMMON_GEN12BASE_RENDER, 111 }; 112 113 /* GEN8+ Render / Compute Per-Engine-Instance */ 114 static const struct __guc_mmio_reg_descr gen8_rc_inst_regs[] = { 115 COMMON_BASE_ENGINE_INSTANCE, 116 }; 117 118 /* GEN8+ Media Decode/Encode Per-Engine-Instance */ 119 static const struct __guc_mmio_reg_descr gen8_vd_inst_regs[] = { 120 COMMON_BASE_ENGINE_INSTANCE, 121 }; 122 123 /* XE_LP Video Enhancement Per-Class */ 124 static const struct __guc_mmio_reg_descr xe_lp_vec_class_regs[] = { 125 COMMON_GEN12BASE_VEC, 126 }; 127 128 /* GEN8+ Video Enhancement Per-Engine-Instance */ 129 static const struct __guc_mmio_reg_descr gen8_vec_inst_regs[] = { 130 COMMON_BASE_ENGINE_INSTANCE, 131 }; 132 133 /* GEN8+ Blitter Per-Engine-Instance */ 134 static const struct __guc_mmio_reg_descr gen8_blt_inst_regs[] = { 135 COMMON_BASE_ENGINE_INSTANCE, 136 }; 137 138 /* XE_LP - GSC Per-Engine-Instance */ 139 static const struct __guc_mmio_reg_descr xe_lp_gsc_inst_regs[] = { 140 COMMON_BASE_ENGINE_INSTANCE, 141 }; 142 143 /* GEN8 - Global */ 144 static const struct __guc_mmio_reg_descr gen8_global_regs[] = { 145 COMMON_BASE_GLOBAL, 146 COMMON_GEN8BASE_GLOBAL, 147 GEN8_GLOBAL, 148 }; 149 150 static const struct __guc_mmio_reg_descr gen8_rc_class_regs[] = { 151 COMMON_BASE_HAS_EU, 152 COMMON_BASE_RENDER, 153 }; 154 155 /* 156 * Empty list to prevent warnings about unknown class/instance types 157 * as not all class/instanace types have entries on all platforms. 158 */ 159 static const struct __guc_mmio_reg_descr empty_regs_list[] = { 160 }; 161 162 #define TO_GCAP_DEF_OWNER(x) (GUC_CAPTURE_LIST_INDEX_##x) 163 #define TO_GCAP_DEF_TYPE(x) (GUC_CAPTURE_LIST_TYPE_##x) 164 #define MAKE_REGLIST(regslist, regsowner, regstype, class) \ 165 { \ 166 regslist, \ 167 ARRAY_SIZE(regslist), \ 168 TO_GCAP_DEF_OWNER(regsowner), \ 169 TO_GCAP_DEF_TYPE(regstype), \ 170 class, \ 171 NULL, \ 172 } 173 174 /* List of lists */ 175 static const struct __guc_mmio_reg_descr_group gen8_lists[] = { 176 MAKE_REGLIST(gen8_global_regs, PF, GLOBAL, 0), 177 MAKE_REGLIST(gen8_rc_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE), 178 MAKE_REGLIST(gen8_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE), 179 MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEO), 180 MAKE_REGLIST(gen8_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEO), 181 MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE), 182 MAKE_REGLIST(gen8_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE), 183 MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_BLITTER), 184 MAKE_REGLIST(gen8_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_BLITTER), 185 MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_GSC_OTHER), 186 MAKE_REGLIST(empty_regs_list, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_GSC_OTHER), 187 {} 188 }; 189 190 static const struct __guc_mmio_reg_descr_group xe_lp_lists[] = { 191 MAKE_REGLIST(xe_lp_global_regs, PF, GLOBAL, 0), 192 MAKE_REGLIST(xe_lp_rc_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE), 193 MAKE_REGLIST(gen8_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE), 194 MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEO), 195 MAKE_REGLIST(gen8_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEO), 196 MAKE_REGLIST(xe_lp_vec_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE), 197 MAKE_REGLIST(gen8_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE), 198 MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_BLITTER), 199 MAKE_REGLIST(gen8_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_BLITTER), 200 MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_GSC_OTHER), 201 MAKE_REGLIST(xe_lp_gsc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_GSC_OTHER), 202 {} 203 }; 204 205 static const struct __guc_mmio_reg_descr_group * 206 guc_capture_get_one_list(const struct __guc_mmio_reg_descr_group *reglists, 207 u32 owner, u32 type, u32 id) 208 { 209 int i; 210 211 if (!reglists) 212 return NULL; 213 214 for (i = 0; reglists[i].list; ++i) { 215 if (reglists[i].owner == owner && reglists[i].type == type && 216 (reglists[i].engine == id || reglists[i].type == GUC_CAPTURE_LIST_TYPE_GLOBAL)) 217 return ®lists[i]; 218 } 219 220 return NULL; 221 } 222 223 static struct __guc_mmio_reg_descr_group * 224 guc_capture_get_one_ext_list(struct __guc_mmio_reg_descr_group *reglists, 225 u32 owner, u32 type, u32 id) 226 { 227 int i; 228 229 if (!reglists) 230 return NULL; 231 232 for (i = 0; reglists[i].extlist; ++i) { 233 if (reglists[i].owner == owner && reglists[i].type == type && 234 (reglists[i].engine == id || reglists[i].type == GUC_CAPTURE_LIST_TYPE_GLOBAL)) 235 return ®lists[i]; 236 } 237 238 return NULL; 239 } 240 241 static void guc_capture_free_extlists(struct __guc_mmio_reg_descr_group *reglists) 242 { 243 int i = 0; 244 245 if (!reglists) 246 return; 247 248 while (reglists[i].extlist) 249 kfree(reglists[i++].extlist); 250 } 251 252 struct __ext_steer_reg { 253 const char *name; 254 i915_mcr_reg_t reg; 255 }; 256 257 static const struct __ext_steer_reg gen8_extregs[] = { 258 {"GEN8_SAMPLER_INSTDONE", GEN8_SAMPLER_INSTDONE}, 259 {"GEN8_ROW_INSTDONE", GEN8_ROW_INSTDONE} 260 }; 261 262 static const struct __ext_steer_reg xehpg_extregs[] = { 263 {"XEHPG_INSTDONE_GEOM_SVG", XEHPG_INSTDONE_GEOM_SVG} 264 }; 265 266 static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext, 267 const struct __ext_steer_reg *extlist, 268 int slice_id, int subslice_id) 269 { 270 ext->reg = _MMIO(i915_mmio_reg_offset(extlist->reg)); 271 ext->flags = FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id); 272 ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id); 273 ext->regname = extlist->name; 274 } 275 276 static int 277 __alloc_ext_regs(struct __guc_mmio_reg_descr_group *newlist, 278 const struct __guc_mmio_reg_descr_group *rootlist, int num_regs) 279 { 280 struct __guc_mmio_reg_descr *list; 281 282 list = kcalloc(num_regs, sizeof(struct __guc_mmio_reg_descr), GFP_KERNEL); 283 if (!list) 284 return -ENOMEM; 285 286 newlist->extlist = list; 287 newlist->num_regs = num_regs; 288 newlist->owner = rootlist->owner; 289 newlist->engine = rootlist->engine; 290 newlist->type = rootlist->type; 291 292 return 0; 293 } 294 295 static void 296 guc_capture_alloc_steered_lists(struct intel_guc *guc, 297 const struct __guc_mmio_reg_descr_group *lists) 298 { 299 struct intel_gt *gt = guc_to_gt(guc); 300 int slice, subslice, iter, i, num_steer_regs, num_tot_regs = 0; 301 const struct __guc_mmio_reg_descr_group *list; 302 struct __guc_mmio_reg_descr_group *extlists; 303 struct __guc_mmio_reg_descr *extarray; 304 bool has_xehpg_extregs; 305 306 /* steered registers currently only exist for the render-class */ 307 list = guc_capture_get_one_list(lists, GUC_CAPTURE_LIST_INDEX_PF, 308 GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS, 309 GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE); 310 /* skip if extlists was previously allocated */ 311 if (!list || guc->capture->extlists) 312 return; 313 314 has_xehpg_extregs = GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55); 315 316 num_steer_regs = ARRAY_SIZE(gen8_extregs); 317 if (has_xehpg_extregs) 318 num_steer_regs += ARRAY_SIZE(xehpg_extregs); 319 320 for_each_ss_steering(iter, gt, slice, subslice) 321 num_tot_regs += num_steer_regs; 322 323 if (!num_tot_regs) 324 return; 325 326 /* allocate an extra for an end marker */ 327 extlists = kcalloc(2, sizeof(struct __guc_mmio_reg_descr_group), GFP_KERNEL); 328 if (!extlists) 329 return; 330 331 if (__alloc_ext_regs(&extlists[0], list, num_tot_regs)) { 332 kfree(extlists); 333 return; 334 } 335 336 extarray = extlists[0].extlist; 337 for_each_ss_steering(iter, gt, slice, subslice) { 338 for (i = 0; i < ARRAY_SIZE(gen8_extregs); ++i) { 339 __fill_ext_reg(extarray, &gen8_extregs[i], slice, subslice); 340 ++extarray; 341 } 342 343 if (has_xehpg_extregs) { 344 for (i = 0; i < ARRAY_SIZE(xehpg_extregs); ++i) { 345 __fill_ext_reg(extarray, &xehpg_extregs[i], slice, subslice); 346 ++extarray; 347 } 348 } 349 } 350 351 guc_dbg(guc, "capture found %d ext-regs.\n", num_tot_regs); 352 guc->capture->extlists = extlists; 353 } 354 355 static const struct __guc_mmio_reg_descr_group * 356 guc_capture_get_device_reglist(struct intel_guc *guc) 357 { 358 struct drm_i915_private *i915 = guc_to_gt(guc)->i915; 359 const struct __guc_mmio_reg_descr_group *lists; 360 361 if (GRAPHICS_VER(i915) >= 12) 362 lists = xe_lp_lists; 363 else 364 lists = gen8_lists; 365 366 /* 367 * For certain engine classes, there are slice and subslice 368 * level registers requiring steering. We allocate and populate 369 * these at init time based on hw config add it as an extension 370 * list at the end of the pre-populated render list. 371 */ 372 guc_capture_alloc_steered_lists(guc, lists); 373 374 return lists; 375 } 376 377 static const char * 378 __stringify_type(u32 type) 379 { 380 switch (type) { 381 case GUC_CAPTURE_LIST_TYPE_GLOBAL: 382 return "Global"; 383 case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS: 384 return "Class"; 385 case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE: 386 return "Instance"; 387 default: 388 break; 389 } 390 391 return "unknown"; 392 } 393 394 static const char * 395 __stringify_engclass(u32 class) 396 { 397 switch (class) { 398 case GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE: 399 return "Render/Compute"; 400 case GUC_CAPTURE_LIST_CLASS_VIDEO: 401 return "Video"; 402 case GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE: 403 return "VideoEnhance"; 404 case GUC_CAPTURE_LIST_CLASS_BLITTER: 405 return "Blitter"; 406 case GUC_CAPTURE_LIST_CLASS_GSC_OTHER: 407 return "GSC-Other"; 408 default: 409 break; 410 } 411 412 return "unknown"; 413 } 414 415 static int 416 guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid, 417 struct guc_mmio_reg *ptr, u16 num_entries) 418 { 419 u32 i = 0, j = 0; 420 const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists; 421 struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists; 422 const struct __guc_mmio_reg_descr_group *match; 423 struct __guc_mmio_reg_descr_group *matchext; 424 425 if (!reglists) 426 return -ENODEV; 427 428 match = guc_capture_get_one_list(reglists, owner, type, classid); 429 if (!match) 430 return -ENODATA; 431 432 for (i = 0; i < num_entries && i < match->num_regs; ++i) { 433 ptr[i].offset = match->list[i].reg.reg; 434 ptr[i].value = 0xDEADF00D; 435 ptr[i].flags = match->list[i].flags; 436 ptr[i].mask = match->list[i].mask; 437 } 438 439 matchext = guc_capture_get_one_ext_list(extlists, owner, type, classid); 440 if (matchext) { 441 for (i = match->num_regs, j = 0; i < num_entries && 442 i < (match->num_regs + matchext->num_regs) && 443 j < matchext->num_regs; ++i, ++j) { 444 ptr[i].offset = matchext->extlist[j].reg.reg; 445 ptr[i].value = 0xDEADF00D; 446 ptr[i].flags = matchext->extlist[j].flags; 447 ptr[i].mask = matchext->extlist[j].mask; 448 } 449 } 450 if (i < num_entries) 451 guc_dbg(guc, "Got short capture reglist init: %d out %d.\n", i, num_entries); 452 453 return 0; 454 } 455 456 static int 457 guc_cap_list_num_regs(struct intel_guc_state_capture *gc, u32 owner, u32 type, u32 classid) 458 { 459 const struct __guc_mmio_reg_descr_group *match; 460 struct __guc_mmio_reg_descr_group *matchext; 461 int num_regs; 462 463 match = guc_capture_get_one_list(gc->reglists, owner, type, classid); 464 if (!match) 465 return 0; 466 467 num_regs = match->num_regs; 468 469 matchext = guc_capture_get_one_ext_list(gc->extlists, owner, type, classid); 470 if (matchext) 471 num_regs += matchext->num_regs; 472 473 return num_regs; 474 } 475 476 static int 477 guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid, 478 size_t *size, bool is_purpose_est) 479 { 480 struct intel_guc_state_capture *gc = guc->capture; 481 struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid]; 482 int num_regs; 483 484 if (!gc->reglists) { 485 guc_warn(guc, "No capture reglist for this device\n"); 486 return -ENODEV; 487 } 488 489 if (cache->is_valid) { 490 *size = cache->size; 491 return cache->status; 492 } 493 494 if (!is_purpose_est && owner == GUC_CAPTURE_LIST_INDEX_PF && 495 !guc_capture_get_one_list(gc->reglists, owner, type, classid)) { 496 if (type == GUC_CAPTURE_LIST_TYPE_GLOBAL) 497 guc_warn(guc, "Missing capture reglist: global!\n"); 498 else 499 guc_warn(guc, "Missing capture reglist: %s(%u):%s(%u)!\n", 500 __stringify_type(type), type, 501 __stringify_engclass(classid), classid); 502 return -ENODATA; 503 } 504 505 num_regs = guc_cap_list_num_regs(gc, owner, type, classid); 506 /* intentional empty lists can exist depending on hw config */ 507 if (!num_regs) 508 return -ENODATA; 509 510 if (size) 511 *size = PAGE_ALIGN((sizeof(struct guc_debug_capture_list)) + 512 (num_regs * sizeof(struct guc_mmio_reg))); 513 514 return 0; 515 } 516 517 int 518 intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid, 519 size_t *size) 520 { 521 return guc_capture_getlistsize(guc, owner, type, classid, size, false); 522 } 523 524 static void guc_capture_create_prealloc_nodes(struct intel_guc *guc); 525 526 int 527 intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classid, 528 void **outptr) 529 { 530 struct intel_guc_state_capture *gc = guc->capture; 531 struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid]; 532 struct guc_debug_capture_list *listnode; 533 int ret, num_regs; 534 u8 *caplist, *tmp; 535 size_t size = 0; 536 537 if (!gc->reglists) 538 return -ENODEV; 539 540 if (cache->is_valid) { 541 *outptr = cache->ptr; 542 return cache->status; 543 } 544 545 /* 546 * ADS population of input registers is a good 547 * time to pre-allocate cachelist output nodes 548 */ 549 guc_capture_create_prealloc_nodes(guc); 550 551 ret = intel_guc_capture_getlistsize(guc, owner, type, classid, &size); 552 if (ret) { 553 cache->is_valid = true; 554 cache->ptr = NULL; 555 cache->size = 0; 556 cache->status = ret; 557 return ret; 558 } 559 560 caplist = kzalloc(size, GFP_KERNEL); 561 if (!caplist) { 562 guc_dbg(guc, "Failed to alloc cached register capture list"); 563 return -ENOMEM; 564 } 565 566 /* populate capture list header */ 567 tmp = caplist; 568 num_regs = guc_cap_list_num_regs(guc->capture, owner, type, classid); 569 listnode = (struct guc_debug_capture_list *)tmp; 570 listnode->header.info = FIELD_PREP(GUC_CAPTURELISTHDR_NUMDESCR, (u32)num_regs); 571 572 /* populate list of register descriptor */ 573 tmp += sizeof(struct guc_debug_capture_list); 574 guc_capture_list_init(guc, owner, type, classid, (struct guc_mmio_reg *)tmp, num_regs); 575 576 /* cache this list */ 577 cache->is_valid = true; 578 cache->ptr = caplist; 579 cache->size = size; 580 cache->status = 0; 581 582 *outptr = caplist; 583 584 return 0; 585 } 586 587 int 588 intel_guc_capture_getnullheader(struct intel_guc *guc, 589 void **outptr, size_t *size) 590 { 591 struct intel_guc_state_capture *gc = guc->capture; 592 int tmp = sizeof(u32) * 4; 593 void *null_header; 594 595 if (gc->ads_null_cache) { 596 *outptr = gc->ads_null_cache; 597 *size = tmp; 598 return 0; 599 } 600 601 null_header = kzalloc(tmp, GFP_KERNEL); 602 if (!null_header) { 603 guc_dbg(guc, "Failed to alloc cached register capture null list"); 604 return -ENOMEM; 605 } 606 607 gc->ads_null_cache = null_header; 608 *outptr = null_header; 609 *size = tmp; 610 611 return 0; 612 } 613 614 static int 615 guc_capture_output_min_size_est(struct intel_guc *guc) 616 { 617 struct intel_gt *gt = guc_to_gt(guc); 618 struct intel_engine_cs *engine; 619 enum intel_engine_id id; 620 int worst_min_size = 0; 621 size_t tmp = 0; 622 623 if (!guc->capture) 624 return -ENODEV; 625 626 /* 627 * If every single engine-instance suffered a failure in quick succession but 628 * were all unrelated, then a burst of multiple error-capture events would dump 629 * registers for every one engine instance, one at a time. In this case, GuC 630 * would even dump the global-registers repeatedly. 631 * 632 * For each engine instance, there would be 1 x guc_state_capture_group_t output 633 * followed by 3 x guc_state_capture_t lists. The latter is how the register 634 * dumps are split across different register types (where the '3' are global vs class 635 * vs instance). 636 */ 637 for_each_engine(engine, gt, id) { 638 worst_min_size += sizeof(struct guc_state_capture_group_header_t) + 639 (3 * sizeof(struct guc_state_capture_header_t)); 640 641 if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &tmp, true)) 642 worst_min_size += tmp; 643 644 if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS, 645 engine->class, &tmp, true)) { 646 worst_min_size += tmp; 647 } 648 if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE, 649 engine->class, &tmp, true)) { 650 worst_min_size += tmp; 651 } 652 } 653 654 return worst_min_size; 655 } 656 657 /* 658 * Add on a 3x multiplier to allow for multiple back-to-back captures occurring 659 * before the i915 can read the data out and process it 660 */ 661 #define GUC_CAPTURE_OVERBUFFER_MULTIPLIER 3 662 663 static void check_guc_capture_size(struct intel_guc *guc) 664 { 665 int min_size = guc_capture_output_min_size_est(guc); 666 int spare_size = min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER; 667 u32 buffer_size = intel_guc_log_section_size_capture(&guc->log); 668 669 /* 670 * NOTE: min_size is much smaller than the capture region allocation (DG2: <80K vs 1MB) 671 * Additionally, its based on space needed to fit all engines getting reset at once 672 * within the same G2H handler task slot. This is very unlikely. However, if GuC really 673 * does run out of space for whatever reason, we will see an separate warning message 674 * when processing the G2H event capture-notification, search for: 675 * INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE. 676 */ 677 if (min_size < 0) 678 guc_warn(guc, "Failed to calculate error state capture buffer minimum size: %d!\n", 679 min_size); 680 else if (min_size > buffer_size) 681 guc_warn(guc, "Error state capture buffer maybe small: %d < %d\n", 682 buffer_size, min_size); 683 else if (spare_size > buffer_size) 684 guc_dbg(guc, "Error state capture buffer lacks spare size: %d < %d (min = %d)\n", 685 buffer_size, spare_size, min_size); 686 } 687 688 /* 689 * KMD Init time flows: 690 * -------------------- 691 * --> alloc A: GuC input capture regs lists (registered to GuC via ADS). 692 * intel_guc_ads acquires the register lists by calling 693 * intel_guc_capture_list_size and intel_guc_capture_list_get 'n' times, 694 * where n = 1 for global-reg-list + 695 * num_engine_classes for class-reg-list + 696 * num_engine_classes for instance-reg-list 697 * (since all instances of the same engine-class type 698 * have an identical engine-instance register-list). 699 * ADS module also calls separately for PF vs VF. 700 * 701 * --> alloc B: GuC output capture buf (registered via guc_init_params(log_param)) 702 * Size = #define CAPTURE_BUFFER_SIZE (warns if on too-small) 703 * Note2: 'x 3' to hold multiple capture groups 704 * 705 * GUC Runtime notify capture: 706 * -------------------------- 707 * --> G2H STATE_CAPTURE_NOTIFICATION 708 * L--> intel_guc_capture_process 709 * L--> Loop through B (head..tail) and for each engine instance's 710 * err-state-captured register-list we find, we alloc 'C': 711 * --> alloc C: A capture-output-node structure that includes misc capture info along 712 * with 3 register list dumps (global, engine-class and engine-instance) 713 * This node is created from a pre-allocated list of blank nodes in 714 * guc->capture->cachelist and populated with the error-capture 715 * data from GuC and then it's added into guc->capture->outlist linked 716 * list. This list is used for matchup and printout by i915_gpu_coredump 717 * and err_print_gt, (when user invokes the error capture sysfs). 718 * 719 * GUC --> notify context reset: 720 * ----------------------------- 721 * --> G2H CONTEXT RESET 722 * L--> guc_handle_context_reset --> i915_capture_error_state 723 * L--> i915_gpu_coredump(..IS_GUC_CAPTURE) --> gt_record_engines 724 * --> capture_engine(..IS_GUC_CAPTURE) 725 * L--> intel_guc_capture_get_matching_node is where 726 * detach C from internal linked list and add it into 727 * intel_engine_coredump struct (if the context and 728 * engine of the event notification matches a node 729 * in the link list). 730 * 731 * User Sysfs / Debugfs 732 * -------------------- 733 * --> i915_gpu_coredump_copy_to_buffer-> 734 * L--> err_print_to_sgl --> err_print_gt 735 * L--> error_print_guc_captures 736 * L--> intel_guc_capture_print_node prints the 737 * register lists values of the attached node 738 * on the error-engine-dump being reported. 739 * L--> i915_reset_error_state ... -->__i915_gpu_coredump_free 740 * L--> ... cleanup_gt --> 741 * L--> intel_guc_capture_free_node returns the 742 * capture-output-node back to the internal 743 * cachelist for reuse. 744 * 745 */ 746 747 static int guc_capture_buf_cnt(struct __guc_capture_bufstate *buf) 748 { 749 if (buf->wr >= buf->rd) 750 return (buf->wr - buf->rd); 751 return (buf->size - buf->rd) + buf->wr; 752 } 753 754 static int guc_capture_buf_cnt_to_end(struct __guc_capture_bufstate *buf) 755 { 756 if (buf->rd > buf->wr) 757 return (buf->size - buf->rd); 758 return (buf->wr - buf->rd); 759 } 760 761 /* 762 * GuC's error-capture output is a ring buffer populated in a byte-stream fashion: 763 * 764 * The GuC Log buffer region for error-capture is managed like a ring buffer. 765 * The GuC firmware dumps error capture logs into this ring in a byte-stream flow. 766 * Additionally, as per the current and foreseeable future, all packed error- 767 * capture output structures are dword aligned. 768 * 769 * That said, if the GuC firmware is in the midst of writing a structure that is larger 770 * than one dword but the tail end of the err-capture buffer-region has lesser space left, 771 * we would need to extract that structure one dword at a time straddled across the end, 772 * onto the start of the ring. 773 * 774 * Below function, guc_capture_log_remove_dw is a helper for that. All callers of this 775 * function would typically do a straight-up memcpy from the ring contents and will only 776 * call this helper if their structure-extraction is straddling across the end of the 777 * ring. GuC firmware does not add any padding. The reason for the no-padding is to ease 778 * scalability for future expansion of output data types without requiring a redesign 779 * of the flow controls. 780 */ 781 static int 782 guc_capture_log_remove_dw(struct intel_guc *guc, struct __guc_capture_bufstate *buf, 783 u32 *dw) 784 { 785 int tries = 2; 786 int avail = 0; 787 u32 *src_data; 788 789 if (!guc_capture_buf_cnt(buf)) 790 return 0; 791 792 while (tries--) { 793 avail = guc_capture_buf_cnt_to_end(buf); 794 if (avail >= sizeof(u32)) { 795 src_data = (u32 *)(buf->data + buf->rd); 796 *dw = *src_data; 797 buf->rd += 4; 798 return 4; 799 } 800 if (avail) 801 guc_dbg(guc, "Register capture log not dword aligned, skipping.\n"); 802 buf->rd = 0; 803 } 804 805 return 0; 806 } 807 808 static bool 809 guc_capture_data_extracted(struct __guc_capture_bufstate *b, 810 int size, void *dest) 811 { 812 if (guc_capture_buf_cnt_to_end(b) >= size) { 813 memcpy(dest, (b->data + b->rd), size); 814 b->rd += size; 815 return true; 816 } 817 return false; 818 } 819 820 static int 821 guc_capture_log_get_group_hdr(struct intel_guc *guc, struct __guc_capture_bufstate *buf, 822 struct guc_state_capture_group_header_t *ghdr) 823 { 824 int read = 0; 825 int fullsize = sizeof(struct guc_state_capture_group_header_t); 826 827 if (fullsize > guc_capture_buf_cnt(buf)) 828 return -1; 829 830 if (guc_capture_data_extracted(buf, fullsize, (void *)ghdr)) 831 return 0; 832 833 read += guc_capture_log_remove_dw(guc, buf, &ghdr->owner); 834 read += guc_capture_log_remove_dw(guc, buf, &ghdr->info); 835 if (read != fullsize) 836 return -1; 837 838 return 0; 839 } 840 841 static int 842 guc_capture_log_get_data_hdr(struct intel_guc *guc, struct __guc_capture_bufstate *buf, 843 struct guc_state_capture_header_t *hdr) 844 { 845 int read = 0; 846 int fullsize = sizeof(struct guc_state_capture_header_t); 847 848 if (fullsize > guc_capture_buf_cnt(buf)) 849 return -1; 850 851 if (guc_capture_data_extracted(buf, fullsize, (void *)hdr)) 852 return 0; 853 854 read += guc_capture_log_remove_dw(guc, buf, &hdr->owner); 855 read += guc_capture_log_remove_dw(guc, buf, &hdr->info); 856 read += guc_capture_log_remove_dw(guc, buf, &hdr->lrca); 857 read += guc_capture_log_remove_dw(guc, buf, &hdr->guc_id); 858 read += guc_capture_log_remove_dw(guc, buf, &hdr->num_mmios); 859 if (read != fullsize) 860 return -1; 861 862 return 0; 863 } 864 865 static int 866 guc_capture_log_get_register(struct intel_guc *guc, struct __guc_capture_bufstate *buf, 867 struct guc_mmio_reg *reg) 868 { 869 int read = 0; 870 int fullsize = sizeof(struct guc_mmio_reg); 871 872 if (fullsize > guc_capture_buf_cnt(buf)) 873 return -1; 874 875 if (guc_capture_data_extracted(buf, fullsize, (void *)reg)) 876 return 0; 877 878 read += guc_capture_log_remove_dw(guc, buf, ®->offset); 879 read += guc_capture_log_remove_dw(guc, buf, ®->value); 880 read += guc_capture_log_remove_dw(guc, buf, ®->flags); 881 read += guc_capture_log_remove_dw(guc, buf, ®->mask); 882 if (read != fullsize) 883 return -1; 884 885 return 0; 886 } 887 888 static void 889 guc_capture_delete_one_node(struct intel_guc *guc, struct __guc_capture_parsed_output *node) 890 { 891 int i; 892 893 for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) 894 kfree(node->reginfo[i].regs); 895 list_del(&node->link); 896 kfree(node); 897 } 898 899 static void 900 guc_capture_delete_prealloc_nodes(struct intel_guc *guc) 901 { 902 struct __guc_capture_parsed_output *n, *ntmp; 903 904 /* 905 * NOTE: At the end of driver operation, we must assume that we 906 * have prealloc nodes in both the cachelist as well as outlist 907 * if unclaimed error capture events occurred prior to shutdown. 908 */ 909 list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) 910 guc_capture_delete_one_node(guc, n); 911 912 list_for_each_entry_safe(n, ntmp, &guc->capture->cachelist, link) 913 guc_capture_delete_one_node(guc, n); 914 } 915 916 static void 917 guc_capture_add_node_to_list(struct __guc_capture_parsed_output *node, 918 struct list_head *list) 919 { 920 list_add_tail(&node->link, list); 921 } 922 923 static void 924 guc_capture_add_node_to_outlist(struct intel_guc_state_capture *gc, 925 struct __guc_capture_parsed_output *node) 926 { 927 guc_capture_add_node_to_list(node, &gc->outlist); 928 } 929 930 static void 931 guc_capture_add_node_to_cachelist(struct intel_guc_state_capture *gc, 932 struct __guc_capture_parsed_output *node) 933 { 934 guc_capture_add_node_to_list(node, &gc->cachelist); 935 } 936 937 static void 938 guc_capture_init_node(struct intel_guc *guc, struct __guc_capture_parsed_output *node) 939 { 940 struct guc_mmio_reg *tmp[GUC_CAPTURE_LIST_TYPE_MAX]; 941 int i; 942 943 for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) { 944 tmp[i] = node->reginfo[i].regs; 945 memset(tmp[i], 0, sizeof(struct guc_mmio_reg) * 946 guc->capture->max_mmio_per_node); 947 } 948 memset(node, 0, sizeof(*node)); 949 for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) 950 node->reginfo[i].regs = tmp[i]; 951 952 INIT_LIST_HEAD(&node->link); 953 } 954 955 static struct __guc_capture_parsed_output * 956 guc_capture_get_prealloc_node(struct intel_guc *guc) 957 { 958 struct __guc_capture_parsed_output *found = NULL; 959 960 if (!list_empty(&guc->capture->cachelist)) { 961 struct __guc_capture_parsed_output *n, *ntmp; 962 963 /* get first avail node from the cache list */ 964 list_for_each_entry_safe(n, ntmp, &guc->capture->cachelist, link) { 965 found = n; 966 list_del(&n->link); 967 break; 968 } 969 } else { 970 struct __guc_capture_parsed_output *n, *ntmp; 971 972 /* traverse down and steal back the oldest node already allocated */ 973 list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) { 974 found = n; 975 } 976 if (found) 977 list_del(&found->link); 978 } 979 if (found) 980 guc_capture_init_node(guc, found); 981 982 return found; 983 } 984 985 static struct __guc_capture_parsed_output * 986 guc_capture_alloc_one_node(struct intel_guc *guc) 987 { 988 struct __guc_capture_parsed_output *new; 989 int i; 990 991 new = kzalloc(sizeof(*new), GFP_KERNEL); 992 if (!new) 993 return NULL; 994 995 for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) { 996 new->reginfo[i].regs = kcalloc(guc->capture->max_mmio_per_node, 997 sizeof(struct guc_mmio_reg), GFP_KERNEL); 998 if (!new->reginfo[i].regs) { 999 while (i) 1000 kfree(new->reginfo[--i].regs); 1001 kfree(new); 1002 return NULL; 1003 } 1004 } 1005 guc_capture_init_node(guc, new); 1006 1007 return new; 1008 } 1009 1010 static struct __guc_capture_parsed_output * 1011 guc_capture_clone_node(struct intel_guc *guc, struct __guc_capture_parsed_output *original, 1012 u32 keep_reglist_mask) 1013 { 1014 struct __guc_capture_parsed_output *new; 1015 int i; 1016 1017 new = guc_capture_get_prealloc_node(guc); 1018 if (!new) 1019 return NULL; 1020 if (!original) 1021 return new; 1022 1023 new->is_partial = original->is_partial; 1024 1025 /* copy reg-lists that we want to clone */ 1026 for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) { 1027 if (keep_reglist_mask & BIT(i)) { 1028 GEM_BUG_ON(original->reginfo[i].num_regs > 1029 guc->capture->max_mmio_per_node); 1030 1031 memcpy(new->reginfo[i].regs, original->reginfo[i].regs, 1032 original->reginfo[i].num_regs * sizeof(struct guc_mmio_reg)); 1033 1034 new->reginfo[i].num_regs = original->reginfo[i].num_regs; 1035 new->reginfo[i].vfid = original->reginfo[i].vfid; 1036 1037 if (i == GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS) { 1038 new->eng_class = original->eng_class; 1039 } else if (i == GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE) { 1040 new->eng_inst = original->eng_inst; 1041 new->guc_id = original->guc_id; 1042 new->lrca = original->lrca; 1043 } 1044 } 1045 } 1046 1047 return new; 1048 } 1049 1050 static void 1051 __guc_capture_create_prealloc_nodes(struct intel_guc *guc) 1052 { 1053 struct __guc_capture_parsed_output *node = NULL; 1054 int i; 1055 1056 for (i = 0; i < PREALLOC_NODES_MAX_COUNT; ++i) { 1057 node = guc_capture_alloc_one_node(guc); 1058 if (!node) { 1059 guc_warn(guc, "Register capture pre-alloc-cache failure\n"); 1060 /* dont free the priors, use what we got and cleanup at shutdown */ 1061 return; 1062 } 1063 guc_capture_add_node_to_cachelist(guc->capture, node); 1064 } 1065 } 1066 1067 static int 1068 guc_get_max_reglist_count(struct intel_guc *guc) 1069 { 1070 int i, j, k, tmp, maxregcount = 0; 1071 1072 for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; ++i) { 1073 for (j = 0; j < GUC_CAPTURE_LIST_TYPE_MAX; ++j) { 1074 for (k = 0; k < GUC_MAX_ENGINE_CLASSES; ++k) { 1075 if (j == GUC_CAPTURE_LIST_TYPE_GLOBAL && k > 0) 1076 continue; 1077 1078 tmp = guc_cap_list_num_regs(guc->capture, i, j, k); 1079 if (tmp > maxregcount) 1080 maxregcount = tmp; 1081 } 1082 } 1083 } 1084 if (!maxregcount) 1085 maxregcount = PREALLOC_NODES_DEFAULT_NUMREGS; 1086 1087 return maxregcount; 1088 } 1089 1090 static void 1091 guc_capture_create_prealloc_nodes(struct intel_guc *guc) 1092 { 1093 /* skip if we've already done the pre-alloc */ 1094 if (guc->capture->max_mmio_per_node) 1095 return; 1096 1097 guc->capture->max_mmio_per_node = guc_get_max_reglist_count(guc); 1098 __guc_capture_create_prealloc_nodes(guc); 1099 } 1100 1101 static int 1102 guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstate *buf) 1103 { 1104 struct guc_state_capture_group_header_t ghdr = {0}; 1105 struct guc_state_capture_header_t hdr = {0}; 1106 struct __guc_capture_parsed_output *node = NULL; 1107 struct guc_mmio_reg *regs = NULL; 1108 int i, numlists, numregs, ret = 0; 1109 enum guc_capture_type datatype; 1110 struct guc_mmio_reg tmp; 1111 bool is_partial = false; 1112 1113 i = guc_capture_buf_cnt(buf); 1114 if (!i) 1115 return -ENODATA; 1116 if (i % sizeof(u32)) { 1117 guc_warn(guc, "Got mis-aligned register capture entries\n"); 1118 ret = -EIO; 1119 goto bailout; 1120 } 1121 1122 /* first get the capture group header */ 1123 if (guc_capture_log_get_group_hdr(guc, buf, &ghdr)) { 1124 ret = -EIO; 1125 goto bailout; 1126 } 1127 /* 1128 * we would typically expect a layout as below where n would be expected to be 1129 * anywhere between 3 to n where n > 3 if we are seeing multiple dependent engine 1130 * instances being reset together. 1131 * ____________________________________________ 1132 * | Capture Group | 1133 * | ________________________________________ | 1134 * | | Capture Group Header: | | 1135 * | | - num_captures = 5 | | 1136 * | |______________________________________| | 1137 * | ________________________________________ | 1138 * | | Capture1: | | 1139 * | | Hdr: GLOBAL, numregs=a | | 1140 * | | ____________________________________ | | 1141 * | | | Reglist | | | 1142 * | | | - reg1, reg2, ... rega | | | 1143 * | | |__________________________________| | | 1144 * | |______________________________________| | 1145 * | ________________________________________ | 1146 * | | Capture2: | | 1147 * | | Hdr: CLASS=RENDER/COMPUTE, numregs=b| | 1148 * | | ____________________________________ | | 1149 * | | | Reglist | | | 1150 * | | | - reg1, reg2, ... regb | | | 1151 * | | |__________________________________| | | 1152 * | |______________________________________| | 1153 * | ________________________________________ | 1154 * | | Capture3: | | 1155 * | | Hdr: INSTANCE=RCS, numregs=c | | 1156 * | | ____________________________________ | | 1157 * | | | Reglist | | | 1158 * | | | - reg1, reg2, ... regc | | | 1159 * | | |__________________________________| | | 1160 * | |______________________________________| | 1161 * | ________________________________________ | 1162 * | | Capture4: | | 1163 * | | Hdr: CLASS=RENDER/COMPUTE, numregs=d| | 1164 * | | ____________________________________ | | 1165 * | | | Reglist | | | 1166 * | | | - reg1, reg2, ... regd | | | 1167 * | | |__________________________________| | | 1168 * | |______________________________________| | 1169 * | ________________________________________ | 1170 * | | Capture5: | | 1171 * | | Hdr: INSTANCE=CCS0, numregs=e | | 1172 * | | ____________________________________ | | 1173 * | | | Reglist | | | 1174 * | | | - reg1, reg2, ... rege | | | 1175 * | | |__________________________________| | | 1176 * | |______________________________________| | 1177 * |__________________________________________| 1178 */ 1179 is_partial = FIELD_GET(CAP_GRP_HDR_CAPTURE_TYPE, ghdr.info); 1180 numlists = FIELD_GET(CAP_GRP_HDR_NUM_CAPTURES, ghdr.info); 1181 1182 while (numlists--) { 1183 if (guc_capture_log_get_data_hdr(guc, buf, &hdr)) { 1184 ret = -EIO; 1185 break; 1186 } 1187 1188 datatype = FIELD_GET(CAP_HDR_CAPTURE_TYPE, hdr.info); 1189 if (datatype > GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE) { 1190 /* unknown capture type - skip over to next capture set */ 1191 numregs = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios); 1192 while (numregs--) { 1193 if (guc_capture_log_get_register(guc, buf, &tmp)) { 1194 ret = -EIO; 1195 break; 1196 } 1197 } 1198 continue; 1199 } else if (node) { 1200 /* 1201 * Based on the current capture type and what we have so far, 1202 * decide if we should add the current node into the internal 1203 * linked list for match-up when i915_gpu_coredump calls later 1204 * (and alloc a blank node for the next set of reglists) 1205 * or continue with the same node or clone the current node 1206 * but only retain the global or class registers (such as the 1207 * case of dependent engine resets). 1208 */ 1209 if (datatype == GUC_CAPTURE_LIST_TYPE_GLOBAL) { 1210 guc_capture_add_node_to_outlist(guc->capture, node); 1211 node = NULL; 1212 } else if (datatype == GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS && 1213 node->reginfo[GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS].num_regs) { 1214 /* Add to list, clone node and duplicate global list */ 1215 guc_capture_add_node_to_outlist(guc->capture, node); 1216 node = guc_capture_clone_node(guc, node, 1217 GCAP_PARSED_REGLIST_INDEX_GLOBAL); 1218 } else if (datatype == GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE && 1219 node->reginfo[GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE].num_regs) { 1220 /* Add to list, clone node and duplicate global + class lists */ 1221 guc_capture_add_node_to_outlist(guc->capture, node); 1222 node = guc_capture_clone_node(guc, node, 1223 (GCAP_PARSED_REGLIST_INDEX_GLOBAL | 1224 GCAP_PARSED_REGLIST_INDEX_ENGCLASS)); 1225 } 1226 } 1227 1228 if (!node) { 1229 node = guc_capture_get_prealloc_node(guc); 1230 if (!node) { 1231 ret = -ENOMEM; 1232 break; 1233 } 1234 if (datatype != GUC_CAPTURE_LIST_TYPE_GLOBAL) 1235 guc_dbg(guc, "Register capture missing global dump: %08x!\n", 1236 datatype); 1237 } 1238 node->is_partial = is_partial; 1239 node->reginfo[datatype].vfid = FIELD_GET(CAP_HDR_CAPTURE_VFID, hdr.owner); 1240 switch (datatype) { 1241 case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE: 1242 node->eng_class = FIELD_GET(CAP_HDR_ENGINE_CLASS, hdr.info); 1243 node->eng_inst = FIELD_GET(CAP_HDR_ENGINE_INSTANCE, hdr.info); 1244 node->lrca = hdr.lrca; 1245 node->guc_id = hdr.guc_id; 1246 break; 1247 case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS: 1248 node->eng_class = FIELD_GET(CAP_HDR_ENGINE_CLASS, hdr.info); 1249 break; 1250 default: 1251 break; 1252 } 1253 1254 numregs = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios); 1255 if (numregs > guc->capture->max_mmio_per_node) { 1256 guc_dbg(guc, "Register capture list extraction clipped by prealloc!\n"); 1257 numregs = guc->capture->max_mmio_per_node; 1258 } 1259 node->reginfo[datatype].num_regs = numregs; 1260 regs = node->reginfo[datatype].regs; 1261 i = 0; 1262 while (numregs--) { 1263 if (guc_capture_log_get_register(guc, buf, ®s[i++])) { 1264 ret = -EIO; 1265 break; 1266 } 1267 } 1268 } 1269 1270 bailout: 1271 if (node) { 1272 /* If we have data, add to linked list for match-up when i915_gpu_coredump calls */ 1273 for (i = GUC_CAPTURE_LIST_TYPE_GLOBAL; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) { 1274 if (node->reginfo[i].regs) { 1275 guc_capture_add_node_to_outlist(guc->capture, node); 1276 node = NULL; 1277 break; 1278 } 1279 } 1280 if (node) /* else return it back to cache list */ 1281 guc_capture_add_node_to_cachelist(guc->capture, node); 1282 } 1283 return ret; 1284 } 1285 1286 static int __guc_capture_flushlog_complete(struct intel_guc *guc) 1287 { 1288 u32 action[] = { 1289 INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE, 1290 GUC_CAPTURE_LOG_BUFFER 1291 }; 1292 1293 return intel_guc_send_nb(guc, action, ARRAY_SIZE(action), 0); 1294 1295 } 1296 1297 static void __guc_capture_process_output(struct intel_guc *guc) 1298 { 1299 unsigned int buffer_size, read_offset, write_offset, full_count; 1300 struct intel_uc *uc = container_of(guc, typeof(*uc), guc); 1301 struct guc_log_buffer_state log_buf_state_local; 1302 struct guc_log_buffer_state *log_buf_state; 1303 struct __guc_capture_bufstate buf; 1304 void *src_data = NULL; 1305 bool new_overflow; 1306 int ret; 1307 1308 log_buf_state = guc->log.buf_addr + 1309 (sizeof(struct guc_log_buffer_state) * GUC_CAPTURE_LOG_BUFFER); 1310 src_data = guc->log.buf_addr + 1311 intel_guc_get_log_buffer_offset(&guc->log, GUC_CAPTURE_LOG_BUFFER); 1312 1313 /* 1314 * Make a copy of the state structure, inside GuC log buffer 1315 * (which is uncached mapped), on the stack to avoid reading 1316 * from it multiple times. 1317 */ 1318 memcpy(&log_buf_state_local, log_buf_state, sizeof(struct guc_log_buffer_state)); 1319 buffer_size = intel_guc_get_log_buffer_size(&guc->log, GUC_CAPTURE_LOG_BUFFER); 1320 read_offset = log_buf_state_local.read_ptr; 1321 write_offset = log_buf_state_local.sampled_write_ptr; 1322 full_count = log_buf_state_local.buffer_full_cnt; 1323 1324 /* Bookkeeping stuff */ 1325 guc->log.stats[GUC_CAPTURE_LOG_BUFFER].flush += log_buf_state_local.flush_to_file; 1326 new_overflow = intel_guc_check_log_buf_overflow(&guc->log, GUC_CAPTURE_LOG_BUFFER, 1327 full_count); 1328 1329 /* Now copy the actual logs. */ 1330 if (unlikely(new_overflow)) { 1331 /* copy the whole buffer in case of overflow */ 1332 read_offset = 0; 1333 write_offset = buffer_size; 1334 } else if (unlikely((read_offset > buffer_size) || 1335 (write_offset > buffer_size))) { 1336 guc_err(guc, "Register capture buffer in invalid state: read = 0x%X, size = 0x%X!\n", 1337 read_offset, buffer_size); 1338 /* copy whole buffer as offsets are unreliable */ 1339 read_offset = 0; 1340 write_offset = buffer_size; 1341 } 1342 1343 buf.size = buffer_size; 1344 buf.rd = read_offset; 1345 buf.wr = write_offset; 1346 buf.data = src_data; 1347 1348 if (!uc->reset_in_progress) { 1349 do { 1350 ret = guc_capture_extract_reglists(guc, &buf); 1351 } while (ret >= 0); 1352 } 1353 1354 /* Update the state of log buffer err-cap state */ 1355 log_buf_state->read_ptr = write_offset; 1356 log_buf_state->flush_to_file = 0; 1357 __guc_capture_flushlog_complete(guc); 1358 } 1359 1360 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 1361 1362 static const char * 1363 guc_capture_reg_to_str(const struct intel_guc *guc, u32 owner, u32 type, 1364 u32 class, u32 id, u32 offset, u32 *is_ext) 1365 { 1366 const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists; 1367 struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists; 1368 const struct __guc_mmio_reg_descr_group *match; 1369 struct __guc_mmio_reg_descr_group *matchext; 1370 int j; 1371 1372 *is_ext = 0; 1373 if (!reglists) 1374 return NULL; 1375 1376 match = guc_capture_get_one_list(reglists, owner, type, id); 1377 if (!match) 1378 return NULL; 1379 1380 for (j = 0; j < match->num_regs; ++j) { 1381 if (offset == match->list[j].reg.reg) 1382 return match->list[j].regname; 1383 } 1384 if (extlists) { 1385 matchext = guc_capture_get_one_ext_list(extlists, owner, type, id); 1386 if (!matchext) 1387 return NULL; 1388 for (j = 0; j < matchext->num_regs; ++j) { 1389 if (offset == matchext->extlist[j].reg.reg) { 1390 *is_ext = 1; 1391 return matchext->extlist[j].regname; 1392 } 1393 } 1394 } 1395 1396 return NULL; 1397 } 1398 1399 #define GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng) \ 1400 do { \ 1401 i915_error_printf(ebuf, " i915-Eng-Name: %s command stream\n", \ 1402 (eng)->name); \ 1403 i915_error_printf(ebuf, " i915-Eng-Inst-Class: 0x%02x\n", (eng)->class); \ 1404 i915_error_printf(ebuf, " i915-Eng-Inst-Id: 0x%02x\n", (eng)->instance); \ 1405 i915_error_printf(ebuf, " i915-Eng-LogicalMask: 0x%08x\n", \ 1406 (eng)->logical_mask); \ 1407 } while (0) 1408 1409 #define GCAP_PRINT_GUC_INST_INFO(ebuf, node) \ 1410 do { \ 1411 i915_error_printf(ebuf, " GuC-Engine-Inst-Id: 0x%08x\n", \ 1412 (node)->eng_inst); \ 1413 i915_error_printf(ebuf, " GuC-Context-Id: 0x%08x\n", (node)->guc_id); \ 1414 i915_error_printf(ebuf, " LRCA: 0x%08x\n", (node)->lrca); \ 1415 } while (0) 1416 1417 int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf, 1418 const struct intel_engine_coredump *ee) 1419 { 1420 const char *grptype[GUC_STATE_CAPTURE_GROUP_TYPE_MAX] = { 1421 "full-capture", 1422 "partial-capture" 1423 }; 1424 const char *datatype[GUC_CAPTURE_LIST_TYPE_MAX] = { 1425 "Global", 1426 "Engine-Class", 1427 "Engine-Instance" 1428 }; 1429 struct intel_guc_state_capture *cap; 1430 struct __guc_capture_parsed_output *node; 1431 struct intel_engine_cs *eng; 1432 struct guc_mmio_reg *regs; 1433 struct intel_guc *guc; 1434 const char *str; 1435 int numregs, i, j; 1436 u32 is_ext; 1437 1438 if (!ebuf || !ee) 1439 return -EINVAL; 1440 cap = ee->guc_capture; 1441 if (!cap || !ee->engine) 1442 return -ENODEV; 1443 1444 guc = &ee->engine->gt->uc.guc; 1445 1446 i915_error_printf(ebuf, "global --- GuC Error Capture on %s command stream:\n", 1447 ee->engine->name); 1448 1449 node = ee->guc_capture_node; 1450 if (!node) { 1451 i915_error_printf(ebuf, " No matching ee-node\n"); 1452 return 0; 1453 } 1454 1455 i915_error_printf(ebuf, "Coverage: %s\n", grptype[node->is_partial]); 1456 1457 for (i = GUC_CAPTURE_LIST_TYPE_GLOBAL; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) { 1458 i915_error_printf(ebuf, " RegListType: %s\n", 1459 datatype[i % GUC_CAPTURE_LIST_TYPE_MAX]); 1460 i915_error_printf(ebuf, " Owner-Id: %d\n", node->reginfo[i].vfid); 1461 1462 switch (i) { 1463 case GUC_CAPTURE_LIST_TYPE_GLOBAL: 1464 default: 1465 break; 1466 case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS: 1467 i915_error_printf(ebuf, " GuC-Eng-Class: %d\n", node->eng_class); 1468 i915_error_printf(ebuf, " i915-Eng-Class: %d\n", 1469 guc_class_to_engine_class(node->eng_class)); 1470 break; 1471 case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE: 1472 eng = intel_guc_lookup_engine(guc, node->eng_class, node->eng_inst); 1473 if (eng) 1474 GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng); 1475 else 1476 i915_error_printf(ebuf, " i915-Eng-Lookup Fail!\n"); 1477 GCAP_PRINT_GUC_INST_INFO(ebuf, node); 1478 break; 1479 } 1480 1481 numregs = node->reginfo[i].num_regs; 1482 i915_error_printf(ebuf, " NumRegs: %d\n", numregs); 1483 j = 0; 1484 while (numregs--) { 1485 regs = node->reginfo[i].regs; 1486 str = guc_capture_reg_to_str(guc, GUC_CAPTURE_LIST_INDEX_PF, i, 1487 node->eng_class, 0, regs[j].offset, &is_ext); 1488 if (!str) 1489 i915_error_printf(ebuf, " REG-0x%08x", regs[j].offset); 1490 else 1491 i915_error_printf(ebuf, " %s", str); 1492 if (is_ext) 1493 i915_error_printf(ebuf, "[%ld][%ld]", 1494 FIELD_GET(GUC_REGSET_STEERING_GROUP, regs[j].flags), 1495 FIELD_GET(GUC_REGSET_STEERING_INSTANCE, regs[j].flags)); 1496 i915_error_printf(ebuf, ": 0x%08x\n", regs[j].value); 1497 ++j; 1498 } 1499 } 1500 return 0; 1501 } 1502 1503 #endif //CONFIG_DRM_I915_CAPTURE_ERROR 1504 1505 static void guc_capture_find_ecode(struct intel_engine_coredump *ee) 1506 { 1507 struct gcap_reg_list_info *reginfo; 1508 struct guc_mmio_reg *regs; 1509 i915_reg_t reg_ipehr = RING_IPEHR(0); 1510 i915_reg_t reg_instdone = RING_INSTDONE(0); 1511 int i; 1512 1513 if (!ee->guc_capture_node) 1514 return; 1515 1516 reginfo = ee->guc_capture_node->reginfo + GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE; 1517 regs = reginfo->regs; 1518 for (i = 0; i < reginfo->num_regs; i++) { 1519 if (regs[i].offset == reg_ipehr.reg) 1520 ee->ipehr = regs[i].value; 1521 else if (regs[i].offset == reg_instdone.reg) 1522 ee->instdone.instdone = regs[i].value; 1523 } 1524 } 1525 1526 void intel_guc_capture_free_node(struct intel_engine_coredump *ee) 1527 { 1528 if (!ee || !ee->guc_capture_node) 1529 return; 1530 1531 guc_capture_add_node_to_cachelist(ee->guc_capture, ee->guc_capture_node); 1532 ee->guc_capture = NULL; 1533 ee->guc_capture_node = NULL; 1534 } 1535 1536 bool intel_guc_capture_is_matching_engine(struct intel_gt *gt, 1537 struct intel_context *ce, 1538 struct intel_engine_cs *engine) 1539 { 1540 struct __guc_capture_parsed_output *n; 1541 struct intel_guc *guc; 1542 1543 if (!gt || !ce || !engine) 1544 return false; 1545 1546 guc = >->uc.guc; 1547 if (!guc->capture) 1548 return false; 1549 1550 /* 1551 * Look for a matching GuC reported error capture node from 1552 * the internal output link-list based on lrca, guc-id and engine 1553 * identification. 1554 */ 1555 list_for_each_entry(n, &guc->capture->outlist, link) { 1556 if (n->eng_inst == GUC_ID_TO_ENGINE_INSTANCE(engine->guc_id) && 1557 n->eng_class == GUC_ID_TO_ENGINE_CLASS(engine->guc_id) && 1558 n->guc_id == ce->guc_id.id && 1559 (n->lrca & CTX_GTT_ADDRESS_MASK) == (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) 1560 return true; 1561 } 1562 1563 return false; 1564 } 1565 1566 void intel_guc_capture_get_matching_node(struct intel_gt *gt, 1567 struct intel_engine_coredump *ee, 1568 struct intel_context *ce) 1569 { 1570 struct __guc_capture_parsed_output *n, *ntmp; 1571 struct intel_guc *guc; 1572 1573 if (!gt || !ee || !ce) 1574 return; 1575 1576 guc = >->uc.guc; 1577 if (!guc->capture) 1578 return; 1579 1580 GEM_BUG_ON(ee->guc_capture_node); 1581 1582 /* 1583 * Look for a matching GuC reported error capture node from 1584 * the internal output link-list based on lrca, guc-id and engine 1585 * identification. 1586 */ 1587 list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) { 1588 if (n->eng_inst == GUC_ID_TO_ENGINE_INSTANCE(ee->engine->guc_id) && 1589 n->eng_class == GUC_ID_TO_ENGINE_CLASS(ee->engine->guc_id) && 1590 n->guc_id == ce->guc_id.id && 1591 (n->lrca & CTX_GTT_ADDRESS_MASK) == (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) { 1592 list_del(&n->link); 1593 ee->guc_capture_node = n; 1594 ee->guc_capture = guc->capture; 1595 guc_capture_find_ecode(ee); 1596 return; 1597 } 1598 } 1599 1600 guc_warn(guc, "No register capture node found for 0x%04X / 0x%08X\n", 1601 ce->guc_id.id, ce->lrc.lrca); 1602 } 1603 1604 void intel_guc_capture_process(struct intel_guc *guc) 1605 { 1606 if (guc->capture) 1607 __guc_capture_process_output(guc); 1608 } 1609 1610 static void 1611 guc_capture_free_ads_cache(struct intel_guc_state_capture *gc) 1612 { 1613 int i, j, k; 1614 struct __guc_capture_ads_cache *cache; 1615 1616 for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; ++i) { 1617 for (j = 0; j < GUC_CAPTURE_LIST_TYPE_MAX; ++j) { 1618 for (k = 0; k < GUC_MAX_ENGINE_CLASSES; ++k) { 1619 cache = &gc->ads_cache[i][j][k]; 1620 if (cache->is_valid) 1621 kfree(cache->ptr); 1622 } 1623 } 1624 } 1625 kfree(gc->ads_null_cache); 1626 } 1627 1628 void intel_guc_capture_destroy(struct intel_guc *guc) 1629 { 1630 if (!guc->capture) 1631 return; 1632 1633 guc_capture_free_ads_cache(guc->capture); 1634 1635 guc_capture_delete_prealloc_nodes(guc); 1636 1637 guc_capture_free_extlists(guc->capture->extlists); 1638 kfree(guc->capture->extlists); 1639 1640 kfree(guc->capture); 1641 guc->capture = NULL; 1642 } 1643 1644 int intel_guc_capture_init(struct intel_guc *guc) 1645 { 1646 guc->capture = kzalloc(sizeof(*guc->capture), GFP_KERNEL); 1647 if (!guc->capture) 1648 return -ENOMEM; 1649 1650 guc->capture->reglists = guc_capture_get_device_reglist(guc); 1651 1652 INIT_LIST_HEAD(&guc->capture->outlist); 1653 INIT_LIST_HEAD(&guc->capture->cachelist); 1654 1655 check_guc_capture_size(guc); 1656 1657 return 0; 1658 } 1659