// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include <linux/debugfs.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "intel_guc_log.h"

static void guc_log_capture_logs(struct intel_guc_log *log);

/**
 * DOC: GuC firmware log
 *
 * Firmware log is enabled by setting i915.guc_log_level to a positive level.
 * Log data is printed out via reading debugfs i915_guc_log_dump. Reading from
 * i915_guc_load_status will print out firmware loading status and scratch
 * register values.
 */
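/*
 * Example usage (illustrative only; the exact debugfs paths and whether the
 * module parameter can be changed at runtime vary by kernel version and
 * configuration): booting with i915.guc_log_level=2 enables verbose firmware
 * logging, after which the log buffer contents can be read with e.g.
 *
 *	# cat /sys/kernel/debug/dri/0/i915_guc_log_dump
 */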
static int guc_action_flush_log_complete(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static int guc_action_flush_log(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
		0
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static int guc_action_control_log(struct intel_guc *guc, bool enable,
				  bool default_logging, u32 verbosity)
{
	u32 action[] = {
		INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
		(enable ? GUC_LOG_CONTROL_LOGGING_ENABLED : 0) |
		(verbosity << GUC_LOG_CONTROL_VERBOSITY_SHIFT) |
		(default_logging ? GUC_LOG_CONTROL_DEFAULT_LOGGING : 0)
	};

	GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX);

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static void guc_log_enable_flush_events(struct intel_guc_log *log)
{
	intel_guc_enable_msg(log_to_guc(log),
			     INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
			     INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
}

static void guc_log_disable_flush_events(struct intel_guc_log *log)
{
	intel_guc_disable_msg(log_to_guc(log),
			      INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
			      INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
}

/*
 * Sub buffer switch callback. Called whenever relay has to switch to a new
 * sub buffer; relay stays on the same sub buffer if 0 is returned.
 */
static int subbuf_start_callback(struct rchan_buf *buf,
				 void *subbuf,
				 void *prev_subbuf,
				 size_t prev_padding)
{
	/*
	 * Use no-overwrite mode by default, where relay will stop accepting
	 * new data if there are no empty sub buffers left.
	 * There is no strict synchronization enforced by relay between Consumer
	 * and Producer. In overwrite mode, there is a possibility of getting
	 * inconsistent/garbled data, as the producer could be writing to the
	 * same sub buffer from which the Consumer is reading. This can't be
	 * avoided unless the Consumer is fast enough and can always run in
	 * tandem with the Producer.
	 */
	if (relay_buf_full(buf))
		return 0;

	return 1;
}

/*
 * file_create() callback. Creates relay file in debugfs.
 */
static struct dentry *create_buf_file_callback(const char *filename,
					       struct dentry *parent,
					       umode_t mode,
					       struct rchan_buf *buf,
					       int *is_global)
{
	struct dentry *buf_file;

	/*
	 * This is to enable the use of a single buffer for the relay channel
	 * and correspondingly have a single file exposed to User, through
	 * which it can collect the logs in order without any post-processing.
	 * Need to set 'is_global' even if parent is NULL for early logging.
	 */
	*is_global = 1;

	if (!parent)
		return NULL;

	buf_file = debugfs_create_file(filename, mode,
				       parent, buf, &relay_file_operations);
	if (IS_ERR(buf_file))
		return NULL;

	return buf_file;
}

/*
 * file_remove() default callback. Removes relay file in debugfs.
 */
static int remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

/* relay channel callbacks */
static const struct rchan_callbacks relay_callbacks = {
	.subbuf_start = subbuf_start_callback,
	.create_buf_file = create_buf_file_callback,
	.remove_buf_file = remove_buf_file_callback,
};

static void guc_move_to_next_buf(struct intel_guc_log *log)
{
	/*
	 * Make sure the updates made in the sub buffer are visible when the
	 * Consumer sees the following update to the offset inside the sub
	 * buffer.
	 */
	smp_wmb();

	/* All data has been written, so now move the offset of sub buffer. */
	relay_reserve(log->relay.channel, log->vma->obj->base.size);

	/* Switch to the next sub buffer */
	relay_flush(log->relay.channel);
}

static void *guc_get_write_buffer(struct intel_guc_log *log)
{
	/*
	 * Just get the base address of a new sub buffer and copy data into it
	 * ourselves. NULL will be returned in no-overwrite mode, if all sub
	 * buffers are full. We could have used relay_write() to indirectly
	 * copy the data, but that would have been a bit convoluted, as we need
	 * to write to only certain locations inside a sub buffer, which cannot
	 * be done without using relay_reserve() along with relay_write(). So
	 * it's better to use relay_reserve() alone.
	 */
	return relay_reserve(log->relay.channel, 0);
}

static bool guc_check_log_buf_overflow(struct intel_guc_log *log,
				       enum guc_log_buffer_type type,
				       unsigned int full_cnt)
{
	unsigned int prev_full_cnt = log->stats[type].sampled_overflow;
	bool overflow = false;

	if (full_cnt != prev_full_cnt) {
		overflow = true;

		log->stats[type].overflow = full_cnt;
		log->stats[type].sampled_overflow += full_cnt - prev_full_cnt;

		if (full_cnt < prev_full_cnt) {
			/* buffer_full_cnt is a 4 bit counter */
			log->stats[type].sampled_overflow += 16;
		}

		dev_notice_ratelimited(guc_to_gt(log_to_guc(log))->i915->drm.dev,
				       "GuC log buffer overflow\n");
	}

	return overflow;
}
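/*
 * Worked example for the wraparound correction above: buffer_full_cnt is a
 * 4-bit counter, so with prev_full_cnt == 14 and full_cnt == 3 the unsigned
 * subtraction full_cnt - prev_full_cnt wraps around; adding 16 afterwards
 * makes the net increment (3 - 14 + 16) == 5, the true number of overflows,
 * assuming the firmware counter wrapped at most once between samples.
 */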
static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
{
	switch (type) {
	case GUC_DEBUG_LOG_BUFFER:
		return DEBUG_BUFFER_SIZE;
	case GUC_CRASH_DUMP_LOG_BUFFER:
		return CRASH_BUFFER_SIZE;
	default:
		MISSING_CASE(type);
	}

	return 0;
}

static void guc_read_update_log_buffer(struct intel_guc_log *log)
{
	unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
	struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
	struct guc_log_buffer_state log_buf_state_local;
	enum guc_log_buffer_type type;
	void *src_data, *dst_data;
	bool new_overflow;

	mutex_lock(&log->relay.lock);

	if (WARN_ON(!intel_guc_log_relay_created(log)))
		goto out_unlock;

	/* Get the pointer to shared GuC log buffer */
	log_buf_state = src_data = log->relay.buf_addr;

	/* Get the pointer to local buffer to store the logs */
	log_buf_snapshot_state = dst_data = guc_get_write_buffer(log);

	if (unlikely(!log_buf_snapshot_state)) {
		/*
		 * Use a rate-limited message to avoid a deluge; logs might be
		 * getting consumed by the User at a slow rate.
		 */
		DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
		log->relay.full_count++;

		goto out_unlock;
	}

	/* Actual logs are present from the 2nd page */
	src_data += PAGE_SIZE;
	dst_data += PAGE_SIZE;

	for (type = GUC_DEBUG_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		/*
		 * Make a copy of the state structure, which lives inside the
		 * GuC log buffer (an uncached mapping), on the stack to avoid
		 * reading from it multiple times.
		 */
		memcpy(&log_buf_state_local, log_buf_state,
		       sizeof(struct guc_log_buffer_state));
		buffer_size = guc_get_log_buffer_size(type);
		read_offset = log_buf_state_local.read_ptr;
		write_offset = log_buf_state_local.sampled_write_ptr;
		full_cnt = log_buf_state_local.buffer_full_cnt;

		/* Bookkeeping stuff */
		log->stats[type].flush += log_buf_state_local.flush_to_file;
		new_overflow = guc_check_log_buf_overflow(log, type, full_cnt);

		/* Update the state of shared log buffer */
		log_buf_state->read_ptr = write_offset;
		log_buf_state->flush_to_file = 0;
		log_buf_state++;

		/* First copy the state structure in snapshot buffer */
		memcpy(log_buf_snapshot_state, &log_buf_state_local,
		       sizeof(struct guc_log_buffer_state));

		/*
		 * The write pointer could have been updated by the GuC
		 * firmware after sending the flush interrupt to the Host; for
		 * consistency, set the write pointer in the snapshot buffer to
		 * the same value as sampled_write_ptr.
		 */
		log_buf_snapshot_state->write_ptr = write_offset;
		log_buf_snapshot_state++;

		/* Now copy the actual logs. */
		if (unlikely(new_overflow)) {
			/* copy the whole buffer in case of overflow */
			read_offset = 0;
			write_offset = buffer_size;
		} else if (unlikely((read_offset > buffer_size) ||
				    (write_offset > buffer_size))) {
			DRM_ERROR("invalid log buffer state\n");
			/* copy whole buffer as offsets are unreliable */
			read_offset = 0;
			write_offset = buffer_size;
		}

		/* Just copy the newly written data */
		if (read_offset > write_offset) {
			i915_memcpy_from_wc(dst_data, src_data, write_offset);
			bytes_to_copy = buffer_size - read_offset;
		} else {
			bytes_to_copy = write_offset - read_offset;
		}
		i915_memcpy_from_wc(dst_data + read_offset,
				    src_data + read_offset, bytes_to_copy);

		src_data += buffer_size;
		dst_data += buffer_size;
	}

	guc_move_to_next_buf(log);

out_unlock:
	mutex_unlock(&log->relay.lock);
}
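/*
 * Illustration of the two copy cases above, for a log buffer of size S:
 *
 *	read_offset < write_offset: only [read, write) holds new data and is
 *	copied in one go by the second memcpy.
 *
 *	read_offset > write_offset: the data wrapped around; the first memcpy
 *	copies the head [0, write), and bytes_to_copy = S - read makes the
 *	second memcpy cover the tail [read, S).
 *
 * The overflow and invalid-state paths force read = 0, write = S, so the
 * whole buffer is captured.
 */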
static void capture_logs_work(struct work_struct *work)
{
	struct intel_guc_log *log =
		container_of(work, struct intel_guc_log, relay.flush_work);

	guc_log_capture_logs(log);
}

static int guc_log_map(struct intel_guc_log *log)
{
	void *vaddr;

	lockdep_assert_held(&log->relay.lock);

	if (!log->vma)
		return -ENODEV;

	/*
	 * Create a WC (Uncached for read) vmalloc mapping of log
	 * buffer pages, so that we can directly get the data
	 * (up-to-date) from memory.
	 */
	vaddr = i915_gem_object_pin_map_unlocked(log->vma->obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	log->relay.buf_addr = vaddr;

	return 0;
}

static void guc_log_unmap(struct intel_guc_log *log)
{
	lockdep_assert_held(&log->relay.lock);

	i915_gem_object_unpin_map(log->vma->obj);
	log->relay.buf_addr = NULL;
}

void intel_guc_log_init_early(struct intel_guc_log *log)
{
	mutex_init(&log->relay.lock);
	INIT_WORK(&log->relay.flush_work, capture_logs_work);
	log->relay.started = false;
}

static int guc_log_relay_create(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	struct rchan *guc_log_relay_chan;
	size_t n_subbufs, subbuf_size;
	int ret;

	lockdep_assert_held(&log->relay.lock);
	GEM_BUG_ON(!log->vma);

	/* Keep the size of sub buffers the same as the shared log buffer */
	subbuf_size = log->vma->size;

	/*
	 * Store up to 8 snapshots, which is large enough to buffer sufficient
	 * boot time logs and provides enough leeway to the User, in terms of
	 * latency, for consuming the logs from relay. Also doesn't take
	 * up too much memory.
	 */
	n_subbufs = 8;

	guc_log_relay_chan = relay_open("guc_log",
					dev_priv->drm.primary->debugfs_root,
					subbuf_size, n_subbufs,
					&relay_callbacks, dev_priv);
	if (!guc_log_relay_chan) {
		DRM_ERROR("Couldn't create relay channel for GuC logging\n");

		ret = -ENOMEM;
		return ret;
	}

	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
	log->relay.channel = guc_log_relay_chan;

	return 0;
}

static void guc_log_relay_destroy(struct intel_guc_log *log)
{
	lockdep_assert_held(&log->relay.lock);

	relay_close(log->relay.channel);
	log->relay.channel = NULL;
}

static void guc_log_capture_logs(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	intel_wakeref_t wakeref;

	guc_read_update_log_buffer(log);

	/*
	 * Generally the device is expected to be active only at this
	 * time, so the get/put should be really quick.
	 */
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		guc_action_flush_log_complete(guc);
}

static u32 __get_default_log_level(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	/* A negative value means "use platform/config default" */
	if (i915->params.guc_log_level < 0) {
		return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
			IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
			GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_NON_VERBOSE;
	}

	if (i915->params.guc_log_level > GUC_LOG_LEVEL_MAX) {
		DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
			 "guc_log_level", i915->params.guc_log_level,
			 "verbosity too high");
		return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
			IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
			GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED;
	}

	GEM_BUG_ON(i915->params.guc_log_level < GUC_LOG_LEVEL_DISABLED);
	GEM_BUG_ON(i915->params.guc_log_level > GUC_LOG_LEVEL_MAX);
	return i915->params.guc_log_level;
}
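/*
 * For reference (assuming the GUC_LOG_LEVEL_* encoding from intel_guc_log.h
 * that this file relies on): 0 is disabled, 1 is non-verbose (crash dumps
 * only), and 2..GUC_LOG_LEVEL_MAX are verbose levels whose firmware verbosity
 * is GUC_LOG_LEVEL_TO_VERBOSITY(level), i.e. level - 2. So, for example,
 * i915.guc_log_level=3 enables verbose logging at firmware verbosity 1.
 */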
int intel_guc_log_create(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct i915_vma *vma;
	u32 guc_log_size;
	int ret;

	GEM_BUG_ON(log->vma);

	/*
	 *  GuC Log buffer Layout
	 *
	 *  +===============================+ 00B
	 *  |    Crash dump state header    |
	 *  +-------------------------------+ 32B
	 *  |      Debug state header       |
	 *  +-------------------------------+ 64B
	 *  |                               |
	 *  +===============================+ PAGE_SIZE (4KB)
	 *  |        Crash Dump logs        |
	 *  +===============================+ + CRASH_SIZE
	 *  |          Debug logs           |
	 *  +===============================+ + DEBUG_SIZE
	 */
	guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DEBUG_BUFFER_SIZE;

	vma = intel_guc_allocate_vma(guc, guc_log_size);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	log->vma = vma;

	log->level = __get_default_log_level(log);
	DRM_DEBUG_DRIVER("guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n",
			 log->level, enableddisabled(log->level),
			 yesno(GUC_LOG_LEVEL_IS_VERBOSE(log->level)),
			 GUC_LOG_LEVEL_TO_VERBOSITY(log->level));

	return 0;

err:
	DRM_ERROR("Failed to allocate GuC log buffer. %d\n", ret);
	return ret;
}

void intel_guc_log_destroy(struct intel_guc_log *log)
{
	i915_vma_unpin_and_release(&log->vma, 0);
}

int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	intel_wakeref_t wakeref;
	int ret = 0;

	BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
	GEM_BUG_ON(!log->vma);

	/*
	 * GuC recognizes log levels starting from 0 up to max; we're using 0
	 * as an indication that logging should be disabled.
	 */
	if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
		return -EINVAL;

	mutex_lock(&dev_priv->drm.struct_mutex);

	if (log->level == level)
		goto out_unlock;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		ret = guc_action_control_log(guc,
					     GUC_LOG_LEVEL_IS_VERBOSE(level),
					     GUC_LOG_LEVEL_IS_ENABLED(level),
					     GUC_LOG_LEVEL_TO_VERBOSITY(level));
	if (ret) {
		DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
		goto out_unlock;
	}

	log->level = level;

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return ret;
}

bool intel_guc_log_relay_created(const struct intel_guc_log *log)
{
	return log->relay.buf_addr;
}
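/*
 * For illustration, the expected relay lifecycle as driven by callers of the
 * functions below (in practice the debugfs interface; error handling
 * omitted):
 *
 *	intel_guc_log_relay_open(log);    - create channel, map log buffer
 *	intel_guc_log_relay_start(log);   - enable flush events, first capture
 *	... the User reads the "guc_log" relay file from debugfs ...
 *	intel_guc_log_relay_flush(log);   - optional forced flush and capture
 *	intel_guc_log_relay_close(log);   - stop, unmap and destroy channel
 */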
int intel_guc_log_relay_open(struct intel_guc_log *log)
{
	int ret;

	if (!log->vma)
		return -ENODEV;

	mutex_lock(&log->relay.lock);

	if (intel_guc_log_relay_created(log)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	/*
	 * We require SSE 4.1 for fast reads from the GuC log buffer and
	 * it should be present on the chipsets supporting GuC based
	 * submissions.
	 */
	if (!i915_has_memcpy_from_wc()) {
		ret = -ENXIO;
		goto out_unlock;
	}

	ret = guc_log_relay_create(log);
	if (ret)
		goto out_unlock;

	ret = guc_log_map(log);
	if (ret)
		goto out_relay;

	mutex_unlock(&log->relay.lock);

	return 0;

out_relay:
	guc_log_relay_destroy(log);
out_unlock:
	mutex_unlock(&log->relay.lock);

	return ret;
}

int intel_guc_log_relay_start(struct intel_guc_log *log)
{
	if (log->relay.started)
		return -EEXIST;

	guc_log_enable_flush_events(log);

	/*
	 * When GuC is logging without us relaying to userspace, we're ignoring
	 * the flush notification. This means that we need to unconditionally
	 * flush on relay enabling, since GuC only notifies us once.
	 */
	queue_work(system_highpri_wq, &log->relay.flush_work);

	log->relay.started = true;

	return 0;
}

void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	intel_wakeref_t wakeref;

	if (!log->relay.started)
		return;

	/*
	 * Before initiating the forceful flush, wait for any pending/ongoing
	 * flush to complete; otherwise the forceful flush may not actually
	 * happen.
	 */
	flush_work(&log->relay.flush_work);

	with_intel_runtime_pm(guc_to_gt(guc)->uncore->rpm, wakeref)
		guc_action_flush_log(guc);

	/* GuC would have updated the log buffer by now, so capture it */
	guc_log_capture_logs(log);
}

/*
 * Stops the relay log. Called from intel_guc_log_relay_close(), so no
 * possibility of race with start/flush since relay_write cannot race
 * relay_close.
 */
static void guc_log_relay_stop(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	if (!log->relay.started)
		return;

	guc_log_disable_flush_events(log);
	intel_synchronize_irq(i915);

	flush_work(&log->relay.flush_work);

	log->relay.started = false;
}

void intel_guc_log_relay_close(struct intel_guc_log *log)
{
	guc_log_relay_stop(log);

	mutex_lock(&log->relay.lock);
	GEM_BUG_ON(!intel_guc_log_relay_created(log));
	guc_log_unmap(log);
	guc_log_relay_destroy(log);
	mutex_unlock(&log->relay.lock);
}

void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
{
	queue_work(system_highpri_wq, &log->relay.flush_work);
}
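/*
 * Note on the capture path wired up above: a log-buffer-flush notification
 * from the firmware lands in intel_guc_log_handle_flush_event(), which queues
 * relay.flush_work; capture_logs_work() then runs guc_log_capture_logs(),
 * snapshotting the shared buffer into relay and acknowledging the firmware
 * with the LOG_BUFFER_FILE_FLUSH_COMPLETE action.
 */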
static const char *
stringify_guc_log_type(enum guc_log_buffer_type type)
{
	switch (type) {
	case GUC_DEBUG_LOG_BUFFER:
		return "DEBUG";
	case GUC_CRASH_DUMP_LOG_BUFFER:
		return "CRASH";
	default:
		MISSING_CASE(type);
	}

	return "";
}

/**
 * intel_guc_log_info - dump information about GuC log relay
 * @log: the GuC log
 * @p: the &drm_printer
 *
 * Pretty printer for GuC log info
 */
void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p)
{
	enum guc_log_buffer_type type;

	if (!intel_guc_log_relay_created(log)) {
		drm_puts(p, "GuC log relay not created\n");
		return;
	}

	drm_puts(p, "GuC logging stats:\n");

	drm_printf(p, "\tRelay full count: %u\n", log->relay.full_count);

	for (type = GUC_DEBUG_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		drm_printf(p, "\t%s:\tflush count %10u, overflow count %10u\n",
			   stringify_guc_log_type(type),
			   log->stats[type].flush,
			   log->stats[type].sampled_overflow);
	}
}

/**
 * intel_guc_log_dump - dump the contents of the GuC log
 * @log: the GuC log
 * @p: the &drm_printer
 * @dump_load_err: dump the log saved on GuC load error
 *
 * Pretty printer for the GuC log
 */
int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
		       bool dump_load_err)
{
	struct intel_guc *guc = log_to_guc(log);
	struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
	struct drm_i915_gem_object *obj = NULL;
	u32 *map;
	int i = 0;

	if (!intel_guc_is_supported(guc))
		return -ENODEV;

	if (dump_load_err)
		obj = uc->load_err_log;
	else if (guc->log.vma)
		obj = guc->log.vma->obj;

	if (!obj)
		return 0;

	map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(map)) {
		DRM_DEBUG("Failed to pin object\n");
		drm_puts(p, "(log data inaccessible)\n");
		return PTR_ERR(map);
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(map + i), *(map + i + 1),
			   *(map + i + 2), *(map + i + 3));

	drm_puts(p, "\n");

	i915_gem_object_unpin_map(obj);

	return 0;
}