// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include <linux/debugfs.h>
#include <linux/string_helpers.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "intel_guc_capture.h"
#include "intel_guc_log.h"

static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log);

static u32 intel_guc_log_size(struct intel_guc_log *log)
{
	/*
	 * GuC Log buffer Layout:
	 *
	 * NB: Ordering must follow "enum guc_log_buffer_type".
	 *
	 *  +===============================+ 00B
	 *  |      Debug state header       |
	 *  +-------------------------------+ 32B
	 *  |    Crash dump state header    |
	 *  +-------------------------------+ 64B
	 *  |     Capture state header      |
	 *  +-------------------------------+ 96B
	 *  |                               |
	 *  +===============================+ PAGE_SIZE (4KB)
	 *  |          Debug logs           |
	 *  +===============================+ + DEBUG_SIZE
	 *  |        Crash Dump logs        |
	 *  +===============================+ + CRASH_SIZE
	 *  |         Capture logs          |
	 *  +===============================+ + CAPTURE_SIZE
	 */
	return PAGE_SIZE + CRASH_BUFFER_SIZE + DEBUG_BUFFER_SIZE + CAPTURE_BUFFER_SIZE;
}

/**
 * DOC: GuC firmware log
 *
 * Firmware log is enabled by setting i915.guc_log_level to a positive level.
 * Log data is printed out by reading the debugfs file i915_guc_log_dump.
 * Reading from i915_guc_load_status will print out the firmware loading
 * status and scratch register values.
 */

static int guc_action_flush_log_complete(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE,
		GUC_DEBUG_LOG_BUFFER
	};

	return intel_guc_send_nb(guc, action, ARRAY_SIZE(action), 0);
}

static int guc_action_flush_log(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
		0
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static int guc_action_control_log(struct intel_guc *guc, bool enable,
				  bool default_logging, u32 verbosity)
{
	u32 action[] = {
		INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
		(enable ? GUC_LOG_CONTROL_LOGGING_ENABLED : 0) |
		(verbosity << GUC_LOG_CONTROL_VERBOSITY_SHIFT) |
		(default_logging ? GUC_LOG_CONTROL_DEFAULT_LOGGING : 0)
	};

	GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX);

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/*
 * Sub buffer switch callback. Called whenever relay has to switch to a new
 * sub buffer; relay stays on the same sub buffer if 0 is returned.
 */
static int subbuf_start_callback(struct rchan_buf *buf,
				 void *subbuf,
				 void *prev_subbuf,
				 size_t prev_padding)
{
	/*
	 * Use no-overwrite mode by default, where relay will stop accepting
	 * new data if there are no empty sub buffers left.
	 * There is no strict synchronization enforced by relay between the
	 * consumer and the producer. In overwrite mode there is a possibility
	 * of getting inconsistent/garbled data: the producer could be writing
	 * to the same sub buffer from which the consumer is reading. This
	 * can't be avoided unless the consumer is fast enough and can always
	 * run in tandem with the producer.
	 */
	if (relay_buf_full(buf))
		return 0;

	return 1;
}
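/*
 * For reference, a minimal userspace consumer of the relay channel created
 * below just streams the debugfs file exposed via create_buf_file_callback().
 * This is an illustrative sketch only -- the exact file name and path depend
 * on the relay core and on where debugfs is mounted -- not part of the
 * driver:
 *
 *	fd = open("/sys/kernel/debug/dri/0/guc_log0", O_RDONLY);
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		write(out_fd, buf, n);
 */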
/*
 * file_create() callback. Creates relay file in debugfs.
 */
static struct dentry *create_buf_file_callback(const char *filename,
					       struct dentry *parent,
					       umode_t mode,
					       struct rchan_buf *buf,
					       int *is_global)
{
	struct dentry *buf_file;

	/*
	 * This is to enable the use of a single buffer for the relay channel
	 * and correspondingly have a single file exposed to userspace,
	 * through which it can collect the logs in order without any
	 * post-processing.
	 * 'is_global' needs to be set even if parent is NULL for early logging.
	 */
	*is_global = 1;

	if (!parent)
		return NULL;

	buf_file = debugfs_create_file(filename, mode,
				       parent, buf, &relay_file_operations);
	if (IS_ERR(buf_file))
		return NULL;

	return buf_file;
}

/*
 * file_remove() default callback. Removes relay file in debugfs.
 */
static int remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

/* relay channel callbacks */
static const struct rchan_callbacks relay_callbacks = {
	.subbuf_start = subbuf_start_callback,
	.create_buf_file = create_buf_file_callback,
	.remove_buf_file = remove_buf_file_callback,
};

static void guc_move_to_next_buf(struct intel_guc_log *log)
{
	/*
	 * Make sure the updates made in the sub buffer are visible when
	 * the consumer sees the following update to the offset inside the
	 * sub buffer.
	 */
	smp_wmb();

	/* All data has been written, so now move the offset of sub buffer. */
	relay_reserve(log->relay.channel, log->vma->obj->base.size - CAPTURE_BUFFER_SIZE);

	/* Switch to the next sub buffer */
	relay_flush(log->relay.channel);
}
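/*
 * Note on the size used in guc_move_to_next_buf(): each relay sub buffer is
 * sized to mirror the shared GuC log buffer minus the capture region (see
 * guc_log_relay_create()), so reserving "object size - CAPTURE_BUFFER_SIZE"
 * bytes consumes exactly one full sub buffer and relay_flush() then moves
 * relay on to the next one.
 */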
static void *guc_get_write_buffer(struct intel_guc_log *log)
{
	/*
	 * Just get the base address of a new sub buffer and copy data into it
	 * ourselves. NULL will be returned in no-overwrite mode, if all sub
	 * buffers are full. Could have used relay_write() to indirectly copy
	 * the data, but that would have been a bit convoluted, as we need to
	 * write to only certain locations inside a sub buffer, which cannot
	 * be done without using relay_reserve() along with relay_write(). So
	 * it's better to use relay_reserve() alone.
	 */
	return relay_reserve(log->relay.channel, 0);
}

bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log,
				      enum guc_log_buffer_type type,
				      unsigned int full_cnt)
{
	unsigned int prev_full_cnt = log->stats[type].sampled_overflow;
	bool overflow = false;

	if (full_cnt != prev_full_cnt) {
		overflow = true;

		log->stats[type].overflow = full_cnt;
		log->stats[type].sampled_overflow += full_cnt - prev_full_cnt;

		if (full_cnt < prev_full_cnt) {
			/* buffer_full_cnt is a 4 bit counter */
			log->stats[type].sampled_overflow += 16;
		}

		dev_notice_ratelimited(guc_to_gt(log_to_guc(log))->i915->drm.dev,
				       "GuC log buffer overflow\n");
	}

	return overflow;
}

unsigned int intel_guc_get_log_buffer_size(enum guc_log_buffer_type type)
{
	switch (type) {
	case GUC_DEBUG_LOG_BUFFER:
		return DEBUG_BUFFER_SIZE;
	case GUC_CRASH_DUMP_LOG_BUFFER:
		return CRASH_BUFFER_SIZE;
	case GUC_CAPTURE_LOG_BUFFER:
		return CAPTURE_BUFFER_SIZE;
	default:
		MISSING_CASE(type);
	}

	return 0;
}

size_t intel_guc_get_log_buffer_offset(enum guc_log_buffer_type type)
{
	enum guc_log_buffer_type i;
	size_t offset = PAGE_SIZE; /* for the log_buffer_states */

	for (i = GUC_DEBUG_LOG_BUFFER; i < GUC_MAX_LOG_BUFFER; ++i) {
		if (i == type)
			break;
		offset += intel_guc_get_log_buffer_size(i);
	}

	return offset;
}
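/*
 * Worked example for the helper above: for GUC_CAPTURE_LOG_BUFFER the loop
 * first walks GUC_DEBUG_LOG_BUFFER and GUC_CRASH_DUMP_LOG_BUFFER, so the
 * returned offset is PAGE_SIZE + DEBUG_BUFFER_SIZE + CRASH_BUFFER_SIZE,
 * matching the layout diagram at the top of this file.
 */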
static void _guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
	unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
	struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
	struct guc_log_buffer_state log_buf_state_local;
	enum guc_log_buffer_type type;
	void *src_data, *dst_data;
	bool new_overflow;

	mutex_lock(&log->relay.lock);

	if (WARN_ON(!intel_guc_log_relay_created(log)))
		goto out_unlock;

	/* Get the pointer to shared GuC log buffer */
	src_data = log->buf_addr;
	log_buf_state = src_data;

	/* Get the pointer to local buffer to store the logs */
	log_buf_snapshot_state = dst_data = guc_get_write_buffer(log);

	if (unlikely(!log_buf_snapshot_state)) {
		/*
		 * Rate limited to avoid a deluge of messages; the logs might
		 * be getting consumed by userspace at a slow rate.
		 */
		DRM_ERROR_RATELIMITED("no sub-buffer to copy general logs\n");
		log->relay.full_count++;

		goto out_unlock;
	}

	/* Actual logs are present from the 2nd page */
	src_data += PAGE_SIZE;
	dst_data += PAGE_SIZE;

	/* For relay logging, we exclude error state capture */
	for (type = GUC_DEBUG_LOG_BUFFER; type <= GUC_CRASH_DUMP_LOG_BUFFER; type++) {
		/*
		 * Make a copy of the state structure, inside GuC log buffer
		 * (which is uncached mapped), on the stack to avoid reading
		 * from it multiple times.
		 */
		memcpy(&log_buf_state_local, log_buf_state,
		       sizeof(struct guc_log_buffer_state));
		buffer_size = intel_guc_get_log_buffer_size(type);
		read_offset = log_buf_state_local.read_ptr;
		write_offset = log_buf_state_local.sampled_write_ptr;
		full_cnt = log_buf_state_local.buffer_full_cnt;

		/* Bookkeeping stuff */
		log->stats[type].flush += log_buf_state_local.flush_to_file;
		new_overflow = intel_guc_check_log_buf_overflow(log, type, full_cnt);

		/* Update the state of shared log buffer */
		log_buf_state->read_ptr = write_offset;
		log_buf_state->flush_to_file = 0;
		log_buf_state++;

		/* First copy the state structure in snapshot buffer */
		memcpy(log_buf_snapshot_state, &log_buf_state_local,
		       sizeof(struct guc_log_buffer_state));

		/*
		 * The write pointer could have been updated by GuC firmware
		 * after sending the flush interrupt to Host; for consistency
		 * set the write pointer value to the same value as
		 * sampled_write_ptr in the snapshot buffer.
		 */
		log_buf_snapshot_state->write_ptr = write_offset;
		log_buf_snapshot_state++;

		/* Now copy the actual logs. */
		if (unlikely(new_overflow)) {
			/* copy the whole buffer in case of overflow */
			read_offset = 0;
			write_offset = buffer_size;
		} else if (unlikely((read_offset > buffer_size) ||
				    (write_offset > buffer_size))) {
			DRM_ERROR("invalid log buffer state\n");
			/* copy whole buffer as offsets are unreliable */
			read_offset = 0;
			write_offset = buffer_size;
		}

		/* Just copy the newly written data */
		if (read_offset > write_offset) {
			/*
			 * Wrapped around: copy [0, write_offset) here, then
			 * the [read_offset, buffer_size) tail below.
			 */
			i915_memcpy_from_wc(dst_data, src_data, write_offset);
			bytes_to_copy = buffer_size - read_offset;
		} else {
			bytes_to_copy = write_offset - read_offset;
		}
		i915_memcpy_from_wc(dst_data + read_offset,
				    src_data + read_offset, bytes_to_copy);

		src_data += buffer_size;
		dst_data += buffer_size;
	}

	guc_move_to_next_buf(log);

out_unlock:
	mutex_unlock(&log->relay.lock);
}

static void copy_debug_logs_work(struct work_struct *work)
{
	struct intel_guc_log *log =
		container_of(work, struct intel_guc_log, relay.flush_work);

	guc_log_copy_debuglogs_for_relay(log);
}

static int guc_log_relay_map(struct intel_guc_log *log)
{
	lockdep_assert_held(&log->relay.lock);

	if (!log->vma || !log->buf_addr)
		return -ENODEV;

	/*
	 * The WC vmalloc mapping of the log buffer pages was done at
	 * GuC log init time, but let's keep a ref for book-keeping.
	 */
	i915_gem_object_get(log->vma->obj);
	log->relay.buf_in_use = true;

	return 0;
}

static void guc_log_relay_unmap(struct intel_guc_log *log)
{
	lockdep_assert_held(&log->relay.lock);

	i915_gem_object_put(log->vma->obj);
	log->relay.buf_in_use = false;
}

void intel_guc_log_init_early(struct intel_guc_log *log)
{
	mutex_init(&log->relay.lock);
	INIT_WORK(&log->relay.flush_work, copy_debug_logs_work);
	log->relay.started = false;
}
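/*
 * Relay lifetime summary: log->relay.lock guards creation, mapping,
 * unmapping and destruction of the relay channel below, while
 * relay.buf_in_use together with the extra object reference taken in
 * guc_log_relay_map() keeps the log buffer pages alive for as long as
 * relay is consuming them.
 */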
static int guc_log_relay_create(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	struct rchan *guc_log_relay_chan;
	size_t n_subbufs, subbuf_size;
	int ret;

	lockdep_assert_held(&log->relay.lock);
	GEM_BUG_ON(!log->vma);

	/*
	 * Keep the size of sub buffers the same as the shared log buffer,
	 * but excluding the error-state-capture logs, which are not relayed
	 * as GuC log-events.
	 */
	subbuf_size = log->vma->size - CAPTURE_BUFFER_SIZE;

	/*
	 * Store up to 8 snapshots, which is large enough to buffer sufficient
	 * boot time logs and provides enough leeway to the user, in terms of
	 * latency, for consuming the logs from relay. Also doesn't take
	 * up too much memory.
	 */
	n_subbufs = 8;

	guc_log_relay_chan = relay_open("guc_log",
					dev_priv->drm.primary->debugfs_root,
					subbuf_size, n_subbufs,
					&relay_callbacks, dev_priv);
	if (!guc_log_relay_chan) {
		DRM_ERROR("Couldn't create relay chan for GuC logging\n");

		ret = -ENOMEM;
		return ret;
	}

	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
	log->relay.channel = guc_log_relay_chan;

	return 0;
}

static void guc_log_relay_destroy(struct intel_guc_log *log)
{
	lockdep_assert_held(&log->relay.lock);

	relay_close(log->relay.channel);
	log->relay.channel = NULL;
}

static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	intel_wakeref_t wakeref;

	_guc_log_copy_debuglogs_for_relay(log);

	/*
	 * Generally the device is expected to be active only at this
	 * time, so the get/put should be really quick.
	 */
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		guc_action_flush_log_complete(guc);
}
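/*
 * i915.guc_log_level is mapped onto the GuC log interface via the helpers
 * used below: GUC_LOG_LEVEL_DISABLED (0) turns logging off,
 * GUC_LOG_LEVEL_NON_VERBOSE enables non-verbose logging, and levels above
 * that select a verbosity through GUC_LOG_LEVEL_TO_VERBOSITY(), up to
 * GUC_LOG_LEVEL_MAX.
 */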
static u32 __get_default_log_level(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	/* A negative value means "use platform/config default" */
	if (i915->params.guc_log_level < 0) {
		return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
			IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
			GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_NON_VERBOSE;
	}

	if (i915->params.guc_log_level > GUC_LOG_LEVEL_MAX) {
		DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
			 "guc_log_level", i915->params.guc_log_level,
			 "verbosity too high");
		return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
			IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
			GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED;
	}

	GEM_BUG_ON(i915->params.guc_log_level < GUC_LOG_LEVEL_DISABLED);
	GEM_BUG_ON(i915->params.guc_log_level > GUC_LOG_LEVEL_MAX);
	return i915->params.guc_log_level;
}

int intel_guc_log_create(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct i915_vma *vma;
	void *vaddr;
	u32 guc_log_size;
	int ret;

	GEM_BUG_ON(log->vma);

	guc_log_size = intel_guc_log_size(log);

	vma = intel_guc_allocate_vma(guc, guc_log_size);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	log->vma = vma;
	/*
	 * Create a WC (Uncached for read) vmalloc mapping up front for
	 * immediate access to data from memory during critical events
	 * such as error capture.
	 */
	vaddr = i915_gem_object_pin_map_unlocked(log->vma->obj, I915_MAP_WC);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		i915_vma_unpin_and_release(&log->vma, 0);
		goto err;
	}
	log->buf_addr = vaddr;

	log->level = __get_default_log_level(log);
	DRM_DEBUG_DRIVER("guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n",
			 log->level, str_enabled_disabled(log->level),
			 str_yes_no(GUC_LOG_LEVEL_IS_VERBOSE(log->level)),
			 GUC_LOG_LEVEL_TO_VERBOSITY(log->level));

	return 0;

err:
	DRM_ERROR("Failed to allocate or map GuC log buffer. %d\n", ret);
	return ret;
}

void intel_guc_log_destroy(struct intel_guc_log *log)
{
	log->buf_addr = NULL;
	i915_vma_unpin_and_release(&log->vma, I915_VMA_RELEASE_MAP);
}

int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	intel_wakeref_t wakeref;
	int ret = 0;

	BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
	GEM_BUG_ON(!log->vma);

	/*
	 * GuC recognizes log levels starting from 0 to max; we're using 0
	 * as an indication that logging should be disabled.
	 */
	if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
		return -EINVAL;

	mutex_lock(&dev_priv->drm.struct_mutex);

	if (log->level == level)
		goto out_unlock;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		ret = guc_action_control_log(guc,
					     GUC_LOG_LEVEL_IS_VERBOSE(level),
					     GUC_LOG_LEVEL_IS_ENABLED(level),
					     GUC_LOG_LEVEL_TO_VERBOSITY(level));
	if (ret) {
		DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
		goto out_unlock;
	}

	log->level = level;

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return ret;
}

bool intel_guc_log_relay_created(const struct intel_guc_log *log)
{
	return log->buf_addr;
}
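/*
 * Typical relay session as driven from debugfs, shown here as a sketch of
 * the expected call order rather than a strict contract:
 *
 *	intel_guc_log_relay_open(log);   // create + map the relay channel
 *	intel_guc_log_relay_start(log);  // queue the initial copy work
 *	...
 *	intel_guc_log_relay_flush(log);  // force GuC flush, then copy
 *	intel_guc_log_relay_close(log);  // stop, unmap and destroy
 */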
int intel_guc_log_relay_open(struct intel_guc_log *log)
{
	int ret;

	if (!log->vma)
		return -ENODEV;

	mutex_lock(&log->relay.lock);

	if (intel_guc_log_relay_created(log)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	/*
	 * We require SSE 4.1 for fast reads from the GuC log buffer and
	 * it should be present on the chipsets supporting GuC based
	 * submissions.
	 */
	if (!i915_has_memcpy_from_wc()) {
		ret = -ENXIO;
		goto out_unlock;
	}

	ret = guc_log_relay_create(log);
	if (ret)
		goto out_unlock;

	ret = guc_log_relay_map(log);
	if (ret)
		goto out_relay;

	mutex_unlock(&log->relay.lock);

	return 0;

out_relay:
	guc_log_relay_destroy(log);
out_unlock:
	mutex_unlock(&log->relay.lock);

	return ret;
}

int intel_guc_log_relay_start(struct intel_guc_log *log)
{
	if (log->relay.started)
		return -EEXIST;

	/*
	 * When GuC is logging without us relaying to userspace, we're ignoring
	 * the flush notification. This means that we need to unconditionally
	 * flush on relay enabling, since GuC only notifies us once.
	 */
	queue_work(system_highpri_wq, &log->relay.flush_work);

	log->relay.started = true;

	return 0;
}

void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	intel_wakeref_t wakeref;

	if (!log->relay.started)
		return;

	/*
	 * Before initiating the forceful flush, wait for any pending/ongoing
	 * flush to complete, otherwise the forceful flush may not actually
	 * happen.
	 */
	flush_work(&log->relay.flush_work);

	with_intel_runtime_pm(guc_to_gt(guc)->uncore->rpm, wakeref)
		guc_action_flush_log(guc);

	/* GuC would have updated the log buffer by now, so copy it */
	guc_log_copy_debuglogs_for_relay(log);
}

/*
 * Stops the relay log. Called from intel_guc_log_relay_close(), so there is
 * no possibility of a race with start/flush since relay_write cannot race
 * relay_close.
 */
static void guc_log_relay_stop(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	if (!log->relay.started)
		return;

	intel_synchronize_irq(i915);

	flush_work(&log->relay.flush_work);

	log->relay.started = false;
}

void intel_guc_log_relay_close(struct intel_guc_log *log)
{
	guc_log_relay_stop(log);

	mutex_lock(&log->relay.lock);
	GEM_BUG_ON(!intel_guc_log_relay_created(log));
	guc_log_relay_unmap(log);
	guc_log_relay_destroy(log);
	mutex_unlock(&log->relay.lock);
}

void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
{
	if (log->relay.started)
		queue_work(system_highpri_wq, &log->relay.flush_work);
}

static const char *
stringify_guc_log_type(enum guc_log_buffer_type type)
{
	switch (type) {
	case GUC_DEBUG_LOG_BUFFER:
		return "DEBUG";
	case GUC_CRASH_DUMP_LOG_BUFFER:
		return "CRASH";
	case GUC_CAPTURE_LOG_BUFFER:
		return "CAPTURE";
	default:
		MISSING_CASE(type);
	}

	return "";
}
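/*
 * Sample output of intel_guc_log_info() below (values are illustrative):
 *
 *	GuC logging stats:
 *		Relay full count: 0
 *		DEBUG:	flush count          3, overflow count          0
 *		CRASH:	flush count          0, overflow count          0
 *		CAPTURE:	flush count          0, overflow count          0
 */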
/**
 * intel_guc_log_info - dump information about GuC log relay
 * @log: the GuC log
 * @p: the &drm_printer
 *
 * Pretty printer for GuC log info
 */
void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p)
{
	enum guc_log_buffer_type type;

	if (!intel_guc_log_relay_created(log)) {
		drm_puts(p, "GuC log relay not created\n");
		return;
	}

	drm_puts(p, "GuC logging stats:\n");

	drm_printf(p, "\tRelay full count: %u\n", log->relay.full_count);

	for (type = GUC_DEBUG_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		drm_printf(p, "\t%s:\tflush count %10u, overflow count %10u\n",
			   stringify_guc_log_type(type),
			   log->stats[type].flush,
			   log->stats[type].sampled_overflow);
	}
}

/**
 * intel_guc_log_dump - dump the contents of the GuC log
 * @log: the GuC log
 * @p: the &drm_printer
 * @dump_load_err: dump the log saved on GuC load error
 *
 * Pretty printer for the GuC log
 */
int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
		       bool dump_load_err)
{
	struct intel_guc *guc = log_to_guc(log);
	struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
	struct drm_i915_gem_object *obj = NULL;
	void *map;
	u32 *page;
	int i, j;

	if (!intel_guc_is_supported(guc))
		return -ENODEV;

	if (dump_load_err)
		obj = uc->load_err_log;
	else if (guc->log.vma)
		obj = guc->log.vma->obj;

	if (!obj)
		return 0;

	page = (u32 *)__get_free_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	intel_guc_dump_time_info(guc, p);

	map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(map)) {
		DRM_DEBUG("Failed to pin object\n");
		drm_puts(p, "(log data inaccessible)\n");
		free_page((unsigned long)page);
		return PTR_ERR(map);
	}

	for (i = 0; i < obj->base.size; i += PAGE_SIZE) {
		/* Fall back to a plain memcpy if the fast WC read fails */
		if (!i915_memcpy_from_wc(page, map + i, PAGE_SIZE))
			memcpy(page, map + i, PAGE_SIZE);

		/* Hexdump the page, four u32 values per line */
		for (j = 0; j < PAGE_SIZE / sizeof(u32); j += 4)
			drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
				   *(page + j + 0), *(page + j + 1),
				   *(page + j + 2), *(page + j + 3));
	}

	drm_puts(p, "\n");

	i915_gem_object_unpin_map(obj);
	free_page((unsigned long)page);

	return 0;
}