// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include <linux/debugfs.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_memcpy.h"
#include "intel_guc_log.h"

static void guc_log_capture_logs(struct intel_guc_log *log);

/**
 * DOC: GuC firmware log
 *
 * Firmware log is enabled by setting i915.guc_log_level to a positive level.
 * Log data is printed out via reading debugfs i915_guc_log_dump. Reading from
 * i915_guc_load_status will print out firmware loading status and scratch
 * register values.
 */
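
/*
 * Example usage (a sketch; guc_log_level is a module parameter, and the
 * debugfs path below assumes DRM minor 0 - both may differ between kernel
 * versions):
 *
 *   # modprobe i915 guc_log_level=3
 *   # cat /sys/kernel/debug/dri/0/i915_guc_log_dump
 */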

static int guc_action_flush_log_complete(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static int guc_action_flush_log(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
		0
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

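/*
 * Host-to-GuC actions, as sent above and below, are plain dword arrays:
 * action[0] is the action opcode and any further dwords are parameters. For
 * the log-control action, the enable bit, verbosity and default-logging bit
 * are all packed into a single parameter dword.
 */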
static int guc_action_control_log(struct intel_guc *guc, bool enable,
				  bool default_logging, u32 verbosity)
{
	u32 action[] = {
		INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
		(enable ? GUC_LOG_CONTROL_LOGGING_ENABLED : 0) |
		(verbosity << GUC_LOG_CONTROL_VERBOSITY_SHIFT) |
		(default_logging ? GUC_LOG_CONTROL_DEFAULT_LOGGING : 0)
	};

	GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX);

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static void guc_log_enable_flush_events(struct intel_guc_log *log)
{
	intel_guc_enable_msg(log_to_guc(log),
			     INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
			     INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
}

static void guc_log_disable_flush_events(struct intel_guc_log *log)
{
	intel_guc_disable_msg(log_to_guc(log),
			      INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
			      INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
}

/*
 * Sub buffer switch callback. Called whenever relay has to switch to a new
 * sub buffer; relay stays on the same sub buffer if 0 is returned.
 */
static int subbuf_start_callback(struct rchan_buf *buf,
				 void *subbuf,
				 void *prev_subbuf,
				 size_t prev_padding)
{
	/*
	 * Use no-overwrite mode by default, where relay will stop accepting
	 * new data if there are no empty sub buffers left.
	 * There is no strict synchronization enforced by relay between Consumer
	 * and Producer. In overwrite mode, there is a possibility of getting
	 * inconsistent/garbled data, as the Producer could be writing to the
	 * same sub buffer from which the Consumer is reading. This can't be
	 * avoided unless the Consumer is fast enough and can always run in
	 * tandem with the Producer.
	 */
	if (relay_buf_full(buf))
		return 0;

	return 1;
}

/*
 * file_create() callback. Creates relay file in debugfs.
 */
static struct dentry *create_buf_file_callback(const char *filename,
					       struct dentry *parent,
					       umode_t mode,
					       struct rchan_buf *buf,
					       int *is_global)
{
	struct dentry *buf_file;

	/*
	 * This is to enable the use of a single buffer for the relay channel
	 * and correspondingly have a single file exposed to the User, through
	 * which it can collect the logs in order without any post-processing.
	 * Need to set 'is_global' even if parent is NULL for early logging.
	 */
	*is_global = 1;

	if (!parent)
		return NULL;

	buf_file = debugfs_create_file(filename, mode,
				       parent, buf, &relay_file_operations);
	if (IS_ERR(buf_file))
		return NULL;

	return buf_file;
}

/*
 * file_remove() default callback. Removes relay file in debugfs.
 */
static int remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

/* relay channel callbacks */
static struct rchan_callbacks relay_callbacks = {
	.subbuf_start = subbuf_start_callback,
	.create_buf_file = create_buf_file_callback,
	.remove_buf_file = remove_buf_file_callback,
};

static void guc_move_to_next_buf(struct intel_guc_log *log)
{
	/*
	 * Make sure the updates made in the sub buffer are visible when
	 * Consumer sees the following update to offset inside the sub buffer.
	 */
	smp_wmb();

	/* All data has been written, so now move the offset of sub buffer. */
	relay_reserve(log->relay.channel, log->vma->obj->base.size);

	/* Switch to the next sub buffer */
	relay_flush(log->relay.channel);
}

static void *guc_get_write_buffer(struct intel_guc_log *log)
{
	/*
	 * Just get the base address of a new sub buffer and copy data into it
	 * ourselves. NULL will be returned in no-overwrite mode if all sub
	 * buffers are full. Could have used relay_write() to indirectly copy
	 * the data, but that would have been a bit convoluted, as we need to
	 * write to only certain locations inside a sub buffer, which cannot be
	 * done without using relay_reserve() along with relay_write(). So it's
	 * better to use relay_reserve() alone.
	 */
	return relay_reserve(log->relay.channel, 0);
}

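/*
 * Track GuC log buffer overflows by comparing buffer_full_cnt against the
 * previously sampled value. buffer_full_cnt is only a 4-bit counter, so it
 * can wrap between samples: e.g. prev_full_cnt == 15 and full_cnt == 2 means
 * 2 - 15 + 16 == 3 new overflows, which is what the "+= 16" below restores
 * (the unsigned underflow and the addition cancel out modulo 2^32).
 */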
static bool guc_check_log_buf_overflow(struct intel_guc_log *log,
				       enum guc_log_buffer_type type,
				       unsigned int full_cnt)
{
	unsigned int prev_full_cnt = log->stats[type].sampled_overflow;
	bool overflow = false;

	if (full_cnt != prev_full_cnt) {
		overflow = true;

		log->stats[type].overflow = full_cnt;
		log->stats[type].sampled_overflow += full_cnt - prev_full_cnt;

		if (full_cnt < prev_full_cnt) {
			/* buffer_full_cnt is a 4-bit counter */
			log->stats[type].sampled_overflow += 16;
		}

		dev_notice_ratelimited(guc_to_gt(log_to_guc(log))->i915->drm.dev,
				       "GuC log buffer overflow\n");
	}

	return overflow;
}

static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
{
	switch (type) {
	case GUC_ISR_LOG_BUFFER:
		return ISR_BUFFER_SIZE;
	case GUC_DPC_LOG_BUFFER:
		return DPC_BUFFER_SIZE;
	case GUC_CRASH_DUMP_LOG_BUFFER:
		return CRASH_BUFFER_SIZE;
	default:
		MISSING_CASE(type);
	}

	return 0;
}

static void guc_read_update_log_buffer(struct intel_guc_log *log)
{
	unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
	struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
	struct guc_log_buffer_state log_buf_state_local;
	enum guc_log_buffer_type type;
	void *src_data, *dst_data;
	bool new_overflow;

	mutex_lock(&log->relay.lock);

	if (WARN_ON(!intel_guc_log_relay_created(log)))
		goto out_unlock;

	/* Get the pointer to shared GuC log buffer */
	log_buf_state = src_data = log->relay.buf_addr;

	/* Get the pointer to local buffer to store the logs */
	log_buf_snapshot_state = dst_data = guc_get_write_buffer(log);

	if (unlikely(!log_buf_snapshot_state)) {
		/*
		 * Use a rate-limited message to avoid a deluge; logs might be
		 * getting consumed by the User at a slow rate.
		 */
		DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
		log->relay.full_count++;

		goto out_unlock;
	}

	/* Actual logs are present from the 2nd page */
	src_data += PAGE_SIZE;
	dst_data += PAGE_SIZE;

	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		/*
		 * Make a copy of the state structure, inside GuC log buffer
		 * (which is uncached mapped), on the stack to avoid reading
		 * from it multiple times.
		 */
		memcpy(&log_buf_state_local, log_buf_state,
		       sizeof(struct guc_log_buffer_state));
		buffer_size = guc_get_log_buffer_size(type);
		read_offset = log_buf_state_local.read_ptr;
		write_offset = log_buf_state_local.sampled_write_ptr;
		full_cnt = log_buf_state_local.buffer_full_cnt;

		/* Bookkeeping stuff */
		log->stats[type].flush += log_buf_state_local.flush_to_file;
		new_overflow = guc_check_log_buf_overflow(log, type, full_cnt);

		/* Update the state of shared log buffer */
		log_buf_state->read_ptr = write_offset;
		log_buf_state->flush_to_file = 0;
		log_buf_state++;

		/* First copy the state structure in snapshot buffer */
		memcpy(log_buf_snapshot_state, &log_buf_state_local,
		       sizeof(struct guc_log_buffer_state));

		/*
		 * The write pointer could have been updated by GuC firmware
		 * after sending the flush interrupt to the Host; for
		 * consistency, set the write pointer in the snapshot buffer to
		 * the same value as sampled_write_ptr.
		 */
		log_buf_snapshot_state->write_ptr = write_offset;
		log_buf_snapshot_state++;

		/* Now copy the actual logs. */
		if (unlikely(new_overflow)) {
			/* copy the whole buffer in case of overflow */
			read_offset = 0;
			write_offset = buffer_size;
		} else if (unlikely((read_offset > buffer_size) ||
				    (write_offset > buffer_size))) {
			DRM_ERROR("invalid log buffer state\n");
			/* copy whole buffer as offsets are unreliable */
			read_offset = 0;
			write_offset = buffer_size;
		}

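		/*
		 * The log buffer is circular; there are two copy cases
		 * (R == read_offset, W == write_offset):
		 *
		 *   no wrap:  |....R####W....|  copy R..W in one go
		 *   wrap:     |####W....R####|  copy 0..W first, then R..end
		 */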
		/* Just copy the newly written data */
		if (read_offset > write_offset) {
			i915_memcpy_from_wc(dst_data, src_data, write_offset);
			bytes_to_copy = buffer_size - read_offset;
		} else {
			bytes_to_copy = write_offset - read_offset;
		}
		i915_memcpy_from_wc(dst_data + read_offset,
				    src_data + read_offset, bytes_to_copy);

		src_data += buffer_size;
		dst_data += buffer_size;
	}

	guc_move_to_next_buf(log);

out_unlock:
	mutex_unlock(&log->relay.lock);
}

static void capture_logs_work(struct work_struct *work)
{
	struct intel_guc_log *log =
		container_of(work, struct intel_guc_log, relay.flush_work);

	guc_log_capture_logs(log);
}

static int guc_log_map(struct intel_guc_log *log)
{
	void *vaddr;

	lockdep_assert_held(&log->relay.lock);

	if (!log->vma)
		return -ENODEV;

	/*
	 * Create a WC (Uncached for read) vmalloc mapping of log
	 * buffer pages, so that we can directly get the data
	 * (up-to-date) from memory.
	 */
	vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	log->relay.buf_addr = vaddr;

	return 0;
}

static void guc_log_unmap(struct intel_guc_log *log)
{
	lockdep_assert_held(&log->relay.lock);

	i915_gem_object_unpin_map(log->vma->obj);
	log->relay.buf_addr = NULL;
}

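/**
 * intel_guc_log_init_early - early setup of the GuC log state
 * @log: the GuC log
 *
 * Initializes the relay lock and the worker used to copy log data out of the
 * shared buffer. No memory is allocated here; that happens later, in
 * intel_guc_log_create().
 */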
void intel_guc_log_init_early(struct intel_guc_log *log)
{
	mutex_init(&log->relay.lock);
	INIT_WORK(&log->relay.flush_work, capture_logs_work);
	log->relay.started = false;
}

static int guc_log_relay_create(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	struct rchan *guc_log_relay_chan;
	size_t n_subbufs, subbuf_size;

	lockdep_assert_held(&log->relay.lock);
	GEM_BUG_ON(!log->vma);

	/* Keep the size of sub buffers the same as the shared log buffer */
	subbuf_size = log->vma->size;

	/*
	 * Store up to 8 snapshots, which is large enough to buffer sufficient
	 * boot time logs and provides enough leeway to the User, in terms of
	 * latency, for consuming the logs from relay. Also doesn't take
	 * up too much memory.
	 */
	n_subbufs = 8;

	guc_log_relay_chan = relay_open("guc_log",
					dev_priv->drm.primary->debugfs_root,
					subbuf_size, n_subbufs,
					&relay_callbacks, dev_priv);
	if (!guc_log_relay_chan) {
		DRM_ERROR("Couldn't create relay chan for GuC logging\n");
		return -ENOMEM;
	}

	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
	log->relay.channel = guc_log_relay_chan;

	return 0;
}

static void guc_log_relay_destroy(struct intel_guc_log *log)
{
	lockdep_assert_held(&log->relay.lock);

	relay_close(log->relay.channel);
	log->relay.channel = NULL;
}

static void guc_log_capture_logs(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	intel_wakeref_t wakeref;

	guc_read_update_log_buffer(log);

	/*
	 * Generally the device is expected to be active at this point, so
	 * get/put should be really quick.
	 */
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		guc_action_flush_log_complete(guc);
}

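/*
 * GuC log levels, as used below: GUC_LOG_LEVEL_DISABLED (0) turns logging
 * off, GUC_LOG_LEVEL_NON_VERBOSE (1) enables non-verbose logging, and higher
 * values up to GUC_LOG_LEVEL_MAX enable verbose logging with increasing
 * verbosity.
 */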
static u32 __get_default_log_level(struct intel_guc_log *log)
{
	/* A negative value means "use platform/config default" */
	if (i915_modparams.guc_log_level < 0) {
		return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
			IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
			GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_NON_VERBOSE;
	}

	if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) {
		DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
			 "guc_log_level", i915_modparams.guc_log_level,
			 "verbosity too high");
		return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
			IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
			GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED;
	}

	GEM_BUG_ON(i915_modparams.guc_log_level < GUC_LOG_LEVEL_DISABLED);
	GEM_BUG_ON(i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX);
	return i915_modparams.guc_log_level;
}

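/**
 * intel_guc_log_create - allocate the backing storage for the GuC log
 * @log: the GuC log
 *
 * Allocates the vma that is shared between the GuC firmware and the host for
 * log data (see the layout diagram below) and latches the default log level.
 *
 * Return: 0 on success, negative error code on failure.
 */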
int intel_guc_log_create(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct i915_vma *vma;
	u32 guc_log_size;
	int ret;

	GEM_BUG_ON(log->vma);

	/*
	 *  GuC Log buffer Layout
	 *
	 *  +===============================+ 00B
	 *  |    Crash dump state header    |
	 *  +-------------------------------+ 32B
	 *  |       DPC state header        |
	 *  +-------------------------------+ 64B
	 *  |       ISR state header        |
	 *  +-------------------------------+ 96B
	 *  |                               |
	 *  +===============================+ PAGE_SIZE (4KB)
	 *  |        Crash Dump logs        |
	 *  +===============================+ + CRASH_SIZE
	 *  |           DPC logs            |
	 *  +===============================+ + DPC_SIZE
	 *  |           ISR logs            |
	 *  +===============================+ + ISR_SIZE
	 */
	guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DPC_BUFFER_SIZE +
			ISR_BUFFER_SIZE;

	vma = intel_guc_allocate_vma(guc, guc_log_size);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	log->vma = vma;

	log->level = __get_default_log_level(log);
	DRM_DEBUG_DRIVER("guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n",
			 log->level, enableddisabled(log->level),
			 yesno(GUC_LOG_LEVEL_IS_VERBOSE(log->level)),
			 GUC_LOG_LEVEL_TO_VERBOSITY(log->level));

	return 0;

err:
	DRM_ERROR("Failed to allocate GuC log buffer. %d\n", ret);
	return ret;
}


void intel_guc_log_destroy(struct intel_guc_log *log)
{
	i915_vma_unpin_and_release(&log->vma, 0);
}

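/**
 * intel_guc_log_set_level - request a new log level from the GuC
 * @log: the GuC log
 * @level: the target level, from GUC_LOG_LEVEL_DISABLED to GUC_LOG_LEVEL_MAX
 *
 * Return: 0 on success (or if the level is already set), a negative error
 * code otherwise.
 */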
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	intel_wakeref_t wakeref;
	int ret = 0;

	BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
	GEM_BUG_ON(!log->vma);

	/*
	 * GuC recognizes log levels from 0 to max; we use 0 to indicate that
	 * logging should be disabled.
	 */
	if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
		return -EINVAL;

	mutex_lock(&dev_priv->drm.struct_mutex);

	if (log->level == level)
		goto out_unlock;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		ret = guc_action_control_log(guc,
					     GUC_LOG_LEVEL_IS_VERBOSE(level),
					     GUC_LOG_LEVEL_IS_ENABLED(level),
					     GUC_LOG_LEVEL_TO_VERBOSITY(level));
	if (ret) {
		DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
		goto out_unlock;
	}

	log->level = level;

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return ret;
}

bool intel_guc_log_relay_created(const struct intel_guc_log *log)
{
	return log->relay.buf_addr;
}

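/*
 * Relay lifecycle: intel_guc_log_relay_open() creates the relay channel and
 * maps the shared log buffer, intel_guc_log_relay_start() enables flush
 * events from the GuC, intel_guc_log_relay_flush() forces an immediate
 * capture, and intel_guc_log_relay_close() tears it all down again.
 */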
int intel_guc_log_relay_open(struct intel_guc_log *log)
{
	int ret;

	if (!log->vma)
		return -ENODEV;

	mutex_lock(&log->relay.lock);

	if (intel_guc_log_relay_created(log)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	/*
	 * We require SSE 4.1 for fast reads from the GuC log buffer and
	 * it should be present on the chipsets supporting GuC-based
	 * submissions.
	 */
	if (!i915_has_memcpy_from_wc()) {
		ret = -ENXIO;
		goto out_unlock;
	}

	ret = guc_log_relay_create(log);
	if (ret)
		goto out_unlock;

	ret = guc_log_map(log);
	if (ret)
		goto out_relay;

	mutex_unlock(&log->relay.lock);

	return 0;

out_relay:
	guc_log_relay_destroy(log);
out_unlock:
	mutex_unlock(&log->relay.lock);

	return ret;
}

int intel_guc_log_relay_start(struct intel_guc_log *log)
{
	if (log->relay.started)
		return -EEXIST;

	guc_log_enable_flush_events(log);

	/*
	 * When GuC is logging without us relaying to userspace, we're ignoring
	 * the flush notification. This means that we need to unconditionally
	 * flush on relay enabling, since GuC only notifies us once.
	 */
	queue_work(system_highpri_wq, &log->relay.flush_work);

	log->relay.started = true;

	return 0;
}

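/**
 * intel_guc_log_relay_flush - force an immediate capture of the GuC log
 * @log: the GuC log
 *
 * Asks the GuC firmware to flush whatever it has written to the shared log
 * buffer, then copies the result into the relay channel.
 */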
void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	intel_wakeref_t wakeref;

	if (!log->relay.started)
		return;

	/*
	 * Before initiating the forceful flush, wait for any pending/ongoing
	 * flush to complete; otherwise the forceful flush may not actually
	 * happen.
	 */
	flush_work(&log->relay.flush_work);

	with_intel_runtime_pm(guc_to_gt(guc)->uncore->rpm, wakeref)
		guc_action_flush_log(guc);

	/* GuC would have updated the log buffer by now, so capture it */
	guc_log_capture_logs(log);
}

/*
 * Stops the relay log. Called from intel_guc_log_relay_close(), so there is
 * no possibility of a race with start/flush, since relay_write cannot race
 * with relay_close.
 */
static void guc_log_relay_stop(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	if (!log->relay.started)
		return;

	guc_log_disable_flush_events(log);
	intel_synchronize_irq(i915);

	flush_work(&log->relay.flush_work);

	log->relay.started = false;
}

void intel_guc_log_relay_close(struct intel_guc_log *log)
{
	guc_log_relay_stop(log);

	mutex_lock(&log->relay.lock);
	GEM_BUG_ON(!intel_guc_log_relay_created(log));
	guc_log_unmap(log);
	guc_log_relay_destroy(log);
	mutex_unlock(&log->relay.lock);
}

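/*
 * Called on a log-buffer-flush notification from the GuC. The actual copy
 * out of the shared buffer is deferred to a worker, as it cannot be done
 * from the interrupt handler itself.
 */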
void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
{
	queue_work(system_highpri_wq, &log->relay.flush_work);
}

static const char *
stringify_guc_log_type(enum guc_log_buffer_type type)
{
	switch (type) {
	case GUC_ISR_LOG_BUFFER:
		return "ISR";
	case GUC_DPC_LOG_BUFFER:
		return "DPC";
	case GUC_CRASH_DUMP_LOG_BUFFER:
		return "CRASH";
	default:
		MISSING_CASE(type);
	}

	return "";
}

/**
 * intel_guc_log_info - dump information about GuC log relay
 * @log: the GuC log
 * @p: the &drm_printer
 *
 * Pretty printer for GuC log info
 */
void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p)
{
	enum guc_log_buffer_type type;

	if (!intel_guc_log_relay_created(log)) {
		drm_puts(p, "GuC log relay not created\n");
		return;
	}

	drm_puts(p, "GuC logging stats:\n");

	drm_printf(p, "\tRelay full count: %u\n", log->relay.full_count);

	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		drm_printf(p, "\t%s:\tflush count %10u, overflow count %10u\n",
			   stringify_guc_log_type(type),
			   log->stats[type].flush,
			   log->stats[type].sampled_overflow);
	}
}

/**
 * intel_guc_log_dump - dump the contents of the GuC log
 * @log: the GuC log
 * @p: the &drm_printer
 * @dump_load_err: dump the log saved on GuC load error
 *
 * Pretty printer for the GuC log
 */
int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
		       bool dump_load_err)
{
	struct intel_guc *guc = log_to_guc(log);
	struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
	struct drm_i915_gem_object *obj = NULL;
	u32 *map;
	int i = 0;

	if (!intel_guc_is_supported(guc))
		return -ENODEV;

	if (dump_load_err)
		obj = uc->load_err_log;
	else if (guc->log.vma)
		obj = guc->log.vma->obj;

	if (!obj)
		return 0;

	map = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(map)) {
		DRM_DEBUG("Failed to pin object\n");
		drm_puts(p, "(log data inaccessible)\n");
		return PTR_ERR(map);
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(map + i), *(map + i + 1),
			   *(map + i + 2), *(map + i + 3));

	drm_puts(p, "\n");

	i915_gem_object_unpin_map(obj);

	return 0;
}