// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include <linux/debugfs.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_memcpy.h"
#include "intel_guc_log.h"

static void guc_log_capture_logs(struct intel_guc_log *log);

/**
 * DOC: GuC firmware log
 *
 * Firmware log is enabled by setting i915.guc_log_level to a positive level.
 * Log data is printed out via reading the debugfs file i915_guc_log_dump.
 * Reading from i915_guc_load_status will print out the firmware loading
 * status and scratch register values.
 */

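/*
 * Tell the GuC firmware that the host has finished consuming the flushed
 * log buffer contents, so that it can reuse the corresponding space.
 */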
static int guc_action_flush_log_complete(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

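/*
 * Ask the GuC firmware to flush its log buffer to memory immediately,
 * without waiting for it to fill up.
 */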
static int guc_action_flush_log(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
		0
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

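/*
 * Update the GuC logging state at runtime: enable or disable log output,
 * toggle default logging and set the verbosity level.
 */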
static int guc_action_control_log(struct intel_guc *guc, bool enable,
				  bool default_logging, u32 verbosity)
{
	u32 action[] = {
		INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
		(enable ? GUC_LOG_CONTROL_LOGGING_ENABLED : 0) |
		(verbosity << GUC_LOG_CONTROL_VERBOSITY_SHIFT) |
		(default_logging ? GUC_LOG_CONTROL_DEFAULT_LOGGING : 0)
	};

	GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX);

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
{
	return container_of(log, struct intel_guc, log);
}

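/*
 * The GuC notifies the host with "flush" interrupt messages when log data
 * is ready to be consumed or when a crash dump has been posted; these
 * helpers unmask/mask those messages.
 */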
static void guc_log_enable_flush_events(struct intel_guc_log *log)
{
	intel_guc_enable_msg(log_to_guc(log),
			     INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
			     INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
}

static void guc_log_disable_flush_events(struct intel_guc_log *log)
{
	intel_guc_disable_msg(log_to_guc(log),
			      INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
			      INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
}

/*
 * Sub buffer switch callback. Called whenever relay has to switch to a new
 * sub buffer; relay stays on the same sub buffer if 0 is returned.
 */
static int subbuf_start_callback(struct rchan_buf *buf,
				 void *subbuf,
				 void *prev_subbuf,
				 size_t prev_padding)
{
	/*
	 * Use no-overwrite mode by default, where relay will stop accepting
	 * new data if there are no empty sub buffers left.
	 * There is no strict synchronization enforced by relay between the
	 * consumer and the producer. In overwrite mode, there is a possibility
	 * of getting inconsistent/garbled data; the producer could be writing
	 * onto the same sub buffer from which the consumer is reading. This
	 * can't be avoided unless the consumer is fast enough and can always
	 * run in tandem with the producer.
	 */
	if (relay_buf_full(buf))
		return 0;

	return 1;
}

/*
 * file_create() callback. Creates relay file in debugfs.
 */
static struct dentry *create_buf_file_callback(const char *filename,
					       struct dentry *parent,
					       umode_t mode,
					       struct rchan_buf *buf,
					       int *is_global)
{
	struct dentry *buf_file;

	/*
	 * This is to enable the use of a single buffer for the relay channel
	 * and correspondingly have a single file exposed to userspace, through
	 * which it can collect the logs in order without any post-processing.
	 * Need to set 'is_global' even if parent is NULL for early logging.
	 */
	*is_global = 1;

	if (!parent)
		return NULL;

	buf_file = debugfs_create_file(filename, mode,
				       parent, buf, &relay_file_operations);
	if (IS_ERR(buf_file))
		return NULL;

	return buf_file;
}

/*
 * file_remove() default callback. Removes relay file in debugfs.
 */
static int remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

/* relay channel callbacks */
static struct rchan_callbacks relay_callbacks = {
	.subbuf_start = subbuf_start_callback,
	.create_buf_file = create_buf_file_callback,
	.remove_buf_file = remove_buf_file_callback,
};

static void guc_move_to_next_buf(struct intel_guc_log *log)
{
	/*
	 * Make sure the updates made in the sub buffer are visible when the
	 * consumer sees the following update to the offset inside the sub
	 * buffer.
	 */
	smp_wmb();

	/* All data has been written, so now move the offset of the sub buffer. */
	relay_reserve(log->relay.channel, log->vma->obj->base.size);

	/* Switch to the next sub buffer */
	relay_flush(log->relay.channel);
}

static void *guc_get_write_buffer(struct intel_guc_log *log)
{
	/*
	 * Just get the base address of a new sub buffer and copy data into it
	 * ourselves. NULL will be returned in no-overwrite mode, if all sub
	 * buffers are full. Could have used relay_write() to indirectly copy
	 * the data, but that would have been a bit convoluted, as we need to
	 * write to only certain locations inside a sub buffer, which cannot be
	 * done without using relay_reserve() along with relay_write(). So it's
	 * better to use relay_reserve() alone.
	 */
	return relay_reserve(log->relay.channel, 0);
}

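/*
 * Check whether the GuC reported a new log buffer overflow since the last
 * sample; buffer_full_cnt wraps at 16, so account for rollover as well.
 */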
static bool guc_check_log_buf_overflow(struct intel_guc_log *log,
				       enum guc_log_buffer_type type,
				       unsigned int full_cnt)
{
	unsigned int prev_full_cnt = log->stats[type].overflow;
	bool overflow = false;

	if (full_cnt != prev_full_cnt) {
		overflow = true;

		log->stats[type].overflow = full_cnt;
		log->stats[type].sampled_overflow += full_cnt - prev_full_cnt;

		if (full_cnt < prev_full_cnt) {
			/* buffer_full_cnt is a 4 bit counter */
			log->stats[type].sampled_overflow += 16;
		}

		dev_notice_ratelimited(guc_to_gt(log_to_guc(log))->i915->drm.dev,
				       "GuC log buffer overflow\n");
	}

	return overflow;
}

static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
{
	switch (type) {
	case GUC_ISR_LOG_BUFFER:
		return ISR_BUFFER_SIZE;
	case GUC_DPC_LOG_BUFFER:
		return DPC_BUFFER_SIZE;
	case GUC_CRASH_DUMP_LOG_BUFFER:
		return CRASH_BUFFER_SIZE;
	default:
		MISSING_CASE(type);
	}

	return 0;
}

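/*
 * Snapshot the shared GuC log buffer into the relay channel: copy out each
 * log section's state header and any newly written data, then advance the
 * firmware-visible read pointer.
 */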
static void guc_read_update_log_buffer(struct intel_guc_log *log)
{
	unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
	struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
	struct guc_log_buffer_state log_buf_state_local;
	enum guc_log_buffer_type type;
	void *src_data, *dst_data;
	bool new_overflow;

	mutex_lock(&log->relay.lock);

	if (WARN_ON(!intel_guc_log_relay_created(log)))
		goto out_unlock;

	/* Get the pointer to shared GuC log buffer */
	log_buf_state = src_data = log->relay.buf_addr;

	/* Get the pointer to local buffer to store the logs */
	log_buf_snapshot_state = dst_data = guc_get_write_buffer(log);

	if (unlikely(!log_buf_snapshot_state)) {
		/*
		 * Use rate limiting to avoid a deluge of messages; the logs
		 * might be getting consumed by the user at a slow rate.
		 */
		DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
		log->relay.full_count++;

		goto out_unlock;
	}

	/* Actual logs are present from the 2nd page */
	src_data += PAGE_SIZE;
	dst_data += PAGE_SIZE;

	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		/*
		 * Make a copy of the state structure, inside the GuC log
		 * buffer (which is mapped uncached), on the stack to avoid
		 * reading from it multiple times.
		 */
		memcpy(&log_buf_state_local, log_buf_state,
		       sizeof(struct guc_log_buffer_state));
		buffer_size = guc_get_log_buffer_size(type);
		read_offset = log_buf_state_local.read_ptr;
		write_offset = log_buf_state_local.sampled_write_ptr;
		full_cnt = log_buf_state_local.buffer_full_cnt;

		/* Bookkeeping stuff */
		log->stats[type].flush += log_buf_state_local.flush_to_file;
		new_overflow = guc_check_log_buf_overflow(log, type, full_cnt);

		/* Update the state of shared log buffer */
		log_buf_state->read_ptr = write_offset;
		log_buf_state->flush_to_file = 0;
		log_buf_state++;

		/* First copy the state structure in snapshot buffer */
		memcpy(log_buf_snapshot_state, &log_buf_state_local,
		       sizeof(struct guc_log_buffer_state));

		/*
		 * The write pointer could have been updated by GuC firmware
		 * after sending the flush interrupt to the host; for
		 * consistency, set the write pointer value to the same value
		 * as sampled_write_ptr in the snapshot buffer.
		 */
		log_buf_snapshot_state->write_ptr = write_offset;
		log_buf_snapshot_state++;

		/* Now copy the actual logs. */
		if (unlikely(new_overflow)) {
			/* copy the whole buffer in case of overflow */
			read_offset = 0;
			write_offset = buffer_size;
		} else if (unlikely((read_offset > buffer_size) ||
				    (write_offset > buffer_size))) {
			DRM_ERROR("invalid log buffer state\n");
			/* copy whole buffer as offsets are unreliable */
			read_offset = 0;
			write_offset = buffer_size;
		}

		/* Just copy the newly written data */
		if (read_offset > write_offset) {
			i915_memcpy_from_wc(dst_data, src_data, write_offset);
			bytes_to_copy = buffer_size - read_offset;
		} else {
			bytes_to_copy = write_offset - read_offset;
		}
		i915_memcpy_from_wc(dst_data + read_offset,
				    src_data + read_offset, bytes_to_copy);

		src_data += buffer_size;
		dst_data += buffer_size;
	}

	guc_move_to_next_buf(log);

out_unlock:
	mutex_unlock(&log->relay.lock);
}

static void capture_logs_work(struct work_struct *work)
{
	struct intel_guc_log *log =
		container_of(work, struct intel_guc_log, relay.flush_work);

	guc_log_capture_logs(log);
}

static int guc_log_map(struct intel_guc_log *log)
{
	void *vaddr;

	lockdep_assert_held(&log->relay.lock);

	if (!log->vma)
		return -ENODEV;

	/*
	 * Create a WC (Uncached for read) vmalloc mapping of log
	 * buffer pages, so that we can directly get the data
	 * (up-to-date) from memory.
	 */
	vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	log->relay.buf_addr = vaddr;

	return 0;
}

static void guc_log_unmap(struct intel_guc_log *log)
{
	lockdep_assert_held(&log->relay.lock);

	i915_gem_object_unpin_map(log->vma->obj);
	log->relay.buf_addr = NULL;
}

void intel_guc_log_init_early(struct intel_guc_log *log)
{
	mutex_init(&log->relay.lock);
	INIT_WORK(&log->relay.flush_work, capture_logs_work);
	log->relay.started = false;
}

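/*
 * Create the relay channel that backs the "guc_log" debugfs file, sized to
 * hold multiple snapshots of the shared GuC log buffer.
 */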
static int guc_log_relay_create(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	struct rchan *guc_log_relay_chan;
	size_t n_subbufs, subbuf_size;

	lockdep_assert_held(&log->relay.lock);
	GEM_BUG_ON(!log->vma);

	/* Keep the size of sub buffers the same as the shared log buffer */
	subbuf_size = log->vma->size;

	/*
	 * Store up to 8 snapshots, which is large enough to buffer sufficient
	 * boot time logs and provides enough leeway to the user, in terms of
	 * latency, for consuming the logs from relay. Also doesn't take
	 * up too much memory.
	 */
	n_subbufs = 8;

	guc_log_relay_chan = relay_open("guc_log",
					dev_priv->drm.primary->debugfs_root,
					subbuf_size, n_subbufs,
					&relay_callbacks, dev_priv);
	if (!guc_log_relay_chan) {
		DRM_ERROR("Couldn't create relay chan for GuC logging\n");
		return -ENOMEM;
	}

	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
	log->relay.channel = guc_log_relay_chan;

	return 0;
}

static void guc_log_relay_destroy(struct intel_guc_log *log)
{
	lockdep_assert_held(&log->relay.lock);

	relay_close(log->relay.channel);
	log->relay.channel = NULL;
}

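/*
 * Capture a snapshot of the GuC log into the relay channel and then tell
 * the firmware that the host is done with the flushed data.
 */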
static void guc_log_capture_logs(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	intel_wakeref_t wakeref;

	guc_read_update_log_buffer(log);

	/*
	 * Generally the device is expected to be active only at this
	 * time, so the get/put should be really quick.
	 */
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		guc_action_flush_log_complete(guc);
}

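/*
 * Resolve the boot-time GuC log level from the guc_log_level modparam:
 * negative values select a per-config default and too-high values are
 * clamped with a warning.
 */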
static u32 __get_default_log_level(struct intel_guc_log *log)
{
	/* A negative value means "use platform/config default" */
	if (i915_modparams.guc_log_level < 0) {
		return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
			IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
			GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_NON_VERBOSE;
	}

	if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) {
		DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
			 "guc_log_level", i915_modparams.guc_log_level,
			 "verbosity too high");
		return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
			IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
			GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED;
	}

	GEM_BUG_ON(i915_modparams.guc_log_level < GUC_LOG_LEVEL_DISABLED);
	GEM_BUG_ON(i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX);
	return i915_modparams.guc_log_level;
}

int intel_guc_log_create(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct i915_vma *vma;
	u32 guc_log_size;
	int ret;

	GEM_BUG_ON(log->vma);

	/*
	 *  GuC Log buffer Layout
	 *
	 *  +===============================+ 00B
	 *  |    Crash dump state header    |
	 *  +-------------------------------+ 32B
	 *  |       DPC state header        |
	 *  +-------------------------------+ 64B
	 *  |       ISR state header        |
	 *  +-------------------------------+ 96B
	 *  |                               |
	 *  +===============================+ PAGE_SIZE (4KB)
	 *  |        Crash Dump logs        |
	 *  +===============================+ + CRASH_SIZE
	 *  |           DPC logs            |
	 *  +===============================+ + DPC_SIZE
	 *  |           ISR logs            |
	 *  +===============================+ + ISR_SIZE
	 */
	guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DPC_BUFFER_SIZE +
			ISR_BUFFER_SIZE;

	vma = intel_guc_allocate_vma(guc, guc_log_size);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	log->vma = vma;

	log->level = __get_default_log_level(log);
	DRM_DEBUG_DRIVER("guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n",
			 log->level, enableddisabled(log->level),
			 yesno(GUC_LOG_LEVEL_IS_VERBOSE(log->level)),
			 GUC_LOG_LEVEL_TO_VERBOSITY(log->level));

	return 0;

err:
	DRM_ERROR("Failed to allocate GuC log buffer. %d\n", ret);
	return ret;
}

void intel_guc_log_destroy(struct intel_guc_log *log)
{
	i915_vma_unpin_and_release(&log->vma, 0);
}

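/*
 * Set the GuC log level at runtime by sending the log-control action to
 * the firmware; the new level is cached on success.
 */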
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	intel_wakeref_t wakeref;
	int ret = 0;

	BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
	GEM_BUG_ON(!log->vma);

	/*
	 * GuC recognizes log levels starting from 0 up to max; we're using 0
	 * as an indication that logging should be disabled.
	 */
	if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
		return -EINVAL;

	mutex_lock(&dev_priv->drm.struct_mutex);

	if (log->level == level)
		goto out_unlock;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		ret = guc_action_control_log(guc,
					     GUC_LOG_LEVEL_IS_VERBOSE(level),
					     GUC_LOG_LEVEL_IS_ENABLED(level),
					     GUC_LOG_LEVEL_TO_VERBOSITY(level));
	if (ret) {
		DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
		goto out_unlock;
	}

	log->level = level;

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return ret;
}

bool intel_guc_log_relay_created(const struct intel_guc_log *log)
{
	return log->relay.buf_addr;
}

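/*
 * Open the relay connection to the GuC log: create the relay channel and
 * map the shared log buffer so that snapshots can be copied out.
 */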
int intel_guc_log_relay_open(struct intel_guc_log *log)
{
	int ret;

	if (!log->vma)
		return -ENODEV;

	mutex_lock(&log->relay.lock);

	if (intel_guc_log_relay_created(log)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	/*
	 * We require SSE 4.1 for fast reads from the GuC log buffer and
	 * it should be present on the chipsets supporting GuC based
	 * submissions.
	 */
	if (!i915_has_memcpy_from_wc()) {
		ret = -ENXIO;
		goto out_unlock;
	}

	ret = guc_log_relay_create(log);
	if (ret)
		goto out_unlock;

	ret = guc_log_map(log);
	if (ret)
		goto out_relay;

	mutex_unlock(&log->relay.lock);

	return 0;

out_relay:
	guc_log_relay_destroy(log);
out_unlock:
	mutex_unlock(&log->relay.lock);

	return ret;
}

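/*
 * Start relaying: unmask the GuC flush notifications and kick off an
 * initial capture for anything logged before the relay was opened.
 */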
int intel_guc_log_relay_start(struct intel_guc_log *log)
{
	if (log->relay.started)
		return -EEXIST;

	guc_log_enable_flush_events(log);

	/*
	 * When GuC is logging without us relaying to userspace, we're ignoring
	 * the flush notification. This means that we need to unconditionally
	 * flush on relay enabling, since GuC only notifies us once.
	 */
	queue_work(system_highpri_wq, &log->relay.flush_work);

	log->relay.started = true;

	return 0;
}

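/*
 * Force an immediate flush of the GuC log buffer and capture the freshly
 * flushed data into the relay channel.
 */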
void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	intel_wakeref_t wakeref;

	if (!log->relay.started)
		return;

	/*
	 * Before initiating the forceful flush, wait for any pending/ongoing
	 * flush to complete; otherwise the forceful flush may not actually
	 * happen.
	 */
	flush_work(&log->relay.flush_work);

	with_intel_runtime_pm(guc_to_gt(guc)->uncore->rpm, wakeref)
		guc_action_flush_log(guc);

	/* GuC would have updated the log buffer by now, so capture it */
	guc_log_capture_logs(log);
}

/*
 * Stops the relay log. Called from intel_guc_log_relay_close(), so there is
 * no possibility of a race with start/flush since relay_write cannot race
 * with relay_close.
 */
static void guc_log_relay_stop(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	if (!log->relay.started)
		return;

	guc_log_disable_flush_events(log);
	intel_synchronize_irq(i915);

	flush_work(&log->relay.flush_work);

	log->relay.started = false;
}

void intel_guc_log_relay_close(struct intel_guc_log *log)
{
	guc_log_relay_stop(log);

	mutex_lock(&log->relay.lock);
	GEM_BUG_ON(!intel_guc_log_relay_created(log));
	guc_log_unmap(log);
	guc_log_relay_destroy(log);
	mutex_unlock(&log->relay.lock);
}

void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
{
	queue_work(system_highpri_wq, &log->relay.flush_work);
}