// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/blk-cgroup.h>

#include "../../block/blk.h"

#include <trace/events/block.h>

#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

static LIST_HEAD(running_trace_list);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);

/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1
#define TRACE_BLK_OPT_CGROUP	0x2
#define TRACE_BLK_OPT_CGNAME	0x4

static struct tracer_opt blk_tracer_opts[] = {
	/* The minimalistic output is disabled by default */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
#ifdef CONFIG_BLK_CGROUP
	{ TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
	{ TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
#endif
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};

/* Global reference count of probes */
static DEFINE_MUTEX(blk_probe_mutex);
static int blk_probes_ref;

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len, u64 cgid)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	struct trace_buffer *buffer = NULL;
	int pc = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;

	if (blk_tracer) {
		buffer = blk_tr->array_buffer.buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + len + cgid_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->device = bt->dev;
		t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len + cgid_len;
		if (cgid_len)
			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
		memcpy((void *) t + sizeof(*t) + cgid_len, data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
	}
}
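
/*
 * For reference, both trace_note() and __blk_add_trace() below emit one
 * variable-length record with this layout (a sketch inferred from the
 * memcpy() offsets above, not a separate on-disk spec):
 *
 *	+---------------------+----------------------+-------------------+
 *	| struct blk_io_trace | cgid (u64, optional) | payload (pdu_len) |
 *	+---------------------+----------------------+-------------------+
 *
 * t->pdu_len covers the optional cgid plus the payload, which is how the
 * parsing side (pdu_start()/pdu_real_len() further down) finds its data.
 */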

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct task_struct *tsk)
{
	unsigned long flags;
	struct blk_trace *bt;

	tsk->btrace_seq = blktrace_seq;
	spin_lock_irqsave(&running_trace_lock, flags);
	list_for_each_entry(bt, &running_trace_list, running_list) {
		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
			   sizeof(tsk->comm), 0);
	}
	spin_unlock_irqrestore(&running_trace_lock, flags);
}

static void trace_note_time(struct blk_trace *bt)
{
	struct timespec64 now;
	unsigned long flags;
	u32 words[2];

	/* need to check user space to see if this breaks in y2038 or y2106 */
	ktime_get_real_ts64(&now);
	words[0] = (u32)now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0);
	local_irq_restore(flags);
}

void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg,
	const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	/*
	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
	 * message to the trace.
	 */
	if (!(bt->act_mask & BLK_TC_NOTIFY))
		return;

	local_irq_save(flags);
	buf = this_cpu_ptr(bt->msg_data);
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		blkcg = NULL;
#ifdef CONFIG_BLK_CGROUP
	trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n,
		   blkcg ? cgroup_id(blkcg->css.cgroup) : 1);
#else
	trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n, 0);
#endif
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);

static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}

/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

#define BLK_TC_RAHEAD		BLK_TC_AHEAD
#define BLK_TC_PREFLUSH		BLK_TC_FLUSH

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
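
/*
 * A worked example of the shift arithmetic above, in symbolic terms:
 * REQ_SYNC is the single bit (1 << __REQ_SYNC), and the matching category
 * bit inside the action word sits at bit ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT.
 * Shifting the request flag left by
 *
 *	ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT - __REQ_SYNC
 *
 * moves bit __REQ_SYNC exactly onto that position, so MASK_TC_BIT(rw, SYNC)
 * evaluates to either 0 or BLK_TC_ACT(BLK_TC_SYNC) without any conditionals,
 * and the ilog2() is folded away at compile time.
 */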

/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
		     int op, int op_flags, u32 what, int error, int pdu_len,
		     void *pdu_data, u64 cgid)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct trace_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[op_is_write(op) ? WRITE : READ];
	what |= MASK_TC_BIT(op_flags, SYNC);
	what |= MASK_TC_BIT(op_flags, RAHEAD);
	what |= MASK_TC_BIT(op_flags, META);
	what |= MASK_TC_BIT(op_flags, PREFLUSH);
	what |= MASK_TC_BIT(op_flags, FUA);
	if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
		what |= BLK_TC_ACT(BLK_TC_DISCARD);
	if (op == REQ_OP_FLUSH)
		what |= BLK_TC_ACT(BLK_TC_FLUSH);
	if (cgid)
		what |= __BLK_TA_CGROUP;

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->array_buffer.buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len + cgid_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(tsk);

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);
	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len + cgid_len;

		if (cgid_len)
			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
		if (pdu_len)
			memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}

static void blk_trace_free(struct blk_trace *bt)
{
	debugfs_remove(bt->msg_file);
	debugfs_remove(bt->dropped_file);
	relay_close(bt->rchan);
	debugfs_remove(bt->dir);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}

static void get_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (++blk_probes_ref == 1)
		blk_register_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static void put_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (!--blk_probes_ref)
		blk_unregister_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static void blk_trace_cleanup(struct blk_trace *bt)
{
	synchronize_rcu();
	blk_trace_free(bt);
	put_probe_ref();
}
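
/*
 * The synchronize_rcu() above pairs with the rcu_read_lock() sections in
 * the blk_add_trace_*() probes below: once the q->blk_trace pointer has
 * been replaced with NULL, waiting out a grace period guarantees that no
 * probe still holds a reference to the old bt before it is freed.
 */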

static int __blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = rcu_replace_pointer(q->blk_trace, NULL,
				 lockdep_is_held(&q->debugfs_mutex));
	if (!bt)
		return -EINVAL;

	if (bt->trace_state != Blktrace_running)
		blk_trace_cleanup(bt);

	return 0;
}

int blk_trace_remove(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_remove(q);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.read =		blk_dropped_read,
	.llseek =	default_llseek,
};

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
				size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = memdup_user_nul(buffer, count);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	bt = filp->private_data;
	__trace_note_message(bt, NULL, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.write =	blk_msg_write,
	.llseek =	noop_llseek,
};

/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}
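
/*
 * Note on the relay contract: returning 1 from subbuf_start lets relay
 * switch to the next subbuffer and keep logging, while returning 0 makes
 * the relay_reserve() calls above fail, i.e. a full channel silently
 * drops events. The "dropped" debugfs file created below is how user
 * space (e.g. blktrace(8)) learns that this happened.
 */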

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);

	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   umode_t mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
					&relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};

static void blk_trace_setup_lba(struct blk_trace *bt,
				struct block_device *bdev)
{
	struct hd_struct *part = NULL;

	if (bdev)
		part = bdev->bd_part;

	if (part) {
		bt->start_lba = part->start_sect;
		bt->end_lba = part->start_sect + part->nr_sects;
	} else {
		bt->start_lba = 0;
		bt->end_lba = -1ULL;
	}
}

/*
 * Setup everything required to start tracing
 */
static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			      struct block_device *bdev,
			      struct blk_user_trace_setup *buts)
{
	struct blk_trace *bt = NULL;
	struct dentry *dir = NULL;
	int ret;

	lockdep_assert_held(&q->debugfs_mutex);

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	strreplace(buts->name, '/', '_');

	/*
	 * bdev can be NULL, as with scsi-generic. In that case this is as
	 * helpful as we can be.
	 */
	if (rcu_dereference_protected(q->blk_trace,
				      lockdep_is_held(&q->debugfs_mutex))) {
		pr_warn("Concurrent blktraces are not allowed on %s\n",
			buts->name);
		return -EBUSY;
	}

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	/*
	 * When tracing the whole disk, reuse the existing debugfs directory
	 * created by the block layer on init. For partition block devices
	 * and scsi-generic block devices we create a temporary new debugfs
	 * directory that will be removed once the trace ends.
	 */
	if (bdev && bdev == bdev->bd_contains)
		dir = q->debugfs_dir;
	else
		bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);

	/*
	 * As blktrace relies on debugfs for its interface the debugfs directory
	 * is required, contrary to the usual mantra of not checking for debugfs
	 * files or directories.
	 */
	if (IS_ERR_OR_NULL(dir)) {
		pr_warn("debugfs_dir not present for %s so skipping\n",
			buts->name);
		ret = -ENOENT;
		goto err;
	}

	bt->dev = dev;
	atomic_set(&bt->dropped, 0);
	INIT_LIST_HEAD(&bt->running_list);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);

	bt->rchan = relay_open("trace", dir, buts->buf_size,
				buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	blk_trace_setup_lba(bt, bdev);

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	rcu_assign_pointer(q->blk_trace, bt);
	get_probe_ref();

	ret = 0;
err:
	if (ret)
		blk_trace_free(bt);
	return ret;
}

static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			     struct block_device *bdev, char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts))) {
		__blk_trace_remove(q);
		return -EFAULT;
	}
	return 0;
}

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    struct block_device *bdev,
		    char __user *arg)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_setup(q, name, dev, bdev, arg);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);

#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
static int compat_blk_trace_setup(struct request_queue *q, char *name,
				  dev_t dev, struct block_device *bdev,
				  char __user *arg)
{
	struct blk_user_trace_setup buts;
	struct compat_blk_user_trace_setup cbuts;
	int ret;

	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
		return -EFAULT;

	buts = (struct blk_user_trace_setup) {
		.act_mask = cbuts.act_mask,
		.buf_size = cbuts.buf_size,
		.buf_nr = cbuts.buf_nr,
		.start_lba = cbuts.start_lba,
		.end_lba = cbuts.end_lba,
		.pid = cbuts.pid,
	};

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
		__blk_trace_remove(q);
		return -EFAULT;
	}

	return 0;
}
#endif

static int __blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;
	struct blk_trace *bt;

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (bt == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;
			spin_lock_irq(&running_trace_lock);
			list_add(&bt->running_list, &running_trace_list);
			spin_unlock_irq(&running_trace_lock);

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			spin_lock_irq(&running_trace_lock);
			list_del_init(&bt->running_list);
			spin_unlock_irq(&running_trace_lock);
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}

int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_startstop(q, start);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/*
 * When reading or writing the blktrace sysfs files, the references to the
 * opened sysfs or device files should prevent the underlying block device
 * from being removed. So no further delete protection is really needed.
 */

/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&q->debugfs_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
	case BLKTRACESETUP32:
		bdevname(bdev, b);
		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#endif
	case BLKTRACESTART:
		start = 1;
		/* fall through */
	case BLKTRACESTOP:
		ret = __blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = __blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&q->debugfs_mutex);
	return ret;
}
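
/*
 * For orientation, the expected user-space sequence (roughly what
 * blktrace(8) does; a sketch, not a verbatim copy of its source):
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,	// bytes per relay subbuffer
 *		.buf_nr   = 4,		// number of subbuffers
 *	};
 *	int fd = open("/dev/sda", O_RDONLY);
 *	ioctl(fd, BLKTRACESETUP, &buts);   // buts.name comes back filled in
 *	ioctl(fd, BLKTRACESTART);
 *	// read per-cpu records from debugfs: block/<buts.name>/trace<cpu>
 *	ioctl(fd, BLKTRACESTOP);
 *	ioctl(fd, BLKTRACETEARDOWN);
 */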

/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	mutex_lock(&q->debugfs_mutex);
	if (rcu_dereference_protected(q->blk_trace,
				      lockdep_is_held(&q->debugfs_mutex))) {
		__blk_trace_startstop(q, 0);
		__blk_trace_remove(q);
	}

	mutex_unlock(&q->debugfs_mutex);
}

#ifdef CONFIG_BLK_CGROUP
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	struct blk_trace *bt;

	/* We don't use the 'bt' value here except as an optimization... */
	bt = rcu_dereference_protected(q->blk_trace, 1);
	if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		return 0;

	if (!bio->bi_blkg)
		return 0;
	return cgroup_id(bio_blkcg(bio)->css.cgroup);
}
#else
u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	return 0;
}
#endif

static u64
blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
{
	if (!rq->bio)
		return 0;
	/* Use the first bio */
	return blk_trace_bio_get_cgid(q, rq->bio);
}

/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @rq:		the source request
 * @error:	return status to log
 * @nr_bytes:	number of completed bytes
 * @what:	the action
 * @cgid:	the cgroup info
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request *rq, int error,
			     unsigned int nr_bytes, u32 what, u64 cgid)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	if (blk_rq_is_passthrough(rq))
		what |= BLK_TC_ACT(BLK_TC_PC);
	else
		what |= BLK_TC_ACT(BLK_TC_FS);

	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
			rq->cmd_flags, what, error, 0, NULL, cgid);
	rcu_read_unlock();
}

static void blk_add_trace_rq_insert(void *ignore,
				    struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
			 blk_trace_request_get_cgid(q, rq));
}

static void blk_add_trace_rq_issue(void *ignore,
				   struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
			 blk_trace_request_get_cgid(q, rq));
}

static void blk_add_trace_rq_merge(void *ignore,
				   struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
			 blk_trace_request_get_cgid(q, rq));
}

static void blk_add_trace_rq_requeue(void *ignore,
				     struct request_queue *q,
				     struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
			 blk_trace_request_get_cgid(q, rq));
}

static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
			int error, unsigned int nr_bytes)
{
	blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
			 blk_trace_request_get_cgid(rq->q, rq));
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 * @error:	error, if any
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what, int error)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio_op(bio), bio->bi_opf, what, error, 0, NULL,
			blk_trace_bio_get_cgid(q, bio));
	rcu_read_unlock();
}

static void blk_add_trace_bio_bounce(void *ignore,
				     struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
}

static void blk_add_trace_bio_complete(void *ignore,
				       struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE,
			  blk_status_to_errno(bio->bi_status));
}

static void blk_add_trace_bio_backmerge(void *ignore,
					struct request_queue *q,
					struct request *rq,
					struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
}

static void blk_add_trace_bio_frontmerge(void *ignore,
					 struct request_queue *q,
					 struct request *rq,
					 struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
}

static void blk_add_trace_bio_queue(void *ignore,
				    struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
}

static void blk_add_trace_getrq(void *ignore,
				struct request_queue *q,
				struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
	else {
		struct blk_trace *bt;

		rcu_read_lock();
		bt = rcu_dereference(q->blk_trace);
		if (bt)
			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
					NULL, 0);
		rcu_read_unlock();
	}
}


static void blk_add_trace_sleeprq(void *ignore,
				  struct request_queue *q,
				  struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
	else {
		struct blk_trace *bt;

		rcu_read_lock();
		bt = rcu_dereference(q->blk_trace);
		if (bt)
			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
					0, 0, NULL, 0);
		rcu_read_unlock();
	}
}

static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt)
		__blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
	rcu_read_unlock();
}

static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
				    unsigned int depth, bool explicit)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt) {
		__be64 rpdu = cpu_to_be64(depth);
		u32 what;

		if (explicit)
			what = BLK_TA_UNPLUG_IO;
		else
			what = BLK_TA_UNPLUG_TIMER;

		__blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
	}
	rcu_read_unlock();
}

static void blk_add_trace_split(void *ignore,
				struct request_queue *q, struct bio *bio,
				unsigned int pdu)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_iter.bi_sector,
				bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
				BLK_TA_SPLIT,
				blk_status_to_errno(bio->bi_status),
				sizeof(rpdu), &rpdu,
				blk_trace_bio_get_cgid(q, bio));
	}
	rcu_read_unlock();
}

/**
 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     A device-mapper or raid target sometimes needs to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_bio_remap(void *ignore,
				    struct request_queue *q, struct bio *bio,
				    dev_t dev, sector_t from)
{
	struct blk_trace *bt;
	struct blk_io_trace_remap r;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(bio_dev(bio));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio_op(bio), bio->bi_opf, BLK_TA_REMAP,
			blk_status_to_errno(bio->bi_status),
			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
	rcu_read_unlock();
}

/**
 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @q:		queue the io is for
 * @rq:		the source request
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper remaps requests to other devices.
 *     Add a trace for that action.
 *
 **/
static void blk_add_trace_rq_remap(void *ignore,
				   struct request_queue *q,
				   struct request *rq, dev_t dev,
				   sector_t from)
{
	struct blk_trace *bt;
	struct blk_io_trace_remap r;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
			rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
			sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
	rcu_read_unlock();
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:		queue the io is for
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
			 struct request *rq,
			 void *data, size_t len)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
				BLK_TA_DRV_DATA, 0, len, data,
				blk_trace_request_get_cgid(q, rq));
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);

static void blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	WARN_ON(ret);
}

static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	unregister_trace_block_split(blk_add_trace_split, NULL);
	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
	unregister_trace_block_plug(blk_add_trace_plug, NULL);
	unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);

	tracepoint_synchronize_unregister();
}

/*
 * struct blk_io_tracer formatting routines
 */

static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		rwbs[i++] = 'N';
		goto out;
	}

	if (tc & BLK_TC_FLUSH)
		rwbs[i++] = 'F';

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_FUA)
		rwbs[i++] = 'F';
	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';
out:
	rwbs[i] = '\0';
}
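
/*
 * Example: a synchronous write with FUA set carries BLK_TC_WRITE,
 * BLK_TC_FUA and BLK_TC_SYNC in its classification bits, so the loop
 * above yields "WFS"; a readahead read yields "RA". The 'N' case covers
 * events with neither a direction nor data, such as plug/unplug.
 */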

static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
{
	return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0);
}

static inline u64 t_cgid(const struct trace_entry *ent)
{
	return *(u64 *)(te_blk_io_trace(ent) + 1);
}

static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
{
	return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0);
}

static inline u32 t_action(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
{
	const __be64 *val = pdu_start(ent, has_cg);
	return be64_to_cpu(*val);
}

typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
	bool has_cg);

static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
	bool has_cg)
{
	char rwbs[RWBS_LEN];
	unsigned long long ts  = iter->ts;
	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
	unsigned secs	       = (unsigned long)ts;
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);

	trace_seq_printf(&iter->seq,
			 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
			 MAJOR(t->device), MINOR(t->device), iter->cpu,
			 secs, nsec_rem, iter->ent->pid, act, rwbs);
}
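
/*
 * Put together with a payload formatter such as blk_log_generic() below,
 * the classic header above produces lines of the familiar blkparse-style
 * shape (illustrative values, not captured output):
 *
 *	  8,0    1     0.000123456  1234  Q   R 2048 + 8 [fio]
 *
 * i.e. major,minor, cpu, seconds.nanoseconds, pid, one-letter action,
 * rwbs flags, then sector + sectors [command].
 */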

static void blk_log_action(struct trace_iterator *iter, const char *act,
	bool has_cg)
{
	char rwbs[RWBS_LEN];
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	if (has_cg) {
		u64 id = t_cgid(iter->ent);

		if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
			char blkcg_name_buf[NAME_MAX + 1] = "<...>";

			cgroup_path_from_kernfs_id(id, blkcg_name_buf,
				sizeof(blkcg_name_buf));
			trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
				 MAJOR(t->device), MINOR(t->device),
				 blkcg_name_buf, act, rwbs);
		} else {
			/*
			 * The cgid portion used to be "INO,GEN".  Userland
			 * builds a FILEID_INO32_GEN fid out of them and
			 * opens the cgroup using open_by_handle_at(2).
			 * While 32bit ino setups are still the same, 64bit
			 * ones now use the 64bit ino as the whole ID and
			 * no longer use generation.
			 *
			 * Regardless of the content, always output
			 * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
			 * be mapped back to @id on both 64 and 32bit ino
			 * setups.  See __kernfs_fh_to_dentry().
			 */
			trace_seq_printf(&iter->seq,
				 "%3d,%-3d %llx,%-llx %2s %3s ",
				 MAJOR(t->device), MINOR(t->device),
				 id & U32_MAX, id >> 32, act, rwbs);
		}
	} else
		trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
				 MAJOR(t->device), MINOR(t->device), act, rwbs);
}

static void blk_log_dump_pdu(struct trace_seq *s,
	const struct trace_entry *ent, bool has_cg)
{
	const unsigned char *pdu_buf;
	int pdu_len;
	int i, end;

	pdu_buf = pdu_start(ent, has_cg);
	pdu_len = pdu_real_len(ent, has_cg);

	if (!pdu_len)
		return;

	/* find the last zero that needs to be printed */
	for (end = pdu_len - 1; end >= 0; end--)
		if (pdu_buf[end])
			break;
	end++;

	trace_seq_putc(s, '(');

	for (i = 0; i < pdu_len; i++) {

		trace_seq_printf(s, "%s%02x",
				 i == 0 ? "" : " ", pdu_buf[i]);

		/*
		 * stop when the rest is just zeroes and indicate so
		 * with a ".." appended
		 */
		if (i == end && end != pdu_len - 1) {
			trace_seq_puts(s, " ..) ");
			return;
		}
	}

	trace_seq_puts(s, ") ");
}

static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		trace_seq_printf(s, "%u ", t_bytes(ent));
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%s]\n", cmd);
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%s]\n",
						t_sector(ent), t_sec(ent), cmd);
		else
			trace_seq_printf(s, "[%s]\n", cmd);
	}
}

static void blk_log_with_error(struct trace_seq *s,
			      const struct trace_entry *ent, bool has_cg)
{
	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%d]\n", t_error(ent));
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%d]\n",
					 t_sector(ent),
					 t_sec(ent), t_error(ent));
		else
			trace_seq_printf(s, "%llu [%d]\n",
					 t_sector(ent), t_error(ent));
	}
}

static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);

	trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
			 t_sector(ent), t_sec(ent),
			 MAJOR(be32_to_cpu(__r->device_from)),
			 MINOR(be32_to_cpu(__r->device_from)),
			 be64_to_cpu(__r->sector_from));
}

static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s]\n", cmd);
}

static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
}

static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
			 get_pdu_int(ent, has_cg), cmd);
}

static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
			bool has_cg)
{

	trace_seq_putmem(s, pdu_start(ent, has_cg),
		pdu_real_len(ent, has_cg));
	trace_seq_putc(s, '\n');
}

/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
	blk_tracer_stop(tr);
}

static const struct {
	const char *act[2];
	void	   (*print)(struct trace_seq *s, const struct trace_entry *ent,
			    bool has_cg);
} what2act[] = {
	[__BLK_TA_QUEUE]	= {{  "Q", "queue" },	   blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{  "M", "backmerge" },  blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{  "F", "frontmerge" }, blk_log_generic },
	[__BLK_TA_GETRQ]	= {{  "G", "getrq" },	   blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{  "S", "sleeprq" },	   blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{  "R", "requeue" },	   blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{  "D", "issue" },	   blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{  "C", "complete" },   blk_log_with_error },
	[__BLK_TA_PLUG]		= {{  "P", "plug" },	   blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{  "U", "unplug_io" },  blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{  "I", "insert" },	   blk_log_generic },
	[__BLK_TA_SPLIT]	= {{  "X", "split" },	   blk_log_split },
	[__BLK_TA_BOUNCE]	= {{  "B", "bounce" },	   blk_log_generic },
	[__BLK_TA_REMAP]	= {{  "A", "remap" },	   blk_log_remap },
};
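
/*
 * The table is indexed by the low action bits only: print_one_line() below
 * masks off the BLK_TC classification bits (everything at or above
 * BLK_TC_SHIFT) and the __BLK_TA_CGROUP flag before looking up an entry,
 * and act[0]/act[1] select the terse or verbose name depending on the
 * TRACE_ITER_VERBOSE flag.
 */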

static enum print_line_t print_one_line(struct trace_iterator *iter,
					bool classic)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t;
	u16 what;
	bool long_act;
	blk_log_action_t *log_action;
	bool has_cg;

	t	   = te_blk_io_trace(iter->ent);
	what	   = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
	long_act   = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
	log_action = classic ? &blk_log_action_classic : &blk_log_action;
	has_cg	   = t->action & __BLK_TA_CGROUP;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		log_action(iter, long_act ? "message" : "m", has_cg);
		blk_log_msg(s, iter->ent, has_cg);
		return trace_handle_return(s);
	}

	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		trace_seq_printf(s, "Unknown action %x\n", what);
	else {
		log_action(iter, what2act[what].act[long_act], has_cg);
		what2act[what].print(s, iter->ent, has_cg);
	}

	return trace_handle_return(s);
}

static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
					       int flags, struct trace_event *event)
{
	return print_one_line(iter, false);
}

static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = iter->ts,
	};

	trace_seq_putmem(s, &old, offset);
	trace_seq_putmem(s, &t->sector,
			 sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
			     struct trace_event *event)
{
	blk_trace_synthesize_old_trace(iter);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}

static int
blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
			tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
	.set_flag	= blk_tracer_set_flag,
};

static struct trace_event_functions trace_blk_event_funcs = {
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.funcs		= &trace_blk_event_funcs,
};

static int __init init_blk_tracer(void)
{
	if (!register_trace_event(&trace_blk_event)) {
		pr_warn("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warn("Warning: could not register the block tracer\n");
		unregister_trace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);

static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = rcu_replace_pointer(q->blk_trace, NULL,
				 lockdep_is_held(&q->debugfs_mutex));
	if (bt == NULL)
		return -EINVAL;

	put_probe_ref();
	synchronize_rcu();
	blk_trace_free(bt);
	return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	rcu_assign_pointer(q->blk_trace, bt);
	get_probe_ref();
	return 0;

free_bt:
	blk_trace_free(bt);
	return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};

static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_FLUSH,		"flush"		},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_NOTIFY,	"notify"	},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
	{ BLK_TC_FUA,		"fua"		},
};

static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}
		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}

static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				    (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}
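
/*
 * Usage sketch for the sysfs side (paths assume a partition sda1; the
 * attribute group below is named "trace"):
 *
 *	# echo read,write,sync > /sys/block/sda/sda1/trace/act_mask
 *	# echo 1 > /sys/block/sda/sda1/trace/enable
 *	...
 *	# echo 0 > /sys/block/sda/sda1/trace/enable
 *
 * act_mask also accepts a numeric value; blk_trace_str2mask() is only the
 * fallback for the comma-separated category names above.
 */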

static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
	if (bdev->bd_disk == NULL)
		return NULL;

	return bdev_get_queue(bdev);
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q;
	struct block_device *bdev;
	struct blk_trace *bt;
	ssize_t ret = -ENXIO;

	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&q->debugfs_mutex);

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!bt);
		goto out_unlock_bdev;
	}

	if (bt == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, bt->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", bt->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", bt->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", bt->end_lba);

out_unlock_bdev:
	mutex_unlock(&q->debugfs_mutex);
out_bdput:
	bdput(bdev);
out:
	return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	struct blk_trace *bt;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (kstrtoull(buf, 0, &value)) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else if (kstrtoull(buf, 0, &value))
		goto out;

	ret = -ENXIO;

	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&q->debugfs_mutex);

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (attr == &dev_attr_enable) {
		if (!!value == !!bt) {
			ret = 0;
			goto out_unlock_bdev;
		}
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (bt == NULL) {
		ret = blk_trace_setup_queue(q, bdev);
		bt = rcu_dereference_protected(q->blk_trace,
				lockdep_is_held(&q->debugfs_mutex));
	}

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			bt->act_mask = value;
		else if (attr == &dev_attr_pid)
			bt->pid = value;
		else if (attr == &dev_attr_start_lba)
			bt->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			bt->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&q->debugfs_mutex);
out_bdput:
	bdput(bdev);
out:
	return ret ? ret : count;
}

int blk_trace_init_sysfs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}

void blk_trace_remove_sysfs(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
}

#endif /* CONFIG_BLK_DEV_IO_TRACE */

#ifdef CONFIG_EVENT_TRACING

void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)
{
	int i = 0;

	if (op & REQ_PREFLUSH)
		rwbs[i++] = 'F';

	switch (op & REQ_OP_MASK) {
	case REQ_OP_WRITE:
	case REQ_OP_WRITE_SAME:
		rwbs[i++] = 'W';
		break;
	case REQ_OP_DISCARD:
		rwbs[i++] = 'D';
		break;
	case REQ_OP_SECURE_ERASE:
		rwbs[i++] = 'D';
		rwbs[i++] = 'E';
		break;
	case REQ_OP_FLUSH:
		rwbs[i++] = 'F';
		break;
	case REQ_OP_READ:
		rwbs[i++] = 'R';
		break;
	default:
		rwbs[i++] = 'N';
	}

	if (op & REQ_FUA)
		rwbs[i++] = 'F';
	if (op & REQ_RAHEAD)
		rwbs[i++] = 'A';
	if (op & REQ_SYNC)
		rwbs[i++] = 'S';
	if (op & REQ_META)
		rwbs[i++] = 'M';

	rwbs[i] = '\0';
}
EXPORT_SYMBOL_GPL(blk_fill_rwbs);
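
/*
 * Example: blk_fill_rwbs(rwbs, REQ_OP_WRITE | REQ_SYNC | REQ_FUA, 4096)
 * fills rwbs with "WFS", and REQ_OP_SECURE_ERASE yields "DE". Unlike
 * fill_rwbs() above, this variant decodes live request flags rather than
 * the BLK_TC classification bits stored in a blk_io_trace record.
 */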

#endif /* CONFIG_EVENT_TRACING */