/* xref: /openbmc/linux/block/blk-sysfs.c (revision 9eda7c1f) */
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"

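/*
 * One sysfs attribute of the per-device "queue" directory.  The show/store
 * handlers operate directly on the owning request_queue and are invoked
 * with q->sysfs_lock held by queue_attr_show()/queue_attr_store() below.
 */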
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

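/*
 * Parse a decimal value from @page.  Values larger than UINT_MAX are
 * rejected, since several callers assign the result to 32-bit queue
 * limits.  Returns @count on success or -EINVAL on bad input;
 * queue_var_store64() below is the signed 64-bit variant used for
 * wbt_lat_usec.
 */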
static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
					(PAGE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

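/*
 * discard_max_bytes is written in bytes.  The value must be aligned to the
 * discard granularity (the mask check below assumes a power-of-two
 * granularity) and is silently capped at the hardware limit,
 * max_hw_discard_sectors.
 */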
static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = q->limits.max_zone_append_sectors;

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

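/*
 * max_sectors_kb is written in KiB.  It must be at least one page and no
 * larger than the smaller of max_hw_sectors_kb and the device limit
 * (max_dev_sectors); bdi->io_pages is updated to match the new cap.
 */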
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		 return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

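/*
 * A rough sketch of what one instantiation generates: for example,
 * QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) expands to roughly
 *
 *	static ssize_t queue_show_nonrot(struct request_queue *q, char *page)
 *	{
 *		return queue_var_show(!test_bit(QUEUE_FLAG_NONROT,
 *						&q->queue_flags), page);
 *	}
 *
 * plus the matching queue_store_nonrot().  With neg == 1 the polarity is
 * inverted, so the "rotational" attribute reads 0 on non-rotational
 * queues and writing 0 sets QUEUE_FLAG_NONROT.
 */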
QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_nr_zones(q), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_open_zones(q), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_active_zones(q), page);
}

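/*
 * "nomerges" encodes two flags in one value: 0 leaves merging fully
 * enabled, 1 sets QUEUE_FLAG_NOXMERGES (skip the more expensive merge
 * lookups) and 2 sets QUEUE_FLAG_NOMERGES (disable merging entirely).
 * The show side reconstructs the value as (nomerges << 1) | noxmerges.
 */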
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}

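/*
 * rq_affinity: 0 clears both completion-affinity flags, 1 sets
 * QUEUE_FLAG_SAME_COMP (complete on the submitting CPU's group) and 2
 * additionally sets QUEUE_FLAG_SAME_FORCE (force completion on the exact
 * submitting CPU).  The show side reports set << force, i.e. 2 only when
 * forcing is enabled.
 */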
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}

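/*
 * io_poll_delay: the BLK_MQ_POLL_CLASSIC sentinel selects classic
 * (non-hybrid) polling; any other non-negative value is a hybrid-poll
 * delay in microseconds, stored internally in nanoseconds (q->poll_nsec).
 */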
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
		val = BLK_MQ_POLL_CLASSIC;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == BLK_MQ_POLL_CLASSIC)
		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
	else if (val >= 0)
		q->poll_nsec = val * 1000;
	else
		return -EINVAL;

	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
	    !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	if (poll_on)
		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);

	return ret;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}

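/*
 * wbt_lat_usec is reported and accepted in microseconds.  Writing -1
 * restores the writeback-throttling default latency for the device; other
 * values are converted to nanoseconds before being handed to wbt.  The
 * store path also enables wbt via wbt_init() if it is not active yet.
 */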
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}

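/*
 * write_cache accepts the strings "write back", "write through" or "none"
 * ("none" is treated like "write through"); the store only toggles
 * QUEUE_FLAG_WC on the queue.
 */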
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = 0644 },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = 0644 },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = 0644 },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = 0444 },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = 0444 },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_discard_segments_entry = {
	.attr = {.name = "max_discard_segments", .mode = 0444 },
	.show = queue_max_discard_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = 0444 },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = 0444 },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = 0644 },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = 0444 },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_chunk_sectors_entry = {
	.attr = {.name = "chunk_sectors", .mode = 0444 },
	.show = queue_chunk_sectors_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = 0444 },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = 0444 },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = 0444 },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
	.attr = {.name = "discard_max_hw_bytes", .mode = 0444 },
	.show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = 0644 },
	.show = queue_discard_max_show,
	.store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = 0444 },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = 0444 },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
	.attr = {.name = "write_zeroes_max_bytes", .mode = 0444 },
	.show = queue_write_zeroes_max_show,
};

static struct queue_sysfs_entry queue_zone_append_max_entry = {
	.attr = {.name = "zone_append_max_bytes", .mode = 0444 },
	.show = queue_zone_append_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = 0644 },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_zoned_entry = {
	.attr = {.name = "zoned", .mode = 0444 },
	.show = queue_zoned_show,
};

static struct queue_sysfs_entry queue_nr_zones_entry = {
	.attr = {.name = "nr_zones", .mode = 0444 },
	.show = queue_nr_zones_show,
};

static struct queue_sysfs_entry queue_max_open_zones_entry = {
	.attr = {.name = "max_open_zones", .mode = 0444 },
	.show = queue_max_open_zones_show,
};

static struct queue_sysfs_entry queue_max_active_zones_entry = {
	.attr = {.name = "max_active_zones", .mode = 0444 },
	.show = queue_max_active_zones_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = 0644 },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = 0644 },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = 0644 },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = 0644 },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
	.attr = {.name = "io_poll", .mode = 0644 },
	.show = queue_poll_show,
	.store = queue_poll_store,
};

static struct queue_sysfs_entry queue_poll_delay_entry = {
	.attr = {.name = "io_poll_delay", .mode = 0644 },
	.show = queue_poll_delay_show,
	.store = queue_poll_delay_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
	.attr = {.name = "write_cache", .mode = 0644 },
	.show = queue_wc_show,
	.store = queue_wc_store,
};

static struct queue_sysfs_entry queue_fua_entry = {
	.attr = {.name = "fua", .mode = 0444 },
	.show = queue_fua_show,
};

static struct queue_sysfs_entry queue_dax_entry = {
	.attr = {.name = "dax", .mode = 0444 },
	.show = queue_dax_show,
};

static struct queue_sysfs_entry queue_io_timeout_entry = {
	.attr = {.name = "io_timeout", .mode = 0644 },
	.show = queue_io_timeout_show,
	.store = queue_io_timeout_store,
};

static struct queue_sysfs_entry queue_wb_lat_entry = {
	.attr = {.name = "wbt_lat_usec", .mode = 0644 },
	.show = queue_wb_lat_show,
	.store = queue_wb_lat_store,
};

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static struct queue_sysfs_entry throtl_sample_time_entry = {
	.attr = {.name = "throttle_sample_time", .mode = 0644 },
	.show = blk_throtl_sample_time_show,
	.store = blk_throtl_sample_time_store,
};
#endif

static struct attribute *queue_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&throtl_sample_time_entry.attr,
#endif
	NULL,
};

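/*
 * Hide attributes that make no sense for the queue at hand: io_timeout
 * when the driver provides no ->timeout handler, and the zone-related
 * limits on queues that are not zoned.
 */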
static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				int n)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	if (attr == &queue_io_timeout_entry.attr &&
		(!q->mq_ops || !q->mq_ops->timeout))
			return 0;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};


#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

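/*
 * The sysfs_ops wrappers below serialize every attribute read and write
 * against other sysfs users of the queue by taking q->sysfs_lock.
 */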
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
static void blk_exit_queue(struct request_queue *q)
{
	/*
	 * Since the I/O scheduler exit code may access cgroup information,
	 * perform I/O scheduler exit before disassociating from the block
	 * cgroup controller.
	 */
	if (q->elevator) {
		ioc_clear_queue(q);
		__elevator_exit(q, q->elevator);
		q->elevator = NULL;
	}

	/*
	 * Remove all references to @q from the block cgroup controller
	 * before restoring @q->queue_lock, so that restoring this pointer
	 * cannot cause e.g. blkcg_print_blkgs() to crash.
	 */
	blkcg_exit_queue(q);

	/*
	 * Since the cgroup code may dereference the @q->backing_dev_info
	 * pointer, only decrease its reference count after having removed the
	 * association with the block cgroup controller.
	 */
	bdi_put(q->backing_dev_info);
}

/**
 * blk_release_queue - releases all allocated resources of the request_queue
 * @kobj: pointer to a kobject, whose container is a request_queue
 *
 * This function releases all allocated resources of the request queue.
 *
 * The struct request_queue refcount is incremented with blk_get_queue() and
 * decremented with blk_put_queue(). Once the refcount reaches 0 this function
 * is called.
 *
 * For drivers that have a request_queue on a gendisk and added it with
 * __device_add_disk(), the request_queue refcount reaches 0 with the last
 * put_disk() called by the driver. For drivers that don't use
 * __device_add_disk() this happens with blk_cleanup_queue().
 *
 * Drivers exist which depend on the release of the request_queue being
 * synchronous; it should not be deferred.
 *
 * Context: can sleep
 */
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	might_sleep();

	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	blk_free_queue_stats(q->stats);

	if (queue_is_mq(q))
		cancel_delayed_work_sync(&q->requeue_work);

	blk_exit_queue(q);

	blk_queue_free_zone_bitmaps(q);

	if (queue_is_mq(q))
		blk_mq_release(q);

	blk_trace_shutdown(q);
	mutex_lock(&q->debugfs_mutex);
	debugfs_remove_recursive(q->debugfs_dir);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q))
		blk_mq_debugfs_unregister(q);

	bioset_exit(&q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_release_queue,
};

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;
	bool has_elevator = false;

	if (WARN_ON(!q))
		return -ENXIO;

	WARN_ONCE(blk_queue_registered(q),
		  "%s is registering an already registered queue\n",
		  kobject_name(&dev->kobj));

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here, since
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	mutex_lock(&q->sysfs_dir_lock);

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	ret = sysfs_create_group(&q->kobj, &queue_attr_group);
	if (ret) {
		blk_trace_remove_sysfs(dev);
		kobject_del(&q->kobj);
		kobject_put(&dev->kobj);
		goto unlock;
	}

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q)) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	mutex_lock(&q->sysfs_lock);
	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret) {
			mutex_unlock(&q->sysfs_lock);
			mutex_unlock(&q->sysfs_dir_lock);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			return ret;
		}
		has_elevator = true;
	}

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(q);
	blk_throtl_register_queue(q);

	/* Now everything is ready; send out the KOBJ_ADD uevent. */
	kobject_uevent(&q->kobj, KOBJ_ADD);
	if (has_elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);

	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_register_queue);

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));

	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		elv_unregister_queue(q);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	kobject_put(&disk_to_dev(disk)->kobj);
}