// SPDX-License-Identifier: GPL-2.0
/*
 * block/blk-sysfs.c - functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

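/*
 * Helpers shared by most of the attributes below: queue_var_show() prints
 * a single unsigned long followed by a newline, and queue_var_store()
 * parses a base-10 value, rejecting anything above UINT_MAX since most of
 * the queue limits it feeds are 32-bit.  On success it returns @count, so
 * callers can return its result directly from their ->store() handler.
 */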
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

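/*
 * nr_requests: the request tag depth of the queue.  Writes below
 * BLKDEV_MIN_RQ are silently raised to that floor, and the actual resize
 * is delegated to blk_mq_update_nr_requests().
 *
 * Example (for a hypothetical disk "sda"):
 *	echo 256 > /sys/block/sda/queue/nr_requests
 */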
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb;

	if (!q->disk)
		return -EINVAL;
	ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
	return queue_var_show(ra_kb, page);
}

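/*
 * read_ahead_kb is stored internally as a page count.  With 4 KiB pages,
 * PAGE_SHIFT - 10 == 2, so e.g. 128 KiB of readahead is kept as 32 pages:
 * the show path above shifts left to convert pages to KiB, and the store
 * path below shifts right to get back to pages.
 */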
static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;

	if (!q->disk)
		return -EINVAL;
	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

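/*
 * discard_max_bytes is writable so discards can be throttled or, by
 * writing 0, effectively turned off.  The byte value must be aligned to
 * discard_granularity (the bitwise check below assumes a power-of-two
 * granularity) and is silently capped at the hardware limit exposed via
 * discard_max_hw_bytes.
 *
 * Example (for a hypothetical disk "sda"):
 *	echo 0 > /sys/block/sda/queue/discard_max_bytes
 */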
static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
						 char *page)
{
	return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = q->limits.max_zone_append_sectors;

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

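/*
 * max_sectors_kb limits the size of filesystem I/O.  Writes must fall
 * between the page size and the smaller of the hardware and device limits
 * (max_hw_sectors_kb); anything outside that window fails with -EINVAL.
 *
 * Example (for a hypothetical disk "sda"):
 *	cat /sys/block/sda/queue/max_hw_sectors_kb
 *	echo 512 > /sys/block/sda/queue/max_sectors_kb
 */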
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	if (q->disk)
		q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.virt_boundary_mask, page);
}

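/*
 * QUEUE_SYSFS_BIT_FNS() stamps out a show/store pair for a boolean queue
 * flag.  When @neg is set the sysfs value is the inverse of the flag,
 * which is how "rotational" below reads as 1 on devices that do *not*
 * have QUEUE_FLAG_NONROT set.
 */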
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_##name##_show(struct request_queue *q, char *page)		\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS

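/*
 * Zone reporting: "zoned" is one of "none", "host-aware" or
 * "host-managed"; the max_open_zones and max_active_zones limits are
 * hidden on non-zoned devices (see queue_attr_visible()).
 *
 * Example (for a hypothetical host-managed SMR disk "sdb"):
 *	cat /sys/block/sdb/queue/zoned
 *	cat /sys/block/sdb/queue/nr_zones
 */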
static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_nr_zones(q), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_open_zones(q), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_active_zones(q), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

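/*
 * nomerges takes three values: 0 re-enables all merging, 1 disables only
 * the more expensive merge lookups (QUEUE_FLAG_NOXMERGES), and 2 disables
 * merging altogether (QUEUE_FLAG_NOMERGES).  The show path above
 * reconstructs the same 0/1/2 encoding from the two flags.
 */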
static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}

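/*
 * rq_affinity: 0 completes requests wherever the interrupt lands, 1
 * (QUEUE_FLAG_SAME_COMP) steers completions to the submitting CPU's
 * group, and 2 (QUEUE_FLAG_SAME_FORCE) forces completion on the exact
 * submitting CPU.  "set << force" below folds the two flags back into
 * that 0/1/2 encoding: 1 << 1 == 2, 1 << 0 == 1, 0 << x == 0.
 */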
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}

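/*
 * io_poll_delay controls hybrid polling: -1 (BLK_MQ_POLL_CLASSIC) selects
 * classic busy polling, while a value >= 0 is a hybrid-poll delay in
 * microseconds (stored internally in nanoseconds, hence the * 1000 and
 * / 1000 conversions below).
 */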
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
		val = BLK_MQ_POLL_CLASSIC;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == BLK_MQ_POLL_CLASSIC)
		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
	else if (val >= 0)
		q->poll_nsec = val * 1000;
	else
		return -EINVAL;

	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return -EINVAL;
	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
	return count;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}

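/*
 * wbt_lat_usec is the writeback-throttling latency target in
 * microseconds.  Writing -1 restores the device-type default
 * (wbt_default_latency_nsec()), and 0 effectively disables throttling.
 * The store path freezes and quiesces the queue because changing the
 * target can enable or disable wbt outright, which must not race with
 * in-flight I/O.
 */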
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}

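/*
 * write_cache reports and toggles the kernel's view of the device write
 * cache (QUEUE_FLAG_WC): "write back" means a volatile cache is assumed
 * and flushes are issued, "write through" means they are not.  Note that
 * the strncmp() matching below only compares the leading characters of
 * what is written.
 *
 * Example (for a hypothetical disk "sda"):
 *	echo "write through" > /sys/block/sda/queue/write_cache
 */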
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};

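/*
 * The tables below bind each handler to its file name under
 * /sys/block/<disk>/queue/: QUEUE_RO_ENTRY() creates a read-only (0444)
 * attribute, QUEUE_RW_ENTRY() a writable (0644) one.
 */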
QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

static struct attribute *queue_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&elv_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&blk_throtl_sample_time_entry.attr,
#endif
	&queue_virt_boundary_mask_entry.attr,
	NULL,
};

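/*
 * Attributes that make no sense for a given queue are hidden rather than
 * returning errors: io_timeout requires a driver ->timeout() handler, and
 * the open/active zone limits only apply to zoned devices.
 */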
static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				int n)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	if (attr == &queue_io_timeout_entry.attr &&
		(!q->mq_ops || !q->mq_ops->timeout))
			return 0;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

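/*
 * All ->show() and ->store() callbacks above run under q->sysfs_lock,
 * taken here, so the individual handlers do not need to serialize against
 * one another.
 */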
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

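/*
 * The request_queue itself is freed only after an RCU grace period (see
 * the call_rcu() in blk_release_queue() below), so code that looked up
 * the queue under rcu_read_lock() can still safely dereference it.
 */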
static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);

	kmem_cache_free(blk_get_queue_kmem_cache(blk_queue_has_srcu(q)), q);
}

/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
static void blk_exit_queue(struct request_queue *q)
{
	/*
	 * Since the I/O scheduler exit code may access cgroup information,
	 * perform I/O scheduler exit before disassociating from the block
	 * cgroup controller.
	 */
	if (q->elevator) {
		ioc_clear_queue(q);
		elevator_exit(q);
	}

	/*
	 * Remove all references to @q from the block cgroup controller before
	 * restoring @q->queue_lock to avoid that restoring this pointer causes
	 * e.g. blkcg_print_blkgs() to crash.
	 */
	blkcg_exit_queue(q);
}

/**
 * blk_release_queue - releases all allocated resources of the request_queue
 * @kobj: pointer to a kobject, whose container is a request_queue
 *
 * This function releases all allocated resources of the request queue.
 *
 * The struct request_queue refcount is incremented with blk_get_queue() and
 * decremented with blk_put_queue(). Once the refcount reaches 0 this function
 * is called.
 *
 * For drivers that have a request_queue on a gendisk that was added with
 * __device_add_disk(), the refcount of the request_queue reaches 0 with the
 * last put_disk() called by the driver. For drivers which don't use
 * __device_add_disk() this happens with blk_cleanup_queue().
 *
 * Drivers exist which depend on the release of the request_queue being
 * synchronous; it must not be deferred.
 *
 * Context: can sleep
 */
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	might_sleep();

	if (q->poll_stat)
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	blk_exit_queue(q);

	blk_free_queue_stats(q->stats);
	kfree(q->poll_stat);

	blk_queue_free_zone_bitmaps(q);

	if (queue_is_mq(q))
		blk_mq_release(q);

	blk_trace_shutdown(q);
	mutex_lock(&q->debugfs_mutex);
	debugfs_remove_recursive(q->debugfs_dir);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q))
		blk_mq_debugfs_unregister(q);

	bioset_exit(&q->bio_split);

	if (blk_queue_has_srcu(q))
		cleanup_srcu_struct(q->srcu);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_release_queue,
};

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	mutex_lock(&q->sysfs_dir_lock);

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	ret = sysfs_create_group(&q->kobj, &queue_attr_group);
	if (ret) {
		blk_trace_remove_sysfs(dev);
		kobject_del(&q->kobj);
		kobject_put(&dev->kobj);
		goto unlock;
	}

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q)) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	mutex_lock(&q->sysfs_lock);

	ret = disk_register_independent_access_ranges(disk, NULL);
	if (ret)
		goto put_dev;

	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret)
			goto put_dev;
	}

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(q);
	blk_throtl_register_queue(q);

	/* Now everything is ready; send out the KOBJ_ADD uevent. */
	kobject_uevent(&q->kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);

unlock:
	mutex_unlock(&q->sysfs_dir_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	return ret;

put_dev:
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(dev);
	kobject_put(&dev->kobj);

	return ret;
}

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));

	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		elv_unregister_queue(q);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	kobject_put(&disk_to_dev(disk)->kobj);
}