xref: /openbmc/linux/block/blk-sysfs.c (revision 31e67366)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Functions related to sysfs handling
4  */
5 #include <linux/kernel.h>
6 #include <linux/slab.h>
7 #include <linux/module.h>
8 #include <linux/bio.h>
9 #include <linux/blkdev.h>
10 #include <linux/backing-dev.h>
11 #include <linux/blktrace_api.h>
12 #include <linux/blk-mq.h>
13 #include <linux/blk-cgroup.h>
14 #include <linux/debugfs.h>
15 
16 #include "blk.h"
17 #include "blk-mq.h"
18 #include "blk-mq-debugfs.h"
19 #include "blk-wbt.h"
20 
21 struct queue_sysfs_entry {
22 	struct attribute attr;
23 	ssize_t (*show)(struct request_queue *, char *);
24 	ssize_t (*store)(struct request_queue *, const char *, size_t);
25 };
26 
27 static ssize_t
28 queue_var_show(unsigned long var, char *page)
29 {
30 	return sprintf(page, "%lu\n", var);
31 }
32 
33 static ssize_t
34 queue_var_store(unsigned long *var, const char *page, size_t count)
35 {
36 	int err;
37 	unsigned long v;
38 
39 	err = kstrtoul(page, 10, &v);
40 	if (err || v > UINT_MAX)
41 		return -EINVAL;
42 
43 	*var = v;
44 
45 	return count;
46 }
47 
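/*
 * The two helpers above implement the pattern used by most attributes in this
 * file: queue_var_show() formats a single unsigned long, and queue_var_store()
 * parses one and returns @count on success, so a store method can return its
 * result directly.  A minimal sketch of a store built on them (the attribute
 * name "foo" here is hypothetical, not part of this file) looks like:
 *
 *	static ssize_t queue_foo_store(struct request_queue *q,
 *				       const char *page, size_t count)
 *	{
 *		unsigned long foo;
 *		ssize_t ret = queue_var_store(&foo, page, count);
 *
 *		if (ret < 0)
 *			return ret;
 *		(apply foo to the queue here)
 *		return ret;
 *	}
 */
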
48 static ssize_t queue_var_store64(s64 *var, const char *page)
49 {
50 	int err;
51 	s64 v;
52 
53 	err = kstrtos64(page, 10, &v);
54 	if (err < 0)
55 		return err;
56 
57 	*var = v;
58 	return 0;
59 }
60 
61 static ssize_t queue_requests_show(struct request_queue *q, char *page)
62 {
63 	return queue_var_show(q->nr_requests, page);
64 }
65 
66 static ssize_t
67 queue_requests_store(struct request_queue *q, const char *page, size_t count)
68 {
69 	unsigned long nr;
70 	int ret, err;
71 
72 	if (!queue_is_mq(q))
73 		return -EINVAL;
74 
75 	ret = queue_var_store(&nr, page, count);
76 	if (ret < 0)
77 		return ret;
78 
79 	if (nr < BLKDEV_MIN_RQ)
80 		nr = BLKDEV_MIN_RQ;
81 
82 	err = blk_mq_update_nr_requests(q, nr);
83 	if (err)
84 		return err;
85 
86 	return ret;
87 }
88 
89 static ssize_t queue_ra_show(struct request_queue *q, char *page)
90 {
91 	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
92 					(PAGE_SHIFT - 10);
93 
94 	return queue_var_show(ra_kb, page);
95 }
96 
97 static ssize_t
98 queue_ra_store(struct request_queue *q, const char *page, size_t count)
99 {
100 	unsigned long ra_kb;
101 	ssize_t ret = queue_var_store(&ra_kb, page, count);
102 
103 	if (ret < 0)
104 		return ret;
105 
106 	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
107 
108 	return ret;
109 }
110 
111 static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
112 {
113 	int max_sectors_kb = queue_max_sectors(q) >> 1;
114 
115 	return queue_var_show(max_sectors_kb, page);
116 }
117 
118 static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
119 {
120 	return queue_var_show(queue_max_segments(q), page);
121 }
122 
123 static ssize_t queue_max_discard_segments_show(struct request_queue *q,
124 		char *page)
125 {
126 	return queue_var_show(queue_max_discard_segments(q), page);
127 }
128 
129 static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
130 {
131 	return queue_var_show(q->limits.max_integrity_segments, page);
132 }
133 
134 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
135 {
136 	return queue_var_show(queue_max_segment_size(q), page);
137 }
138 
139 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
140 {
141 	return queue_var_show(queue_logical_block_size(q), page);
142 }
143 
144 static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
145 {
146 	return queue_var_show(queue_physical_block_size(q), page);
147 }
148 
149 static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
150 {
151 	return queue_var_show(q->limits.chunk_sectors, page);
152 }
153 
154 static ssize_t queue_io_min_show(struct request_queue *q, char *page)
155 {
156 	return queue_var_show(queue_io_min(q), page);
157 }
158 
159 static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
160 {
161 	return queue_var_show(queue_io_opt(q), page);
162 }
163 
164 static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
165 {
166 	return queue_var_show(q->limits.discard_granularity, page);
167 }
168 
169 static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
170 {
171 
172 	return sprintf(page, "%llu\n",
173 		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
174 }
175 
176 static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
177 {
178 	return sprintf(page, "%llu\n",
179 		       (unsigned long long)q->limits.max_discard_sectors << 9);
180 }
181 
182 static ssize_t queue_discard_max_store(struct request_queue *q,
183 				       const char *page, size_t count)
184 {
185 	unsigned long max_discard;
186 	ssize_t ret = queue_var_store(&max_discard, page, count);
187 
188 	if (ret < 0)
189 		return ret;
190 
191 	if (max_discard & (q->limits.discard_granularity - 1))
192 		return -EINVAL;
193 
194 	max_discard >>= 9;
195 	if (max_discard > UINT_MAX)
196 		return -EINVAL;
197 
198 	if (max_discard > q->limits.max_hw_discard_sectors)
199 		max_discard = q->limits.max_hw_discard_sectors;
200 
201 	q->limits.max_discard_sectors = max_discard;
202 	return ret;
203 }
204 
205 static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
206 {
207 	return queue_var_show(0, page);
208 }
209 
210 static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
211 {
212 	return sprintf(page, "%llu\n",
213 		(unsigned long long)q->limits.max_write_same_sectors << 9);
214 }
215 
216 static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
217 {
218 	return sprintf(page, "%llu\n",
219 		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
220 }
221 
222 static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
223 						 char *page)
224 {
225 	return queue_var_show(queue_zone_write_granularity(q), page);
226 }
227 
228 static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
229 {
230 	unsigned long long max_sectors = q->limits.max_zone_append_sectors;
231 
232 	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
233 }
234 
235 static ssize_t
236 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
237 {
238 	unsigned long max_sectors_kb,
239 		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
240 			page_kb = 1 << (PAGE_SHIFT - 10);
241 	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
242 
243 	if (ret < 0)
244 		return ret;
245 
246 	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
247 					 q->limits.max_dev_sectors >> 1);
248 
249 	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
250 		return -EINVAL;
251 
252 	spin_lock_irq(&q->queue_lock);
253 	q->limits.max_sectors = max_sectors_kb << 1;
254 	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
255 	spin_unlock_irq(&q->queue_lock);
256 
257 	return ret;
258 }
259 
260 static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
261 {
262 	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
263 
264 	return queue_var_show(max_hw_sectors_kb, page);
265 }
266 
267 #define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
268 static ssize_t								\
269 queue_##name##_show(struct request_queue *q, char *page)		\
270 {									\
271 	int bit;							\
272 	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
273 	return queue_var_show(neg ? !bit : bit, page);			\
274 }									\
275 static ssize_t								\
276 queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
277 {									\
278 	unsigned long val;						\
279 	ssize_t ret;							\
280 	ret = queue_var_store(&val, page, count);			\
281 	if (ret < 0)							\
282 		 return ret;						\
283 	if (neg)							\
284 		val = !val;						\
285 									\
286 	if (val)							\
287 		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
288 	else								\
289 		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
290 	return ret;							\
291 }
292 
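/*
 * Each invocation below generates a queue_<name>_show()/queue_<name>_store()
 * pair operating on one queue flag.  For example,
 * QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) expands to queue_nonrot_show() and
 * queue_nonrot_store() acting on QUEUE_FLAG_NONROT; because neg is 1 the
 * value is inverted, so the "rotational" attribute reads 1 for rotational
 * devices even though the underlying flag means "non-rotational".
 */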
293 QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
294 QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
295 QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
296 QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
297 #undef QUEUE_SYSFS_BIT_FNS
298 
299 static ssize_t queue_zoned_show(struct request_queue *q, char *page)
300 {
301 	switch (blk_queue_zoned_model(q)) {
302 	case BLK_ZONED_HA:
303 		return sprintf(page, "host-aware\n");
304 	case BLK_ZONED_HM:
305 		return sprintf(page, "host-managed\n");
306 	default:
307 		return sprintf(page, "none\n");
308 	}
309 }
310 
311 static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
312 {
313 	return queue_var_show(blk_queue_nr_zones(q), page);
314 }
315 
316 static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
317 {
318 	return queue_var_show(queue_max_open_zones(q), page);
319 }
320 
321 static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
322 {
323 	return queue_var_show(queue_max_active_zones(q), page);
324 }
325 
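/*
 * "nomerges" is a small tri-state encoded in two queue flags: 0 leaves all
 * merging enabled, 1 (QUEUE_FLAG_NOXMERGES) disables only the more expensive
 * extended merge lookups while still allowing simple one-hit merges, and 2
 * (QUEUE_FLAG_NOMERGES) disables merging entirely.  The show/store methods
 * below translate between that value and the two flags.
 */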
326 static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
327 {
328 	return queue_var_show((blk_queue_nomerges(q) << 1) |
329 			       blk_queue_noxmerges(q), page);
330 }
331 
332 static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
333 				    size_t count)
334 {
335 	unsigned long nm;
336 	ssize_t ret = queue_var_store(&nm, page, count);
337 
338 	if (ret < 0)
339 		return ret;
340 
341 	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
342 	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
343 	if (nm == 2)
344 		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
345 	else if (nm)
346 		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
347 
348 	return ret;
349 }
350 
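/*
 * "rq_affinity" is likewise built from two queue flags: 0 lets completions
 * run wherever they land, 1 (QUEUE_FLAG_SAME_COMP) steers a completion to
 * the CPU group that submitted the request, and 2 (QUEUE_FLAG_SAME_COMP plus
 * QUEUE_FLAG_SAME_FORCE) forces the completion onto the submitting CPU.
 */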
351 static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
352 {
353 	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
354 	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);
355 
356 	return queue_var_show(set << force, page);
357 }
358 
359 static ssize_t
360 queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
361 {
362 	ssize_t ret = -EINVAL;
363 #ifdef CONFIG_SMP
364 	unsigned long val;
365 
366 	ret = queue_var_store(&val, page, count);
367 	if (ret < 0)
368 		return ret;
369 
370 	if (val == 2) {
371 		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
372 		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
373 	} else if (val == 1) {
374 		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
375 		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
376 	} else if (val == 0) {
377 		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
378 		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
379 	}
380 #endif
381 	return ret;
382 }
383 
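/*
 * "io_poll_delay" selects the polling mode for queues that support polling:
 * -1 (BLK_MQ_POLL_CLASSIC) means classic busy polling, 0 means hybrid polling
 * with the sleep time estimated by the kernel, and a positive value is a
 * fixed hybrid-polling sleep time in microseconds (stored in nanoseconds in
 * q->poll_nsec).
 */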
384 static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
385 {
386 	int val;
387 
388 	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
389 		val = BLK_MQ_POLL_CLASSIC;
390 	else
391 		val = q->poll_nsec / 1000;
392 
393 	return sprintf(page, "%d\n", val);
394 }
395 
396 static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
397 				size_t count)
398 {
399 	int err, val;
400 
401 	if (!q->mq_ops || !q->mq_ops->poll)
402 		return -EINVAL;
403 
404 	err = kstrtoint(page, 10, &val);
405 	if (err < 0)
406 		return err;
407 
408 	if (val == BLK_MQ_POLL_CLASSIC)
409 		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
410 	else if (val >= 0)
411 		q->poll_nsec = val * 1000;
412 	else
413 		return -EINVAL;
414 
415 	return count;
416 }
417 
418 static ssize_t queue_poll_show(struct request_queue *q, char *page)
419 {
420 	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
421 }
422 
423 static ssize_t queue_poll_store(struct request_queue *q, const char *page,
424 				size_t count)
425 {
426 	unsigned long poll_on;
427 	ssize_t ret;
428 
429 	if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
430 	    !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
431 		return -EINVAL;
432 
433 	ret = queue_var_store(&poll_on, page, count);
434 	if (ret < 0)
435 		return ret;
436 
437 	if (poll_on) {
438 		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
439 	} else {
440 		blk_mq_freeze_queue(q);
441 		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
442 		blk_mq_unfreeze_queue(q);
443 	}
444 
445 	return ret;
446 }
447 
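/*
 * "io_timeout" is the request timeout in milliseconds.  It is only exposed
 * for blk-mq drivers that implement a ->timeout() handler (see
 * queue_attr_visible() below), and a value of zero is rejected.
 */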
448 static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
449 {
450 	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
451 }
452 
453 static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
454 				  size_t count)
455 {
456 	unsigned int val;
457 	int err;
458 
459 	err = kstrtou32(page, 10, &val);
460 	if (err || val == 0)
461 		return -EINVAL;
462 
463 	blk_queue_rq_timeout(q, msecs_to_jiffies(val));
464 
465 	return count;
466 }
467 
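/*
 * "wbt_lat_usec" is the writeback throttling latency target in microseconds
 * (wbt itself works in nanoseconds, hence the conversions below).  Writing -1
 * reverts to the default target for the device, writing 0 effectively
 * disables throttling, and the store path freezes and quiesces the queue so
 * the switch cannot race with in-flight I/O.
 */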
468 static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
469 {
470 	if (!wbt_rq_qos(q))
471 		return -EINVAL;
472 
473 	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
474 }
475 
476 static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
477 				  size_t count)
478 {
479 	struct rq_qos *rqos;
480 	ssize_t ret;
481 	s64 val;
482 
483 	ret = queue_var_store64(&val, page);
484 	if (ret < 0)
485 		return ret;
486 	if (val < -1)
487 		return -EINVAL;
488 
489 	rqos = wbt_rq_qos(q);
490 	if (!rqos) {
491 		ret = wbt_init(q);
492 		if (ret)
493 			return ret;
494 	}
495 
496 	if (val == -1)
497 		val = wbt_default_latency_nsec(q);
498 	else if (val >= 0)
499 		val *= 1000ULL;
500 
501 	if (wbt_get_min_lat(q) == val)
502 		return count;
503 
504 	/*
505 	 * Ensure that the queue is idled, in case the latency update
506 	 * ends up either enabling or disabling wbt completely. We can't
507 	 * have I/O in flight if that happens.
508 	 */
509 	blk_mq_freeze_queue(q);
510 	blk_mq_quiesce_queue(q);
511 
512 	wbt_set_min_lat(q, val);
513 
514 	blk_mq_unquiesce_queue(q);
515 	blk_mq_unfreeze_queue(q);
516 
517 	return count;
518 }
519 
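/*
 * "write_cache" reports whether the block layer treats the device as having
 * a volatile write back cache.  The store method accepts "write back" or
 * "write through" ("none" is accepted as a synonym for write through); it
 * only changes the kernel's view of the device and does not issue any cache
 * configuration commands to the hardware.
 */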
520 static ssize_t queue_wc_show(struct request_queue *q, char *page)
521 {
522 	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
523 		return sprintf(page, "write back\n");
524 
525 	return sprintf(page, "write through\n");
526 }
527 
528 static ssize_t queue_wc_store(struct request_queue *q, const char *page,
529 			      size_t count)
530 {
531 	int set = -1;
532 
533 	if (!strncmp(page, "write back", 10))
534 		set = 1;
535 	else if (!strncmp(page, "write through", 13) ||
536 		 !strncmp(page, "none", 4))
537 		set = 0;
538 
539 	if (set == -1)
540 		return -EINVAL;
541 
542 	if (set)
543 		blk_queue_flag_set(QUEUE_FLAG_WC, q);
544 	else
545 		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
546 
547 	return count;
548 }
549 
550 static ssize_t queue_fua_show(struct request_queue *q, char *page)
551 {
552 	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
553 }
554 
555 static ssize_t queue_dax_show(struct request_queue *q, char *page)
556 {
557 	return queue_var_show(blk_queue_dax(q), page);
558 }
559 
560 #define QUEUE_RO_ENTRY(_prefix, _name)			\
561 static struct queue_sysfs_entry _prefix##_entry = {	\
562 	.attr	= { .name = _name, .mode = 0444 },	\
563 	.show	= _prefix##_show,			\
564 };
565 
566 #define QUEUE_RW_ENTRY(_prefix, _name)			\
567 static struct queue_sysfs_entry _prefix##_entry = {	\
568 	.attr	= { .name = _name, .mode = 0644 },	\
569 	.show	= _prefix##_show,			\
570 	.store	= _prefix##_store,			\
571 };
572 
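/*
 * The two macros above tie a show (and optionally store) method to a sysfs
 * file name and mode.  For example, QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb")
 * defines queue_ra_entry with .show = queue_ra_show and .store =
 * queue_ra_store, which ends up as the writable file
 * /sys/block/<disk>/queue/read_ahead_kb once the queue is registered.
 */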
573 QUEUE_RW_ENTRY(queue_requests, "nr_requests");
574 QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
575 QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
576 QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
577 QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
578 QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
579 QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
580 QUEUE_RW_ENTRY(elv_iosched, "scheduler");
581 
582 QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
583 QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
584 QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
585 QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
586 QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");
587 
588 QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
589 QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
590 QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
591 QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
592 QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");
593 
594 QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
595 QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
596 QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
597 QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");
598 
599 QUEUE_RO_ENTRY(queue_zoned, "zoned");
600 QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
601 QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
602 QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");
603 
604 QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
605 QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
606 QUEUE_RW_ENTRY(queue_poll, "io_poll");
607 QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
608 QUEUE_RW_ENTRY(queue_wc, "write_cache");
609 QUEUE_RO_ENTRY(queue_fua, "fua");
610 QUEUE_RO_ENTRY(queue_dax, "dax");
611 QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
612 QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
613 
614 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
615 QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
616 #endif
617 
618 /* legacy alias for logical_block_size: */
619 static struct queue_sysfs_entry queue_hw_sector_size_entry = {
620 	.attr = {.name = "hw_sector_size", .mode = 0444 },
621 	.show = queue_logical_block_size_show,
622 };
623 
624 QUEUE_RW_ENTRY(queue_nonrot, "rotational");
625 QUEUE_RW_ENTRY(queue_iostats, "iostats");
626 QUEUE_RW_ENTRY(queue_random, "add_random");
627 QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");
628 
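/*
 * Everything in this array appears as a file under /sys/block/<disk>/queue/
 * (the queue kobject is added with the name "queue" in blk_register_queue()
 * below).  queue_attr_visible() filters out entries that do not apply to a
 * particular queue, e.g. the zone limits on non-zoned devices.
 */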
629 static struct attribute *queue_attrs[] = {
630 	&queue_requests_entry.attr,
631 	&queue_ra_entry.attr,
632 	&queue_max_hw_sectors_entry.attr,
633 	&queue_max_sectors_entry.attr,
634 	&queue_max_segments_entry.attr,
635 	&queue_max_discard_segments_entry.attr,
636 	&queue_max_integrity_segments_entry.attr,
637 	&queue_max_segment_size_entry.attr,
638 	&elv_iosched_entry.attr,
639 	&queue_hw_sector_size_entry.attr,
640 	&queue_logical_block_size_entry.attr,
641 	&queue_physical_block_size_entry.attr,
642 	&queue_chunk_sectors_entry.attr,
643 	&queue_io_min_entry.attr,
644 	&queue_io_opt_entry.attr,
645 	&queue_discard_granularity_entry.attr,
646 	&queue_discard_max_entry.attr,
647 	&queue_discard_max_hw_entry.attr,
648 	&queue_discard_zeroes_data_entry.attr,
649 	&queue_write_same_max_entry.attr,
650 	&queue_write_zeroes_max_entry.attr,
651 	&queue_zone_append_max_entry.attr,
652 	&queue_zone_write_granularity_entry.attr,
653 	&queue_nonrot_entry.attr,
654 	&queue_zoned_entry.attr,
655 	&queue_nr_zones_entry.attr,
656 	&queue_max_open_zones_entry.attr,
657 	&queue_max_active_zones_entry.attr,
658 	&queue_nomerges_entry.attr,
659 	&queue_rq_affinity_entry.attr,
660 	&queue_iostats_entry.attr,
661 	&queue_stable_writes_entry.attr,
662 	&queue_random_entry.attr,
663 	&queue_poll_entry.attr,
664 	&queue_wc_entry.attr,
665 	&queue_fua_entry.attr,
666 	&queue_dax_entry.attr,
667 	&queue_wb_lat_entry.attr,
668 	&queue_poll_delay_entry.attr,
669 	&queue_io_timeout_entry.attr,
670 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
671 	&blk_throtl_sample_time_entry.attr,
672 #endif
673 	NULL,
674 };
675 
676 static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
677 				int n)
678 {
679 	struct request_queue *q =
680 		container_of(kobj, struct request_queue, kobj);
681 
682 	if (attr == &queue_io_timeout_entry.attr &&
683 		(!q->mq_ops || !q->mq_ops->timeout))
684 			return 0;
685 
686 	if ((attr == &queue_max_open_zones_entry.attr ||
687 	     attr == &queue_max_active_zones_entry.attr) &&
688 	    !blk_queue_is_zoned(q))
689 		return 0;
690 
691 	return attr->mode;
692 }
693 
694 static struct attribute_group queue_attr_group = {
695 	.attrs = queue_attrs,
696 	.is_visible = queue_attr_visible,
697 };
698 
699 
700 #define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
701 
702 static ssize_t
703 queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
704 {
705 	struct queue_sysfs_entry *entry = to_queue(attr);
706 	struct request_queue *q =
707 		container_of(kobj, struct request_queue, kobj);
708 	ssize_t res;
709 
710 	if (!entry->show)
711 		return -EIO;
712 	mutex_lock(&q->sysfs_lock);
713 	res = entry->show(q, page);
714 	mutex_unlock(&q->sysfs_lock);
715 	return res;
716 }
717 
718 static ssize_t
719 queue_attr_store(struct kobject *kobj, struct attribute *attr,
720 		    const char *page, size_t length)
721 {
722 	struct queue_sysfs_entry *entry = to_queue(attr);
723 	struct request_queue *q;
724 	ssize_t res;
725 
726 	if (!entry->store)
727 		return -EIO;
728 
729 	q = container_of(kobj, struct request_queue, kobj);
730 	mutex_lock(&q->sysfs_lock);
731 	res = entry->store(q, page, length);
732 	mutex_unlock(&q->sysfs_lock);
733 	return res;
734 }
735 
736 static void blk_free_queue_rcu(struct rcu_head *rcu_head)
737 {
738 	struct request_queue *q = container_of(rcu_head, struct request_queue,
739 					       rcu_head);
740 	kmem_cache_free(blk_requestq_cachep, q);
741 }
742 
743 /* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
744 static void blk_exit_queue(struct request_queue *q)
745 {
746 	/*
747 	 * Since the I/O scheduler exit code may access cgroup information,
748 	 * perform I/O scheduler exit before disassociating from the block
749 	 * cgroup controller.
750 	 */
751 	if (q->elevator) {
752 		ioc_clear_queue(q);
753 		__elevator_exit(q, q->elevator);
754 	}
755 
756 	/*
757 	 * Remove all references to @q from the block cgroup controller before
758 	 * restoring @q->queue_lock so that restoring this pointer cannot cause
759 	 * e.g. blkcg_print_blkgs() to crash.
760 	 */
761 	blkcg_exit_queue(q);
762 
763 	/*
764 	 * Since the cgroup code may dereference the @q->backing_dev_info
765 	 * pointer, only decrease its reference count after having removed the
766 	 * association with the block cgroup controller.
767 	 */
768 	bdi_put(q->backing_dev_info);
769 }
770 
771 /**
772  * blk_release_queue - releases all allocated resources of the request_queue
773  * @kobj: pointer to a kobject, whose container is a request_queue
774  *
775  * This function releases all allocated resources of the request queue.
776  *
777  * The struct request_queue refcount is incremented with blk_get_queue() and
778  * decremented with blk_put_queue(). Once the refcount reaches 0 this function
779  * is called.
780  *
781  * For drivers that have a request_queue on a gendisk and add it with
782  * __device_add_disk(), the request_queue refcount will reach 0 with
783  * the last put_disk() called by the driver. For drivers which don't use
784  * __device_add_disk() this happens with blk_cleanup_queue().
785  *
786  * Drivers exist which depend on the release of the request_queue to be
787  * synchronous, so it should not be deferred.
788  *
789  * Context: can sleep
790  */
791 static void blk_release_queue(struct kobject *kobj)
792 {
793 	struct request_queue *q =
794 		container_of(kobj, struct request_queue, kobj);
795 
796 	might_sleep();
797 
798 	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
799 		blk_stat_remove_callback(q, q->poll_cb);
800 	blk_stat_free_callback(q->poll_cb);
801 
802 	blk_free_queue_stats(q->stats);
803 
804 	if (queue_is_mq(q)) {
805 		struct blk_mq_hw_ctx *hctx;
806 		int i;
807 
808 		cancel_delayed_work_sync(&q->requeue_work);
809 
810 		queue_for_each_hw_ctx(q, hctx, i)
811 			cancel_delayed_work_sync(&hctx->run_work);
812 	}
813 
814 	blk_exit_queue(q);
815 
816 	blk_queue_free_zone_bitmaps(q);
817 
818 	if (queue_is_mq(q))
819 		blk_mq_release(q);
820 
821 	blk_trace_shutdown(q);
822 	mutex_lock(&q->debugfs_mutex);
823 	debugfs_remove_recursive(q->debugfs_dir);
824 	mutex_unlock(&q->debugfs_mutex);
825 
826 	if (queue_is_mq(q))
827 		blk_mq_debugfs_unregister(q);
828 
829 	bioset_exit(&q->bio_split);
830 
831 	ida_simple_remove(&blk_queue_ida, q->id);
832 	call_rcu(&q->rcu_head, blk_free_queue_rcu);
833 }
834 
835 static const struct sysfs_ops queue_sysfs_ops = {
836 	.show	= queue_attr_show,
837 	.store	= queue_attr_store,
838 };
839 
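/*
 * blk_queue_ktype wires the attribute callbacks and the release handler to
 * the queue's embedded kobject: queue_attr_show()/queue_attr_store() run
 * under q->sysfs_lock, and blk_release_queue() runs once the final
 * blk_put_queue() drops the kobject reference, with the queue itself freed
 * via call_rcu(..., blk_free_queue_rcu).
 */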
840 struct kobj_type blk_queue_ktype = {
841 	.sysfs_ops	= &queue_sysfs_ops,
842 	.release	= blk_release_queue,
843 };
844 
845 /**
846  * blk_register_queue - register a block layer queue with sysfs
847  * @disk: Disk of which the request queue should be registered with sysfs.
848  */
849 int blk_register_queue(struct gendisk *disk)
850 {
851 	int ret;
852 	struct device *dev = disk_to_dev(disk);
853 	struct request_queue *q = disk->queue;
854 
855 	if (WARN_ON(!q))
856 		return -ENXIO;
857 
858 	WARN_ONCE(blk_queue_registered(q),
859 		  "%s is registering an already registered queue\n",
860 		  kobject_name(&dev->kobj));
861 
862 	/*
863 	 * SCSI probing may synchronously create and destroy a lot of
864 	 * request_queues for non-existent devices.  Shutting down a fully
865 	 * functional queue takes measurable wallclock time as RCU grace
866 	 * periods are involved.  To avoid excessive latency in these
867 	 * cases, a request_queue starts out in a degraded mode which is
868 	 * faster to shut down and is made fully functional here as
869 	 * request_queues for non-existent devices never get registered.
870 	 */
871 	if (!blk_queue_init_done(q)) {
872 		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
873 		percpu_ref_switch_to_percpu(&q->q_usage_counter);
874 	}
875 
876 	blk_queue_update_readahead(q);
877 
878 	ret = blk_trace_init_sysfs(dev);
879 	if (ret)
880 		return ret;
881 
882 	mutex_lock(&q->sysfs_dir_lock);
883 
884 	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
885 	if (ret < 0) {
886 		blk_trace_remove_sysfs(dev);
887 		goto unlock;
888 	}
889 
890 	ret = sysfs_create_group(&q->kobj, &queue_attr_group);
891 	if (ret) {
892 		blk_trace_remove_sysfs(dev);
893 		kobject_del(&q->kobj);
894 		kobject_put(&dev->kobj);
895 		goto unlock;
896 	}
897 
898 	mutex_lock(&q->debugfs_mutex);
899 	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
900 					    blk_debugfs_root);
901 	mutex_unlock(&q->debugfs_mutex);
902 
903 	if (queue_is_mq(q)) {
904 		__blk_mq_register_dev(dev, q);
905 		blk_mq_debugfs_register(q);
906 	}
907 
908 	mutex_lock(&q->sysfs_lock);
909 	if (q->elevator) {
910 		ret = elv_register_queue(q, false);
911 		if (ret) {
912 			mutex_unlock(&q->sysfs_lock);
913 			mutex_unlock(&q->sysfs_dir_lock);
914 			kobject_del(&q->kobj);
915 			blk_trace_remove_sysfs(dev);
916 			kobject_put(&dev->kobj);
917 			return ret;
918 		}
919 	}
920 
921 	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
922 	wbt_enable_default(q);
923 	blk_throtl_register_queue(q);
924 
925 	/* Now everything is ready and send out KOBJ_ADD uevent */
926 	/* Now everything is ready; send out the KOBJ_ADD uevent */
927 	if (q->elevator)
928 		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
929 	mutex_unlock(&q->sysfs_lock);
930 
931 	ret = 0;
932 unlock:
933 	mutex_unlock(&q->sysfs_dir_lock);
934 	return ret;
935 }
936 EXPORT_SYMBOL_GPL(blk_register_queue);
937 
938 /**
939  * blk_unregister_queue - counterpart of blk_register_queue()
940  * @disk: Disk of which the request queue should be unregistered from sysfs.
941  *
942  * Note: the caller is responsible for guaranteeing that this function is called
943  * after blk_register_queue() has finished.
944  */
945 void blk_unregister_queue(struct gendisk *disk)
946 {
947 	struct request_queue *q = disk->queue;
948 
949 	if (WARN_ON(!q))
950 		return;
951 
952 	/* Return early if disk->queue was never registered. */
953 	if (!blk_queue_registered(q))
954 		return;
955 
956 	/*
957 	 * Since sysfs_remove_dir() prevents adding new directory entries
958 	 * before removal of existing entries starts, protect against
959 	 * concurrent elv_iosched_store() calls.
960 	 */
961 	mutex_lock(&q->sysfs_lock);
962 	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
963 	mutex_unlock(&q->sysfs_lock);
964 
965 	mutex_lock(&q->sysfs_dir_lock);
966 	/*
967 	 * Remove the sysfs attributes before unregistering the queue data
968 	 * structures that can be modified through sysfs.
969 	 */
970 	if (queue_is_mq(q))
971 		blk_mq_unregister_dev(disk_to_dev(disk), q);
972 
973 	kobject_uevent(&q->kobj, KOBJ_REMOVE);
974 	kobject_del(&q->kobj);
975 	blk_trace_remove_sysfs(disk_to_dev(disk));
976 
977 	mutex_lock(&q->sysfs_lock);
978 	if (q->elevator)
979 		elv_unregister_queue(q);
980 	mutex_unlock(&q->sysfs_lock);
981 	mutex_unlock(&q->sysfs_dir_lock);
982 
983 	kobject_put(&disk_to_dev(disk)->kobj);
984 }
985