/*
 * block/blk-sysfs.c - Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"

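/*
 * Each attribute file under /sys/block/<dev>/queue is described by one of
 * these entries.  ->show and ->store are dispatched by queue_attr_show()
 * and queue_attr_store() below with q->sysfs_lock held.
 */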
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

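/*
 * Parse a decimal value from a sysfs write.  Values above UINT_MAX are
 * rejected because most callers store the result into an unsigned-int
 * sized field.  Returns the byte count consumed on success.
 */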
static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

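/*
 * nr_requests: queue depth for the legacy request_fn path or for blk-mq.
 * Read and written through sysfs, e.g. (device name is illustrative):
 *
 *	cat /sys/block/sda/queue/nr_requests
 *	echo 128 > /sys/block/sda/queue/nr_requests
 */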
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!q->request_fn && !q->mq_ops)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	if (q->request_fn)
		err = blk_update_nr_requests(q, nr);
	else
		err = blk_mq_update_nr_requests(q, nr);

	if (err)
		return err;

	return ret;
}

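/*
 * read_ahead_kb: ra_pages is kept in units of PAGE_CACHE_SIZE pages, so
 * shifting by (PAGE_CACHE_SHIFT - 10) converts pages to kilobytes and
 * back.  With 4K pages that is a shift by 2 (1 page == 4 KB).
 */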
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
					(PAGE_CACHE_SHIFT - 10);

	return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

	return ret;
}

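/*
 * max_sectors_kb / max_hw_sectors_kb: sector counts are in 512-byte
 * units, so ">> 1" converts sectors to kilobytes.
 */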
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), page);

	return queue_var_show(PAGE_CACHE_SIZE, page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_discard_zeroes_data(q), page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

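/*
 * Writes to max_sectors_kb must fall in the range [one page, the
 * hardware limit reported via max_hw_sectors_kb]; anything outside is
 * rejected with -EINVAL rather than silently clamped.
 */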
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}

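/*
 * Generate show/store handler pairs for boolean queue flags.  @neg
 * inverts the polarity so that e.g. "rotational" reads and writes the
 * negation of QUEUE_FLAG_NONROT.
 */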
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	spin_lock_irq(q->queue_lock);					\
	if (val)							\
		queue_flag_set(QUEUE_FLAG_##flag, q);			\
	else								\
		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
	spin_unlock_irq(q->queue_lock);					\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

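/*
 * nomerges encodes two flags in one value: 0 leaves all merging enabled,
 * 1 disables only the extended merge attempts (QUEUE_FLAG_NOXMERGES),
 * keeping the simple one-hit cache, and 2 disables merging entirely
 * (QUEUE_FLAG_NOMERGES).
 */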
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

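/*
 * rq_affinity: 0 completes requests on any CPU, 1 completes on a CPU in
 * the same group as the submitter (QUEUE_FLAG_SAME_COMP), and 2 forces
 * completion on the exact submitting CPU (QUEUE_FLAG_SAME_FORCE).  The
 * show side reports "set << force", which yields 0, 1 or 2 accordingly.
 */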
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

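/*
 * One queue_sysfs_entry per attribute file.  Entries that omit ->store
 * (mode S_IRUGO only) are read-only; writable attributes additionally
 * carry S_IWUSR.
 */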
static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_show,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

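/*
 * Common show/store dispatch for all queue attributes.  Both paths take
 * q->sysfs_lock and bail out with -ENOENT once the queue is marked
 * dying, so attribute handlers never race with queue teardown.
 */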
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * blk_release_queue - release a &struct request_queue when it is no longer needed
 * @kobj:    the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Note:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	blkcg_exit_queue(q);

	if (q->elevator) {
		spin_lock_irq(q->queue_lock);
		ioc_clear_queue(q);
		spin_unlock_irq(q->queue_lock);
		elevator_exit(q->elevator);
	}

	blk_exit_rl(&q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	if (!q->mq_ops)
		blk_free_flush_queue(q->fq);
	else
		blk_mq_release(q);

	blk_trace_shutdown(q);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

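/*
 * blk_register_queue() creates the "queue" directory under the disk's
 * device kobject, e.g. /sys/block/sda/queue (device name illustrative),
 * and hooks up the elevator and blk-mq sysfs entries where applicable.
 */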
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
		blk_queue_bypass_end(q);
		if (q->mq_ops)
			blk_mq_finish_init(q);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		return ret;
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (q->mq_ops)
		blk_mq_register_disk(disk);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(dev);
		kobject_put(&dev->kobj);
		return ret;
	}

	return 0;
}

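/*
 * Tear down everything blk_register_queue() set up, in reverse order,
 * and drop the reference on the device kobject taken at registration.
 */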
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->mq_ops)
		blk_mq_unregister_disk(disk);

	if (q->request_fn)
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}
614