/* xref: /openbmc/linux/block/blk-mq-sysfs.c (revision c819e2cf) */
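/*
 * blk-mq-sysfs.c - sysfs interface for multiqueue (blk-mq) request
 * queues: exposes hardware- and software-context state and statistics
 * under /sys/block/<disk>/mq/.
 */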
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

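/*
 * Release handler for the queue's embedded mq_kobj: nothing to free but
 * the percpu array that the software contexts live in.
 */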
static void blk_mq_sysfs_release(struct kobject *kobj)
{
	struct request_queue *q;

	q = container_of(kobj, struct request_queue, mq_kobj);
	free_percpu(q->queue_ctx);
}

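/*
 * A software context lives inside the queue's percpu queue_ctx
 * allocation, so its kobject just drops the reference on the queue's
 * mq_kobj taken in blk_mq_register_hctx() rather than freeing memory of
 * its own.
 */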
static void blk_mq_ctx_release(struct kobject *kobj)
{
	struct blk_mq_ctx *ctx;

	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	kobject_put(&ctx->queue->mq_kobj);
}

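/* Hardware contexts are allocated individually, so simply free the hctx. */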
static void blk_mq_hctx_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	kfree(hctx);
}

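/*
 * Typed attribute descriptors: each sysfs file carries show/store
 * callbacks that take the containing ctx or hctx directly, with the
 * generic wrappers below doing the container_of() translation.
 */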
struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

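/*
 * Generic sysfs_ops wrappers.  They recover the typed attribute entry and
 * object via container_of(), then call the typed show/store handler under
 * q->sysfs_lock, returning -ENOENT once the queue has been marked dying so
 * that handlers never run against a queue in teardown.
 */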
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

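/*
 * Per-ctx statistics files.  For the two-column files the sync count is
 * printed first, then the async count (blk-mq core indexes these arrays
 * by rq_is_sync()).
 */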
static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
				ctx->rq_dispatched[0]);
}

static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu\n", ctx->rq_merged);
}

static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
				ctx->rq_completed[0]);
}

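/*
 * Dump the requests pending on a list; the caller must hold the lock
 * protecting that list.  Note the output is not bounded against
 * PAGE_SIZE, so a long enough list can overrun the sysfs page.
 */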
static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
{
	char *start_page = page;
	struct request *rq;

	page += sprintf(page, "%s:\n", msg);

	list_for_each_entry(rq, list, queuelist)
		page += sprintf(page, "\t%p\n", rq);

	return page - start_page;
}

static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
{
	ssize_t ret;

	spin_lock(&ctx->lock);
	ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
	spin_unlock(&ctx->lock);

	return ret;
}

static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
					   char *page)
{
	return sprintf(page, "%lu\n", hctx->queued);
}

static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "%lu\n", hctx->run);
}

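/*
 * Dispatch batch histogram.  Row 0 counts queue runs that dispatched
 * nothing; the row labelled 2^(i-1) counts the runs binned there by
 * blk-mq core, i.e. roughly batches of at least 2^(i-1) and fewer than
 * 2^i requests.
 */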
static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
					       char *page)
{
	char *start_page = page;
	int i;

	page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
		unsigned long d = 1U << (i - 1);

		page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
	}

	return page - start_page;
}

static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	ssize_t ret;

	spin_lock(&hctx->lock);
	ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
	spin_unlock(&hctx->lock);

	return ret;
}

static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return blk_mq_tag_sysfs_show(hctx->tags, page);
}

static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
}

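/*
 * Print the CPUs served by this hardware context as a comma-separated
 * list, with CPU hotplug disabled so the mask cannot change mid-walk.
 */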
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	unsigned int i, first = 1;
	ssize_t ret = 0;

	blk_mq_disable_hotplug();

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret += sprintf(ret + page, "%u", i);
		else
			ret += sprintf(ret + page, ", %u", i);

		first = 0;
	}

	blk_mq_enable_hotplug();

	ret += sprintf(ret + page, "\n");
	return ret;
}

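/*
 * Attribute definitions and the NULL-terminated default attribute tables
 * hooked into the kobject types below.  All files are read-only
 * (S_IRUGO); no entry defines a store handler.
 */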
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
	.attr = {.name = "merged", .mode = S_IRUGO },
	.show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
	.attr = {.name = "completed", .mode = S_IRUGO },
	.show = blk_mq_sysfs_completed_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
	.attr = {.name = "rq_list", .mode = S_IRUGO },
	.show = blk_mq_sysfs_rq_list_show,
};

static struct attribute *default_ctx_attrs[] = {
	&blk_mq_sysfs_dispatched.attr,
	&blk_mq_sysfs_merged.attr,
	&blk_mq_sysfs_completed.attr,
	&blk_mq_sysfs_rq_list.attr,
	NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
	.attr = {.name = "queued", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_queued_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
	.attr = {.name = "run", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_run_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_dispatched_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
	.attr = {.name = "active", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_active_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
	.attr = {.name = "pending", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_rq_list_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
	.attr = {.name = "tags", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_queued.attr,
	&blk_mq_hw_sysfs_run.attr,
	&blk_mq_hw_sysfs_dispatched.attr,
	&blk_mq_hw_sysfs_pending.attr,
	&blk_mq_hw_sysfs_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	&blk_mq_hw_sysfs_active.attr,
	NULL,
};

static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show	= blk_mq_sysfs_show,
	.store	= blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

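/*
 * kobject types for the queue's "mq" directory, the per-CPU software
 * context directories and the per-hctx directories.  The queue-level
 * type has no default attributes; it exists to tie the release handler
 * to the embedded mq_kobj.
 */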
static struct kobj_type blk_mq_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.default_attrs	= default_ctx_attrs,
	.release	= blk_mq_ctx_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_attrs	= default_hw_ctx_attrs,
	.release	= blk_mq_hctx_release,
};

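/*
 * Add or remove the sysfs entries for one hardware context and its
 * software contexts.  BLK_MQ_F_SYSFS_UP gates both paths so that remap
 * operations only touch contexts whose kobjects were actually added.  On
 * a partial failure, blk_mq_register_hctx() returns the error and leaves
 * unwinding to its caller.
 */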
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, ret;

	if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
		return 0;

	ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		kobject_get(&q->mq_kobj);
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			break;
	}

	return ret;
}

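/*
 * Tear down the whole mq/ hierarchy: delete and drop every ctx and hctx
 * kobject, announce removal, then drop the mq directory itself and the
 * device reference taken in blk_mq_register_disk().
 */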
void blk_mq_unregister_disk(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	int i, j;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_unregister_hctx(hctx);

		hctx_for_each_ctx(hctx, ctx, j)
			kobject_put(&ctx->kobj);

		kobject_put(&hctx->kobj);
	}

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
	kobject_put(&q->mq_kobj);

	kobject_put(&disk_to_dev(disk)->kobj);
}

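/* Initialise (but do not add) the queue, hctx and ctx kobjects. */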
static void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	int i;

	kobject_init(&q->mq_kobj, &blk_mq_ktype);

	queue_for_each_hw_ctx(q, hctx, i)
		kobject_init(&hctx->kobj, &blk_mq_hw_ktype);

	queue_for_each_ctx(q, ctx, i)
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
}

/* see blk_register_queue() */
void blk_mq_finish_init(struct request_queue *q)
{
	percpu_ref_switch_to_percpu(&q->mq_usage_counter);
}

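/*
 * Create the mq/ directory under the disk's device and populate it with
 * one directory per hardware context, each holding a cpu<N>/ directory
 * per software context.  Illustratively, a two-hctx queue ends up with
 * paths such as:
 *
 *	/sys/block/<disk>/mq/0/cpu_list
 *	/sys/block/<disk>/mq/0/cpu0/rq_list
 *	/sys/block/<disk>/mq/1/tags
 */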
int blk_mq_register_disk(struct gendisk *disk)
{
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	blk_mq_sysfs_init(q);

	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		return ret;

	kobject_uevent(&q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		hctx->flags |= BLK_MQ_F_SYSFS_UP;
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

	if (ret) {
		blk_mq_unregister_disk(disk);
		return ret;
	}

	return 0;
}

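/*
 * blk_mq_sysfs_unregister()/blk_mq_sysfs_register() bracket hardware
 * context remapping (e.g. after a CPU hotplug event): directories are
 * torn down before the remap and re-added afterwards.
 */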
void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);
}

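/* Re-add every hardware context after a remap; stops on the first error. */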
int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

	return ret;
}