/* xref: /openbmc/linux/block/blk-mq-sysfs.c (revision 19c233b7) */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

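/*
 * Empty release: the ctx and hctx kobjects are embedded in structures that
 * are allocated and freed along with the request queue, so there is nothing
 * to free here.
 */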
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

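/*
 * Common ->show() handler for the per-ctx (software queue) attributes:
 * resolve the attribute entry and the ctx from the kobject, then call the
 * entry's show method under q->sysfs_lock, but only while the queue is not
 * dying.
 */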
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

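/* Per-ctx ->store() counterpart, with the same locking and dying check. */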
static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

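/*
 * Common ->show() handler for the per-hctx (hardware queue) attributes,
 * mirroring blk_mq_sysfs_show() above.
 */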
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

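/* Per-hctx ->store() counterpart. */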
static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

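/*
 * Per-ctx statistics. The two-column counters below print the sync value
 * (index 1) followed by the async value (index 0).
 */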
static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
				ctx->rq_dispatched[0]);
}

static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu\n", ctx->rq_merged);
}

static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
				ctx->rq_completed[0]);
}

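/* Dump the requests on @list, one pointer per line, under a "@msg:" header. */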
static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
{
	char *start_page = page;
	struct request *rq;

	page += sprintf(page, "%s:\n", msg);

	list_for_each_entry(rq, list, queuelist)
		page += sprintf(page, "\t%p\n", rq);

	return page - start_page;
}

static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
{
	ssize_t ret;

	spin_lock(&ctx->lock);
	ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
	spin_unlock(&ctx->lock);

	return ret;
}

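/* Per-hctx statistics. */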
static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
					   char *page)
{
	return sprintf(page, "%lu\n", hctx->queued);
}

static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "%lu\n", hctx->run);
}

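/*
 * Print hctx->dispatched[] as a table of dispatch batch sizes: the first row
 * counts runs that dispatched nothing, and the row labelled 2^(i - 1) counts
 * runs that dispatched roughly that many requests.
 */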
static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
					       char *page)
{
	char *start_page = page;
	int i;

	page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
		unsigned long d = 1U << (i - 1);

		page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
	}

	return page - start_page;
}

static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	ssize_t ret;

	spin_lock(&hctx->lock);
	ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
	spin_unlock(&hctx->lock);

	return ret;
}

static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return blk_mq_tag_sysfs_show(hctx->tags, page);
}

static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
}

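/*
 * Show the CPUs mapped to this hardware queue as a comma-separated list.
 * CPU hotplug is disabled while the mask is walked so the mapping cannot
 * change underneath us.
 */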
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	unsigned int i, first = 1;
	ssize_t ret = 0;

	blk_mq_disable_hotplug();

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret += sprintf(ret + page, "%u", i);
		else
			ret += sprintf(ret + page, ", %u", i);

		first = 0;
	}

	blk_mq_enable_hotplug();

	ret += sprintf(ret + page, "\n");
	return ret;
}

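/* Per-ctx (software queue) attributes; all read-only. */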
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
	.attr = {.name = "merged", .mode = S_IRUGO },
	.show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
	.attr = {.name = "completed", .mode = S_IRUGO },
	.show = blk_mq_sysfs_completed_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
	.attr = {.name = "rq_list", .mode = S_IRUGO },
	.show = blk_mq_sysfs_rq_list_show,
};

static struct attribute *default_ctx_attrs[] = {
	&blk_mq_sysfs_dispatched.attr,
	&blk_mq_sysfs_merged.attr,
	&blk_mq_sysfs_completed.attr,
	&blk_mq_sysfs_rq_list.attr,
	NULL,
};

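/* Per-hctx (hardware queue) attributes; all read-only. */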
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
	.attr = {.name = "queued", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_queued_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
	.attr = {.name = "run", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_run_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_dispatched_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
	.attr = {.name = "active", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_active_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
	.attr = {.name = "pending", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_rq_list_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
	.attr = {.name = "tags", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_queued.attr,
	&blk_mq_hw_sysfs_run.attr,
	&blk_mq_hw_sysfs_dispatched.attr,
	&blk_mq_hw_sysfs_pending.attr,
	&blk_mq_hw_sysfs_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	&blk_mq_hw_sysfs_active.attr,
	NULL,
};

static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show	= blk_mq_sysfs_show,
	.store	= blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.default_attrs	= default_ctx_attrs,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_attrs	= default_hw_ctx_attrs,
	.release	= blk_mq_sysfs_release,
};

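/*
 * Remove the hctx kobject and those of its software queues from sysfs.
 * Nothing to do if the hctx has no ctxs mapped to it or was never
 * registered (BLK_MQ_F_SYSFS_UP not set).
 */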
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

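/*
 * Add the hctx kobject under the queue's "mq" directory, then one "cpuN"
 * kobject for each software queue mapped to this hardware queue.
 */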
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, ret;

	if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
		return 0;

	ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			break;
	}

	return ret;
}

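/*
 * Tear down the whole mq sysfs hierarchy for @disk and drop the kobject
 * references taken at registration time, including the reference held on
 * the disk's device kobject.
 */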
void blk_mq_unregister_disk(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	int i, j;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_unregister_hctx(hctx);

		hctx_for_each_ctx(hctx, ctx, j)
			kobject_put(&ctx->kobj);

		kobject_put(&hctx->kobj);
	}

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
	kobject_put(&q->mq_kobj);

	kobject_put(&disk_to_dev(disk)->kobj);
}

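/* Initialize the queue, hardware queue and software queue kobjects. */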
static void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	int i;

	kobject_init(&q->mq_kobj, &blk_mq_ktype);

	queue_for_each_hw_ctx(q, hctx, i)
		kobject_init(&hctx->kobj, &blk_mq_hw_ktype);

	queue_for_each_ctx(q, ctx, i)
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
}

/* see blk_register_queue() */
void blk_mq_finish_init(struct request_queue *q)
{
	percpu_ref_switch_to_percpu(&q->mq_usage_counter);
}

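/*
 * Register the "mq" directory under the disk's device kobject and populate
 * it with one directory per hardware queue. On failure, everything that was
 * added is torn down again via blk_mq_unregister_disk().
 */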
int blk_mq_register_disk(struct gendisk *disk)
{
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	blk_mq_sysfs_init(q);

	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		return ret;

	kobject_uevent(&q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		hctx->flags |= BLK_MQ_F_SYSFS_UP;
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

	if (ret) {
		blk_mq_unregister_disk(disk);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(blk_mq_register_disk);

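/*
 * Remove only the per-hardware-queue directories, e.g. while the queue's
 * hctx/ctx mapping is being rebuilt; the "mq" directory itself stays
 * registered.
 */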
void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);
}

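/* Re-add the hardware queue directories removed by blk_mq_sysfs_unregister(). */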
int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

	return ret;
}