/* xref: /openbmc/linux/block/blk-mq-sysfs.c (revision afb46f79) */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

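/*
 * Empty release: the ctx and hctx kobjects are embedded in structures
 * whose lifetime is managed by the queue itself, so there is nothing
 * to free when the last reference goes away.
 */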
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

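/*
 * An attribute entry pairs a sysfs attribute with typed show/store
 * handlers: one flavor for software queues (blk_mq_ctx) and one for
 * hardware queues (blk_mq_hw_ctx).
 */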
struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

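/*
 * Generic show/store dispatchers: recover the typed entry and context
 * from the raw kobject/attribute pair via container_of(), then call the
 * handler under q->sysfs_lock. A dying queue yields -ENOENT; an
 * attribute without the requested handler yields -EIO.
 */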
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

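/*
 * Per-ctx statistics. The two-slot counters appear to be indexed by
 * request sync-ness (slot 1 sync, slot 0 async), so each line prints
 * the sync count followed by the async count.
 */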
static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
				ctx->rq_dispatched[0]);
}

static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu\n", ctx->rq_merged);
}

static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
				ctx->rq_completed[0]);
}

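/*
 * Dump the request pointers on a pending list. Shared by the per-ctx
 * rq_list and per-hctx dispatch list attributes; callers hold the
 * corresponding lock.
 */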
static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
{
	char *start_page = page;
	struct request *rq;

	page += sprintf(page, "%s:\n", msg);

	list_for_each_entry(rq, list, queuelist)
		page += sprintf(page, "\t%p\n", rq);

	return page - start_page;
}

static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
{
	ssize_t ret;

	spin_lock(&ctx->lock);
	ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
	spin_unlock(&ctx->lock);

	return ret;
}

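/* Plain per-hctx counters: requests queued and number of queue runs. */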
static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
					   char *page)
{
	return sprintf(page, "%lu\n", hctx->queued);
}

static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "%lu\n", hctx->run);
}

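/*
 * hctx->dispatched[] tracks dispatch batch sizes in power-of-two
 * buckets: bucket 0 is printed with label 0, bucket i with label
 * 2^(i-1).
 */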
static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
					       char *page)
{
	char *start_page = page;
	int i;

	page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
		unsigned long d = 1U << (i - 1);

		page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
	}

	return page - start_page;
}

static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	ssize_t ret;

	spin_lock(&hctx->lock);
	ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
	spin_unlock(&hctx->lock);

	return ret;
}

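/*
 * ipi_redirect reports and toggles BLK_MQ_F_SHOULD_IPI which, as the
 * names suggest, controls whether completions are redirected via IPI to
 * the CPU that submitted the request. The store side also propagates
 * the new setting to every ctx mapped to this hardware queue.
 */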
static ssize_t blk_mq_hw_sysfs_ipi_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	ssize_t ret;

	spin_lock(&hctx->lock);
	ret = sprintf(page, "%u\n", !!(hctx->flags & BLK_MQ_F_SHOULD_IPI));
	spin_unlock(&hctx->lock);

	return ret;
}

static ssize_t blk_mq_hw_sysfs_ipi_store(struct blk_mq_hw_ctx *hctx,
					 const char *page, size_t len)
{
	struct blk_mq_ctx *ctx;
	unsigned long ret;
	unsigned int i;

	if (kstrtoul(page, 10, &ret)) {
		pr_err("blk-mq-sysfs: invalid input '%s'\n", page);
		return -EINVAL;
	}

	spin_lock(&hctx->lock);
	if (ret)
		hctx->flags |= BLK_MQ_F_SHOULD_IPI;
	else
		hctx->flags &= ~BLK_MQ_F_SHOULD_IPI;
	spin_unlock(&hctx->lock);

	hctx_for_each_ctx(hctx, ctx, i)
		ctx->ipi_redirect = !!ret;

	return len;
}

static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return blk_mq_tag_sysfs_show(hctx->tags, page);
}

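/*
 * cpu_list prints the online CPUs whose software queues map to this
 * hardware queue. CPU hotplug is disabled while walking mq_map so the
 * mapping cannot change underneath us.
 */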
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	unsigned int i, queue_num, first = 1;
	ssize_t ret = 0;

	blk_mq_disable_hotplug();

	for_each_online_cpu(i) {
		queue_num = hctx->queue->mq_map[i];
		if (queue_num != hctx->queue_num)
			continue;

		if (first)
			ret += sprintf(ret + page, "%u", i);
		else
			ret += sprintf(ret + page, ", %u", i);

		first = 0;
	}

	blk_mq_enable_hotplug();

	ret += sprintf(ret + page, "\n");
	return ret;
}

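/* Read-only attributes exposed in each per-CPU (ctx) directory. */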
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
	.attr = {.name = "merged", .mode = S_IRUGO },
	.show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
	.attr = {.name = "completed", .mode = S_IRUGO },
	.show = blk_mq_sysfs_completed_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
	.attr = {.name = "rq_list", .mode = S_IRUGO },
	.show = blk_mq_sysfs_rq_list_show,
};

static struct attribute *default_ctx_attrs[] = {
	&blk_mq_sysfs_dispatched.attr,
	&blk_mq_sysfs_merged.attr,
	&blk_mq_sysfs_completed.attr,
	&blk_mq_sysfs_rq_list.attr,
	NULL,
};

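/*
 * Attributes exposed in each hardware queue directory; ipi_redirect is
 * the only writable one.
 */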
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
	.attr = {.name = "queued", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_queued_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
	.attr = {.name = "run", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_run_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_dispatched_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
	.attr = {.name = "pending", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_rq_list_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_ipi = {
	.attr = {.name = "ipi_redirect", .mode = S_IRUGO | S_IWUSR},
	.show = blk_mq_hw_sysfs_ipi_show,
	.store = blk_mq_hw_sysfs_ipi_store,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
	.attr = {.name = "tags", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_queued.attr,
	&blk_mq_hw_sysfs_run.attr,
	&blk_mq_hw_sysfs_dispatched.attr,
	&blk_mq_hw_sysfs_pending.attr,
	&blk_mq_hw_sysfs_ipi.attr,
	&blk_mq_hw_sysfs_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};

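/*
 * Wire the dispatchers and default attributes into kobj_types.
 * blk_mq_ktype backs the bare "mq" directory, which carries no
 * attributes of its own.
 */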
static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show	= blk_mq_sysfs_show,
	.store	= blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.default_attrs	= default_ctx_attrs,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_attrs	= default_hw_ctx_attrs,
	.release	= blk_mq_sysfs_release,
};

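/*
 * Tear down in reverse order of registration: ctx kobjects first, then
 * each hctx kobject, then the "mq" directory itself, and finally drop
 * the disk device reference taken in blk_mq_register_disk().
 */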
void blk_mq_unregister_disk(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	int i, j;

	queue_for_each_hw_ctx(q, hctx, i) {
		hctx_for_each_ctx(hctx, ctx, j) {
			kobject_del(&ctx->kobj);
			kobject_put(&ctx->kobj);
		}
		kobject_del(&hctx->kobj);
		kobject_put(&hctx->kobj);
	}

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
	kobject_put(&q->mq_kobj);

	kobject_put(&disk_to_dev(disk)->kobj);
}

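/*
 * Build the sysfs hierarchy: /sys/block/<disk>/mq, one numbered
 * directory per hardware queue, and one cpu<N> directory per software
 * queue mapped to that hardware queue. Any failure unwinds the whole
 * hierarchy via blk_mq_unregister_disk().
 */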
int blk_mq_register_disk(struct gendisk *disk)
{
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	int ret, i, j;

	kobject_init(&q->mq_kobj, &blk_mq_ktype);

	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		return ret;

	kobject_uevent(&q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
		ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", i);
		if (ret)
			break;

		if (!hctx->nr_ctx)
			continue;

		hctx_for_each_ctx(hctx, ctx, j) {
			kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
			ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
			if (ret)
				break;
		}
	}

	if (ret) {
		blk_mq_unregister_disk(disk);
		return ret;
	}

	return 0;
}