/* xref: /openbmc/linux/block/blk-mq-sysfs.c (revision 239480ab) */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

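/*
 * Release handler shared by the queue-level "mq" kobject and the per-cpu
 * ctx kobjects. Both are embedded in structures whose storage is managed
 * elsewhere (the request_queue and the per-cpu queue_ctx area), so there
 * is nothing to free here.
 */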
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

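/*
 * Final put on a hardware context kobject: release everything owned by the
 * hctx itself. This is what ultimately frees a blk_mq_hw_ctx once its sysfs
 * reference count drops to zero.
 */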
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}

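/*
 * Attribute entries pair a generic struct attribute with show/store
 * callbacks that take the typed software (ctx) or hardware (hctx) queue
 * context, so the sysfs_ops wrappers below can dispatch to them.
 */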
struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

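/*
 * Generic show/store wrappers: recover the typed entry and context via
 * container_of(), serialize against queue teardown with q->sysfs_lock, and
 * return -ENOENT once the queue has been marked dying. The same pattern is
 * repeated for the ctx and hctx variants.
 */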
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

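/*
 * Per-hctx attribute implementations: tag counts and the list of CPUs
 * mapped to this hardware queue.
 */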
static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

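/*
 * Print the CPUs served by this hardware queue as a comma separated list,
 * e.g. "0, 1, 2, 3" (illustrative output; the actual set depends on the
 * device's queue mapping).
 */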
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	unsigned int i, first = 1;
	ssize_t ret = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret += sprintf(ret + page, "%u", i);
		else
			ret += sprintf(ret + page, ", %u", i);

		first = 0;
	}

	ret += sprintf(ret + page, "\n");
	return ret;
}

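/*
 * The per-cpu ctx directories currently carry no attributes of their own;
 * the old per-ctx counters appear to have moved to blk-mq debugfs.
 */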
static struct attribute *default_ctx_attrs[] = {
	NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};

static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show	= blk_mq_sysfs_show,
	.store	= blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

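/*
 * kobject types backing the sysfs hierarchy below the queue, roughly
 * (illustrative paths; the parent is whatever device kobject is passed to
 * __blk_mq_register_dev()):
 *
 *	/sys/block/<disk>/mq			blk_mq_ktype (q->mq_kobj)
 *	/sys/block/<disk>/mq/<hctx>		blk_mq_hw_ktype
 *	/sys/block/<disk>/mq/<hctx>/cpu<n>	blk_mq_ctx_ktype
 */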
static struct kobj_type blk_mq_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.default_attrs	= default_ctx_attrs,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_attrs	= default_hw_ctx_attrs,
	.release	= blk_mq_hw_sysfs_release,
};

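/*
 * Remove one hardware queue directory together with its per-cpu ctx
 * directories. Hardware queues with no mapped ctxs were never registered
 * and are skipped here as well.
 */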
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

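/*
 * Add the directory for one hardware queue (named after its queue number)
 * plus a "cpu<n>" subdirectory for each software context mapped to it,
 * e.g. mq/0/cpu0 and mq/0/cpu1 for a single queue serving two CPUs
 * (hypothetical layout). On failure, registration of the remaining ctxs is
 * abandoned and the error is returned to the caller.
 */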
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			break;
	}

	return ret;
}

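/*
 * Tear down the whole mq hierarchy for a queue: every hardware queue
 * directory, then the "mq" directory itself, dropping the device reference
 * taken at registration time. The double-underscore variant requires
 * q->sysfs_lock to be held; blk_mq_unregister_dev() is the locked wrapper.
 */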
static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	lockdep_assert_held(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
	kobject_put(&dev->kobj);

	q->mq_sysfs_init_done = false;
}

void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
	mutex_lock(&q->sysfs_lock);
	__blk_mq_unregister_dev(dev, q);
	mutex_unlock(&q->sysfs_lock);
}

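/* Initialize the embedded kobject of a freshly allocated hardware context. */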
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

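/*
 * Counterpart to blk_mq_sysfs_init(): drop the initial reference on the
 * per-cpu ctx kobjects and on the queue's mq kobject.
 */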
void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(&q->mq_kobj);
}

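/*
 * One-time kobject initialization for the queue's mq kobject and every
 * possible CPU's ctx kobject; the directories themselves are only added
 * later, when the queue is registered with sysfs.
 */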
void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(&q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}

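/*
 * Register the "mq" directory under @dev and populate it with one directory
 * per hardware queue. If adding a hardware queue fails, the queues
 * registered so far are unwound and the reference taken on @dev is dropped
 * again. Callers must hold q->sysfs_lock.
 */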
int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	WARN_ON_ONCE(!q->kobj.parent);
	lockdep_assert_held(&q->sysfs_lock);

	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(&q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto unreg;
	}

	q->mq_sysfs_init_done = true;

out:
	return ret;

unreg:
	while (--i >= 0)
		blk_mq_unregister_hctx(q->queue_hw_ctx[i]);

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
	kobject_put(&dev->kobj);
	return ret;
}

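/*
 * Locked wrapper around __blk_mq_register_dev(), exported for use outside
 * the block core. Presumably the registration path for a multiqueue disk
 * ends up calling one of the two, along the lines of this illustrative
 * sketch (not a call site in this file):
 *
 *	if (q->mq_ops)
 *		blk_mq_register_dev(disk_to_dev(disk), q);
 */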
int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	int ret;

	mutex_lock(&q->sysfs_lock);
	ret = __blk_mq_register_dev(dev, q);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_dev);

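/*
 * Temporarily remove the hardware queue directories, e.g. while the set of
 * hardware queues is being reworked; blk_mq_sysfs_register() re-adds them.
 * Both are no-ops until the initial registration has completed
 * (mq_sysfs_init_done).
 */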
void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	mutex_lock(&q->sysfs_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

unlock:
	mutex_unlock(&q->sysfs_lock);
}

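/* Re-add the hardware queue directories removed by blk_mq_sysfs_unregister(). */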
int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	mutex_lock(&q->sysfs_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

unlock:
	mutex_unlock(&q->sysfs_lock);

	return ret;
}