// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

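/*
 * Release callback for the shared "mq" kobject. It runs once the last
 * per-ctx kobject has dropped its reference (see below), at which point
 * the per-cpu software queue contexts and their container can be freed.
 */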
static void blk_mq_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

	free_percpu(ctxs->queue_ctx);
	kfree(ctxs);
}

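/*
 * Each software queue context pins the shared ctxs kobject with a
 * reference taken via kobject_get() in blk_mq_sysfs_init(), so the
 * release above cannot run until every ctx kobject has been released.
 */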
static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

	/* ctx->ctxs won't be released until all ctx are freed */
	kobject_put(&ctx->ctxs->kobj);
}

static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);

	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}

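/*
 * Per-hctx sysfs attributes are wrapped in this entry type so that the
 * show() and store() handlers receive a typed blk_mq_hw_ctx pointer.
 * The generic dispatchers below resolve the embedded attribute and
 * serialize each callback against queue updates via q->sysfs_lock.
 */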
struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

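/*
 * Emit the CPUs mapped to this hardware queue as a comma-separated list,
 * e.g. "0, 2, 4\n". Output is clamped to one page: if the list would
 * overflow, it is truncated after the last entry that fits and then
 * terminated with a newline.
 */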
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	const size_t size = PAGE_SIZE - 1;
	unsigned int i, first = 1;
	int ret = 0, pos = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret = snprintf(pos + page, size - pos, "%u", i);
		else
			ret = snprintf(pos + page, size - pos, ", %u", i);

		if (ret >= size - pos)
			break;

		first = 0;
		pos += ret;
	}

	ret = snprintf(pos + page, size + 1 - pos, "\n");
	return pos + ret;
}

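/* Read-only (0444) attributes exposed in each hardware queue directory. */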
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = 0444 },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};
ATTRIBUTE_GROUPS(default_hw_ctx);

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.release	= blk_mq_ctx_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_groups = default_hw_ctx_groups,
	.release	= blk_mq_hw_sysfs_release,
};

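/*
 * Remove a hardware queue directory and its cpu<N> children from sysfs.
 * Queues with no mapped software contexts were never registered, so
 * they are skipped here as well.
 */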
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			break;
	}

	return ret;
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

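/*
 * Undo blk_mq_sysfs_init(): releasing each ctx kobject indirectly drops
 * the reference it holds on the mq kobject, and the final kobject_put()
 * drops the mq kobject's initial reference.
 */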
void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(q->mq_kobj);
}

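/*
 * Initialize the mq kobject and one kobject per possible CPU's software
 * queue context. Each ctx takes an extra reference on the mq kobject so
 * that blk_mq_sysfs_release() runs only after all contexts are gone.
 */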
void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);

		kobject_get(q->mq_kobj);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}

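/*
 * Build the /sys/block/<disk>/mq hierarchy: an "mq" directory under the
 * disk, one numbered directory per hardware queue, and a cpu<N> entry
 * for every software context mapped to that queue. For example (with a
 * hypothetical disk name), a two-queue device might expose:
 *
 *	/sys/block/nvme0n1/mq/0/cpu0
 *	/sys/block/nvme0n1/mq/0/nr_tags
 *	/sys/block/nvme0n1/mq/1/cpu1
 *	...
 *
 * On failure, any queues registered so far are torn down again.
 */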
int blk_mq_sysfs_register(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i, j;
	int ret;

	lockdep_assert_held(&q->sysfs_dir_lock);

	ret = kobject_add(q->mq_kobj, &disk_to_dev(disk)->kobj, "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto unreg;
	}

	q->mq_sysfs_init_done = true;

out:
	return ret;

unreg:
	queue_for_each_hw_ctx(q, hctx, j) {
		if (j < i)
			blk_mq_unregister_hctx(hctx);
	}

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	return ret;
}

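/* Tear down the hierarchy in reverse: hardware queues first, then "mq". */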
void blk_mq_sysfs_unregister(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	lockdep_assert_held(&q->sysfs_dir_lock);

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);

	q->mq_sysfs_init_done = false;
}

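/*
 * The two helpers below re-register or remove only the per-hctx
 * directories, e.g. while the set of hardware queues is being updated.
 * mq_sysfs_init_done guards against touching sysfs before the initial
 * registration has completed.
 */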
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

unlock:
	mutex_unlock(&q->sysfs_dir_lock);
}

int blk_mq_sysfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;
	int ret = 0;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

unlock:
	mutex_unlock(&q->sysfs_dir_lock);

	return ret;
}
329