// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"

static void blk_mq_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

	free_percpu(ctxs->queue_ctx);
	kfree(ctxs);
}

static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

	/* ctx->ctxs won't be released until all ctx are freed */
	kobject_put(&ctx->ctxs->kobj);
}

static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);

	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
};

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	const size_t size = PAGE_SIZE - 1;
	unsigned int i, first = 1;
	int ret = 0, pos = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret = snprintf(pos + page, size - pos, "%u", i);
		else
			ret = snprintf(pos + page, size - pos, ", %u", i);

		if (ret >= size - pos)
			break;

		first = 0;
		pos += ret;
	}

	ret = snprintf(pos + page, size + 1 - pos, "\n");
	return pos + ret;
}

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = 0444 },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};
ATTRIBUTE_GROUPS(default_hw_ctx);

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
};
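
/*
 * The three kobject types below back the mq sysfs hierarchy: blk_mq_ktype
 * for the per-queue "mq" directory, blk_mq_ctx_ktype for each per-CPU
 * software context, and blk_mq_hw_ktype for each hardware context.  Their
 * ->release callbacks (defined at the top of this file) run only once the
 * last kobject reference is dropped, so the lifetime of the ctx and hctx
 * structures follows kobject refcounting rather than teardown order.
 */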
static const struct kobj_type blk_mq_ktype = {
	.release	= blk_mq_sysfs_release,
};

static const struct kobj_type blk_mq_ctx_ktype = {
	.release	= blk_mq_ctx_sysfs_release,
};

static const struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_groups = default_hw_ctx_groups,
	.release	= blk_mq_hw_sysfs_release,
};

static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, j, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			goto out;
	}

	return 0;
out:
	hctx_for_each_ctx(hctx, ctx, j) {
		if (j < i)
			kobject_del(&ctx->kobj);
	}
	kobject_del(&hctx->kobj);
	return ret;
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(q->mq_kobj);
}

void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);

		kobject_get(q->mq_kobj);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}

int blk_mq_sysfs_register(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i, j;
	int ret;

	lockdep_assert_held(&q->sysfs_dir_lock);

	ret = kobject_add(q->mq_kobj, &disk_to_dev(disk)->kobj, "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto unreg;
	}

	q->mq_sysfs_init_done = true;

out:
	return ret;

unreg:
	queue_for_each_hw_ctx(q, hctx, j) {
		if (j < i)
			blk_mq_unregister_hctx(hctx);
	}

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	return ret;
}

void blk_mq_sysfs_unregister(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	lockdep_assert_held(&q->sysfs_dir_lock);

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);

	q->mq_sysfs_init_done = false;
}
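
/*
 * The two helpers below tear down and rebuild only the per-hctx
 * directories, leaving the parent "mq" kobject registered.  Unlike
 * blk_mq_sysfs_register()/blk_mq_sysfs_unregister(), they take
 * q->sysfs_dir_lock themselves and quietly do nothing unless
 * mq_sysfs_init_done shows the hierarchy is live; this lets callers such
 * as the nr_hw_queues update path remap hardware contexts while the disk
 * stays visible in sysfs.
 */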
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

unlock:
	mutex_unlock(&q->sysfs_dir_lock);
}

int blk_mq_sysfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;
	int ret = 0;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

unlock:
	mutex_unlock(&q->sysfs_dir_lock);

	return ret;
}
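
/*
 * A rough sketch of the expected call order, for reference; the callers
 * named below live elsewhere in blk-mq, not in this file:
 *
 *	blk_mq_sysfs_init(q);		- queue allocation
 *	blk_mq_hctx_kobj_init(hctx);	- each newly allocated hctx
 *	blk_mq_sysfs_register(disk);	- blk_register_queue(), with
 *					  q->sysfs_dir_lock held
 *	blk_mq_sysfs_unregister(disk);	- blk_unregister_queue()
 *	blk_mq_sysfs_deinit(q);		- final queue release; drops the
 *					  per-ctx and mq_kobj references
 */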