#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}

struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

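/*
 * Show handlers for the per-hctx attributes (nr_tags, nr_reserved_tags and
 * cpu_list) exposed in the hardware queue directories under the queue's
 * mq/ kobject. The tag counts come straight from the hctx's tag set, and
 * cpu_list prints the CPUs mapped to this hardware queue.
 */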
static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	unsigned int i, first = 1;
	ssize_t ret = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret += sprintf(ret + page, "%u", i);
		else
			ret += sprintf(ret + page, ", %u", i);

		first = 0;
	}

	ret += sprintf(ret + page, "\n");
	return ret;
}

static struct attribute *default_ctx_attrs[] = {
	NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};

static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show = blk_mq_sysfs_show,
	.store = blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show = blk_mq_hw_sysfs_show,
	.store = blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.sysfs_ops = &blk_mq_sysfs_ops,
	.release = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops = &blk_mq_sysfs_ops,
	.default_attrs = default_ctx_attrs,
	.release = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops = &blk_mq_hw_sysfs_ops,
	.default_attrs = default_hw_ctx_attrs,
	.release = blk_mq_hw_sysfs_release,
};

static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			break;
	}

	return ret;
}

static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	lockdep_assert_held(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

	blk_mq_debugfs_unregister_mq(q);

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
	kobject_put(&dev->kobj);

	q->mq_sysfs_init_done = false;
}

void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
	mutex_lock(&q->sysfs_lock);
	__blk_mq_unregister_dev(dev, q);
	mutex_unlock(&q->sysfs_lock);
}

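/*
 * Initialization and teardown of the mq kobject and the per-CPU software
 * context kobjects. These helpers only set up or drop references; adding
 * the objects to and removing them from sysfs is handled by the
 * register/unregister paths.
 */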
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(&q->mq_kobj);
}

void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(&q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}

int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	WARN_ON_ONCE(!q->kobj.parent);
	lockdep_assert_held(&q->sysfs_lock);

	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(&q->mq_kobj, KOBJ_ADD);

	blk_mq_debugfs_register(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto unreg;
	}

	q->mq_sysfs_init_done = true;

out:
	return ret;

unreg:
	while (--i >= 0)
		blk_mq_unregister_hctx(q->queue_hw_ctx[i]);

	blk_mq_debugfs_unregister_mq(q);

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
	kobject_put(&dev->kobj);
	return ret;
}

int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	int ret;

	mutex_lock(&q->sysfs_lock);
	ret = __blk_mq_register_dev(dev, q);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_dev);

void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	mutex_lock(&q->sysfs_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	blk_mq_debugfs_unregister_mq(q);

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

unlock:
	mutex_unlock(&q->sysfs_lock);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	mutex_lock(&q->sysfs_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	blk_mq_debugfs_register_mq(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

unlock:
	mutex_unlock(&q->sysfs_lock);

	return ret;
}