// SPDX-License-Identifier: GPL-2.0-only
/*
 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
 * Shaohua Li <shli@fb.com>
 */
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/init.h>
#include "null_blk.h"

#undef pr_fmt
#define pr_fmt(fmt)	"null_blk: " fmt

#define FREE_BATCH		16

#define TICKS_PER_SEC		50ULL
#define TIMER_INTERVAL		(NSEC_PER_SEC / TICKS_PER_SEC)

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static DECLARE_FAULT_ATTR(null_timeout_attr);
static DECLARE_FAULT_ATTR(null_requeue_attr);
static DECLARE_FAULT_ATTR(null_init_hctx_attr);
#endif

static inline u64 mb_per_tick(int mbps)
{
	return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}

/*
 * Status flags for nullb_device.
 *
 * CONFIGURED:	Device has been configured and turned on. Cannot reconfigure.
 * UP:		Device is currently on and visible in userspace.
 * THROTTLED:	Device is being throttled.
 * CACHE:	Device is using a write-back cache.
 */
enum nullb_device_flags {
	NULLB_DEV_FL_CONFIGURED	= 0,
	NULLB_DEV_FL_UP		= 1,
	NULLB_DEV_FL_THROTTLED	= 2,
	NULLB_DEV_FL_CACHE	= 3,
};

#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)
/*
 * nullb_page is a page in memory for nullb devices.
 *
 * @page:	The page holding the data.
 * @bitmap:	The bitmap represents which sector in the page has data.
 *		Each bit represents one block size. For example, sector 8
 *		will use the 7th bit
 * The highest 2 bits of bitmap are for special purposes. LOCK means the cache
 * page is being flushed to storage. FREE means the cache page is freed and
 * should be skipped from flushing to storage. Please see
 * null_make_cache_space
 */
struct nullb_page {
	struct page *page;
	DECLARE_BITMAP(bitmap, MAP_SZ);
};
#define NULLB_PAGE_LOCK (MAP_SZ - 1)
#define NULLB_PAGE_FREE (MAP_SZ - 2)

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
static struct blk_mq_tag_set tag_set;

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static bool g_virt_boundary = false;
module_param_named(virt_boundary, g_virt_boundary, bool, 0444);
MODULE_PARM_DESC(virt_boundary, "Require a virtual boundary for the device. Default: False");

static int g_no_sched;
module_param_named(no_sched, g_no_sched, int, 0444);
MODULE_PARM_DESC(no_sched, "No io scheduler");

static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, 0444);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_poll_queues = 1;
module_param_named(poll_queues, g_poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of IOPOLL submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
/*
 * For more details about fault injection, please refer to
 * Documentation/fault-injection/fault-injection.rst.
 */
static char g_timeout_str[80];
module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);
MODULE_PARM_DESC(timeout, "Fault injection. 
timeout=<interval>,<probability>,<space>,<times>"); 114 115 static char g_requeue_str[80]; 116 module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444); 117 MODULE_PARM_DESC(requeue, "Fault injection. requeue=<interval>,<probability>,<space>,<times>"); 118 119 static char g_init_hctx_str[80]; 120 module_param_string(init_hctx, g_init_hctx_str, sizeof(g_init_hctx_str), 0444); 121 MODULE_PARM_DESC(init_hctx, "Fault injection to fail hctx init. init_hctx=<interval>,<probability>,<space>,<times>"); 122 #endif 123 124 static int g_queue_mode = NULL_Q_MQ; 125 126 static int null_param_store_val(const char *str, int *val, int min, int max) 127 { 128 int ret, new_val; 129 130 ret = kstrtoint(str, 10, &new_val); 131 if (ret) 132 return -EINVAL; 133 134 if (new_val < min || new_val > max) 135 return -EINVAL; 136 137 *val = new_val; 138 return 0; 139 } 140 141 static int null_set_queue_mode(const char *str, const struct kernel_param *kp) 142 { 143 return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ); 144 } 145 146 static const struct kernel_param_ops null_queue_mode_param_ops = { 147 .set = null_set_queue_mode, 148 .get = param_get_int, 149 }; 150 151 device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444); 152 MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)"); 153 154 static int g_gb = 250; 155 module_param_named(gb, g_gb, int, 0444); 156 MODULE_PARM_DESC(gb, "Size in GB"); 157 158 static int g_bs = 512; 159 module_param_named(bs, g_bs, int, 0444); 160 MODULE_PARM_DESC(bs, "Block size (in bytes)"); 161 162 static int g_max_sectors; 163 module_param_named(max_sectors, g_max_sectors, int, 0444); 164 MODULE_PARM_DESC(max_sectors, "Maximum size of a command (in 512B sectors)"); 165 166 static unsigned int nr_devices = 1; 167 module_param(nr_devices, uint, 0444); 168 MODULE_PARM_DESC(nr_devices, "Number of devices to register"); 169 170 static bool g_blocking; 171 module_param_named(blocking, g_blocking, bool, 0444); 172 MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device"); 173 174 static bool shared_tags; 175 module_param(shared_tags, bool, 0444); 176 MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq"); 177 178 static bool g_shared_tag_bitmap; 179 module_param_named(shared_tag_bitmap, g_shared_tag_bitmap, bool, 0444); 180 MODULE_PARM_DESC(shared_tag_bitmap, "Use shared tag bitmap for all submission queues for blk-mq"); 181 182 static int g_irqmode = NULL_IRQ_SOFTIRQ; 183 184 static int null_set_irqmode(const char *str, const struct kernel_param *kp) 185 { 186 return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE, 187 NULL_IRQ_TIMER); 188 } 189 190 static const struct kernel_param_ops null_irqmode_param_ops = { 191 .set = null_set_irqmode, 192 .get = param_get_int, 193 }; 194 195 device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444); 196 MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer"); 197 198 static unsigned long g_completion_nsec = 10000; 199 module_param_named(completion_nsec, g_completion_nsec, ulong, 0444); 200 MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns"); 201 202 static int g_hw_queue_depth = 64; 203 module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444); 204 MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. 
Default: 64"); 205 206 static bool g_use_per_node_hctx; 207 module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444); 208 MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false"); 209 210 static bool g_zoned; 211 module_param_named(zoned, g_zoned, bool, S_IRUGO); 212 MODULE_PARM_DESC(zoned, "Make device as a host-managed zoned block device. Default: false"); 213 214 static unsigned long g_zone_size = 256; 215 module_param_named(zone_size, g_zone_size, ulong, S_IRUGO); 216 MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be power-of-two: Default: 256"); 217 218 static unsigned long g_zone_capacity; 219 module_param_named(zone_capacity, g_zone_capacity, ulong, 0444); 220 MODULE_PARM_DESC(zone_capacity, "Zone capacity in MB when block device is zoned. Can be less than or equal to zone size. Default: Zone size"); 221 222 static unsigned int g_zone_nr_conv; 223 module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444); 224 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0"); 225 226 static unsigned int g_zone_max_open; 227 module_param_named(zone_max_open, g_zone_max_open, uint, 0444); 228 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones when block device is zoned. Default: 0 (no limit)"); 229 230 static unsigned int g_zone_max_active; 231 module_param_named(zone_max_active, g_zone_max_active, uint, 0444); 232 MODULE_PARM_DESC(zone_max_active, "Maximum number of active zones when block device is zoned. Default: 0 (no limit)"); 233 234 static struct nullb_device *null_alloc_dev(void); 235 static void null_free_dev(struct nullb_device *dev); 236 static void null_del_dev(struct nullb *nullb); 237 static int null_add_dev(struct nullb_device *dev); 238 static struct nullb *null_find_dev_by_name(const char *name); 239 static void null_free_device_storage(struct nullb_device *dev, bool is_cache); 240 241 static inline struct nullb_device *to_nullb_device(struct config_item *item) 242 { 243 return item ? container_of(item, struct nullb_device, item) : NULL; 244 } 245 246 static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page) 247 { 248 return snprintf(page, PAGE_SIZE, "%u\n", val); 249 } 250 251 static inline ssize_t nullb_device_ulong_attr_show(unsigned long val, 252 char *page) 253 { 254 return snprintf(page, PAGE_SIZE, "%lu\n", val); 255 } 256 257 static inline ssize_t nullb_device_bool_attr_show(bool val, char *page) 258 { 259 return snprintf(page, PAGE_SIZE, "%u\n", val); 260 } 261 262 static ssize_t nullb_device_uint_attr_store(unsigned int *val, 263 const char *page, size_t count) 264 { 265 unsigned int tmp; 266 int result; 267 268 result = kstrtouint(page, 0, &tmp); 269 if (result < 0) 270 return result; 271 272 *val = tmp; 273 return count; 274 } 275 276 static ssize_t nullb_device_ulong_attr_store(unsigned long *val, 277 const char *page, size_t count) 278 { 279 int result; 280 unsigned long tmp; 281 282 result = kstrtoul(page, 0, &tmp); 283 if (result < 0) 284 return result; 285 286 *val = tmp; 287 return count; 288 } 289 290 static ssize_t nullb_device_bool_attr_store(bool *val, const char *page, 291 size_t count) 292 { 293 bool tmp; 294 int result; 295 296 result = kstrtobool(page, &tmp); 297 if (result < 0) 298 return result; 299 300 *val = tmp; 301 return count; 302 } 303 304 /* The following macro should only be used with TYPE = {uint, ulong, bool}. 
*/ 305 #define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY) \ 306 static ssize_t \ 307 nullb_device_##NAME##_show(struct config_item *item, char *page) \ 308 { \ 309 return nullb_device_##TYPE##_attr_show( \ 310 to_nullb_device(item)->NAME, page); \ 311 } \ 312 static ssize_t \ 313 nullb_device_##NAME##_store(struct config_item *item, const char *page, \ 314 size_t count) \ 315 { \ 316 int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY;\ 317 struct nullb_device *dev = to_nullb_device(item); \ 318 TYPE new_value = 0; \ 319 int ret; \ 320 \ 321 ret = nullb_device_##TYPE##_attr_store(&new_value, page, count);\ 322 if (ret < 0) \ 323 return ret; \ 324 if (apply_fn) \ 325 ret = apply_fn(dev, new_value); \ 326 else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags)) \ 327 ret = -EBUSY; \ 328 if (ret < 0) \ 329 return ret; \ 330 dev->NAME = new_value; \ 331 return count; \ 332 } \ 333 CONFIGFS_ATTR(nullb_device_, NAME); 334 335 static int nullb_update_nr_hw_queues(struct nullb_device *dev, 336 unsigned int submit_queues, 337 unsigned int poll_queues) 338 339 { 340 struct blk_mq_tag_set *set; 341 int ret, nr_hw_queues; 342 343 if (!dev->nullb) 344 return 0; 345 346 /* 347 * Make sure at least one submit queue exists. 348 */ 349 if (!submit_queues) 350 return -EINVAL; 351 352 /* 353 * Make sure that null_init_hctx() does not access nullb->queues[] past 354 * the end of that array. 355 */ 356 if (submit_queues > nr_cpu_ids || poll_queues > g_poll_queues) 357 return -EINVAL; 358 359 /* 360 * Keep previous and new queue numbers in nullb_device for reference in 361 * the call back function null_map_queues(). 362 */ 363 dev->prev_submit_queues = dev->submit_queues; 364 dev->prev_poll_queues = dev->poll_queues; 365 dev->submit_queues = submit_queues; 366 dev->poll_queues = poll_queues; 367 368 set = dev->nullb->tag_set; 369 nr_hw_queues = submit_queues + poll_queues; 370 blk_mq_update_nr_hw_queues(set, nr_hw_queues); 371 ret = set->nr_hw_queues == nr_hw_queues ? 
0 : -ENOMEM; 372 373 if (ret) { 374 /* on error, revert the queue numbers */ 375 dev->submit_queues = dev->prev_submit_queues; 376 dev->poll_queues = dev->prev_poll_queues; 377 } 378 379 return ret; 380 } 381 382 static int nullb_apply_submit_queues(struct nullb_device *dev, 383 unsigned int submit_queues) 384 { 385 return nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues); 386 } 387 388 static int nullb_apply_poll_queues(struct nullb_device *dev, 389 unsigned int poll_queues) 390 { 391 return nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues); 392 } 393 394 NULLB_DEVICE_ATTR(size, ulong, NULL); 395 NULLB_DEVICE_ATTR(completion_nsec, ulong, NULL); 396 NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues); 397 NULLB_DEVICE_ATTR(poll_queues, uint, nullb_apply_poll_queues); 398 NULLB_DEVICE_ATTR(home_node, uint, NULL); 399 NULLB_DEVICE_ATTR(queue_mode, uint, NULL); 400 NULLB_DEVICE_ATTR(blocksize, uint, NULL); 401 NULLB_DEVICE_ATTR(max_sectors, uint, NULL); 402 NULLB_DEVICE_ATTR(irqmode, uint, NULL); 403 NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL); 404 NULLB_DEVICE_ATTR(index, uint, NULL); 405 NULLB_DEVICE_ATTR(blocking, bool, NULL); 406 NULLB_DEVICE_ATTR(use_per_node_hctx, bool, NULL); 407 NULLB_DEVICE_ATTR(memory_backed, bool, NULL); 408 NULLB_DEVICE_ATTR(discard, bool, NULL); 409 NULLB_DEVICE_ATTR(mbps, uint, NULL); 410 NULLB_DEVICE_ATTR(cache_size, ulong, NULL); 411 NULLB_DEVICE_ATTR(zoned, bool, NULL); 412 NULLB_DEVICE_ATTR(zone_size, ulong, NULL); 413 NULLB_DEVICE_ATTR(zone_capacity, ulong, NULL); 414 NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL); 415 NULLB_DEVICE_ATTR(zone_max_open, uint, NULL); 416 NULLB_DEVICE_ATTR(zone_max_active, uint, NULL); 417 NULLB_DEVICE_ATTR(virt_boundary, bool, NULL); 418 419 static ssize_t nullb_device_power_show(struct config_item *item, char *page) 420 { 421 return nullb_device_bool_attr_show(to_nullb_device(item)->power, page); 422 } 423 424 static ssize_t nullb_device_power_store(struct config_item *item, 425 const char *page, size_t count) 426 { 427 struct nullb_device *dev = to_nullb_device(item); 428 bool newp = false; 429 ssize_t ret; 430 431 ret = nullb_device_bool_attr_store(&newp, page, count); 432 if (ret < 0) 433 return ret; 434 435 if (!dev->power && newp) { 436 if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags)) 437 return count; 438 ret = null_add_dev(dev); 439 if (ret) { 440 clear_bit(NULLB_DEV_FL_UP, &dev->flags); 441 return ret; 442 } 443 444 set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); 445 dev->power = newp; 446 } else if (dev->power && !newp) { 447 if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) { 448 mutex_lock(&lock); 449 dev->power = newp; 450 null_del_dev(dev->nullb); 451 mutex_unlock(&lock); 452 } 453 clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); 454 } 455 456 return count; 457 } 458 459 CONFIGFS_ATTR(nullb_device_, power); 460 461 static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page) 462 { 463 struct nullb_device *t_dev = to_nullb_device(item); 464 465 return badblocks_show(&t_dev->badblocks, page, 0); 466 } 467 468 static ssize_t nullb_device_badblocks_store(struct config_item *item, 469 const char *page, size_t count) 470 { 471 struct nullb_device *t_dev = to_nullb_device(item); 472 char *orig, *buf, *tmp; 473 u64 start, end; 474 int ret; 475 476 orig = kstrndup(page, count, GFP_KERNEL); 477 if (!orig) 478 return -ENOMEM; 479 480 buf = strstrip(orig); 481 482 ret = -EINVAL; 483 if (buf[0] != '+' && buf[0] != '-') 484 goto out; 485 tmp = strchr(&buf[1], 
'-'); 486 if (!tmp) 487 goto out; 488 *tmp = '\0'; 489 ret = kstrtoull(buf + 1, 0, &start); 490 if (ret) 491 goto out; 492 ret = kstrtoull(tmp + 1, 0, &end); 493 if (ret) 494 goto out; 495 ret = -EINVAL; 496 if (start > end) 497 goto out; 498 /* enable badblocks */ 499 cmpxchg(&t_dev->badblocks.shift, -1, 0); 500 if (buf[0] == '+') 501 ret = badblocks_set(&t_dev->badblocks, start, 502 end - start + 1, 1); 503 else 504 ret = badblocks_clear(&t_dev->badblocks, start, 505 end - start + 1); 506 if (ret == 0) 507 ret = count; 508 out: 509 kfree(orig); 510 return ret; 511 } 512 CONFIGFS_ATTR(nullb_device_, badblocks); 513 514 static struct configfs_attribute *nullb_device_attrs[] = { 515 &nullb_device_attr_size, 516 &nullb_device_attr_completion_nsec, 517 &nullb_device_attr_submit_queues, 518 &nullb_device_attr_poll_queues, 519 &nullb_device_attr_home_node, 520 &nullb_device_attr_queue_mode, 521 &nullb_device_attr_blocksize, 522 &nullb_device_attr_max_sectors, 523 &nullb_device_attr_irqmode, 524 &nullb_device_attr_hw_queue_depth, 525 &nullb_device_attr_index, 526 &nullb_device_attr_blocking, 527 &nullb_device_attr_use_per_node_hctx, 528 &nullb_device_attr_power, 529 &nullb_device_attr_memory_backed, 530 &nullb_device_attr_discard, 531 &nullb_device_attr_mbps, 532 &nullb_device_attr_cache_size, 533 &nullb_device_attr_badblocks, 534 &nullb_device_attr_zoned, 535 &nullb_device_attr_zone_size, 536 &nullb_device_attr_zone_capacity, 537 &nullb_device_attr_zone_nr_conv, 538 &nullb_device_attr_zone_max_open, 539 &nullb_device_attr_zone_max_active, 540 &nullb_device_attr_virt_boundary, 541 NULL, 542 }; 543 544 static void nullb_device_release(struct config_item *item) 545 { 546 struct nullb_device *dev = to_nullb_device(item); 547 548 null_free_device_storage(dev, false); 549 null_free_dev(dev); 550 } 551 552 static struct configfs_item_operations nullb_device_ops = { 553 .release = nullb_device_release, 554 }; 555 556 static const struct config_item_type nullb_device_type = { 557 .ct_item_ops = &nullb_device_ops, 558 .ct_attrs = nullb_device_attrs, 559 .ct_owner = THIS_MODULE, 560 }; 561 562 static struct 563 config_item *nullb_group_make_item(struct config_group *group, const char *name) 564 { 565 struct nullb_device *dev; 566 567 if (null_find_dev_by_name(name)) 568 return ERR_PTR(-EEXIST); 569 570 dev = null_alloc_dev(); 571 if (!dev) 572 return ERR_PTR(-ENOMEM); 573 574 config_item_init_type_name(&dev->item, name, &nullb_device_type); 575 576 return &dev->item; 577 } 578 579 static void 580 nullb_group_drop_item(struct config_group *group, struct config_item *item) 581 { 582 struct nullb_device *dev = to_nullb_device(item); 583 584 if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) { 585 mutex_lock(&lock); 586 dev->power = false; 587 null_del_dev(dev->nullb); 588 mutex_unlock(&lock); 589 } 590 591 config_item_put(item); 592 } 593 594 static ssize_t memb_group_features_show(struct config_item *item, char *page) 595 { 596 return snprintf(page, PAGE_SIZE, 597 "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_capacity,zone_nr_conv,zone_max_open,zone_max_active,blocksize,max_sectors,virt_boundary\n"); 598 } 599 600 CONFIGFS_ATTR_RO(memb_group_, features); 601 602 static struct configfs_attribute *nullb_group_attrs[] = { 603 &memb_group_attr_features, 604 NULL, 605 }; 606 607 static struct configfs_group_operations nullb_group_ops = { 608 .make_item = nullb_group_make_item, 609 .drop_item = nullb_group_drop_item, 610 }; 611 612 static const struct config_item_type nullb_group_type = 
{ 613 .ct_group_ops = &nullb_group_ops, 614 .ct_attrs = nullb_group_attrs, 615 .ct_owner = THIS_MODULE, 616 }; 617 618 static struct configfs_subsystem nullb_subsys = { 619 .su_group = { 620 .cg_item = { 621 .ci_namebuf = "nullb", 622 .ci_type = &nullb_group_type, 623 }, 624 }, 625 }; 626 627 static inline int null_cache_active(struct nullb *nullb) 628 { 629 return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); 630 } 631 632 static struct nullb_device *null_alloc_dev(void) 633 { 634 struct nullb_device *dev; 635 636 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 637 if (!dev) 638 return NULL; 639 INIT_RADIX_TREE(&dev->data, GFP_ATOMIC); 640 INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC); 641 if (badblocks_init(&dev->badblocks, 0)) { 642 kfree(dev); 643 return NULL; 644 } 645 646 dev->size = g_gb * 1024; 647 dev->completion_nsec = g_completion_nsec; 648 dev->submit_queues = g_submit_queues; 649 dev->prev_submit_queues = g_submit_queues; 650 dev->poll_queues = g_poll_queues; 651 dev->prev_poll_queues = g_poll_queues; 652 dev->home_node = g_home_node; 653 dev->queue_mode = g_queue_mode; 654 dev->blocksize = g_bs; 655 dev->max_sectors = g_max_sectors; 656 dev->irqmode = g_irqmode; 657 dev->hw_queue_depth = g_hw_queue_depth; 658 dev->blocking = g_blocking; 659 dev->use_per_node_hctx = g_use_per_node_hctx; 660 dev->zoned = g_zoned; 661 dev->zone_size = g_zone_size; 662 dev->zone_capacity = g_zone_capacity; 663 dev->zone_nr_conv = g_zone_nr_conv; 664 dev->zone_max_open = g_zone_max_open; 665 dev->zone_max_active = g_zone_max_active; 666 dev->virt_boundary = g_virt_boundary; 667 return dev; 668 } 669 670 static void null_free_dev(struct nullb_device *dev) 671 { 672 if (!dev) 673 return; 674 675 null_free_zoned_dev(dev); 676 badblocks_exit(&dev->badblocks); 677 kfree(dev); 678 } 679 680 static void put_tag(struct nullb_queue *nq, unsigned int tag) 681 { 682 clear_bit_unlock(tag, nq->tag_map); 683 684 if (waitqueue_active(&nq->wait)) 685 wake_up(&nq->wait); 686 } 687 688 static unsigned int get_tag(struct nullb_queue *nq) 689 { 690 unsigned int tag; 691 692 do { 693 tag = find_first_zero_bit(nq->tag_map, nq->queue_depth); 694 if (tag >= nq->queue_depth) 695 return -1U; 696 } while (test_and_set_bit_lock(tag, nq->tag_map)); 697 698 return tag; 699 } 700 701 static void free_cmd(struct nullb_cmd *cmd) 702 { 703 put_tag(cmd->nq, cmd->tag); 704 } 705 706 static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer); 707 708 static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq) 709 { 710 struct nullb_cmd *cmd; 711 unsigned int tag; 712 713 tag = get_tag(nq); 714 if (tag != -1U) { 715 cmd = &nq->cmds[tag]; 716 cmd->tag = tag; 717 cmd->error = BLK_STS_OK; 718 cmd->nq = nq; 719 if (nq->dev->irqmode == NULL_IRQ_TIMER) { 720 hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, 721 HRTIMER_MODE_REL); 722 cmd->timer.function = null_cmd_timer_expired; 723 } 724 return cmd; 725 } 726 727 return NULL; 728 } 729 730 static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, struct bio *bio) 731 { 732 struct nullb_cmd *cmd; 733 DEFINE_WAIT(wait); 734 735 do { 736 /* 737 * This avoids multiple return statements, multiple calls to 738 * __alloc_cmd() and a fast path call to prepare_to_wait(). 
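		 * If no tag is free, the caller sleeps uninterruptibly on
		 * nq->wait until put_tag() releases a tag and wakes it up.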
739 */ 740 cmd = __alloc_cmd(nq); 741 if (cmd) { 742 cmd->bio = bio; 743 return cmd; 744 } 745 prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE); 746 io_schedule(); 747 finish_wait(&nq->wait, &wait); 748 } while (1); 749 } 750 751 static void end_cmd(struct nullb_cmd *cmd) 752 { 753 int queue_mode = cmd->nq->dev->queue_mode; 754 755 switch (queue_mode) { 756 case NULL_Q_MQ: 757 blk_mq_end_request(cmd->rq, cmd->error); 758 return; 759 case NULL_Q_BIO: 760 cmd->bio->bi_status = cmd->error; 761 bio_endio(cmd->bio); 762 break; 763 } 764 765 free_cmd(cmd); 766 } 767 768 static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer) 769 { 770 end_cmd(container_of(timer, struct nullb_cmd, timer)); 771 772 return HRTIMER_NORESTART; 773 } 774 775 static void null_cmd_end_timer(struct nullb_cmd *cmd) 776 { 777 ktime_t kt = cmd->nq->dev->completion_nsec; 778 779 hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL); 780 } 781 782 static void null_complete_rq(struct request *rq) 783 { 784 end_cmd(blk_mq_rq_to_pdu(rq)); 785 } 786 787 static struct nullb_page *null_alloc_page(void) 788 { 789 struct nullb_page *t_page; 790 791 t_page = kmalloc(sizeof(struct nullb_page), GFP_NOIO); 792 if (!t_page) 793 return NULL; 794 795 t_page->page = alloc_pages(GFP_NOIO, 0); 796 if (!t_page->page) { 797 kfree(t_page); 798 return NULL; 799 } 800 801 memset(t_page->bitmap, 0, sizeof(t_page->bitmap)); 802 return t_page; 803 } 804 805 static void null_free_page(struct nullb_page *t_page) 806 { 807 __set_bit(NULLB_PAGE_FREE, t_page->bitmap); 808 if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap)) 809 return; 810 __free_page(t_page->page); 811 kfree(t_page); 812 } 813 814 static bool null_page_empty(struct nullb_page *page) 815 { 816 int size = MAP_SZ - 2; 817 818 return find_first_bit(page->bitmap, size) == size; 819 } 820 821 static void null_free_sector(struct nullb *nullb, sector_t sector, 822 bool is_cache) 823 { 824 unsigned int sector_bit; 825 u64 idx; 826 struct nullb_page *t_page, *ret; 827 struct radix_tree_root *root; 828 829 root = is_cache ? &nullb->dev->cache : &nullb->dev->data; 830 idx = sector >> PAGE_SECTORS_SHIFT; 831 sector_bit = (sector & SECTOR_MASK); 832 833 t_page = radix_tree_lookup(root, idx); 834 if (t_page) { 835 __clear_bit(sector_bit, t_page->bitmap); 836 837 if (null_page_empty(t_page)) { 838 ret = radix_tree_delete_item(root, idx, t_page); 839 WARN_ON(ret != t_page); 840 null_free_page(ret); 841 if (is_cache) 842 nullb->dev->curr_cache -= PAGE_SIZE; 843 } 844 } 845 } 846 847 static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx, 848 struct nullb_page *t_page, bool is_cache) 849 { 850 struct radix_tree_root *root; 851 852 root = is_cache ? &nullb->dev->cache : &nullb->dev->data; 853 854 if (radix_tree_insert(root, idx, t_page)) { 855 null_free_page(t_page); 856 t_page = radix_tree_lookup(root, idx); 857 WARN_ON(!t_page || t_page->page->index != idx); 858 } else if (is_cache) 859 nullb->dev->curr_cache += PAGE_SIZE; 860 861 return t_page; 862 } 863 864 static void null_free_device_storage(struct nullb_device *dev, bool is_cache) 865 { 866 unsigned long pos = 0; 867 int nr_pages; 868 struct nullb_page *ret, *t_pages[FREE_BATCH]; 869 struct radix_tree_root *root; 870 871 root = is_cache ? 
&dev->cache : &dev->data; 872 873 do { 874 int i; 875 876 nr_pages = radix_tree_gang_lookup(root, 877 (void **)t_pages, pos, FREE_BATCH); 878 879 for (i = 0; i < nr_pages; i++) { 880 pos = t_pages[i]->page->index; 881 ret = radix_tree_delete_item(root, pos, t_pages[i]); 882 WARN_ON(ret != t_pages[i]); 883 null_free_page(ret); 884 } 885 886 pos++; 887 } while (nr_pages == FREE_BATCH); 888 889 if (is_cache) 890 dev->curr_cache = 0; 891 } 892 893 static struct nullb_page *__null_lookup_page(struct nullb *nullb, 894 sector_t sector, bool for_write, bool is_cache) 895 { 896 unsigned int sector_bit; 897 u64 idx; 898 struct nullb_page *t_page; 899 struct radix_tree_root *root; 900 901 idx = sector >> PAGE_SECTORS_SHIFT; 902 sector_bit = (sector & SECTOR_MASK); 903 904 root = is_cache ? &nullb->dev->cache : &nullb->dev->data; 905 t_page = radix_tree_lookup(root, idx); 906 WARN_ON(t_page && t_page->page->index != idx); 907 908 if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap))) 909 return t_page; 910 911 return NULL; 912 } 913 914 static struct nullb_page *null_lookup_page(struct nullb *nullb, 915 sector_t sector, bool for_write, bool ignore_cache) 916 { 917 struct nullb_page *page = NULL; 918 919 if (!ignore_cache) 920 page = __null_lookup_page(nullb, sector, for_write, true); 921 if (page) 922 return page; 923 return __null_lookup_page(nullb, sector, for_write, false); 924 } 925 926 static struct nullb_page *null_insert_page(struct nullb *nullb, 927 sector_t sector, bool ignore_cache) 928 __releases(&nullb->lock) 929 __acquires(&nullb->lock) 930 { 931 u64 idx; 932 struct nullb_page *t_page; 933 934 t_page = null_lookup_page(nullb, sector, true, ignore_cache); 935 if (t_page) 936 return t_page; 937 938 spin_unlock_irq(&nullb->lock); 939 940 t_page = null_alloc_page(); 941 if (!t_page) 942 goto out_lock; 943 944 if (radix_tree_preload(GFP_NOIO)) 945 goto out_freepage; 946 947 spin_lock_irq(&nullb->lock); 948 idx = sector >> PAGE_SECTORS_SHIFT; 949 t_page->page->index = idx; 950 t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache); 951 radix_tree_preload_end(); 952 953 return t_page; 954 out_freepage: 955 null_free_page(t_page); 956 out_lock: 957 spin_lock_irq(&nullb->lock); 958 return null_lookup_page(nullb, sector, true, ignore_cache); 959 } 960 961 static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page) 962 { 963 int i; 964 unsigned int offset; 965 u64 idx; 966 struct nullb_page *t_page, *ret; 967 void *dst, *src; 968 969 idx = c_page->page->index; 970 971 t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true); 972 973 __clear_bit(NULLB_PAGE_LOCK, c_page->bitmap); 974 if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) { 975 null_free_page(c_page); 976 if (t_page && null_page_empty(t_page)) { 977 ret = radix_tree_delete_item(&nullb->dev->data, 978 idx, t_page); 979 null_free_page(t_page); 980 } 981 return 0; 982 } 983 984 if (!t_page) 985 return -ENOMEM; 986 987 src = kmap_atomic(c_page->page); 988 dst = kmap_atomic(t_page->page); 989 990 for (i = 0; i < PAGE_SECTORS; 991 i += (nullb->dev->blocksize >> SECTOR_SHIFT)) { 992 if (test_bit(i, c_page->bitmap)) { 993 offset = (i << SECTOR_SHIFT); 994 memcpy(dst + offset, src + offset, 995 nullb->dev->blocksize); 996 __set_bit(i, t_page->bitmap); 997 } 998 } 999 1000 kunmap_atomic(dst); 1001 kunmap_atomic(src); 1002 1003 ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page); 1004 null_free_page(ret); 1005 nullb->dev->curr_cache -= PAGE_SIZE; 1006 1007 return 0; 1008 } 1009 1010 static 
int null_make_cache_space(struct nullb *nullb, unsigned long n) 1011 { 1012 int i, err, nr_pages; 1013 struct nullb_page *c_pages[FREE_BATCH]; 1014 unsigned long flushed = 0, one_round; 1015 1016 again: 1017 if ((nullb->dev->cache_size * 1024 * 1024) > 1018 nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0) 1019 return 0; 1020 1021 nr_pages = radix_tree_gang_lookup(&nullb->dev->cache, 1022 (void **)c_pages, nullb->cache_flush_pos, FREE_BATCH); 1023 /* 1024 * nullb_flush_cache_page could unlock before using the c_pages. To 1025 * avoid race, we don't allow page free 1026 */ 1027 for (i = 0; i < nr_pages; i++) { 1028 nullb->cache_flush_pos = c_pages[i]->page->index; 1029 /* 1030 * We found the page which is being flushed to disk by other 1031 * threads 1032 */ 1033 if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap)) 1034 c_pages[i] = NULL; 1035 else 1036 __set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap); 1037 } 1038 1039 one_round = 0; 1040 for (i = 0; i < nr_pages; i++) { 1041 if (c_pages[i] == NULL) 1042 continue; 1043 err = null_flush_cache_page(nullb, c_pages[i]); 1044 if (err) 1045 return err; 1046 one_round++; 1047 } 1048 flushed += one_round << PAGE_SHIFT; 1049 1050 if (n > flushed) { 1051 if (nr_pages == 0) 1052 nullb->cache_flush_pos = 0; 1053 if (one_round == 0) { 1054 /* give other threads a chance */ 1055 spin_unlock_irq(&nullb->lock); 1056 spin_lock_irq(&nullb->lock); 1057 } 1058 goto again; 1059 } 1060 return 0; 1061 } 1062 1063 static int copy_to_nullb(struct nullb *nullb, struct page *source, 1064 unsigned int off, sector_t sector, size_t n, bool is_fua) 1065 { 1066 size_t temp, count = 0; 1067 unsigned int offset; 1068 struct nullb_page *t_page; 1069 void *dst, *src; 1070 1071 while (count < n) { 1072 temp = min_t(size_t, nullb->dev->blocksize, n - count); 1073 1074 if (null_cache_active(nullb) && !is_fua) 1075 null_make_cache_space(nullb, PAGE_SIZE); 1076 1077 offset = (sector & SECTOR_MASK) << SECTOR_SHIFT; 1078 t_page = null_insert_page(nullb, sector, 1079 !null_cache_active(nullb) || is_fua); 1080 if (!t_page) 1081 return -ENOSPC; 1082 1083 src = kmap_atomic(source); 1084 dst = kmap_atomic(t_page->page); 1085 memcpy(dst + offset, src + off + count, temp); 1086 kunmap_atomic(dst); 1087 kunmap_atomic(src); 1088 1089 __set_bit(sector & SECTOR_MASK, t_page->bitmap); 1090 1091 if (is_fua) 1092 null_free_sector(nullb, sector, true); 1093 1094 count += temp; 1095 sector += temp >> SECTOR_SHIFT; 1096 } 1097 return 0; 1098 } 1099 1100 static int copy_from_nullb(struct nullb *nullb, struct page *dest, 1101 unsigned int off, sector_t sector, size_t n) 1102 { 1103 size_t temp, count = 0; 1104 unsigned int offset; 1105 struct nullb_page *t_page; 1106 void *dst, *src; 1107 1108 while (count < n) { 1109 temp = min_t(size_t, nullb->dev->blocksize, n - count); 1110 1111 offset = (sector & SECTOR_MASK) << SECTOR_SHIFT; 1112 t_page = null_lookup_page(nullb, sector, false, 1113 !null_cache_active(nullb)); 1114 1115 dst = kmap_atomic(dest); 1116 if (!t_page) { 1117 memset(dst + off + count, 0, temp); 1118 goto next; 1119 } 1120 src = kmap_atomic(t_page->page); 1121 memcpy(dst + off + count, src + offset, temp); 1122 kunmap_atomic(src); 1123 next: 1124 kunmap_atomic(dst); 1125 1126 count += temp; 1127 sector += temp >> SECTOR_SHIFT; 1128 } 1129 return 0; 1130 } 1131 1132 static void nullb_fill_pattern(struct nullb *nullb, struct page *page, 1133 unsigned int len, unsigned int off) 1134 { 1135 void *dst; 1136 1137 dst = kmap_atomic(page); 1138 memset(dst + off, 0xFF, len); 1139 
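	/* The 0xFF pattern marks data beyond a zone's valid read length (see null_transfer()). */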
kunmap_atomic(dst); 1140 } 1141 1142 blk_status_t null_handle_discard(struct nullb_device *dev, 1143 sector_t sector, sector_t nr_sectors) 1144 { 1145 struct nullb *nullb = dev->nullb; 1146 size_t n = nr_sectors << SECTOR_SHIFT; 1147 size_t temp; 1148 1149 spin_lock_irq(&nullb->lock); 1150 while (n > 0) { 1151 temp = min_t(size_t, n, dev->blocksize); 1152 null_free_sector(nullb, sector, false); 1153 if (null_cache_active(nullb)) 1154 null_free_sector(nullb, sector, true); 1155 sector += temp >> SECTOR_SHIFT; 1156 n -= temp; 1157 } 1158 spin_unlock_irq(&nullb->lock); 1159 1160 return BLK_STS_OK; 1161 } 1162 1163 static int null_handle_flush(struct nullb *nullb) 1164 { 1165 int err; 1166 1167 if (!null_cache_active(nullb)) 1168 return 0; 1169 1170 spin_lock_irq(&nullb->lock); 1171 while (true) { 1172 err = null_make_cache_space(nullb, 1173 nullb->dev->cache_size * 1024 * 1024); 1174 if (err || nullb->dev->curr_cache == 0) 1175 break; 1176 } 1177 1178 WARN_ON(!radix_tree_empty(&nullb->dev->cache)); 1179 spin_unlock_irq(&nullb->lock); 1180 return err; 1181 } 1182 1183 static int null_transfer(struct nullb *nullb, struct page *page, 1184 unsigned int len, unsigned int off, bool is_write, sector_t sector, 1185 bool is_fua) 1186 { 1187 struct nullb_device *dev = nullb->dev; 1188 unsigned int valid_len = len; 1189 int err = 0; 1190 1191 if (!is_write) { 1192 if (dev->zoned) 1193 valid_len = null_zone_valid_read_len(nullb, 1194 sector, len); 1195 1196 if (valid_len) { 1197 err = copy_from_nullb(nullb, page, off, 1198 sector, valid_len); 1199 off += valid_len; 1200 len -= valid_len; 1201 } 1202 1203 if (len) 1204 nullb_fill_pattern(nullb, page, len, off); 1205 flush_dcache_page(page); 1206 } else { 1207 flush_dcache_page(page); 1208 err = copy_to_nullb(nullb, page, off, sector, len, is_fua); 1209 } 1210 1211 return err; 1212 } 1213 1214 static int null_handle_rq(struct nullb_cmd *cmd) 1215 { 1216 struct request *rq = cmd->rq; 1217 struct nullb *nullb = cmd->nq->dev->nullb; 1218 int err; 1219 unsigned int len; 1220 sector_t sector = blk_rq_pos(rq); 1221 struct req_iterator iter; 1222 struct bio_vec bvec; 1223 1224 spin_lock_irq(&nullb->lock); 1225 rq_for_each_segment(bvec, rq, iter) { 1226 len = bvec.bv_len; 1227 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, 1228 op_is_write(req_op(rq)), sector, 1229 rq->cmd_flags & REQ_FUA); 1230 if (err) { 1231 spin_unlock_irq(&nullb->lock); 1232 return err; 1233 } 1234 sector += len >> SECTOR_SHIFT; 1235 } 1236 spin_unlock_irq(&nullb->lock); 1237 1238 return 0; 1239 } 1240 1241 static int null_handle_bio(struct nullb_cmd *cmd) 1242 { 1243 struct bio *bio = cmd->bio; 1244 struct nullb *nullb = cmd->nq->dev->nullb; 1245 int err; 1246 unsigned int len; 1247 sector_t sector = bio->bi_iter.bi_sector; 1248 struct bio_vec bvec; 1249 struct bvec_iter iter; 1250 1251 spin_lock_irq(&nullb->lock); 1252 bio_for_each_segment(bvec, bio, iter) { 1253 len = bvec.bv_len; 1254 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, 1255 op_is_write(bio_op(bio)), sector, 1256 bio->bi_opf & REQ_FUA); 1257 if (err) { 1258 spin_unlock_irq(&nullb->lock); 1259 return err; 1260 } 1261 sector += len >> SECTOR_SHIFT; 1262 } 1263 spin_unlock_irq(&nullb->lock); 1264 return 0; 1265 } 1266 1267 static void null_stop_queue(struct nullb *nullb) 1268 { 1269 struct request_queue *q = nullb->q; 1270 1271 if (nullb->dev->queue_mode == NULL_Q_MQ) 1272 blk_mq_stop_hw_queues(q); 1273 } 1274 1275 static void null_restart_queue_async(struct nullb *nullb) 1276 { 1277 struct 
request_queue *q = nullb->q; 1278 1279 if (nullb->dev->queue_mode == NULL_Q_MQ) 1280 blk_mq_start_stopped_hw_queues(q, true); 1281 } 1282 1283 static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd) 1284 { 1285 struct nullb_device *dev = cmd->nq->dev; 1286 struct nullb *nullb = dev->nullb; 1287 blk_status_t sts = BLK_STS_OK; 1288 struct request *rq = cmd->rq; 1289 1290 if (!hrtimer_active(&nullb->bw_timer)) 1291 hrtimer_restart(&nullb->bw_timer); 1292 1293 if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) { 1294 null_stop_queue(nullb); 1295 /* race with timer */ 1296 if (atomic_long_read(&nullb->cur_bytes) > 0) 1297 null_restart_queue_async(nullb); 1298 /* requeue request */ 1299 sts = BLK_STS_DEV_RESOURCE; 1300 } 1301 return sts; 1302 } 1303 1304 static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd, 1305 sector_t sector, 1306 sector_t nr_sectors) 1307 { 1308 struct badblocks *bb = &cmd->nq->dev->badblocks; 1309 sector_t first_bad; 1310 int bad_sectors; 1311 1312 if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors)) 1313 return BLK_STS_IOERR; 1314 1315 return BLK_STS_OK; 1316 } 1317 1318 static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd, 1319 enum req_opf op, 1320 sector_t sector, 1321 sector_t nr_sectors) 1322 { 1323 struct nullb_device *dev = cmd->nq->dev; 1324 int err; 1325 1326 if (op == REQ_OP_DISCARD) 1327 return null_handle_discard(dev, sector, nr_sectors); 1328 1329 if (dev->queue_mode == NULL_Q_BIO) 1330 err = null_handle_bio(cmd); 1331 else 1332 err = null_handle_rq(cmd); 1333 1334 return errno_to_blk_status(err); 1335 } 1336 1337 static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd) 1338 { 1339 struct nullb_device *dev = cmd->nq->dev; 1340 struct bio *bio; 1341 1342 if (dev->memory_backed) 1343 return; 1344 1345 if (dev->queue_mode == NULL_Q_BIO && bio_op(cmd->bio) == REQ_OP_READ) { 1346 zero_fill_bio(cmd->bio); 1347 } else if (req_op(cmd->rq) == REQ_OP_READ) { 1348 __rq_for_each_bio(bio, cmd->rq) 1349 zero_fill_bio(bio); 1350 } 1351 } 1352 1353 static inline void nullb_complete_cmd(struct nullb_cmd *cmd) 1354 { 1355 /* 1356 * Since root privileges are required to configure the null_blk 1357 * driver, it is fine that this driver does not initialize the 1358 * data buffers of read commands. Zero-initialize these buffers 1359 * anyway if KMSAN is enabled to prevent that KMSAN complains 1360 * about null_blk not initializing read data buffers. 1361 */ 1362 if (IS_ENABLED(CONFIG_KMSAN)) 1363 nullb_zero_read_cmd_buffer(cmd); 1364 1365 /* Complete IO by inline, softirq or timer */ 1366 switch (cmd->nq->dev->irqmode) { 1367 case NULL_IRQ_SOFTIRQ: 1368 switch (cmd->nq->dev->queue_mode) { 1369 case NULL_Q_MQ: 1370 if (likely(!blk_should_fake_timeout(cmd->rq->q))) 1371 blk_mq_complete_request(cmd->rq); 1372 break; 1373 case NULL_Q_BIO: 1374 /* 1375 * XXX: no proper submitting cpu information available. 
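			 * Complete the bio inline here; the request-based
			 * path above defers to blk_mq_complete_request()
			 * instead.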
1376 */ 1377 end_cmd(cmd); 1378 break; 1379 } 1380 break; 1381 case NULL_IRQ_NONE: 1382 end_cmd(cmd); 1383 break; 1384 case NULL_IRQ_TIMER: 1385 null_cmd_end_timer(cmd); 1386 break; 1387 } 1388 } 1389 1390 blk_status_t null_process_cmd(struct nullb_cmd *cmd, 1391 enum req_opf op, sector_t sector, 1392 unsigned int nr_sectors) 1393 { 1394 struct nullb_device *dev = cmd->nq->dev; 1395 blk_status_t ret; 1396 1397 if (dev->badblocks.shift != -1) { 1398 ret = null_handle_badblocks(cmd, sector, nr_sectors); 1399 if (ret != BLK_STS_OK) 1400 return ret; 1401 } 1402 1403 if (dev->memory_backed) 1404 return null_handle_memory_backed(cmd, op, sector, nr_sectors); 1405 1406 return BLK_STS_OK; 1407 } 1408 1409 static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector, 1410 sector_t nr_sectors, enum req_opf op) 1411 { 1412 struct nullb_device *dev = cmd->nq->dev; 1413 struct nullb *nullb = dev->nullb; 1414 blk_status_t sts; 1415 1416 if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) { 1417 sts = null_handle_throttled(cmd); 1418 if (sts != BLK_STS_OK) 1419 return sts; 1420 } 1421 1422 if (op == REQ_OP_FLUSH) { 1423 cmd->error = errno_to_blk_status(null_handle_flush(nullb)); 1424 goto out; 1425 } 1426 1427 if (dev->zoned) 1428 sts = null_process_zoned_cmd(cmd, op, sector, nr_sectors); 1429 else 1430 sts = null_process_cmd(cmd, op, sector, nr_sectors); 1431 1432 /* Do not overwrite errors (e.g. timeout errors) */ 1433 if (cmd->error == BLK_STS_OK) 1434 cmd->error = sts; 1435 1436 out: 1437 nullb_complete_cmd(cmd); 1438 return BLK_STS_OK; 1439 } 1440 1441 static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer) 1442 { 1443 struct nullb *nullb = container_of(timer, struct nullb, bw_timer); 1444 ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL); 1445 unsigned int mbps = nullb->dev->mbps; 1446 1447 if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps)) 1448 return HRTIMER_NORESTART; 1449 1450 atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps)); 1451 null_restart_queue_async(nullb); 1452 1453 hrtimer_forward_now(&nullb->bw_timer, timer_interval); 1454 1455 return HRTIMER_RESTART; 1456 } 1457 1458 static void nullb_setup_bwtimer(struct nullb *nullb) 1459 { 1460 ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL); 1461 1462 hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1463 nullb->bw_timer.function = nullb_bwtimer_fn; 1464 atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps)); 1465 hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL); 1466 } 1467 1468 static struct nullb_queue *nullb_to_queue(struct nullb *nullb) 1469 { 1470 int index = 0; 1471 1472 if (nullb->nr_queues != 1) 1473 index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues); 1474 1475 return &nullb->queues[index]; 1476 } 1477 1478 static void null_submit_bio(struct bio *bio) 1479 { 1480 sector_t sector = bio->bi_iter.bi_sector; 1481 sector_t nr_sectors = bio_sectors(bio); 1482 struct nullb *nullb = bio->bi_bdev->bd_disk->private_data; 1483 struct nullb_queue *nq = nullb_to_queue(nullb); 1484 1485 null_handle_cmd(alloc_cmd(nq, bio), sector, nr_sectors, bio_op(bio)); 1486 } 1487 1488 static bool should_timeout_request(struct request *rq) 1489 { 1490 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION 1491 if (g_timeout_str[0]) 1492 return should_fail(&null_timeout_attr, 1); 1493 #endif 1494 return false; 1495 } 1496 1497 static bool should_requeue_request(struct request *rq) 1498 { 1499 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION 
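	/*
	 * Example (hypothetical values): loading the module with
	 * requeue=1,100,0,-1 would make every request a requeue candidate
	 * (interval 1, probability 100%, times -1 = no limit). See
	 * Documentation/fault-injection/fault-injection.rst for the format.
	 */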
1500 if (g_requeue_str[0]) 1501 return should_fail(&null_requeue_attr, 1); 1502 #endif 1503 return false; 1504 } 1505 1506 static int null_map_queues(struct blk_mq_tag_set *set) 1507 { 1508 struct nullb *nullb = set->driver_data; 1509 int i, qoff; 1510 unsigned int submit_queues = g_submit_queues; 1511 unsigned int poll_queues = g_poll_queues; 1512 1513 if (nullb) { 1514 struct nullb_device *dev = nullb->dev; 1515 1516 /* 1517 * Refer nr_hw_queues of the tag set to check if the expected 1518 * number of hardware queues are prepared. If block layer failed 1519 * to prepare them, use previous numbers of submit queues and 1520 * poll queues to map queues. 1521 */ 1522 if (set->nr_hw_queues == 1523 dev->submit_queues + dev->poll_queues) { 1524 submit_queues = dev->submit_queues; 1525 poll_queues = dev->poll_queues; 1526 } else if (set->nr_hw_queues == 1527 dev->prev_submit_queues + dev->prev_poll_queues) { 1528 submit_queues = dev->prev_submit_queues; 1529 poll_queues = dev->prev_poll_queues; 1530 } else { 1531 pr_warn("tag set has unexpected nr_hw_queues: %d\n", 1532 set->nr_hw_queues); 1533 return -EINVAL; 1534 } 1535 } 1536 1537 for (i = 0, qoff = 0; i < set->nr_maps; i++) { 1538 struct blk_mq_queue_map *map = &set->map[i]; 1539 1540 switch (i) { 1541 case HCTX_TYPE_DEFAULT: 1542 map->nr_queues = submit_queues; 1543 break; 1544 case HCTX_TYPE_READ: 1545 map->nr_queues = 0; 1546 continue; 1547 case HCTX_TYPE_POLL: 1548 map->nr_queues = poll_queues; 1549 break; 1550 } 1551 map->queue_offset = qoff; 1552 qoff += map->nr_queues; 1553 blk_mq_map_queues(map); 1554 } 1555 1556 return 0; 1557 } 1558 1559 static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) 1560 { 1561 struct nullb_queue *nq = hctx->driver_data; 1562 LIST_HEAD(list); 1563 int nr = 0; 1564 1565 spin_lock(&nq->poll_lock); 1566 list_splice_init(&nq->poll_list, &list); 1567 spin_unlock(&nq->poll_lock); 1568 1569 while (!list_empty(&list)) { 1570 struct nullb_cmd *cmd; 1571 struct request *req; 1572 1573 req = list_first_entry(&list, struct request, queuelist); 1574 list_del_init(&req->queuelist); 1575 cmd = blk_mq_rq_to_pdu(req); 1576 cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req), 1577 blk_rq_sectors(req)); 1578 if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error, 1579 blk_mq_end_request_batch)) 1580 end_cmd(cmd); 1581 nr++; 1582 } 1583 1584 return nr; 1585 } 1586 1587 static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res) 1588 { 1589 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; 1590 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq); 1591 1592 pr_info("rq %p timed out\n", rq); 1593 1594 if (hctx->type == HCTX_TYPE_POLL) { 1595 struct nullb_queue *nq = hctx->driver_data; 1596 1597 spin_lock(&nq->poll_lock); 1598 list_del_init(&rq->queuelist); 1599 spin_unlock(&nq->poll_lock); 1600 } 1601 1602 /* 1603 * If the device is marked as blocking (i.e. memory backed or zoned 1604 * device), the submission path may be blocked waiting for resources 1605 * and cause real timeouts. For these real timeouts, the submission 1606 * path will complete the request using blk_mq_complete_request(). 1607 * Only fake timeouts need to execute blk_mq_complete_request() here. 
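	 * A polled request was already removed from nq->poll_list above, so
	 * it is completed here as well, whether or not the timeout was faked.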
1608 */ 1609 cmd->error = BLK_STS_TIMEOUT; 1610 if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL) 1611 blk_mq_complete_request(rq); 1612 return BLK_EH_DONE; 1613 } 1614 1615 static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, 1616 const struct blk_mq_queue_data *bd) 1617 { 1618 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); 1619 struct nullb_queue *nq = hctx->driver_data; 1620 sector_t nr_sectors = blk_rq_sectors(bd->rq); 1621 sector_t sector = blk_rq_pos(bd->rq); 1622 const bool is_poll = hctx->type == HCTX_TYPE_POLL; 1623 1624 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); 1625 1626 if (!is_poll && nq->dev->irqmode == NULL_IRQ_TIMER) { 1627 hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1628 cmd->timer.function = null_cmd_timer_expired; 1629 } 1630 cmd->rq = bd->rq; 1631 cmd->error = BLK_STS_OK; 1632 cmd->nq = nq; 1633 cmd->fake_timeout = should_timeout_request(bd->rq); 1634 1635 blk_mq_start_request(bd->rq); 1636 1637 if (should_requeue_request(bd->rq)) { 1638 /* 1639 * Alternate between hitting the core BUSY path, and the 1640 * driver driven requeue path 1641 */ 1642 nq->requeue_selection++; 1643 if (nq->requeue_selection & 1) 1644 return BLK_STS_RESOURCE; 1645 else { 1646 blk_mq_requeue_request(bd->rq, true); 1647 return BLK_STS_OK; 1648 } 1649 } 1650 1651 if (is_poll) { 1652 spin_lock(&nq->poll_lock); 1653 list_add_tail(&bd->rq->queuelist, &nq->poll_list); 1654 spin_unlock(&nq->poll_lock); 1655 return BLK_STS_OK; 1656 } 1657 if (cmd->fake_timeout) 1658 return BLK_STS_OK; 1659 1660 return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq)); 1661 } 1662 1663 static void cleanup_queue(struct nullb_queue *nq) 1664 { 1665 kfree(nq->tag_map); 1666 kfree(nq->cmds); 1667 } 1668 1669 static void cleanup_queues(struct nullb *nullb) 1670 { 1671 int i; 1672 1673 for (i = 0; i < nullb->nr_queues; i++) 1674 cleanup_queue(&nullb->queues[i]); 1675 1676 kfree(nullb->queues); 1677 } 1678 1679 static void null_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) 1680 { 1681 struct nullb_queue *nq = hctx->driver_data; 1682 struct nullb *nullb = nq->dev->nullb; 1683 1684 nullb->nr_queues--; 1685 } 1686 1687 static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq) 1688 { 1689 init_waitqueue_head(&nq->wait); 1690 nq->queue_depth = nullb->queue_depth; 1691 nq->dev = nullb->dev; 1692 INIT_LIST_HEAD(&nq->poll_list); 1693 spin_lock_init(&nq->poll_lock); 1694 } 1695 1696 static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data, 1697 unsigned int hctx_idx) 1698 { 1699 struct nullb *nullb = hctx->queue->queuedata; 1700 struct nullb_queue *nq; 1701 1702 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION 1703 if (g_init_hctx_str[0] && should_fail(&null_init_hctx_attr, 1)) 1704 return -EFAULT; 1705 #endif 1706 1707 nq = &nullb->queues[hctx_idx]; 1708 hctx->driver_data = nq; 1709 null_init_queue(nullb, nq); 1710 nullb->nr_queues++; 1711 1712 return 0; 1713 } 1714 1715 static const struct blk_mq_ops null_mq_ops = { 1716 .queue_rq = null_queue_rq, 1717 .complete = null_complete_rq, 1718 .timeout = null_timeout_rq, 1719 .poll = null_poll, 1720 .map_queues = null_map_queues, 1721 .init_hctx = null_init_hctx, 1722 .exit_hctx = null_exit_hctx, 1723 }; 1724 1725 static void null_del_dev(struct nullb *nullb) 1726 { 1727 struct nullb_device *dev; 1728 1729 if (!nullb) 1730 return; 1731 1732 dev = nullb->dev; 1733 1734 ida_simple_remove(&nullb_indexes, nullb->index); 1735 1736 list_del_init(&nullb->list); 1737 1738 del_gendisk(nullb->disk); 1739 1740 
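	/*
	 * If bandwidth throttling was enabled, cancel the timer and restore a
	 * full byte budget so that any hardware queues stopped by the
	 * throttle are restarted before the disk is cleaned up.
	 */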
if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) { 1741 hrtimer_cancel(&nullb->bw_timer); 1742 atomic_long_set(&nullb->cur_bytes, LONG_MAX); 1743 null_restart_queue_async(nullb); 1744 } 1745 1746 blk_cleanup_disk(nullb->disk); 1747 if (dev->queue_mode == NULL_Q_MQ && 1748 nullb->tag_set == &nullb->__tag_set) 1749 blk_mq_free_tag_set(nullb->tag_set); 1750 cleanup_queues(nullb); 1751 if (null_cache_active(nullb)) 1752 null_free_device_storage(nullb->dev, true); 1753 kfree(nullb); 1754 dev->nullb = NULL; 1755 } 1756 1757 static void null_config_discard(struct nullb *nullb) 1758 { 1759 if (nullb->dev->discard == false) 1760 return; 1761 1762 if (!nullb->dev->memory_backed) { 1763 nullb->dev->discard = false; 1764 pr_info("discard option is ignored without memory backing\n"); 1765 return; 1766 } 1767 1768 if (nullb->dev->zoned) { 1769 nullb->dev->discard = false; 1770 pr_info("discard option is ignored in zoned mode\n"); 1771 return; 1772 } 1773 1774 nullb->q->limits.discard_granularity = nullb->dev->blocksize; 1775 blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9); 1776 } 1777 1778 static const struct block_device_operations null_bio_ops = { 1779 .owner = THIS_MODULE, 1780 .submit_bio = null_submit_bio, 1781 .report_zones = null_report_zones, 1782 }; 1783 1784 static const struct block_device_operations null_rq_ops = { 1785 .owner = THIS_MODULE, 1786 .report_zones = null_report_zones, 1787 }; 1788 1789 static int setup_commands(struct nullb_queue *nq) 1790 { 1791 struct nullb_cmd *cmd; 1792 int i, tag_size; 1793 1794 nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL); 1795 if (!nq->cmds) 1796 return -ENOMEM; 1797 1798 tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG; 1799 nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL); 1800 if (!nq->tag_map) { 1801 kfree(nq->cmds); 1802 return -ENOMEM; 1803 } 1804 1805 for (i = 0; i < nq->queue_depth; i++) { 1806 cmd = &nq->cmds[i]; 1807 cmd->tag = -1U; 1808 } 1809 1810 return 0; 1811 } 1812 1813 static int setup_queues(struct nullb *nullb) 1814 { 1815 int nqueues = nr_cpu_ids; 1816 1817 if (g_poll_queues) 1818 nqueues += g_poll_queues; 1819 1820 nullb->queues = kcalloc(nqueues, sizeof(struct nullb_queue), 1821 GFP_KERNEL); 1822 if (!nullb->queues) 1823 return -ENOMEM; 1824 1825 nullb->queue_depth = nullb->dev->hw_queue_depth; 1826 return 0; 1827 } 1828 1829 static int init_driver_queues(struct nullb *nullb) 1830 { 1831 struct nullb_queue *nq; 1832 int i, ret = 0; 1833 1834 for (i = 0; i < nullb->dev->submit_queues; i++) { 1835 nq = &nullb->queues[i]; 1836 1837 null_init_queue(nullb, nq); 1838 1839 ret = setup_commands(nq); 1840 if (ret) 1841 return ret; 1842 nullb->nr_queues++; 1843 } 1844 return 0; 1845 } 1846 1847 static int null_gendisk_register(struct nullb *nullb) 1848 { 1849 sector_t size = ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT; 1850 struct gendisk *disk = nullb->disk; 1851 1852 set_capacity(disk, size); 1853 1854 disk->major = null_major; 1855 disk->first_minor = nullb->index; 1856 disk->minors = 1; 1857 if (queue_is_mq(nullb->q)) 1858 disk->fops = &null_rq_ops; 1859 else 1860 disk->fops = &null_bio_ops; 1861 disk->private_data = nullb; 1862 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); 1863 1864 if (nullb->dev->zoned) { 1865 int ret = null_register_zoned_dev(nullb); 1866 1867 if (ret) 1868 return ret; 1869 } 1870 1871 return add_disk(disk); 1872 } 1873 1874 static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set) 1875 { 1876 int poll_queues; 
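	/*
	 * The tag set covers the submit queues plus any poll queues; nr_maps
	 * is 3 (default/read/poll) only when poll queues are configured,
	 * otherwise a single default map is used.
	 */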
1877 1878 set->ops = &null_mq_ops; 1879 set->nr_hw_queues = nullb ? nullb->dev->submit_queues : 1880 g_submit_queues; 1881 poll_queues = nullb ? nullb->dev->poll_queues : g_poll_queues; 1882 if (poll_queues) 1883 set->nr_hw_queues += poll_queues; 1884 set->queue_depth = nullb ? nullb->dev->hw_queue_depth : 1885 g_hw_queue_depth; 1886 set->numa_node = nullb ? nullb->dev->home_node : g_home_node; 1887 set->cmd_size = sizeof(struct nullb_cmd); 1888 set->flags = BLK_MQ_F_SHOULD_MERGE; 1889 if (g_no_sched) 1890 set->flags |= BLK_MQ_F_NO_SCHED; 1891 if (g_shared_tag_bitmap) 1892 set->flags |= BLK_MQ_F_TAG_HCTX_SHARED; 1893 set->driver_data = nullb; 1894 if (poll_queues) 1895 set->nr_maps = 3; 1896 else 1897 set->nr_maps = 1; 1898 1899 if ((nullb && nullb->dev->blocking) || g_blocking) 1900 set->flags |= BLK_MQ_F_BLOCKING; 1901 1902 return blk_mq_alloc_tag_set(set); 1903 } 1904 1905 static int null_validate_conf(struct nullb_device *dev) 1906 { 1907 dev->blocksize = round_down(dev->blocksize, 512); 1908 dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096); 1909 1910 if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) { 1911 if (dev->submit_queues != nr_online_nodes) 1912 dev->submit_queues = nr_online_nodes; 1913 } else if (dev->submit_queues > nr_cpu_ids) 1914 dev->submit_queues = nr_cpu_ids; 1915 else if (dev->submit_queues == 0) 1916 dev->submit_queues = 1; 1917 dev->prev_submit_queues = dev->submit_queues; 1918 1919 if (dev->poll_queues > g_poll_queues) 1920 dev->poll_queues = g_poll_queues; 1921 dev->prev_poll_queues = dev->poll_queues; 1922 1923 dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ); 1924 dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER); 1925 1926 /* Do memory allocation, so set blocking */ 1927 if (dev->memory_backed) 1928 dev->blocking = true; 1929 else /* cache is meaningless */ 1930 dev->cache_size = 0; 1931 dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024, 1932 dev->cache_size); 1933 dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps); 1934 /* can not stop a queue */ 1935 if (dev->queue_mode == NULL_Q_BIO) 1936 dev->mbps = 0; 1937 1938 if (dev->zoned && 1939 (!dev->zone_size || !is_power_of_2(dev->zone_size))) { 1940 pr_err("zone_size must be power-of-two\n"); 1941 return -EINVAL; 1942 } 1943 1944 return 0; 1945 } 1946 1947 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION 1948 static bool __null_setup_fault(struct fault_attr *attr, char *str) 1949 { 1950 if (!str[0]) 1951 return true; 1952 1953 if (!setup_fault_attr(attr, str)) 1954 return false; 1955 1956 attr->verbose = 0; 1957 return true; 1958 } 1959 #endif 1960 1961 static bool null_setup_fault(void) 1962 { 1963 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION 1964 if (!__null_setup_fault(&null_timeout_attr, g_timeout_str)) 1965 return false; 1966 if (!__null_setup_fault(&null_requeue_attr, g_requeue_str)) 1967 return false; 1968 if (!__null_setup_fault(&null_init_hctx_attr, g_init_hctx_str)) 1969 return false; 1970 #endif 1971 return true; 1972 } 1973 1974 static int null_add_dev(struct nullb_device *dev) 1975 { 1976 struct nullb *nullb; 1977 int rv; 1978 1979 rv = null_validate_conf(dev); 1980 if (rv) 1981 return rv; 1982 1983 nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node); 1984 if (!nullb) { 1985 rv = -ENOMEM; 1986 goto out; 1987 } 1988 nullb->dev = dev; 1989 dev->nullb = nullb; 1990 1991 spin_lock_init(&nullb->lock); 1992 1993 rv = setup_queues(nullb); 1994 if (rv) 1995 goto out_free_nullb; 1996 1997 if (dev->queue_mode == NULL_Q_MQ) 
{ 1998 if (shared_tags) { 1999 nullb->tag_set = &tag_set; 2000 rv = 0; 2001 } else { 2002 nullb->tag_set = &nullb->__tag_set; 2003 rv = null_init_tag_set(nullb, nullb->tag_set); 2004 } 2005 2006 if (rv) 2007 goto out_cleanup_queues; 2008 2009 if (!null_setup_fault()) 2010 goto out_cleanup_tags; 2011 2012 nullb->tag_set->timeout = 5 * HZ; 2013 nullb->disk = blk_mq_alloc_disk(nullb->tag_set, nullb); 2014 if (IS_ERR(nullb->disk)) { 2015 rv = PTR_ERR(nullb->disk); 2016 goto out_cleanup_tags; 2017 } 2018 nullb->q = nullb->disk->queue; 2019 } else if (dev->queue_mode == NULL_Q_BIO) { 2020 rv = -ENOMEM; 2021 nullb->disk = blk_alloc_disk(nullb->dev->home_node); 2022 if (!nullb->disk) 2023 goto out_cleanup_queues; 2024 2025 nullb->q = nullb->disk->queue; 2026 rv = init_driver_queues(nullb); 2027 if (rv) 2028 goto out_cleanup_disk; 2029 } 2030 2031 if (dev->mbps) { 2032 set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags); 2033 nullb_setup_bwtimer(nullb); 2034 } 2035 2036 if (dev->cache_size > 0) { 2037 set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); 2038 blk_queue_write_cache(nullb->q, true, true); 2039 } 2040 2041 if (dev->zoned) { 2042 rv = null_init_zoned_dev(dev, nullb->q); 2043 if (rv) 2044 goto out_cleanup_disk; 2045 } 2046 2047 nullb->q->queuedata = nullb; 2048 blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q); 2049 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q); 2050 2051 mutex_lock(&lock); 2052 nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL); 2053 dev->index = nullb->index; 2054 mutex_unlock(&lock); 2055 2056 blk_queue_logical_block_size(nullb->q, dev->blocksize); 2057 blk_queue_physical_block_size(nullb->q, dev->blocksize); 2058 if (!dev->max_sectors) 2059 dev->max_sectors = queue_max_hw_sectors(nullb->q); 2060 dev->max_sectors = min_t(unsigned int, dev->max_sectors, 2061 BLK_DEF_MAX_SECTORS); 2062 blk_queue_max_hw_sectors(nullb->q, dev->max_sectors); 2063 2064 if (dev->virt_boundary) 2065 blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1); 2066 2067 null_config_discard(nullb); 2068 2069 if (config_item_name(&dev->item)) { 2070 /* Use configfs dir name as the device name */ 2071 snprintf(nullb->disk_name, sizeof(nullb->disk_name), 2072 "%s", config_item_name(&dev->item)); 2073 } else { 2074 sprintf(nullb->disk_name, "nullb%d", nullb->index); 2075 } 2076 2077 rv = null_gendisk_register(nullb); 2078 if (rv) 2079 goto out_cleanup_zone; 2080 2081 mutex_lock(&lock); 2082 list_add_tail(&nullb->list, &nullb_list); 2083 mutex_unlock(&lock); 2084 2085 pr_info("disk %s created\n", nullb->disk_name); 2086 2087 return 0; 2088 out_cleanup_zone: 2089 null_free_zoned_dev(dev); 2090 out_cleanup_disk: 2091 blk_cleanup_disk(nullb->disk); 2092 out_cleanup_tags: 2093 if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set) 2094 blk_mq_free_tag_set(nullb->tag_set); 2095 out_cleanup_queues: 2096 cleanup_queues(nullb); 2097 out_free_nullb: 2098 kfree(nullb); 2099 dev->nullb = NULL; 2100 out: 2101 return rv; 2102 } 2103 2104 static struct nullb *null_find_dev_by_name(const char *name) 2105 { 2106 struct nullb *nullb = NULL, *nb; 2107 2108 mutex_lock(&lock); 2109 list_for_each_entry(nb, &nullb_list, list) { 2110 if (strcmp(nb->disk_name, name) == 0) { 2111 nullb = nb; 2112 break; 2113 } 2114 } 2115 mutex_unlock(&lock); 2116 2117 return nullb; 2118 } 2119 2120 static int null_create_dev(void) 2121 { 2122 struct nullb_device *dev; 2123 int ret; 2124 2125 dev = null_alloc_dev(); 2126 if (!dev) 2127 return -ENOMEM; 2128 2129 ret = null_add_dev(dev); 2130 if (ret) { 2131 null_free_dev(dev); 
2132 return ret; 2133 } 2134 2135 return 0; 2136 } 2137 2138 static void null_destroy_dev(struct nullb *nullb) 2139 { 2140 struct nullb_device *dev = nullb->dev; 2141 2142 null_del_dev(nullb); 2143 null_free_dev(dev); 2144 } 2145 2146 static int __init null_init(void) 2147 { 2148 int ret = 0; 2149 unsigned int i; 2150 struct nullb *nullb; 2151 2152 if (g_bs > PAGE_SIZE) { 2153 pr_warn("invalid block size\n"); 2154 pr_warn("defaults block size to %lu\n", PAGE_SIZE); 2155 g_bs = PAGE_SIZE; 2156 } 2157 2158 if (g_max_sectors > BLK_DEF_MAX_SECTORS) { 2159 pr_warn("invalid max sectors\n"); 2160 pr_warn("defaults max sectors to %u\n", BLK_DEF_MAX_SECTORS); 2161 g_max_sectors = BLK_DEF_MAX_SECTORS; 2162 } 2163 2164 if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) { 2165 pr_err("invalid home_node value\n"); 2166 g_home_node = NUMA_NO_NODE; 2167 } 2168 2169 if (g_queue_mode == NULL_Q_RQ) { 2170 pr_err("legacy IO path is no longer available\n"); 2171 return -EINVAL; 2172 } 2173 2174 if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) { 2175 if (g_submit_queues != nr_online_nodes) { 2176 pr_warn("submit_queues param is set to %u.\n", 2177 nr_online_nodes); 2178 g_submit_queues = nr_online_nodes; 2179 } 2180 } else if (g_submit_queues > nr_cpu_ids) { 2181 g_submit_queues = nr_cpu_ids; 2182 } else if (g_submit_queues <= 0) { 2183 g_submit_queues = 1; 2184 } 2185 2186 if (g_queue_mode == NULL_Q_MQ && shared_tags) { 2187 ret = null_init_tag_set(NULL, &tag_set); 2188 if (ret) 2189 return ret; 2190 } 2191 2192 config_group_init(&nullb_subsys.su_group); 2193 mutex_init(&nullb_subsys.su_mutex); 2194 2195 ret = configfs_register_subsystem(&nullb_subsys); 2196 if (ret) 2197 goto err_tagset; 2198 2199 mutex_init(&lock); 2200 2201 null_major = register_blkdev(0, "nullb"); 2202 if (null_major < 0) { 2203 ret = null_major; 2204 goto err_conf; 2205 } 2206 2207 for (i = 0; i < nr_devices; i++) { 2208 ret = null_create_dev(); 2209 if (ret) 2210 goto err_dev; 2211 } 2212 2213 pr_info("module loaded\n"); 2214 return 0; 2215 2216 err_dev: 2217 while (!list_empty(&nullb_list)) { 2218 nullb = list_entry(nullb_list.next, struct nullb, list); 2219 null_destroy_dev(nullb); 2220 } 2221 unregister_blkdev(null_major, "nullb"); 2222 err_conf: 2223 configfs_unregister_subsystem(&nullb_subsys); 2224 err_tagset: 2225 if (g_queue_mode == NULL_Q_MQ && shared_tags) 2226 blk_mq_free_tag_set(&tag_set); 2227 return ret; 2228 } 2229 2230 static void __exit null_exit(void) 2231 { 2232 struct nullb *nullb; 2233 2234 configfs_unregister_subsystem(&nullb_subsys); 2235 2236 unregister_blkdev(null_major, "nullb"); 2237 2238 mutex_lock(&lock); 2239 while (!list_empty(&nullb_list)) { 2240 nullb = list_entry(nullb_list.next, struct nullb, list); 2241 null_destroy_dev(nullb); 2242 } 2243 mutex_unlock(&lock); 2244 2245 if (g_queue_mode == NULL_Q_MQ && shared_tags) 2246 blk_mq_free_tag_set(&tag_set); 2247 } 2248 2249 module_init(null_init); 2250 module_exit(null_exit); 2251 2252 MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>"); 2253 MODULE_LICENSE("GPL"); 2254
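
/*
 * Example configfs usage (a sketch; assumes configfs is mounted at
 * /sys/kernel/config and the device name is free to choose):
 *
 *   mkdir /sys/kernel/config/nullb/mydisk
 *   echo 4096 > /sys/kernel/config/nullb/mydisk/blocksize
 *   echo 1 > /sys/kernel/config/nullb/mydisk/memory_backed
 *   echo 1 > /sys/kernel/config/nullb/mydisk/power
 *
 * The directory name becomes the disk name (/dev/mydisk). Writing 0 to
 * "power" and removing the directory tears the device back down.
 */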