// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!q->request_fn && !q->mq_ops)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	if (q->request_fn)
		err = blk_update_nr_requests(q, nr);
	else
		err = blk_mq_update_nr_requests(q, nr);

	if (err)
		return err;

	return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
					(PAGE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), (page));

	return queue_var_show(PAGE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}
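/*
 * The discard, write-same and write-zeroes limits below are kept in
 * 512-byte sectors internally; the sysfs files export them in bytes,
 * hence the << 9 conversions.
 */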
static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{

	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}
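/*
 * QUEUE_SYSFS_BIT_FNS() generates a queue_show_<name>/queue_store_<name>
 * pair that exposes a single queue flag as a boolean sysfs file.  When
 * @neg is set the exported value is the inverse of the flag, e.g.
 * "rotational" shows and stores the opposite of QUEUE_FLAG_NONROT.
 */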
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
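/*
 * rq_affinity encoding: 0 clears both flags, 1 sets QUEUE_FLAG_SAME_COMP
 * (complete on the submitting CPU's group), 2 additionally sets
 * QUEUE_FLAG_SAME_FORCE (force completion on the submitting CPU).  The
 * show side reports this as "set << force".
 */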
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == -1)
		val = -1;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == -1)
		q->poll_nsec = -1;
	else
		q->poll_nsec = val * 1000;

	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	if (poll_on)
		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);

	return ret;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	if (q->mq_ops) {
		blk_mq_freeze_queue(q);
		blk_mq_quiesce_queue(q);
	} else
		blk_queue_bypass_start(q);

	wbt_set_min_lat(q, val);
	wbt_update_limits(q);

	if (q->mq_ops) {
		blk_mq_unquiesce_queue(q);
		blk_mq_unfreeze_queue(q);
	} else
		blk_queue_bypass_end(q);

	return count;
}

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}
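/*
 * Attribute definitions: each queue_sysfs_entry pairs a file name under
 * /sys/block/<dev>/queue/ with its mode and show/store handlers.
 */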
static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = 0644 },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = 0644 },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = 0644 },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = 0444 },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = 0444 },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_discard_segments_entry = {
	.attr = {.name = "max_discard_segments", .mode = 0444 },
	.show = queue_max_discard_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = 0444 },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = 0444 },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = 0644 },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = 0444 },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_chunk_sectors_entry = {
	.attr = {.name = "chunk_sectors", .mode = 0444 },
	.show = queue_chunk_sectors_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = 0444 },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = 0444 },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = 0444 },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
	.attr = {.name = "discard_max_hw_bytes", .mode = 0444 },
	.show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = 0644 },
	.show = queue_discard_max_show,
	.store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = 0444 },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = 0444 },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
	.attr = {.name = "write_zeroes_max_bytes", .mode = 0444 },
	.show = queue_write_zeroes_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = 0644 },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_zoned_entry = {
	.attr = {.name = "zoned", .mode = 0444 },
	.show = queue_zoned_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = 0644 },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = 0644 },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = 0644 },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = 0644 },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
	.attr = {.name = "io_poll", .mode = 0644 },
	.show = queue_poll_show,
	.store = queue_poll_store,
};

static struct queue_sysfs_entry queue_poll_delay_entry = {
	.attr = {.name = "io_poll_delay", .mode = 0644 },
	.show = queue_poll_delay_show,
	.store = queue_poll_delay_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
	.attr = {.name = "write_cache", .mode = 0644 },
	.show = queue_wc_show,
	.store = queue_wc_store,
};

static struct queue_sysfs_entry queue_fua_entry = {
	.attr = {.name = "fua", .mode = 0444 },
	.show = queue_fua_show,
};

static struct queue_sysfs_entry queue_dax_entry = {
	.attr = {.name = "dax", .mode = 0444 },
	.show = queue_dax_show,
};

static struct queue_sysfs_entry queue_wb_lat_entry = {
	.attr = {.name = "wbt_lat_usec", .mode = 0644 },
	.show = queue_wb_lat_show,
	.store = queue_wb_lat_store,
};

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static struct queue_sysfs_entry throtl_sample_time_entry = {
	.attr = {.name = "throttle_sample_time", .mode = 0644 },
	.show = blk_throtl_sample_time_show,
	.store = blk_throtl_sample_time_store,
};
#endif
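/*
 * NULL-terminated table of all queue attributes; it is hooked up to
 * blk_queue_ktype below so the files are created when the "queue"
 * kobject is added.
 */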
static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&throtl_sample_time_entry.attr,
#endif
	NULL,
};
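/*
 * The show/store wrappers below serialize all attribute access with
 * q->sysfs_lock and return -ENOENT once the queue has been marked
 * dying, so handlers never run against a queue that is going away.
 */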
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * __blk_release_queue - release a request queue when it is no longer needed
 * @work: pointer to the release_work member of the request queue to be released
 *
 * Description:
 *     blk_release_queue is the counterpart of blk_init_queue(). It should be
 *     called when a request queue is being released; typically when a block
 *     device is being de-registered. Its primary task is to free the queue
 *     itself.
 *
 * Notes:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 *
 *     Although blk_release_queue() may be called with preemption disabled,
 *     __blk_release_queue() may sleep.
 */
static void __blk_release_queue(struct work_struct *work)
{
	struct request_queue *q = container_of(work, typeof(*q), release_work);

	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	if (!blk_queue_dead(q)) {
		/*
		 * Last reference was dropped without having called
		 * blk_cleanup_queue().
		 */
		WARN_ONCE(blk_queue_init_done(q),
			  "request queue %p has been registered but blk_cleanup_queue() has not been called for that queue\n",
			  q);
		blk_exit_queue(q);
	}

	WARN(blk_queue_root_blkg(q),
	     "request queue %p is being released but it has not yet been removed from the blkcg controller\n",
	     q);

	blk_free_queue_stats(q->stats);

	blk_exit_rl(q, &q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	if (!q->mq_ops) {
		if (q->exit_rq_fn)
			q->exit_rq_fn(q, q->fq->flush_rq);
		blk_free_flush_queue(q->fq);
	} else {
		blk_mq_release(q);
	}

	blk_trace_shutdown(q);

	if (q->mq_ops)
		blk_mq_debugfs_unregister(q);

	bioset_exit(&q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	INIT_WORK(&q->release_work, __blk_release_queue);
	schedule_work(&q->release_work);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};
/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags),
		  "%s is registering an already registered queue\n",
		  kobject_name(&dev->kobj));
	queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices. Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved. To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
		blk_queue_bypass_end(q);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	/* Prevent changes through sysfs until registration is completed. */
	mutex_lock(&q->sysfs_lock);

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	if (q->mq_ops) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	wbt_enable_default(q);

	blk_throtl_register_queue(q);

	if (q->request_fn || (q->mq_ops && q->elevator)) {
		ret = elv_register_queue(q);
		if (ret) {
			mutex_unlock(&q->sysfs_lock);
			kobject_uevent(&q->kobj, KOBJ_REMOVE);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			return ret;
		}
	}
	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_register_queue);

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);

	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);

	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (q->mq_ops)
		blk_mq_unregister_dev(disk_to_dev(disk), q);
	mutex_unlock(&q->sysfs_lock);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));

	rq_qos_exit(q);

	mutex_lock(&q->sysfs_lock);
	if (q->request_fn || (q->mq_ops && q->elevator))
		elv_unregister_queue(q);
	mutex_unlock(&q->sysfs_lock);

	kobject_put(&disk_to_dev(disk)->kobj);
}