// SPDX-License-Identifier: GPL-2.0
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "elevator.h"
#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-wbt.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.allow_merge)
                return e->type->ops.allow_merge(q, rq, bio);

        return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
        if (!blk_rq_merge_ok(rq, bio))
                return false;

        if (!elv_iosched_allow_bio_merge(rq, bio))
                return false;

        return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);

static inline bool elv_support_features(unsigned int elv_features,
                                        unsigned int required_features)
{
        return (required_features & elv_features) == required_features;
}

/**
 * elevator_match - Test an elevator name and features
 * @e: Scheduler to test
 * @name: Elevator name to test
 * @required_features: Features that the elevator must provide
 *
 * Return true if the elevator @e name matches @name and if @e provides all
 * the features specified by @required_features.
 */
static bool elevator_match(const struct elevator_type *e, const char *name,
                           unsigned int required_features)
{
        if (!elv_support_features(e->elevator_features, required_features))
                return false;
        if (!strcmp(e->elevator_name, name))
                return true;
        if (e->elevator_alias && !strcmp(e->elevator_alias, name))
                return true;

        return false;
}
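
/*
 * Note on matching: besides the canonical elevator_name, a scheduler may
 * also register an elevator_alias that elevator_match() accepts.  For
 * example (for the in-tree schedulers at this point), mq-deadline sets its
 * alias to "deadline", so either of the following selects it:
 *
 *	echo mq-deadline > /sys/block/<dev>/queue/scheduler
 *	echo deadline    > /sys/block/<dev>/queue/scheduler
 *
 * The feature check runs first: a name match is never reported for an
 * elevator that lacks one of @required_features.
 */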

/**
 * elevator_find - Find an elevator
 * @name: Name of the elevator to find
 * @required_features: Features that the elevator must provide
 *
 * Return the first registered scheduler with name @name and supporting the
 * features @required_features and NULL otherwise.
 */
static struct elevator_type *elevator_find(const char *name,
                                           unsigned int required_features)
{
        struct elevator_type *e;

        list_for_each_entry(e, &elv_list, list) {
                if (elevator_match(e, name, required_features))
                        return e;
        }

        return NULL;
}

static void elevator_put(struct elevator_type *e)
{
        module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(struct request_queue *q,
                                          const char *name, bool try_loading)
{
        struct elevator_type *e;

        spin_lock(&elv_list_lock);

        e = elevator_find(name, q->required_elevator_features);
        if (!e && try_loading) {
                spin_unlock(&elv_list_lock);
                request_module("%s-iosched", name);
                spin_lock(&elv_list_lock);
                e = elevator_find(name, q->required_elevator_features);
        }

        if (e && !try_module_get(e->elevator_owner))
                e = NULL;

        spin_unlock(&elv_list_lock);
        return e;
}

static struct kobj_type elv_ktype;

struct elevator_queue *elevator_alloc(struct request_queue *q,
                                      struct elevator_type *e)
{
        struct elevator_queue *eq;

        eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
        if (unlikely(!eq))
                return NULL;

        eq->type = e;
        kobject_init(&eq->kobj, &elv_ktype);
        mutex_init(&eq->sysfs_lock);
        hash_init(eq->hash);

        return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
        struct elevator_queue *e;

        e = container_of(kobj, struct elevator_queue, kobj);
        elevator_put(e->type);
        kfree(e);
}

void __elevator_exit(struct request_queue *q, struct elevator_queue *e)
{
        mutex_lock(&e->sysfs_lock);
        blk_mq_exit_sched(q, e);
        mutex_unlock(&e->sysfs_lock);

        kobject_put(&e->kobj);
}

static inline void __elv_rqhash_del(struct request *rq)
{
        hash_del(&rq->hash);
        rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
        if (ELV_ON_HASH(rq))
                __elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        BUG_ON(ELV_ON_HASH(rq));
        hash_add(e->hash, &rq->hash, rq_hash_key(rq));
        rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
        __elv_rqhash_del(rq);
        elv_rqhash_add(q, rq);
}

struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
        struct elevator_queue *e = q->elevator;
        struct hlist_node *next;
        struct request *rq;

        hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
                BUG_ON(!ELV_ON_HASH(rq));

                if (unlikely(!rq_mergeable(rq))) {
                        __elv_rqhash_del(rq);
                        continue;
                }

                if (rq_hash_key(rq) == offset)
                        return rq;
        }

        return NULL;
}
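
/*
 * A worked example of the merge hash key (rq_hash_key() above): a request
 * covering sectors [100, 108) has blk_rq_pos() == 100 and
 * blk_rq_sectors() == 8, so it is hashed under key 108, the first sector
 * *after* the request.  A later bio starting at sector 108 can then be
 * found via elv_rqhash_find(q, 108) and considered for a back merge (see
 * elv_merge() below).  Front merges are left to the per-scheduler
 * ->request_merge() hook.
 */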

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct request *__rq;

        while (*p) {
                parent = *p;
                __rq = rb_entry(parent, struct request, rb_node);

                if (blk_rq_pos(rq) < blk_rq_pos(__rq))
                        p = &(*p)->rb_left;
                else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
                        p = &(*p)->rb_right;
        }

        rb_link_node(&rq->rb_node, parent, p);
        rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
        BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
        rb_erase(&rq->rb_node, root);
        RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
        struct rb_node *n = root->rb_node;
        struct request *rq;

        while (n) {
                rq = rb_entry(n, struct request, rb_node);

                if (sector < blk_rq_pos(rq))
                        n = n->rb_left;
                else if (sector > blk_rq_pos(rq))
                        n = n->rb_right;
                else
                        return rq;
        }

        return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

enum elv_merge elv_merge(struct request_queue *q, struct request **req,
                struct bio *bio)
{
        struct elevator_queue *e = q->elevator;
        struct request *__rq;

        /*
         * Levels of merges:
         *	nomerges:  No merges at all attempted
         *	noxmerges: Only simple one-hit cache try
         *	merges:	   All merge tries attempted
         */
        if (blk_queue_nomerges(q) || !bio_mergeable(bio))
                return ELEVATOR_NO_MERGE;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
                enum elv_merge ret = blk_try_merge(q->last_merge, bio);

                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        if (blk_queue_noxmerges(q))
                return ELEVATOR_NO_MERGE;

        /*
         * See if our hash lookup can find a potential backmerge.
         */
        __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
        if (__rq && elv_bio_merge_ok(__rq, bio)) {
                *req = __rq;

                if (blk_discard_mergable(__rq))
                        return ELEVATOR_DISCARD_MERGE;
                return ELEVATOR_BACK_MERGE;
        }

        if (e->type->ops.request_merge)
                return e->type->ops.request_merge(q, req, bio);

        return ELEVATOR_NO_MERGE;
}
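
/*
 * The value returned by elv_merge() tells the caller (for example,
 * blk_mq_sched_try_merge()) what to do with *req: append the bio for
 * ELEVATOR_BACK_MERGE, prepend it for ELEVATOR_FRONT_MERGE, fold the
 * discard in for ELEVATOR_DISCARD_MERGE, or allocate a brand new request
 * for ELEVATOR_NO_MERGE.  After a successful bio merge the caller
 * typically notifies us again through elv_merged_request() below so the
 * merge hash and the last_merge hint stay up to date.
 */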

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise. 'free' will contain all
 * requests that need to be freed.
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
                              struct list_head *free)
{
        struct request *__rq;
        bool ret;

        if (blk_queue_nomerges(q))
                return false;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
                list_add(&rq->queuelist, free);
                return true;
        }

        if (blk_queue_noxmerges(q))
                return false;

        ret = false;
        /*
         * See if our hash lookup can find a potential backmerge.
         */
        while (1) {
                __rq = elv_rqhash_find(q, blk_rq_pos(rq));
                if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
                        break;

                list_add(&rq->queuelist, free);
                /* The merged request could be merged with others, try again */
                ret = true;
                rq = __rq;
        }

        return ret;
}

void elv_merged_request(struct request_queue *q, struct request *rq,
                enum elv_merge type)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.request_merged)
                e->type->ops.request_merged(q, rq, type);

        if (type == ELEVATOR_BACK_MERGE)
                elv_rqhash_reposition(q, rq);

        q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
                        struct request *next)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.requests_merged)
                e->type->ops.requests_merged(q, rq, next);

        elv_rqhash_reposition(q, rq);
        q->last_merge = rq;
}

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.next_request)
                return e->type->ops.next_request(q, rq);

        return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.former_request)
                return e->type->ops.former_request(q, rq);

        return NULL;
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->show)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->show(e, page) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}
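
/*
 * Sketch of what these handlers dispatch to: a scheduler exports its
 * tunables as an elv_fs_entry array, terminated with __ATTR_NULL, in
 * elevator_type->elevator_attrs (wired up in elv_register_queue() below).
 * The identifiers in this example are made up for illustration only:
 *
 *	static struct elv_fs_entry example_attrs[] = {
 *		__ATTR(fifo_batch, 0644, example_fifo_batch_show,
 *		       example_fifo_batch_store),
 *		__ATTR_NULL
 *	};
 *
 * elv_attr_show() above and elv_attr_store() below then route sysfs reads
 * and writes under /sys/block/<dev>/queue/iosched/ to the matching entry's
 * ->show() and ->store() callbacks.
 */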

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
               const char *page, size_t length)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->store)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->store(e, page, length) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
        .show	= elv_attr_show,
        .store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
        .sysfs_ops	= &elv_sysfs_ops,
        .release	= elevator_release,
};

int elv_register_queue(struct request_queue *q, bool uevent)
{
        struct elevator_queue *e = q->elevator;
        int error;

        lockdep_assert_held(&q->sysfs_lock);

        error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
        if (!error) {
                struct elv_fs_entry *attr = e->type->elevator_attrs;
                if (attr) {
                        while (attr->attr.name) {
                                if (sysfs_create_file(&e->kobj, &attr->attr))
                                        break;
                                attr++;
                        }
                }
                if (uevent)
                        kobject_uevent(&e->kobj, KOBJ_ADD);

                e->registered = 1;
        }
        return error;
}

void elv_unregister_queue(struct request_queue *q)
{
        lockdep_assert_held(&q->sysfs_lock);

        if (q) {
                struct elevator_queue *e = q->elevator;

                kobject_uevent(&e->kobj, KOBJ_REMOVE);
                kobject_del(&e->kobj);

                e->registered = 0;
                /* Re-enable throttling in case elevator disabled it */
                wbt_enable_default(q);
        }
}

int elv_register(struct elevator_type *e)
{
        /* insert_requests and dispatch_request are mandatory */
        if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
                return -EINVAL;

        /* create icq_cache if requested */
        if (e->icq_size) {
                if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
                    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
                        return -EINVAL;

                snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
                         "%s_io_cq", e->elevator_name);
                e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
                                                 e->icq_align, 0, NULL);
                if (!e->icq_cache)
                        return -ENOMEM;
        }

        /* register, don't allow duplicate names */
        spin_lock(&elv_list_lock);
        if (elevator_find(e->elevator_name, 0)) {
                spin_unlock(&elv_list_lock);
                kmem_cache_destroy(e->icq_cache);
                return -EBUSY;
        }
        list_add_tail(&e->list, &elv_list);
        spin_unlock(&elv_list_lock);

        printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);

        return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
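
/*
 * Illustrative only: the typical shape of a scheduler module pairing
 * elv_register() with elv_unregister().  Every name below is hypothetical;
 * the only hard requirements checked above are the insert_requests and
 * dispatch_request hooks.
 *
 *	static struct elevator_type example_sched = {
 *		.ops = {
 *			.insert_requests	= example_insert_requests,
 *			.dispatch_request	= example_dispatch_request,
 *		},
 *		.elevator_name	= "example",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return elv_register(&example_sched);
 *	}
 *	module_init(example_init);
 *
 *	static void __exit example_exit(void)
 *	{
 *		elv_unregister(&example_sched);
 *	}
 *	module_exit(example_exit);
 */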

void elv_unregister(struct elevator_type *e)
{
        /* unregister */
        spin_lock(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock(&elv_list_lock);

        /*
         * Destroy icq_cache if it exists. icq's are RCU managed. Make
         * sure all RCU operations are complete before proceeding.
         */
        if (e->icq_cache) {
                rcu_barrier();
                kmem_cache_destroy(e->icq_cache);
                e->icq_cache = NULL;
        }
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * Switch @q to @new_e, exiting the current scheduler (if any) first.
 */
int elevator_switch_mq(struct request_queue *q,
                              struct elevator_type *new_e)
{
        int ret;

        lockdep_assert_held(&q->sysfs_lock);

        if (q->elevator) {
                if (q->elevator->registered)
                        elv_unregister_queue(q);

                ioc_clear_queue(q);
                elevator_exit(q, q->elevator);
        }

        ret = blk_mq_init_sched(q, new_e);
        if (ret)
                goto out;

        if (new_e) {
                ret = elv_register_queue(q, true);
                if (ret) {
                        elevator_exit(q, q->elevator);
                        goto out;
                }
        }

        if (new_e)
                blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
        else
                blk_add_trace_msg(q, "elv switch: none");

out:
        return ret;
}

static inline bool elv_support_iosched(struct request_queue *q)
{
        if (!queue_is_mq(q) ||
            (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))
                return false;
        return true;
}

/*
 * For single queue devices, default to using mq-deadline. If we have multiple
 * queues or mq-deadline is not available, default to "none".
 */
static struct elevator_type *elevator_get_default(struct request_queue *q)
{
        if (q->tag_set && q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
                return NULL;

        if (q->nr_hw_queues != 1 &&
            !blk_mq_is_shared_tags(q->tag_set->flags))
                return NULL;

        return elevator_get(q, "mq-deadline", false);
}

/*
 * Get the first elevator providing the features required by the request queue.
 * Default to "none" if no matching elevator is found.
 */
static struct elevator_type *elevator_get_by_features(struct request_queue *q)
{
        struct elevator_type *e, *found = NULL;

        spin_lock(&elv_list_lock);

        list_for_each_entry(e, &elv_list, list) {
                if (elv_support_features(e->elevator_features,
                                         q->required_elevator_features)) {
                        found = e;
                        break;
                }
        }

        if (found && !try_module_get(found->elevator_owner))
                found = NULL;

        spin_unlock(&elv_list_lock);
        return found;
}
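
/*
 * Example of how required features come into play (details may vary by
 * driver and kernel version): a host-managed zoned block device needs
 * sequential-write ordering, expressed as ELEVATOR_F_ZBD_SEQ_WRITE in
 * q->required_elevator_features (typically set via
 * blk_queue_required_elevator_features()).  elevator_get_by_features()
 * will then only pick a scheduler whose elevator_features includes that
 * bit (mq-deadline advertises it), and elevator_init_mq() below falls
 * back to "none" when no such scheduler is available.
 */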

/*
 * For a device queue that has no required features, use the default elevator
 * settings. Otherwise, use the first elevator available matching the required
 * features. If no suitable elevator is found or if the chosen elevator
 * initialization fails, fall back to the "none" elevator (no elevator).
 */
void elevator_init_mq(struct request_queue *q)
{
        struct elevator_type *e;
        int err;

        if (!elv_support_iosched(q))
                return;

        WARN_ON_ONCE(blk_queue_registered(q));

        if (unlikely(q->elevator))
                return;

        if (!q->required_elevator_features)
                e = elevator_get_default(q);
        else
                e = elevator_get_by_features(q);
        if (!e)
                return;

        /*
         * We are called before adding disk, when there isn't any FS I/O,
         * so freezing queue plus canceling dispatch work is enough to
         * drain any dispatch activities originated from passthrough
         * requests, then no need to quiesce queue which may add long boot
         * latency, especially when lots of disks are involved.
         */
        blk_mq_freeze_queue(q);
        blk_mq_cancel_work_sync(q);

        err = blk_mq_init_sched(q, e);

        blk_mq_unfreeze_queue(q);

        if (err) {
                pr_warn("\"%s\" elevator initialization failed, "
                        "falling back to \"none\"\n", e->elevator_name);
                elevator_put(e);
        }
}

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
        int err;

        lockdep_assert_held(&q->sysfs_lock);

        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);

        err = elevator_switch_mq(q, new_e);

        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);

        return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int __elevator_change(struct request_queue *q, const char *name)
{
        char elevator_name[ELV_NAME_MAX];
        struct elevator_type *e;

        /* Make sure queue is not in the middle of being removed */
        if (!blk_queue_registered(q))
                return -ENOENT;

        /*
         * Special case for mq, turn off scheduling
         */
        if (!strncmp(name, "none", 4)) {
                if (!q->elevator)
                        return 0;
                return elevator_switch(q, NULL);
        }

        strlcpy(elevator_name, name, sizeof(elevator_name));
        e = elevator_get(q, strstrip(elevator_name), true);
        if (!e)
                return -EINVAL;

        if (q->elevator &&
            elevator_match(q->elevator->type, elevator_name, 0)) {
                elevator_put(e);
                return 0;
        }

        return elevator_switch(q, e);
}

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
                          size_t count)
{
        int ret;

        if (!elv_support_iosched(q))
                return count;

        ret = __elevator_change(q, name);
        if (!ret)
                return count;

        return ret;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
        struct elevator_queue *e = q->elevator;
        struct elevator_type *elv = NULL;
        struct elevator_type *__e;
        int len = 0;

        if (!queue_is_mq(q))
                return sprintf(name, "none\n");

        if (!q->elevator)
                len += sprintf(name+len, "[none] ");
        else
                elv = e->type;

        spin_lock(&elv_list_lock);
        list_for_each_entry(__e, &elv_list, list) {
                if (elv && elevator_match(elv, __e->elevator_name, 0)) {
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                        continue;
                }
                if (elv_support_iosched(q) &&
                    elevator_match(__e, __e->elevator_name,
                                   q->required_elevator_features))
                        len += sprintf(name+len, "%s ", __e->elevator_name);
        }
        spin_unlock(&elv_list_lock);

        if (q->elevator)
                len += sprintf(name+len, "none");

        len += sprintf(len+name, "\n");
        return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbprev = rb_prev(&rq->rb_node);

        if (rbprev)
                return rb_entry_rq(rbprev);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbnext = rb_next(&rq->rb_node);

        if (rbnext)
                return rb_entry_rq(rbnext);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);

static int __init elevator_setup(char *str)
{
        pr_warn("Kernel parameter elevator= does not have any effect anymore.\n"
                "Please use sysfs to set IO scheduler for individual devices.\n");
        return 1;
}

__setup("elevator=", elevator_setup);