/*
 * Block device elevator/IO-scheduler.
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>

#include <asm/uaccess.h>

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	\
		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	elevator_t *e = q->elevator;

	if (e->ops->elevator_allow_merge_fn)
		return e->ops->elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * different data direction or already started, don't merge
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * must be same device and not a special request
	 */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return 0;

	/*
	 * only merge integrity protected bio into ditto rq
	 */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return 0;

	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);
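/*
 * Work out what type of merge bio could do with __rq: a back merge if the
 * bio starts where the request ends, a front merge if it ends where the
 * request starts, otherwise no merge.
 */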
static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * we can merge and sequence is ok, check if it's possible
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}

static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}
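/*
 * Find an elevator type by name and grab a reference on its module. If it
 * isn't registered yet, try loading the matching "-iosched" module and
 * look it up again. Returns NULL if the scheduler is unavailable.
 */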
static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e) {
		char elv[ELV_NAME_MAX + strlen("-iosched")];

		spin_unlock(&elv_list_lock);

		if (!strcmp(name, "anticipatory"))
			sprintf(elv, "as-iosched");
		else
			sprintf(elv, "%s-iosched", name);

		request_module("%s", elv);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

static void *elevator_init_queue(struct request_queue *q,
				 struct elevator_queue *eq)
{
	return eq->ops->elevator_init_fn(q);
}

static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
			    void *data)
{
	q->elevator = eq;
	eq->elevator_data = data;
}

static char chosen_elevator[16];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	if (!strcmp(str, "as"))
		strcpy(chosen_elevator, "anticipatory");
	else
		strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);

static struct kobj_type elv_ktype;

static elevator_t *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	elevator_t *eq;
	int i;

	eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->ops = &e->ops;
	eq->elevator_type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);

	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
					GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}

static void elevator_release(struct kobject *kobj)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);

	elevator_put(e->elevator_type);
	kfree(e->hash);
	kfree(e);
}
int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	int ret = 0;
	void *data;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name);
		if (!e)
			return -EINVAL;
	}

	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
				"Using noop.\n");
			e = elevator_get("noop");
		}
	}

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	data = elevator_init_queue(q, eq);
	if (!data) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}

	elevator_attach(q, eq, data);
	return ret;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(elevator_t *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);
	e->ops = NULL;
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_activate_req_fn)
		e->ops->elevator_activate_req_fn(q, rq);
}

static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_deactivate_req_fn)
		e->ops->elevator_deactivate_req_fn(q, rq);
}

static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}
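/*
 * Look up a request in the merge hash whose last sector matches offset,
 * i.e. a back merge candidate for a bio starting at offset. Entries that
 * are no longer mergeable are dropped from the hash as we scan.
 */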
static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	elevator_t *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (rq->sector < __rq->sector)
			p = &(*p)->rb_left;
		else if (rq->sector > __rq->sector)
			p = &(*p)->rb_right;
		else
			return __rq;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
	return NULL;
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < rq->sector)
			n = n->rb_left;
		else if (sector > rq->sector)
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (rq->sector >= boundary) {
			if (pos->sector < boundary)
				continue;
		} else {
			if (pos->sector >= boundary)
				break;
		}
		if (rq->sector >= pos->sector)
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);
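/*
 * Decide whether bio can be merged with a pending request. Try the
 * one-hit last_merge cache first, then the merge hash for a back merge,
 * and finally ask the io scheduler. On success *req points to the request
 * to merge with and the merge type is returned.
 */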
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	elevator_t *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			struct request *next)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);
	elv_rqhash_del(q, next);

	q->nr_sorted--;
	q->last_merge = rq;
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq))
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}

static void elv_drain_elevator(struct request_queue *q)
{
	static int printed;
	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted == 0)
		return;
	if (printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
	}
}
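/*
 * Insert rq at the location given by where: directly on the dispatch queue
 * for front/back/requeue insertion, or handed to the io scheduler for
 * sorted insertion. May unplug the queue if enough requests are pending.
 */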
void elv_insert(struct request_queue *q, struct request *rq, int where)
{
	struct list_head *pos;
	unsigned ordseq;
	int unplug_it = 1;

	blk_add_trace_rq(q, rq, BLK_TA_INSERT);

	rq->q = q;

	switch (where) {
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;

		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in a hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		blk_remove_plug(q);
		q->request_fn(q);
		break;

	case ELEVATOR_INSERT_SORT:
		BUG_ON(!blk_fs_request(rq));
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->ops->elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_REQUEUE:
		/*
		 * If ordered flush isn't in progress, we do front
		 * insertion; otherwise, requests should be requeued
		 * in ordseq order.
		 */
		rq->cmd_flags |= REQ_SOFTBARRIER;

		/*
		 * Most requeues happen because of a busy condition,
		 * don't force unplug of the queue for that case.
		 */
		unplug_it = 0;

		if (q->ordseq == 0) {
			list_add(&rq->queuelist, &q->queue_head);
			break;
		}

		ordseq = blk_ordered_req_seq(rq);

		list_for_each(pos, &q->queue_head) {
			struct request *pos_rq = list_entry_rq(pos);
			if (ordseq <= blk_ordered_req_seq(pos_rq))
				break;
		}

		list_add_tail(&rq->queuelist, pos);
		break;

	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}

	if (unplug_it && blk_queue_plugged(q)) {
		int nrq = q->rq.count[READ] + q->rq.count[WRITE]
			- q->in_flight;

		if (nrq >= q->unplug_thresh)
			__generic_unplug_device(q);
	}
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where,
		       int plug)
{
	if (q->ordcolor)
		rq->cmd_flags |= REQ_ORDERED_COLOR;

	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
		/*
		 * toggle ordered color
		 */
		if (blk_barrier_rq(rq))
			q->ordcolor ^= 1;

		/*
		 * barriers implicitly indicate back insertion
		 */
		if (where == ELEVATOR_INSERT_SORT)
			where = ELEVATOR_INSERT_BACK;

		/*
		 * this request is scheduling boundary, update
		 * end_sector
		 */
		if (blk_fs_request(rq)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    where == ELEVATOR_INSERT_SORT)
		where = ELEVATOR_INSERT_BACK;

	if (plug)
		blk_plug_device(q);

	elv_insert(q, rq, where);
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where,
		     int plug)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where, plug);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			if (blk_do_ordered(q, &rq))
				return rq;
		}

		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
}
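/*
 * Return the next request for the driver to process, running the queue's
 * prep_rq_fn on it first if one is set. Empty barrier placeholders are
 * completed here and never handed to the driver.
 */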
struct request *elv_next_request(struct request_queue *q)
{
	struct request *rq;
	int ret;

	while ((rq = __elv_next_request(q)) != NULL) {
		/*
		 * Kill the empty barrier placeholder, the driver must
		 * not ever see it.
		 */
		if (blk_empty_barrier(rq)) {
			end_queued_request(rq, 1);
			continue;
		}
		if (!(rq->cmd_flags & REQ_STARTED)) {
			/*
			 * This is the first time the device driver
			 * sees this request (possibly after
			 * requeueing).  Notify IO scheduler.
			 */
			if (blk_sorted_rq(rq))
				elv_activate_rq(q, rq);

			/*
			 * just mark as started even if we don't start
			 * it, a request that has been delayed should
			 * not be passed by new incoming requests
			 */
			rq->cmd_flags |= REQ_STARTED;
			blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
		}

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		if (rq->cmd_flags & REQ_DONTPREP)
			break;

		if (q->dma_drain_size && rq->data_len) {
			/*
			 * make sure space for the drain appears.  we
			 * know we can do this because max_hw_segments
			 * has been adjusted to be one fewer than the
			 * device can handle
			 */
			rq->nr_phys_segments++;
			rq->nr_hw_segments++;
		}

		if (!q->prep_rq_fn)
			break;

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			/*
			 * the request may have been (partially) prepped.
			 * we need to keep this request in the front to
			 * avoid resource deadlock.  REQ_STARTED will
			 * prevent other fs requests from passing this one.
			 */
			if (q->dma_drain_size && rq->data_len &&
			    !(rq->cmd_flags & REQ_DONTPREP)) {
				/*
				 * remove the space for the drain we added
				 * so that we don't add it again
				 */
				--rq->nr_phys_segments;
				--rq->nr_hw_segments;
			}

			rq = NULL;
			break;
		} else if (ret == BLKPREP_KILL) {
			rq->cmd_flags |= REQ_QUIET;
			end_queued_request(rq, 0);
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
			break;
		}
	}

	return rq;
}
EXPORT_SYMBOL(elv_next_request);

void elv_dequeue_request(struct request_queue *q, struct request *rq)
{
	BUG_ON(list_empty(&rq->queuelist));
	BUG_ON(ELV_ON_HASH(rq));

	list_del_init(&rq->queuelist);

	/*
	 * the time frame between a request being removed from the lists
	 * and when it is freed is accounted as io that is in progress at
	 * the driver side.
	 */
	if (blk_account_rq(rq))
		q->in_flight++;
}
EXPORT_SYMBOL(elv_dequeue_request);

int elv_queue_empty(struct request_queue *q)
{
	elevator_t *e = q->elevator;

	if (!list_empty(&q->queue_head))
		return 0;

	if (e->ops->elevator_queue_empty_fn)
		return e->ops->elevator_queue_empty_fn(q);

	return 1;
}
EXPORT_SYMBOL(elv_queue_empty);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

	rq->elevator_private = NULL;
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_may_queue_fn)
		return e->ops->elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}
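/*
 * Called on request completion: drop the in_flight count, notify the io
 * scheduler, and kick the queue if a barrier sequence was waiting for
 * in-flight fs requests to drain.
 */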
void elv_completed_request(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}

	/*
	 * Check if the queue is waiting for fs requests to be
	 * drained for flush sequence.
	 */
	if (unlikely(q->ordseq)) {
		struct request *first_rq = list_entry_rq(q->queue_head.next);
		if (q->in_flight == 0 &&
		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
		    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
			q->request_fn(q);
		}
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct elv_fs_entry *entry = to_elv(attr);
	ssize_t error;

	if (!entry->show)
		return -EIO;

	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct elv_fs_entry *entry = to_elv(attr);
	ssize_t error;

	if (!entry->store)
		return -EIO;

	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};
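/*
 * Register the "iosched" kobject below the queue in sysfs and create a
 * file for each attribute the active io scheduler exports.
 */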
int elv_register_queue(struct request_queue *q)
{
	elevator_t *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
	}
	return error;
}

static void __elv_unregister_queue(elevator_t *e)
{
	kobject_uevent(&e->kobj, KOBJ_REMOVE);
	kobject_del(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
	if (q)
		__elv_unregister_queue(q->elevator);
}

void elv_register(struct elevator_type *e)
{
	char *def = "";

	spin_lock(&elv_list_lock);
	BUG_ON(elevator_find(e->elevator_name));
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	struct task_struct *g, *p;

	/*
	 * Iterate every thread in the process to remove the io contexts.
	 */
	if (e->ops.trim) {
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (p->io_context)
				e->ops.trim(p->io_context);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	elevator_t *old_elevator, *e;
	void *data;

	/*
	 * Allocate new elevator
	 */
	e = elevator_alloc(q, new_e);
	if (!e)
		return 0;

	data = elevator_init_queue(q, e);
	if (!data) {
		kobject_put(&e->kobj);
		return 0;
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);

	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);

	elv_drain_elevator(q);

	while (q->rq.elvpriv) {
		blk_remove_plug(q);
		q->request_fn(q);
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
		elv_drain_elevator(q);
	}

	/*
	 * Remember old elevator.
	 */
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	elevator_attach(q, e, data);

	spin_unlock_irq(q->queue_lock);

	__elv_unregister_queue(old_elevator);

	if (elv_register_queue(q))
		goto fail_register;

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);

	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);

	return 1;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	q->elevator = old_elevator;
	elv_register_queue(q);

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);

	return 0;
}

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	size_t len;
	struct elevator_type *e;

	elevator_name[sizeof(elevator_name) - 1] = '\0';
	strncpy(elevator_name, name, sizeof(elevator_name) - 1);
	len = strlen(elevator_name);

	if (len && elevator_name[len - 1] == '\n')
		elevator_name[len - 1] = '\0';

	e = elevator_get(elevator_name);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		elevator_put(e);
		return count;
	}

	if (!elevator_switch(q, e))
		printk(KERN_ERR "elevator: switch to %s failed\n",
							elevator_name);
	return count;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	elevator_t *e = q->elevator;
	struct elevator_type *elv = e->elevator_type;
	struct elevator_type *__e;
	int len = 0;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(len+name, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);