13dcf60bcSChristoph Hellwig // SPDX-License-Identifier: GPL-2.0
23a65dfe8SJens Axboe /*
33a65dfe8SJens Axboe * Block device elevator/IO-scheduler.
43a65dfe8SJens Axboe *
53a65dfe8SJens Axboe * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
63a65dfe8SJens Axboe *
70fe23479SJens Axboe * 30042000 Jens Axboe <axboe@kernel.dk> :
83a65dfe8SJens Axboe *
93a65dfe8SJens Axboe * Split the elevator a bit so that it is possible to choose a different
103a65dfe8SJens Axboe * one or even write a new "plug in". There are three pieces:
113a65dfe8SJens Axboe * - elevator_fn, inserts a new request in the queue list
123a65dfe8SJens Axboe * - elevator_merge_fn, decides whether a new buffer can be merged with
133a65dfe8SJens Axboe * an existing request
143a65dfe8SJens Axboe * - elevator_dequeue_fn, called when a request is taken off the active list
153a65dfe8SJens Axboe *
163a65dfe8SJens Axboe * 20082000 Dave Jones <davej@suse.de> :
173a65dfe8SJens Axboe * Removed tests for max-bomb-segments, which was breaking elvtune
183a65dfe8SJens Axboe * when run without -bN
193a65dfe8SJens Axboe *
203a65dfe8SJens Axboe * Jens:
213a65dfe8SJens Axboe * - Rework again to work with bio instead of buffer_heads
223a65dfe8SJens Axboe * - loose bi_dev comparisons, partition handling is right now
233a65dfe8SJens Axboe * - completely modularize elevator setup and teardown
243a65dfe8SJens Axboe *
253a65dfe8SJens Axboe */
263a65dfe8SJens Axboe #include <linux/kernel.h>
273a65dfe8SJens Axboe #include <linux/fs.h>
283a65dfe8SJens Axboe #include <linux/blkdev.h>
293a65dfe8SJens Axboe #include <linux/bio.h>
303a65dfe8SJens Axboe #include <linux/module.h>
313a65dfe8SJens Axboe #include <linux/slab.h>
323a65dfe8SJens Axboe #include <linux/init.h>
333a65dfe8SJens Axboe #include <linux/compiler.h>
342056a782SJens Axboe #include <linux/blktrace_api.h>
359817064bSJens Axboe #include <linux/hash.h>
360835da67SJens Axboe #include <linux/uaccess.h>
37c8158819SLin Ming #include <linux/pm_runtime.h>
383a65dfe8SJens Axboe
3955782138SLi Zefan #include <trace/events/block.h>
4055782138SLi Zefan
412e9bc346SChristoph Hellwig #include "elevator.h"
42242f9dcbSJens Axboe #include "blk.h"
43bd166ef1SJens Axboe #include "blk-mq-sched.h"
44bca6b067SBart Van Assche #include "blk-pm.h"
458330cdb0SJan Kara #include "blk-wbt.h"
46672fdcf0SMing Lei #include "blk-cgroup.h"
47242f9dcbSJens Axboe
483a65dfe8SJens Axboe static DEFINE_SPINLOCK(elv_list_lock);
493a65dfe8SJens Axboe static LIST_HEAD(elv_list);
503a65dfe8SJens Axboe
513a65dfe8SJens Axboe /*
529817064bSJens Axboe * Merge hash stuff.
539817064bSJens Axboe */
5483096ebfSTejun Heo #define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
559817064bSJens Axboe
569817064bSJens Axboe /*
57da775265SJens Axboe * Query io scheduler to see if the current process issuing bio may be
58da775265SJens Axboe * merged with rq.
59da775265SJens Axboe */
elv_iosched_allow_bio_merge(struct request * rq,struct bio * bio)608d283ee6SJinlong Chen static bool elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
61da775265SJens Axboe {
62165125e1SJens Axboe struct request_queue *q = rq->q;
63b374d18aSJens Axboe struct elevator_queue *e = q->elevator;
64da775265SJens Axboe
65f9cd4bfeSJens Axboe if (e->type->ops.allow_merge)
66f9cd4bfeSJens Axboe return e->type->ops.allow_merge(q, rq, bio);
67da775265SJens Axboe
688d283ee6SJinlong Chen return true;
69da775265SJens Axboe }
70da775265SJens Axboe
71da775265SJens Axboe /*
723a65dfe8SJens Axboe * can we safely merge with this request?
733a65dfe8SJens Axboe */
elv_bio_merge_ok(struct request * rq,struct bio * bio)7472ef799bSTahsin Erdogan bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
753a65dfe8SJens Axboe {
76050c8ea8STejun Heo if (!blk_rq_merge_ok(rq, bio))
7772ef799bSTahsin Erdogan return false;
787ba1ba12SMartin K. Petersen
7972ef799bSTahsin Erdogan if (!elv_iosched_allow_bio_merge(rq, bio))
8072ef799bSTahsin Erdogan return false;
81da775265SJens Axboe
8272ef799bSTahsin Erdogan return true;
833a65dfe8SJens Axboe }
8472ef799bSTahsin Erdogan EXPORT_SYMBOL(elv_bio_merge_ok);
853a65dfe8SJens Axboe
/*
 * True if scheduler type @e provides every feature bit that queue @q
 * requires (trivially true when @q requires no features).
 */
static inline bool elv_support_features(struct request_queue *q,
		const struct elevator_type *e)
{
	return (q->required_elevator_features & e->elevator_features) ==
		q->required_elevator_features;
}
9268c43f13SDamien Le Moal
9368c43f13SDamien Le Moal /**
94f69b5e8fSJinlong Chen * elevator_match - Check whether @e's name or alias matches @name
9568c43f13SDamien Le Moal * @e: Scheduler to test
9668c43f13SDamien Le Moal * @name: Elevator name to test
9768c43f13SDamien Le Moal *
98f69b5e8fSJinlong Chen * Return true if the elevator @e's name or alias matches @name.
9968c43f13SDamien Le Moal */
elevator_match(const struct elevator_type * e,const char * name)100ffb86425SChristoph Hellwig static bool elevator_match(const struct elevator_type *e, const char *name)
10168c43f13SDamien Le Moal {
102ffb86425SChristoph Hellwig return !strcmp(e->elevator_name, name) ||
103ffb86425SChristoph Hellwig (e->elevator_alias && !strcmp(e->elevator_alias, name));
1048ac0d9a8SJens Axboe }
1058ac0d9a8SJens Axboe
/*
 * Look up a registered scheduler type by name (or alias).
 * Caller must hold elv_list_lock; no reference is taken on the
 * returned type.
 */
static struct elevator_type *__elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list)
		if (elevator_match(e, name))
			return e;
	return NULL;
}
115a22b169dSVasily Tarasov
/*
 * Find a scheduler by name that is usable on @q and take a reference
 * on it.  Returns NULL if no such scheduler is registered, if it does
 * not provide the features @q requires, or if its module reference
 * could not be obtained.
 */
static struct elevator_type *elevator_find_get(struct request_queue *q,
		const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);
	e = __elevator_find(name);
	/* only hand out a type we could actually pin */
	if (e && (!elv_support_features(q, e) || !elevator_tryget(e)))
		e = NULL;
	spin_unlock(&elv_list_lock);
	return e;
}
1283a65dfe8SJens Axboe
1295f622417SThomas Weißschuh static const struct kobj_type elv_ktype;
1303d1ab40fSAl Viro
/*
 * Allocate and initialize an elevator_queue for @q bound to scheduler
 * type @e.  Takes a reference on @e (dropped in elevator_release()).
 * Returns NULL on allocation failure.
 */
struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;

	/* allocate on the queue's home NUMA node */
	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
	if (unlikely(!eq))
		return NULL;

	__elevator_get(e);
	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);

	return eq;
}
EXPORT_SYMBOL(elevator_alloc);
1493d1ab40fSAl Viro
/*
 * kobject release callback: drops the scheduler type reference taken
 * in elevator_alloc() and frees the elevator_queue.
 */
static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}
1583d1ab40fSAl Viro
/*
 * Tear down the I/O scheduler attached to @q: clear per-queue icqs and
 * scheduler-owned requests first, then let the scheduler exit under its
 * sysfs lock.  The final kobject_put() may free the elevator_queue via
 * elevator_release().
 */
void elevator_exit(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	ioc_clear_queue(q);
	blk_mq_sched_free_rqs(q);

	mutex_lock(&e->sysfs_lock);
	blk_mq_exit_sched(q, e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
1722e662b65SJens Axboe
/* Remove @rq from the merge hash and clear its hashed flag. */
static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
	rq->rq_flags &= ~RQF_HASHED;
}
1789817064bSJens Axboe
/* Remove @rq from the merge hash if it is currently hashed. */
void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);
1859817064bSJens Axboe
/*
 * Insert @rq into the merge hash, keyed by its end sector so that a
 * later bio starting there can be back-merged.  @rq must not already
 * be hashed.
 */
void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);
1959817064bSJens Axboe
/* Re-hash @rq after its end sector changed (e.g. after a back merge). */
void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}
2019817064bSJens Axboe
/*
 * Find a hashed request ending exactly at sector @offset (a back-merge
 * candidate).  Requests that became unmergeable are lazily dropped from
 * the hash while scanning; the _safe iterator permits that deletion.
 * Returns NULL if no candidate exists.
 */
struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			/* prune stale entries as we go */
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}
2229817064bSJens Axboe
2233a65dfe8SJens Axboe /*
2242e662b65SJens Axboe * RB-tree support functions for inserting/lookup/removal of requests
2252e662b65SJens Axboe * in a sorted RB tree.
2262e662b65SJens Axboe */
/*
 * Insert @rq into the sector-sorted RB tree at @root.
 * Note the >= in the descent: requests with equal start sectors go to
 * the right, preserving insertion order among equal keys.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);
2472e662b65SJens Axboe
/* Remove @rq from the RB tree and mark its node empty. */
void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);
2552e662b65SJens Axboe
/*
 * Look up the request starting at @sector in the sector-sorted RB tree
 * at @root.  Returns NULL if no request starts exactly there.
 */
struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct request *rq = rb_entry(node, struct request, rb_node);
		sector_t pos = blk_rq_pos(rq);

		if (sector == pos)
			return rq;
		node = sector < pos ? node->rb_left : node->rb_right;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
2752e662b65SJens Axboe
/*
 * Decide whether @bio can be merged into an existing request on @q.
 * On success *@req is set to the merge target and the merge direction
 * is returned; otherwise ELEVATOR_NO_MERGE.
 */
enum elv_merge elv_merge(struct request_queue *q, struct request **req,
		struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;

	/*
	 * Levels of merges:
	 * 	nomerges:  No merges at all attempted
	 * 	noxmerges: Only simple one-hit cache try
	 * 	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
		enum elv_merge ret = blk_try_merge(q->last_merge, bio);

		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
	if (__rq && elv_bio_merge_ok(__rq, bio)) {
		*req = __rq;

		/* discards merge differently from regular back merges */
		if (blk_discard_mergable(__rq))
			return ELEVATOR_DISCARD_MERGE;
		return ELEVATOR_BACK_MERGE;
	}

	/* finally let the scheduler look for a (front) merge candidate */
	if (e->type->ops.request_merge)
		return e->type->ops.request_merge(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
3233a65dfe8SJens Axboe
3245e84ea3aSJens Axboe /*
3255e84ea3aSJens Axboe * Attempt to do an insertion back merge. Only check for the case where
3265e84ea3aSJens Axboe * we can append 'rq' to an existing request, so we can throw 'rq' away
3275e84ea3aSJens Axboe * afterwards.
3285e84ea3aSJens Axboe *
329fd2ef39cSJan Kara * Returns true if we merged, false otherwise. 'free' will contain all
330fd2ef39cSJan Kara * requests that need to be freed.
3315e84ea3aSJens Axboe */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
			      struct list_head *free)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
		/* rq was appended to last_merge; hand it back for freeing */
		list_add(&rq->queuelist, free);
		return true;
	}

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		list_add(&rq->queuelist, free);
		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}
3695e84ea3aSJens Axboe
/*
 * Notify the scheduler that @rq grew by a merge of @type, and re-hash
 * it on a back merge since its end sector (the hash key) changed.
 */
void elv_merged_request(struct request_queue *q, struct request *rq,
		enum elv_merge type)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.request_merged)
		e->type->ops.request_merged(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	/* remember the target for the one-hit merge cache */
	q->last_merge = rq;
}
3833a65dfe8SJens Axboe
/*
 * Notify the scheduler that request @next was merged into @rq, and
 * re-hash @rq since its end sector changed.
 */
void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.requests_merged)
		e->type->ops.requests_merged(q, rq, next);

	elv_rqhash_reposition(q, rq);
	q->last_merge = rq;
}
3953a65dfe8SJens Axboe
elv_latter_request(struct request_queue * q,struct request * rq)396165125e1SJens Axboe struct request *elv_latter_request(struct request_queue *q, struct request *rq)
3973a65dfe8SJens Axboe {
398b374d18aSJens Axboe struct elevator_queue *e = q->elevator;
3993a65dfe8SJens Axboe
400f9cd4bfeSJens Axboe if (e->type->ops.next_request)
401f9cd4bfeSJens Axboe return e->type->ops.next_request(q, rq);
402bd166ef1SJens Axboe
4033a65dfe8SJens Axboe return NULL;
4043a65dfe8SJens Axboe }
4053a65dfe8SJens Axboe
elv_former_request(struct request_queue * q,struct request * rq)406165125e1SJens Axboe struct request *elv_former_request(struct request_queue *q, struct request *rq)
4073a65dfe8SJens Axboe {
408b374d18aSJens Axboe struct elevator_queue *e = q->elevator;
4093a65dfe8SJens Axboe
410f9cd4bfeSJens Axboe if (e->type->ops.former_request)
411f9cd4bfeSJens Axboe return e->type->ops.former_request(q, rq);
412a1ce35faSJens Axboe
4133a65dfe8SJens Axboe return NULL;
4143a65dfe8SJens Axboe }
4153a65dfe8SJens Axboe
4163d1ab40fSAl Viro #define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
4173d1ab40fSAl Viro
/*
 * sysfs show dispatcher for iosched attributes: routes to the
 * attribute's ->show under the elevator sysfs mutex.  e->type is
 * checked in case the elevator is being torn down concurrently.
 */
static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}
4343d1ab40fSAl Viro
/*
 * sysfs store dispatcher for iosched attributes: mirror of
 * elv_attr_show() for writes.
 */
static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}
4523d1ab40fSAl Viro
/* Route all iosched sysfs attribute access through the locked helpers. */
static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};
4573d1ab40fSAl Viro
/* kobject type for elevator_queue; release frees the queue. */
static const struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};
4623d1ab40fSAl Viro
/*
 * Register the queue's elevator kobject under <disk>/queue/iosched and
 * create its scheduler-specific attribute files.  Emits a KOBJ_ADD
 * uevent when @uevent is true.  Returns 0 or a kobject_add() error.
 */
int elv_register_queue(struct request_queue *q, bool uevent)
{
	struct elevator_queue *e = q->elevator;
	int error;

	lockdep_assert_held(&q->sysfs_lock);

	error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			/* attrs array is terminated by a NULL name */
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		if (uevent)
			kobject_uevent(&e->kobj, KOBJ_ADD);

		set_bit(ELEVATOR_FLAG_REGISTERED, &e->flags);
	}
	return error;
}
4873a65dfe8SJens Axboe
/*
 * Undo elv_register_queue(): remove the iosched kobject from sysfs.
 * Safe to call when no elevator is attached or it was never
 * registered (the REGISTERED flag test makes this idempotent).
 */
void elv_unregister_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	lockdep_assert_held(&q->sysfs_lock);

	if (e && test_and_clear_bit(ELEVATOR_FLAG_REGISTERED, &e->flags)) {
		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
	}
}
4993a65dfe8SJens Axboe
/*
 * Register a new I/O scheduler type.  Validates the mandatory ops,
 * optionally creates a per-scheduler icq slab cache, and adds the type
 * to the global list, rejecting duplicate names.
 * Returns 0, -EINVAL, -ENOMEM, or -EBUSY on a name clash.
 */
int elv_register(struct elevator_type *e)
{
	/* finish request is mandatory */
	if (WARN_ON_ONCE(!e->ops.finish_request))
		return -EINVAL;
	/* insert_requests and dispatch_request are mandatory */
	if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
		return -EINVAL;

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (__elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		/* kmem_cache_destroy(NULL) is a no-op, so this is safe */
		kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);

	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
5383a65dfe8SJens Axboe
/*
 * Unregister an I/O scheduler type and, if it had one, destroy its icq
 * slab cache after all outstanding RCU callbacks have run.
 */
void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);
5573a65dfe8SJens Axboe
elv_support_iosched(struct request_queue * q)55861db437dSDamien Le Moal static inline bool elv_support_iosched(struct request_queue *q)
55961db437dSDamien Le Moal {
5606251b754SYufen Yu if (!queue_is_mq(q) ||
561*f49a9d86SSurajSonawane2415 (q->tag_set->flags & BLK_MQ_F_NO_SCHED))
56261db437dSDamien Le Moal return false;
56361db437dSDamien Le Moal return true;
56461db437dSDamien Le Moal }
56561db437dSDamien Le Moal
5663a65dfe8SJens Axboe /*
567a0958ba7SDamien Le Moal * For single queue devices, default to using mq-deadline. If we have multiple
568a0958ba7SDamien Le Moal * queues or mq-deadline is not available, default to "none".
569a0958ba7SDamien Le Moal */
static struct elevator_type *elevator_get_default(struct request_queue *q)
{
	/* the driver explicitly asked for no default scheduler */
	if (q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
		return NULL;

	/* multiqueue without shared tags defaults to "none" */
	if (q->nr_hw_queues != 1 &&
	    !blk_mq_is_shared_tags(q->tag_set->flags))
		return NULL;

	/* takes a reference; caller must elevator_put() */
	return elevator_find_get(q, "mq-deadline");
}
581a0958ba7SDamien Le Moal
582a0958ba7SDamien Le Moal /*
583a0958ba7SDamien Le Moal * Get the first elevator providing the features required by the request queue.
584a0958ba7SDamien Le Moal * Default to "none" if no matching elevator is found.
585a0958ba7SDamien Le Moal */
static struct elevator_type *elevator_get_by_features(struct request_queue *q)
{
	struct elevator_type *e, *found = NULL;

	spin_lock(&elv_list_lock);

	/* pick the first registered scheduler satisfying q's features */
	list_for_each_entry(e, &elv_list, list) {
		if (elv_support_features(q, e)) {
			found = e;
			break;
		}
	}

	/* pin the module before dropping the list lock */
	if (found && !elevator_tryget(found))
		found = NULL;

	spin_unlock(&elv_list_lock);
	return found;
}
605a0958ba7SDamien Le Moal
606a0958ba7SDamien Le Moal /*
607a0958ba7SDamien Le Moal * For a device queue that has no required features, use the default elevator
608a0958ba7SDamien Le Moal * settings. Otherwise, use the first elevator available matching the required
609a0958ba7SDamien Le Moal * features. If no suitable elevator is find or if the chosen elevator
610a0958ba7SDamien Le Moal * initialization fails, fall back to the "none" elevator (no elevator).
611131d08e1SChristoph Hellwig */
/*
 * Pick and initialize the default I/O scheduler for @q at device setup
 * time (before the disk is added).  On any failure the queue simply
 * stays on "none".
 */
void elevator_init_mq(struct request_queue *q)
{
	struct elevator_type *e;
	int err;

	if (!elv_support_iosched(q))
		return;

	/* must run before the queue is exposed through sysfs */
	WARN_ON_ONCE(blk_queue_registered(q));

	if (unlikely(q->elevator))
		return;

	if (!q->required_elevator_features)
		e = elevator_get_default(q);
	else
		e = elevator_get_by_features(q);
	if (!e)
		return;

	/*
	 * We are called before adding disk, when there isn't any FS I/O,
	 * so freezing queue plus canceling dispatch work is enough to
	 * drain any dispatch activities originated from passthrough
	 * requests, then no need to quiesce queue which may add long boot
	 * latency, especially when lots of disks are involved.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_cancel_work_sync(q);

	err = blk_mq_init_sched(q, e);

	blk_mq_unfreeze_queue(q);

	if (err) {
		pr_warn("\"%s\" elevator initialization failed, "
			"falling back to \"none\"\n", e->elevator_name);
	}

	/* drop the reference taken by elevator_get_*() */
	elevator_put(e);
}
653131d08e1SChristoph Hellwig
/*
 * Switch to new_e io scheduler.
 *
 * If switching fails, we are most likely running out of memory and not able
 * to restore the old io scheduler, so we leave the io scheduler set to none.
 */
int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	int ret;

	/* Caller must serialize scheduler changes via q->sysfs_lock. */
	lockdep_assert_held(&q->sysfs_lock);

	/*
	 * Freeze and quiesce the queue so no new or in-flight requests can
	 * observe the scheduler being torn down and rebuilt.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	/* Tear down the current scheduler, if any, before installing new_e. */
	if (q->elevator) {
		elv_unregister_queue(q);
		elevator_exit(q);
	}

	ret = blk_mq_init_sched(q, new_e);
	if (ret)
		goto out_unfreeze;

	ret = elv_register_queue(q, true);
	if (ret) {
		/* sysfs registration failed: undo the scheduler init. */
		elevator_exit(q);
		goto out_unfreeze;
	}
	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

out_unfreeze:
	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	/*
	 * On failure the old scheduler was already destroyed above, so the
	 * queue is left running with no scheduler ("none").
	 */
	if (ret) {
		pr_warn("elv: switch to \"%s\" failed, falling back to \"none\"\n",
			new_e->elevator_name);
	}

	return ret;
}
69664b36075SChristoph Hellwig
/*
 * Remove the current I/O scheduler from @q, leaving the queue running
 * with no scheduler ("none").
 */
void elevator_disable(struct request_queue *q)
{
	/* Caller must serialize scheduler changes via q->sysfs_lock. */
	lockdep_assert_held(&q->sysfs_lock);

	/* Block new and in-flight I/O while the scheduler is torn down. */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	elv_unregister_queue(q);
	elevator_exit(q);
	blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
	q->elevator = NULL;
	/* Without a scheduler, queue depth reverts to the tag set's depth. */
	q->nr_requests = q->tag_set->queue_depth;
	blk_add_trace_msg(q, "elv switch: none");

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);
}
714bd166ef1SJens Axboe
7155a5bafdcSTejun Heo /*
7165dd531a0SJens Axboe * Switch this queue to the given IO scheduler.
7175dd531a0SJens Axboe */
elevator_change(struct request_queue * q,const char * elevator_name)71858367c8aSChristoph Hellwig static int elevator_change(struct request_queue *q, const char *elevator_name)
7193a65dfe8SJens Axboe {
7203a65dfe8SJens Axboe struct elevator_type *e;
7218ed40ee3SJinlong Chen int ret;
7223a65dfe8SJens Axboe
723e9a823fbSDavid Jeffery /* Make sure queue is not in the middle of being removed */
72458c898baSMing Lei if (!blk_queue_registered(q))
725e9a823fbSDavid Jeffery return -ENOENT;
726e9a823fbSDavid Jeffery
72758367c8aSChristoph Hellwig if (!strncmp(elevator_name, "none", 4)) {
72864b36075SChristoph Hellwig if (q->elevator)
72964b36075SChristoph Hellwig elevator_disable(q);
730fbd72127SAleksei Zakharov return 0;
731fbd72127SAleksei Zakharov }
732cd43e26fSMartin K. Petersen
733ffb86425SChristoph Hellwig if (q->elevator && elevator_match(q->elevator->type, elevator_name))
734b54c2ad9SJinlong Chen return 0;
735b54c2ad9SJinlong Chen
73681eaca44SChristoph Hellwig e = elevator_find_get(q, elevator_name);
73781eaca44SChristoph Hellwig if (!e) {
73881eaca44SChristoph Hellwig request_module("%s-iosched", elevator_name);
73981eaca44SChristoph Hellwig e = elevator_find_get(q, elevator_name);
740340ff321SJens Axboe if (!e)
7413a65dfe8SJens Axboe return -EINVAL;
74281eaca44SChristoph Hellwig }
7438ed40ee3SJinlong Chen ret = elevator_switch(q, e);
7448ed40ee3SJinlong Chen elevator_put(e);
7458ed40ee3SJinlong Chen return ret;
7465dd531a0SJens Axboe }
7477c8a3679STomoki Sekiyama
elv_iosched_store(struct request_queue * q,const char * buf,size_t count)74858367c8aSChristoph Hellwig ssize_t elv_iosched_store(struct request_queue *q, const char *buf,
7495dd531a0SJens Axboe size_t count)
7505dd531a0SJens Axboe {
75158367c8aSChristoph Hellwig char elevator_name[ELV_NAME_MAX];
7525dd531a0SJens Axboe int ret;
7535dd531a0SJens Axboe
7546251b754SYufen Yu if (!elv_support_iosched(q))
7553a65dfe8SJens Axboe return count;
7565dd531a0SJens Axboe
75720d09975SAzeem Shaikh strscpy(elevator_name, buf, sizeof(elevator_name));
75858367c8aSChristoph Hellwig ret = elevator_change(q, strstrip(elevator_name));
7595dd531a0SJens Axboe if (!ret)
7605dd531a0SJens Axboe return count;
7615dd531a0SJens Axboe return ret;
7623a65dfe8SJens Axboe }
7633a65dfe8SJens Axboe
elv_iosched_show(struct request_queue * q,char * name)764165125e1SJens Axboe ssize_t elv_iosched_show(struct request_queue *q, char *name)
7653a65dfe8SJens Axboe {
76616095af2SChristoph Hellwig struct elevator_queue *eq = q->elevator;
76716095af2SChristoph Hellwig struct elevator_type *cur = NULL, *e;
7683a65dfe8SJens Axboe int len = 0;
7693a65dfe8SJens Axboe
770aae2a643SChristoph Hellwig if (!elv_support_iosched(q))
771cd43e26fSMartin K. Petersen return sprintf(name, "none\n");
772cd43e26fSMartin K. Petersen
7737919d679SJinlong Chen if (!q->elevator) {
774bd166ef1SJens Axboe len += sprintf(name+len, "[none] ");
7757919d679SJinlong Chen } else {
7767919d679SJinlong Chen len += sprintf(name+len, "none ");
77716095af2SChristoph Hellwig cur = eq->type;
7787919d679SJinlong Chen }
779cd43e26fSMartin K. Petersen
7802a12dcd7SJens Axboe spin_lock(&elv_list_lock);
78116095af2SChristoph Hellwig list_for_each_entry(e, &elv_list, list) {
7825998249eSJinlong Chen if (e == cur)
7837a3b3660SJinlong Chen len += sprintf(name+len, "[%s] ", e->elevator_name);
7845998249eSJinlong Chen else if (elv_support_features(q, e))
78516095af2SChristoph Hellwig len += sprintf(name+len, "%s ", e->elevator_name);
7863a65dfe8SJens Axboe }
7872a12dcd7SJens Axboe spin_unlock(&elv_list_lock);
7883a65dfe8SJens Axboe
789c6451edeSJinlong Chen len += sprintf(name+len, "\n");
7903a65dfe8SJens Axboe return len;
7913a65dfe8SJens Axboe }
7923a65dfe8SJens Axboe
elv_rb_former_request(struct request_queue * q,struct request * rq)793165125e1SJens Axboe struct request *elv_rb_former_request(struct request_queue *q,
794165125e1SJens Axboe struct request *rq)
7952e662b65SJens Axboe {
7962e662b65SJens Axboe struct rb_node *rbprev = rb_prev(&rq->rb_node);
7972e662b65SJens Axboe
7982e662b65SJens Axboe if (rbprev)
7992e662b65SJens Axboe return rb_entry_rq(rbprev);
8002e662b65SJens Axboe
8012e662b65SJens Axboe return NULL;
8022e662b65SJens Axboe }
8032e662b65SJens Axboe EXPORT_SYMBOL(elv_rb_former_request);
8042e662b65SJens Axboe
elv_rb_latter_request(struct request_queue * q,struct request * rq)805165125e1SJens Axboe struct request *elv_rb_latter_request(struct request_queue *q,
806165125e1SJens Axboe struct request *rq)
8072e662b65SJens Axboe {
8082e662b65SJens Axboe struct rb_node *rbnext = rb_next(&rq->rb_node);
8092e662b65SJens Axboe
8102e662b65SJens Axboe if (rbnext)
8112e662b65SJens Axboe return rb_entry_rq(rbnext);
8122e662b65SJens Axboe
8132e662b65SJens Axboe return NULL;
8142e662b65SJens Axboe }
8152e662b65SJens Axboe EXPORT_SYMBOL(elv_rb_latter_request);
816f8db3835SJan Kara
/*
 * Legacy "elevator=" boot parameter. It no longer selects a scheduler;
 * this stub only warns the user so old command lines don't silently
 * misbehave or trigger an unknown-parameter complaint.
 */
static int __init elevator_setup(char *str)
{
	pr_warn("Kernel parameter elevator= does not have any effect anymore.\n"
		"Please use sysfs to set IO scheduler for individual devices.\n");
	/* Non-zero tells the boot parameter parser the option was consumed. */
	return 1;
}

__setup("elevator=", elevator_setup);
825