// SPDX-License-Identifier: GPL-2.0
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>

#include <trace/events/block.h>

#include "elevator.h"
#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
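
/*
 * Worked example (illustrative): the hash key is the first sector past
 * the end of the request, so a bio whose bi_iter.bi_sector equals
 * rq_hash_key(rq) is a back-merge candidate. A request starting at
 * sector 100 and spanning 8 sectors hashes at key 108; a new bio
 * beginning at sector 108 can be appended to it.
 */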

/*
 * Query the io scheduler to see if the bio being issued by the current
 * process may be merged with rq.
 */
static bool elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.allow_merge)
		return e->type->ops.allow_merge(q, rq, bio);

	return true;
}

/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	if (!elv_iosched_allow_bio_merge(rq, bio))
		return false;

	return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);

static inline bool elv_support_features(struct request_queue *q,
		const struct elevator_type *e)
{
	return (q->required_elevator_features & e->elevator_features) ==
		q->required_elevator_features;
}
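
/*
 * Illustrative example (assumed configuration): a host-managed zoned
 * disk may set q->required_elevator_features to
 * ELEVATOR_F_ZBD_SEQ_WRITE, in which case only schedulers advertising
 * that bit in e->elevator_features (e.g. mq-deadline) pass this check.
 * Every required bit must be supported; extra supported bits are fine.
 */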

/**
 * elevator_match - Check whether @e's name or alias matches @name
 * @e: Scheduler to test
 * @name: Elevator name to test
 *
 * Return true if the elevator @e's name or alias matches @name.
 */
static bool elevator_match(const struct elevator_type *e, const char *name)
{
	return !strcmp(e->elevator_name, name) ||
		(e->elevator_alias && !strcmp(e->elevator_alias, name));
}

static struct elevator_type *__elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list)
		if (elevator_match(e, name))
			return e;
	return NULL;
}

static struct elevator_type *elevator_find_get(struct request_queue *q,
		const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);
	e = __elevator_find(name);
	if (e && (!elv_support_features(q, e) || !elevator_tryget(e)))
		e = NULL;
	spin_unlock(&elv_list_lock);
	return e;
}

static const struct kobj_type elv_ktype;

struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;

	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
	if (unlikely(!eq))
		return NULL;

	__elevator_get(e);
	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);

	return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}

void elevator_exit(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	ioc_clear_queue(q);
	blk_mq_sched_free_rqs(q);

	mutex_lock(&e->sysfs_lock);
	blk_mq_exit_sched(q, e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}

static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
	rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
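
/*
 * Minimal usage sketch for the RB helpers above (illustrative,
 * compiled out; "sort_list" stands in for a scheduler-private rb_root
 * such as the one mq-deadline keeps):
 */
#if 0
static void example_add_sorted(struct rb_root *sort_list, struct request *rq)
{
	/* keyed by blk_rq_pos(); equal keys go to the right subtree */
	elv_rb_add(sort_list, rq);
}

static struct request *example_find_sorted(struct rb_root *sort_list,
					   sector_t sector)
{
	/* exact match on a request's start sector, or NULL */
	return elv_rb_find(sort_list, sector);
}
#endif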

enum elv_merge elv_merge(struct request_queue *q, struct request **req,
		struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;

	/*
	 * Levels of merges:
	 * 	nomerges:  No merges at all attempted
	 * 	noxmerges: Only simple one-hit cache try
	 * 	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
		enum elv_merge ret = blk_try_merge(q->last_merge, bio);

		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
	if (__rq && elv_bio_merge_ok(__rq, bio)) {
		*req = __rq;

		if (blk_discard_mergable(__rq))
			return ELEVATOR_DISCARD_MERGE;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->type->ops.request_merge)
		return e->type->ops.request_merge(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
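
/*
 * Caller sketch (illustrative, compiled out; the real caller lives in
 * blk-mq-sched.c): elv_merge() reports how @bio may be merged and
 * points *req at the request to merge into.
 */
#if 0
static void example_handle_merge(struct request_queue *q, struct bio *bio)
{
	struct request *rq = NULL;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		/* append bio's data to rq, then reposition rq in the hash */
		break;
	case ELEVATOR_FRONT_MERGE:
		/* prepend bio's data to rq */
		break;
	case ELEVATOR_DISCARD_MERGE:
		/* append to a discard request; adjacency is not required */
		break;
	case ELEVATOR_NO_MERGE:
		/* allocate a new request for bio instead */
		break;
	}
}
#endif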

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise. 'free' will contain all
 * requests that need to be freed.
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
			      struct list_head *free)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
		list_add(&rq->queuelist, free);
		return true;
	}

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		list_add(&rq->queuelist, free);
		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}
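
/*
 * Worked example (illustrative): with A=[0,8) and B=[8,16) already
 * queued, inserting C=[16,24) first appends C to B (B becomes [8,24)),
 * then the loop retries with rq = B, finds A ending at B's start
 * sector, and appends B to A, leaving a single request [0,24).
 */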

void elv_merged_request(struct request_queue *q, struct request *rq,
		enum elv_merge type)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.request_merged)
		e->type->ops.request_merged(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.requests_merged)
		e->type->ops.requests_merged(q, rq, next);

	elv_rqhash_reposition(q, rq);
	q->last_merge = rq;
}

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.next_request)
		return e->type->ops.next_request(q, rq);

	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.former_request)
		return e->type->ops.former_request(q, rq);

	return NULL;
}
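
/*
 * Note (illustrative): schedulers that keep requests sorted in an
 * elv_rb tree can point these hooks straight at the generic helpers
 * near the bottom of this file; mq-deadline, for example, uses
 * .next_request = elv_rb_latter_request and
 * .former_request = elv_rb_former_request.
 */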

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static const struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q, bool uevent)
{
	struct elevator_queue *e = q->elevator;
	int error;

	lockdep_assert_held(&q->sysfs_lock);

	error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		if (uevent)
			kobject_uevent(&e->kobj, KOBJ_ADD);

		set_bit(ELEVATOR_FLAG_REGISTERED, &e->flags);
	}
	return error;
}

void elv_unregister_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	lockdep_assert_held(&q->sysfs_lock);

	if (e && test_and_clear_bit(ELEVATOR_FLAG_REGISTERED, &e->flags)) {
		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
	}
}

int elv_register(struct elevator_type *e)
{
	/* finish_request is mandatory */
	if (WARN_ON_ONCE(!e->ops.finish_request))
		return -EINVAL;
	/* insert_requests and dispatch_request are mandatory */
	if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
		return -EINVAL;

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (__elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);

	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
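
/*
 * Minimal registration sketch (illustrative, compiled out; the
 * example_* handlers are hypothetical, and a real scheduler fills in
 * many more ops; see mq-deadline):
 */
#if 0
static struct elevator_type example_sched = {
	.ops = {
		.insert_requests	= example_insert_requests,
		.dispatch_request	= example_dispatch_request,
		.finish_request		= example_finish_request,
	},
	.elevator_name = "example",
	.elevator_owner = THIS_MODULE,
};

static int __init example_sched_init(void)
{
	/* fails with -EBUSY if the name is already taken */
	return elv_register(&example_sched);
}
#endif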

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);

static inline bool elv_support_iosched(struct request_queue *q)
{
	if (!queue_is_mq(q) ||
	    (q->tag_set->flags & BLK_MQ_F_NO_SCHED))
		return false;
	return true;
}

/*
 * For single queue devices, default to using mq-deadline. If we have multiple
 * queues or mq-deadline is not available, default to "none".
 */
static struct elevator_type *elevator_get_default(struct request_queue *q)
{
	if (q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
		return NULL;

	if (q->nr_hw_queues != 1 &&
	    !blk_mq_is_shared_tags(q->tag_set->flags))
		return NULL;

	return elevator_find_get(q, "mq-deadline");
}

/*
 * Get the first elevator providing the features required by the request queue.
 * Default to "none" if no matching elevator is found.
 */
static struct elevator_type *elevator_get_by_features(struct request_queue *q)
{
	struct elevator_type *e, *found = NULL;

	spin_lock(&elv_list_lock);

	list_for_each_entry(e, &elv_list, list) {
		if (elv_support_features(q, e)) {
			found = e;
			break;
		}
	}

	if (found && !elevator_tryget(found))
		found = NULL;

	spin_unlock(&elv_list_lock);
	return found;
}

/*
 * For a device queue that has no required features, use the default elevator
 * settings. Otherwise, use the first elevator available matching the required
 * features. If no suitable elevator is found or if the chosen elevator
 * initialization fails, fall back to the "none" elevator (no elevator).
 */
void elevator_init_mq(struct request_queue *q)
{
	struct elevator_type *e;
	int err;

	if (!elv_support_iosched(q))
		return;

	WARN_ON_ONCE(blk_queue_registered(q));

	if (unlikely(q->elevator))
		return;

	if (!q->required_elevator_features)
		e = elevator_get_default(q);
	else
		e = elevator_get_by_features(q);
	if (!e)
		return;

	/*
	 * We are called before the disk is added, when there isn't any FS I/O,
	 * so freezing the queue plus canceling dispatch work is enough to
	 * drain any dispatch activities originating from passthrough requests.
	 * There is then no need to quiesce the queue, which could add long
	 * boot latency, especially when lots of disks are involved.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_cancel_work_sync(q);

	err = blk_mq_init_sched(q, e);

	blk_mq_unfreeze_queue(q);

	if (err) {
		pr_warn("\"%s\" elevator initialization failed, "
			"falling back to \"none\"\n", e->elevator_name);
	}

	elevator_put(e);
}
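
/*
 * Illustrative outcome at probe time: a device with a single hw queue
 * (e.g. a SATA disk) comes up with mq-deadline, while a multi-queue
 * NVMe device without shared tags comes up with "none".
 */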

/*
 * Switch to new_e io scheduler.
 *
 * If switching fails, we are most likely out of memory and unable to
 * restore the old io scheduler, so the queue is left with no io
 * scheduler ("none").
 */
int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	int ret;

	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	if (q->elevator) {
		elv_unregister_queue(q);
		elevator_exit(q);
	}

	ret = blk_mq_init_sched(q, new_e);
	if (ret)
		goto out_unfreeze;

	ret = elv_register_queue(q, true);
	if (ret) {
		elevator_exit(q);
		goto out_unfreeze;
	}
	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

out_unfreeze:
	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	if (ret) {
		pr_warn("elv: switch to \"%s\" failed, falling back to \"none\"\n",
			new_e->elevator_name);
	}

	return ret;
}

void elevator_disable(struct request_queue *q)
{
	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	elv_unregister_queue(q);
	elevator_exit(q);
	blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
	q->elevator = NULL;
	q->nr_requests = q->tag_set->queue_depth;
	blk_add_trace_msg(q, "elv switch: none");

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int elevator_change(struct request_queue *q, const char *elevator_name)
{
	struct elevator_type *e;
	int ret;

	/* Make sure queue is not in the middle of being removed */
	if (!blk_queue_registered(q))
		return -ENOENT;

	if (!strncmp(elevator_name, "none", 4)) {
		if (q->elevator)
			elevator_disable(q);
		return 0;
	}

	if (q->elevator && elevator_match(q->elevator->type, elevator_name))
		return 0;

	e = elevator_find_get(q, elevator_name);
	if (!e) {
		request_module("%s-iosched", elevator_name);
		e = elevator_find_get(q, elevator_name);
		if (!e)
			return -EINVAL;
	}
	ret = elevator_switch(q, e);
	elevator_put(e);
	return ret;
}

ssize_t elv_iosched_store(struct request_queue *q, const char *buf,
			  size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	int ret;

	if (!elv_support_iosched(q))
		return count;

	strscpy(elevator_name, buf, sizeof(elevator_name));
	ret = elevator_change(q, strstrip(elevator_name));
	if (!ret)
		return count;
	return ret;
}
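
/*
 * Userspace view of the store hook above (illustrative):
 *
 *	# echo mq-deadline > /sys/block/sda/queue/scheduler
 *	# echo none > /sys/block/sda/queue/scheduler
 *
 * An unrecognized name makes elevator_change() try to load a
 * "<name>-iosched" module before giving up with -EINVAL.
 */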

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *eq = q->elevator;
	struct elevator_type *cur = NULL, *e;
	int len = 0;

	if (!elv_support_iosched(q))
		return sprintf(name, "none\n");

	if (!q->elevator) {
		len += sprintf(name+len, "[none] ");
	} else {
		len += sprintf(name+len, "none ");
		cur = eq->type;
	}

	spin_lock(&elv_list_lock);
	list_for_each_entry(e, &elv_list, list) {
		if (e == cur)
			len += sprintf(name+len, "[%s] ", e->elevator_name);
		else if (elv_support_features(q, e))
			len += sprintf(name+len, "%s ", e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(name+len, "\n");
	return len;
}
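
/*
 * Show side, example output (illustrative; the active scheduler is
 * bracketed, and "none" is always listed first):
 *
 *	$ cat /sys/block/sda/queue/scheduler
 *	none [mq-deadline] kyber bfq
 */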

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);

static int __init elevator_setup(char *str)
{
	pr_warn("Kernel parameter elevator= does not have any effect anymore.\n"
		"Please use sysfs to set IO scheduler for individual devices.\n");
	return 1;
}

__setup("elevator=", elevator_setup);