// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

#include "blk.h"
#include "blk-mq-sched.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
static void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}

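/*
 * RCU callback that finally frees an icq.  The slab cache is taken from
 * @icq->__rcu_icq_cache, which ioc_destroy_icq() recorded because the
 * queue, and with it the elevator type, may already be gone by the time
 * this runs.
 */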
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.exit_icq)
		et->ops.exit_icq(icq);

	icq->flags |= ICQ_EXITED;
}

/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * The lookup hint is both set to and cleared from @icq under
	 * queue_lock.  If it's not pointing to @icq now, it never will.
	 * Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time the RCU callback runs,
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	icq->flags |= ICQ_DESTROYED;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	spin_lock_irq(&ioc->lock);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

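		/*
		 * The nesting order is queue_lock first, ioc->lock second.
		 * We already hold ioc->lock here, so only try-lock the
		 * queue lock; on failure, fall back to dropping ioc->lock
		 * and re-acquiring both in the correct order.
		 */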
		if (spin_trylock(&q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(&q->queue_lock);
		} else {
			/* Make sure q and icq cannot be freed. */
			rcu_read_lock();

			/* Re-acquire the locks in the correct order. */
			spin_unlock(&ioc->lock);
			spin_lock(&q->queue_lock);
			spin_lock(&ioc->lock);

			/*
			 * The icq may have been destroyed when the ioc lock
			 * was released.
			 */
			if (!(icq->flags & ICQ_DESTROYED))
				ioc_destroy_icq(icq);

			spin_unlock(&q->queue_lock);
			rcu_read_unlock();
		}
	}

	spin_unlock_irq(&ioc->lock);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - release a reference to an io_context
 * @ioc: io_context to put
 *
 * Decrement the reference count of @ioc and release it if the count
 * reaches zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
					&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL_GPL(put_io_context);

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Put an active reference to @ioc.  If the active reference count
 * reaches zero after the put, @ioc can never issue further I/O and the
 * ioscheds are notified.
 */
static void put_io_context_active(struct io_context *ioc)
{
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	spin_lock_irq(&ioc->lock);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		ioc_exit_icq(icq);
	}
	spin_unlock_irq(&ioc->lock);

	put_io_context(ioc);
}

/*
 * Called by the exiting task.  Detach the task's io_context under
 * task_lock(), then drop the task and active references.
 */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}

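/*
 * Exit and free every icq on @icq_list.  Each icq is destroyed under its
 * ioc->lock; the RCU read-side section prevents an icq that is being
 * destroyed concurrently from being freed under us, and such icqs are
 * skipped via the ICQ_DESTROYED check.
 */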
static void __ioc_clear_queue(struct list_head *icq_list)
{
	unsigned long flags;

	rcu_read_lock();
	while (!list_empty(icq_list)) {
		struct io_cq *icq = list_entry(icq_list->next,
						struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock_irqsave(&ioc->lock, flags);
		if (icq->flags & ICQ_DESTROYED) {
			spin_unlock_irqrestore(&ioc->lock, flags);
			continue;
		}
		ioc_destroy_icq(icq);
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
	rcu_read_unlock();
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(&q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);
	spin_unlock_irq(&q->queue_lock);

	__ioc_clear_queue(&icq_list);
}

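/*
 * Allocate and initialize a new io_context.  The returned ioc starts
 * out with one reference, one task count and one active reference, all
 * owned by the caller.
 */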
static struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return NULL;

	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);
	return ioc;
}

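/*
 * Allocate a new io_context and try to install it as @task->io_context.
 * Returns 0 if @task ends up with an io_context (either ours or one
 * installed concurrently), -ENOMEM on allocation failure, and -EBUSY
 * when @task, which isn't %current, is exiting and must not get one.
 */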
static int create_task_io_context(struct task_struct *task, gfp_t gfp_flags,
		int node)
{
	struct io_context *ioc;
	int ret;

	ioc = alloc_io_context(gfp_flags, node);
	if (!ioc)
		return -ENOMEM;

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did, or if @task, which isn't %current, is exiting.
	 * Note that we need to allow ioc creation on exiting %current as
	 * the exit path may issue IOs from e.g. exit_files().  The exit
	 * path is responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}

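/*
 * Set up @tsk->io_context at fork time.  With CLONE_IO the parent's
 * io_context is shared and all three reference counts are bumped;
 * otherwise, if the parent has a valid ioprio, a fresh io_context
 * inheriting that ioprio is created for the child.
 */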
int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
	struct io_context *ioc = current->io_context;
	struct io_context *new_ioc;

	/*
	 * Share io context with parent, if CLONE_IO is set
	 */
	if (clone_flags & CLONE_IO) {
		atomic_long_inc(&ioc->refcount);
		atomic_inc(&ioc->active_ref);
		atomic_inc(&ioc->nr_tasks);
		tsk->io_context = ioc;
	} else if (ioprio_valid(ioc->ioprio)) {
		new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
		if (unlikely(!new_ioc))
			return -ENOMEM;

		new_ioc->ioprio = ioc->ioprio;
		put_io_context(new_ioc);
	}

	return 0;
}

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up the io_cq associated with the @ioc - @q pair from @ioc.  Must
 * be called with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(&q->queue_lock);

	/*
	 * icq's are indexed from @ioc using a radix tree and a hint
	 * pointer, both of which are protected with RCU.  All removals
	 * are done holding both q and ioc locks, and we're holding q
	 * lock - if we find an icq which points to us, it's guaranteed
	 * to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure the io_cq linking @ioc and @q exists.  If the icq doesn't
 * exist, it will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
static struct io_cq *ioc_create_icq(struct io_context *ioc,
		struct request_queue *q, gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

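	/*
	 * If @gfp_mask allows blocking, preload the radix tree so the
	 * insertion below doesn't need to allocate while holding the two
	 * spinlocks.  radix_tree_preload_end() below pairs with this.
	 */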
	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(&q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.init_icq)
			et->ops.init_icq(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(&q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

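/**
 * ioc_find_get_icq - get the io_cq linking %current's io_context and @q
 * @q: request_queue of interest
 *
 * Look up the io_cq for %current on @q, creating the io_context and the
 * io_cq as necessary.  On success a reference to the io_context is
 * taken; returns NULL if no io_context is available or the icq can't be
 * allocated.
 */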
struct io_cq *ioc_find_get_icq(struct request_queue *q)
{
	struct io_context *ioc;
	struct io_cq *icq;

	/* create task io_context, if we don't have one already */
	if (unlikely(!current->io_context))
		create_task_io_context(current, GFP_ATOMIC, q->node);

	/*
	 * May not have an IO context if it's a passthrough request
	 */
	ioc = current->io_context;
	if (!ioc)
		return NULL;

	spin_lock_irq(&q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(&q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return NULL;
	}
	get_io_context(icq->ioc);
	return icq;
}
EXPORT_SYMBOL_GPL(ioc_find_get_icq);

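/* Set up the slab cache used for all io_context allocations. */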
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);