xref: /openbmc/linux/block/blk-ioc.c (revision 3d492c2e)
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);

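/*
 * Illustrative sketch (not part of the original file): a task taking an
 * extra reference on its own io_context.  Per the comment on
 * get_task_io_context() below, %current may read current->io_context
 * directly instead of going through task_lock().
 *
 *	struct io_context *ioc = current->io_context;
 *
 *	if (ioc) {
 *		get_io_context(ioc);
 *		... use ioc ...
 *		put_io_context(ioc);
 *	}
 */
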
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with both ioc and q locked for sq, only ioc locked for
 * mq.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->uses_mq && et->ops.mq.exit_icq)
		et->ops.mq.exit_icq(icq);
	else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)
		et->ops.sq.elevator_exit_icq_fn(icq);

	icq->flags |= ICQ_EXITED;
}

/* Release an icq.  Called with both ioc and q locked. */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(q->queue_lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting icq may call into put_io_context() through elevator
	 * which will trigger lockdep warning.  The ioc's are guaranteed to
	 * be different, use a different locking subclass here.  Use
	 * irqsave variant as there's no spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
					&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);

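/*
 * Illustrative sketch (not part of the original file): every successful
 * get must be balanced by a put; the final put either frees @ioc directly
 * or, if icq's remain, defers release to the work item above.
 *
 *	get_io_context(ioc);
 *	... issue IO referencing ioc ...
 *	put_io_context(ioc);
 */
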
/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active().  If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
void put_io_context_active(struct io_context *ioc)
{
	struct elevator_type *et;
	unsigned long flags;
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need ioc lock to walk icq_list and q lock to exit icq.  Perform
	 * reverse double locking.  Read comment in ioc_release_fn() for
	 * explanation on the nested locking annotation.
	 */
retry:
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		et = icq->q->elevator->type;
		if (et->uses_mq) {
			ioc_exit_icq(icq);
		} else {
			if (spin_trylock(icq->q->queue_lock)) {
				ioc_exit_icq(icq);
				spin_unlock(icq->q->queue_lock);
			} else {
				spin_unlock_irqrestore(&ioc->lock, flags);
				cpu_relax();
				goto retry;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
 */
void ioc_clear_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	while (!list_empty(&q->icq_list)) {
		struct io_cq *icq = list_entry(q->icq_list.next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock(&ioc->lock);
		ioc_destroy_icq(icq);
		spin_unlock(&ioc->lock);
	}
}

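/*
 * Example call site (illustrative sketch, following the locking rule in
 * the kernel-doc above): a caller tearing down the queue's icq
 * associations takes the queue lock around the call:
 *
 *	spin_lock_irq(q->queue_lock);
 *	ioc_clear_queue(q);
 *	spin_unlock_irq(q->queue_lock);
 */
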
int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did it, or if @task, which isn't %current, is exiting.
	 * Note that we need to allow ioc creation on exiting %current as
	 * the exit path may issue IOs from e.g. exit_files().  The exit
	 * path is responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);

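/*
 * Example (illustrative sketch; GFP_NOIO and NUMA_NO_NODE are assumed
 * placeholder arguments): attach to another task's io_context and drop
 * the reference when done:
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(task, GFP_NOIO, NUMA_NO_NODE);
 *	if (ioc) {
 *		... use ioc ...
 *		put_io_context(ioc);
 *	}
 */
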
/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

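/*
 * Example (illustrative sketch, not part of the original file): look up
 * the current task's icq on @q with the queue lock already held:
 *
 *	struct io_cq *icq = NULL;
 *
 *	lockdep_assert_held(q->queue_lock);
 *	if (current->io_context)
 *		icq = ioc_lookup_icq(current->io_context, q);
 */
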
/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, it
 * will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->uses_mq && et->ops.mq.init_icq)
			et->ops.mq.init_icq(icq);
		else if (!et->uses_mq && et->ops.sq.elevator_init_icq_fn)
			et->ops.sq.elevator_init_icq_fn(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

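/*
 * Typical usage (sketch of the lookup-then-create pattern; the caller
 * context and gfp choice are assumptions): try a lookup under the queue
 * lock first, then fall back to creation on a miss.  Note that
 * ioc_create_icq() takes both locks itself, so they must not be held
 * around the call:
 *
 *	spin_lock_irq(q->queue_lock);
 *	icq = ioc_lookup_icq(ioc, q);
 *	spin_unlock_irq(q->queue_lock);
 *
 *	if (!icq)
 *		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
 */
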
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);