xref: /openbmc/linux/block/blk-ioc.c (revision 0d945c1f)
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}

static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

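	/*
	 * This can be reached from both the ioc and the queue teardown
	 * paths; the ICQ_EXITED flag makes it idempotent, so the
	 * elevator's exit_icq() callback runs at most once per icq.
	 */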
	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.exit_icq)
		et->ops.exit_icq(icq);

	icq->flags |= ICQ_EXITED;
}

/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting the lookup hint to and clearing it from @icq are
	 * done under queue_lock.  If it's not pointing to @icq now, it
	 * never will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time the RCU callback runs,
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees the ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting an icq may call into put_io_context() through the
	 * elevator, which will trigger a lockdep warning.  The iocs are
	 * guaranteed to be different, so use a different locking subclass
	 * here.  Use the irqsave variant as there's no
	 * spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

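		/*
		 * We hold ioc->lock but need q->queue_lock, the reverse
		 * of the usual nesting order.  Trylock and, on contention,
		 * drop the ioc lock and retry to avoid an ABBA deadlock.
		 */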
		if (spin_trylock(&q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(&q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement the reference count of @ioc and release it if the count
 * reaches zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing the ioc requires reverse-order double locking and we
	 * may already be holding a queue_lock.  Do it asynchronously from
	 * a workqueue.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
					&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active().  If the active reference reaches zero
 * after the put, @ioc can never issue further IOs and ioscheds are
 * notified.
 */
void put_io_context_active(struct io_context *ioc)
{
	unsigned long flags;
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need the ioc lock to walk icq_list and the q lock to exit an
	 * icq.  Perform reverse double locking.  Read the comment in
	 * ioc_release_fn() for an explanation of the nested locking
	 * annotation.
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		ioc_exit_icq(icq);
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

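	/*
	 * The task no longer points at the ioc; drop its task count and
	 * its active reference.  The final active put notifies the
	 * ioscheds via put_io_context_active().
	 */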
	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}

static void __ioc_clear_queue(struct list_head *icq_list)
{
	unsigned long flags;

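	/*
	 * The icqs have already been spliced off @q->icq_list under
	 * queue_lock by ioc_clear_queue(), so only the ioc lock is needed
	 * to destroy each of them here.
	 */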
	while (!list_empty(icq_list)) {
		struct io_cq *icq = list_entry(icq_list->next,
						struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock_irqsave(&ioc->lock, flags);
		ioc_destroy_icq(icq);
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(&q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);
	spin_unlock_irq(&q->queue_lock);

	__ioc_clear_queue(&icq_list);
}

int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
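	/*
	 * icq insertions happen with locks held, so the radix tree must
	 * allocate its nodes with GFP_ATOMIC; inserters preload via
	 * radix_tree_maybe_preload() to keep insertion from failing (see
	 * ioc_create_icq()).
	 */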
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  The ioc shouldn't be installed if someone else
	 * already did so, or if @task, which isn't %current, is exiting.
	 * Note that we need to allow ioc creation on exiting %current as
	 * the exit path may issue IOs from e.g. exit_files().  The exit
	 * path is responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return the io_context of @task.  If it doesn't exist, it is created
 * with @gfp_flags and @node.  The returned io_context has its reference
 * count incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

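	/*
	 * Look up under task_lock and take a reference if an ioc is
	 * already installed.  Otherwise try to create one; a zero return
	 * means an ioc is now installed (by us or by a racing creator),
	 * so retry the lookup.  A non-zero return (-ENOMEM, or -EBUSY for
	 * an exiting task) gives up with NULL.
	 */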
	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}

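/*
 * Usage sketch (illustrative only, not part of the original source):
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(current, GFP_NOIO, NUMA_NO_NODE);
 *	if (ioc) {
 *		... use ioc, e.g. ioc_lookup_icq(ioc, q) under queue_lock ...
 *		put_io_context(ioc);
 *	}
 */
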
/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up the io_cq associated with the @ioc - @q pair from @ioc.  Must
 * be called with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(&q->queue_lock);

	/*
	 * icq's are indexed from @ioc using a radix tree and a hint
	 * pointer, both of which are protected with RCU.  All removals
	 * are done holding both the q and ioc locks, and we're holding
	 * the q lock - if we find an icq which points to us, it's
	 * guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure an io_cq linking @ioc and @q exists.  If the icq doesn't
 * exist, it will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

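	/*
	 * Preload the radix tree so the GFP_ATOMIC insertion below, done
	 * under the q and ioc locks, has node memory reserved up front.
	 */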
	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(&q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.init_icq)
			et->ops.init_icq(icq);
	} else {
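		/*
		 * Lost the race: somebody else linked an icq for this
		 * @ioc - @q pair first.  Free ours and return theirs.
		 */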
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(&q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

static int __init blk_ioc_init(void)
{
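	/*
	 * SLAB_PANIC: this cache is essential to the block layer, so fail
	 * the boot loudly rather than return an error.
	 */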
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);