/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);

static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->uses_mq && et->ops.mq.exit_icq)
		et->ops.mq.exit_icq(icq);
	else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)
		et->ops.sq.elevator_exit_icq_fn(icq);

	icq->flags |= ICQ_EXITED;
}
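
/*
 * Illustrative sketch (not part of the original file): how an elevator
 * typically consumes the exit hook invoked above. Schedulers such as
 * cfq embed struct io_cq as the first member of their private per-icq
 * type and recover it with container_of(); "foo" below is a
 * hypothetical elevator, shown only to make the calling convention
 * concrete:
 *
 *	struct foo_icq {
 *		struct io_cq icq;	// must be the first member
 *		int private_state;
 *	};
 *
 *	static void foo_exit_icq(struct io_cq *icq)
 *	{
 *		struct foo_icq *ficq = container_of(icq, struct foo_icq, icq);
 *
 *		// tear down ficq->private_state; invoked at most once
 *		// per icq thanks to the ICQ_EXITED guard above
 *	}
 */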
/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock. If it's not pointing to @icq now, it never
	 * will. Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache. Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context(). Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting icq may call into put_io_context() through elevator
	 * which will trigger lockdep warning. The ioc's are guaranteed to
	 * be different, use a different locking subclass here. Use
	 * irqsave variant as there's no spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}
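
/*
 * Sketch (added for illustration, assuming the canonical lock order is
 * q->queue_lock before ioc->lock, as taken by ioc_create_icq() below):
 * ioc_release_fn() above holds the "inner" ioc->lock and must not spin
 * on the "outer" queue_lock, so it backs off completely on contention.
 * The pattern boils down to:
 *
 *	spin_lock(&ioc->lock);			// inner lock held first
 *	while (!spin_trylock(q->queue_lock)) {	// must not block here
 *		spin_unlock(&ioc->lock);	// drop everything,
 *		cpu_relax();			// let the owner finish,
 *		spin_lock(&ioc->lock);		// then start over
 *	}
 */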
/**
 * put_io_context - put a reference on io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock. Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
					&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);
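
/*
 * Typical get/put pairing (sketch added for illustration; GFP_KERNEL
 * and NUMA_NO_NODE are arbitrary example arguments):
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(task, GFP_KERNEL, NUMA_NO_NODE);
 *	if (ioc) {
 *		// ... inspect or associate with ioc ...
 *		put_io_context(ioc);	// may free or punt to workqueue
 *	}
 */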
/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active(). If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
void put_io_context_active(struct io_context *ioc)
{
	struct elevator_type *et;
	unsigned long flags;
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need ioc lock to walk icq_list and q lock to exit icq. Perform
	 * reverse double locking. Read comment in ioc_release_fn() for
	 * explanation on the nested locking annotation.
	 */
retry:
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		et = icq->q->elevator->type;
		if (et->uses_mq) {
			ioc_exit_icq(icq);
		} else {
			if (spin_trylock(icq->q->queue_lock)) {
				ioc_exit_icq(icq);
				spin_unlock(icq->q->queue_lock);
			} else {
				spin_unlock_irqrestore(&ioc->lock, flags);
				cpu_relax();
				goto retry;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}
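
/*
 * Call-path sketch (illustrative, simplified from the task exit path
 * in kernel/exit.c): exit_io_context() above marks the point after
 * which a task must not issue further IO:
 *
 *	do_exit()
 *	    exit_io_context(tsk)
 *	        atomic_dec(&ioc->nr_tasks)	// drop task count
 *	        put_io_context_active(ioc)	// notify ioscheds once
 *	            put_io_context(ioc)		// drop base reference
 */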
static void __ioc_clear_queue(struct list_head *icq_list)
{
	unsigned long flags;

	while (!list_empty(icq_list)) {
		struct io_cq *icq = list_entry(icq_list->next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock_irqsave(&ioc->lock, flags);
		ioc_destroy_icq(icq);
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);

	if (q->mq_ops) {
		spin_unlock_irq(q->queue_lock);
		__ioc_clear_queue(&icq_list);
	} else {
		__ioc_clear_queue(&icq_list);
		spin_unlock_irq(q->queue_lock);
	}
}

int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install. ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting. Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files(). The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}
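
/*
 * Return-value note (added for clarity): 0 means @task now has an
 * io_context, whether it is the one allocated above or one installed
 * concurrently by another thread; -EBUSY means @task is an exiting
 * task other than %current and must not receive one. Callers are
 * expected to re-do their lookup on 0, e.g. (sketch mirroring
 * get_task_io_context() below):
 *
 *	do {
 *		// look up task->io_context under task_lock();
 *		// done if it is set
 *	} while (!create_task_io_context(task, gfp_flags, node));
 */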
/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task. If it doesn't exist, it is created with
 * @gfp_flags and @node. The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up the io_cq associated with the @ioc - @q pair from @ioc. Must be
 * called with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU. All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);
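
/*
 * Caller-side sketch (illustrative; mirrors how request setup paths
 * combine the two helpers — the gfp mask is an example choice):
 *
 *	spin_lock_irq(q->queue_lock);
 *	icq = ioc_lookup_icq(ioc, q);
 *	spin_unlock_irq(q->queue_lock);
 *
 *	if (!icq)
 *		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
 *
 * ioc_create_icq() below takes both q and ioc locks itself, which is
 * why the lookup lock is dropped before calling it.
 */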
/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking @ioc and @q exists. If the icq doesn't exist,
 * it will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->uses_mq && et->ops.mq.init_icq)
			et->ops.mq.init_icq(icq);
		else if (!et->uses_mq && et->ops.sq.elevator_init_icq_fn)
			et->ops.sq.elevator_init_icq_fn(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);