// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

#include "blk.h"
#include "blk-mq-sched.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	/* taking a ref on a dead ioc is a caller bug */
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}

/* RCU callback: final free of an icq after all readers are done. */
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	/* cache pointer was stashed in the icq by ioc_destroy_icq() */
	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 *
 * Notifies the elevator exactly once per icq; ICQ_EXITED makes the
 * function idempotent.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.exit_icq)
		et->ops.exit_icq(icq);

	icq->flags |= ICQ_EXITED;
}

/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 *
 * Unlinks the icq from both the ioc side (radix tree, hash list, hint)
 * and the queue side, then defers the actual free to RCU.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock. If it's not pointing to @icq now, it never
	 * will. Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache. Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	icq->flags |= ICQ_DESTROYED;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context(). Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 *
 * Lock order is queue_lock -> ioc->lock; since we enter holding only
 * ioc->lock, each queue lock is first tried opportunistically and, on
 * contention, re-acquired in the correct order after dropping ioc->lock.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	spin_lock_irq(&ioc->lock);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(&q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(&q->queue_lock);
		} else {
			/* Make sure q and icq cannot be freed. */
			rcu_read_lock();

			/* Re-acquire the locks in the correct order. */
			spin_unlock(&ioc->lock);
			spin_lock(&q->queue_lock);
			spin_lock(&ioc->lock);

			/*
			 * The icq may have been destroyed when the ioc lock
			 * was released.
			 */
			if (!(icq->flags & ICQ_DESTROYED))
				ioc_destroy_icq(icq);

			spin_unlock(&q->queue_lock);
			rcu_read_unlock();
		}
	}

	spin_unlock_irq(&ioc->lock);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock. Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
					&ioc->release_work);
		else
			/* no icqs to unlink - free directly, no wq bounce */
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	/* freed outside ioc->lock; we hold the last reference here */
	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active(). If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
static void put_io_context_active(struct io_context *ioc)
{
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	/* last active ref gone - notify elevators for every live icq */
	spin_lock_irq(&ioc->lock);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		ioc_exit_icq(icq);
	}
	spin_unlock_irq(&ioc->lock);

	put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	/* detach ioc from the task under task_lock so forks can't race us */
	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}

/*
 * Destroy every icq on @icq_list (already spliced off a queue by
 * ioc_clear_queue()). RCU read lock keeps each icq/ioc reachable while
 * we take the per-ioc lock; anything already ICQ_DESTROYED was freed by
 * a concurrent path and is only skipped.
 */
static void __ioc_clear_queue(struct list_head *icq_list)
{
	unsigned long flags;

	rcu_read_lock();
	while (!list_empty(icq_list)) {
		struct io_cq *icq = list_entry(icq_list->next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock_irqsave(&ioc->lock, flags);
		if (icq->flags & ICQ_DESTROYED) {
			spin_unlock_irqrestore(&ioc->lock, flags);
			continue;
		}
		ioc_destroy_icq(icq);
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
	rcu_read_unlock();
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	/* splice the whole list out under queue_lock, then work lock-free */
	spin_lock_irq(&q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);
	spin_unlock_irq(&q->queue_lock);

	__ioc_clear_queue(&icq_list);
}

/*
 * Allocate an io_context and try to install it as @task's. Returns 0 on
 * success, -ENOMEM on allocation failure, -EBUSY if installation was
 * refused (task exiting and not %current). Note that losing the install
 * race to another ioc still returns 0 (task->io_context is set).
 */
int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install. ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting. Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files(). The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task. If it doesn't exist, it is created with
 * @gfp_flags and @node. The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	/* retry: create_task_io_context() returning 0 means an ioc exists */
	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	/* creation failed (-ENOMEM or -EBUSY) */
	return NULL;
}

/*
 * Fork-time io_context setup for @tsk. Called from copy_process();
 * current->io_context is assumed non-NULL here (callers guarantee it -
 * TODO confirm against the caller in kernel/fork.c).
 */
int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
	struct io_context *ioc = current->io_context;
	struct io_context *new_ioc;

	/*
	 * Share io context with parent, if CLONE_IO is set
	 */
	if (clone_flags & CLONE_IO) {
		get_io_context_active(ioc);

		WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
		atomic_inc(&ioc->nr_tasks);

		tsk->io_context = ioc;
	} else if (ioprio_valid(ioc->ioprio)) {
		/* not shared - give the child its own ioc, inherit ioprio */
		new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
		if (unlikely(!new_ioc))
			return -ENOMEM;

		new_ioc->ioprio = ioc->ioprio;
		put_io_context(new_ioc);
	}

	return 0;
}

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc. Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(&q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU. All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find a icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking @ioc and @q exists. If icq doesn't exist, they
 * will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(&q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.init_icq)
			et->ops.init_icq(icq);
	} else {
		/* lost the race - free ours and return the winner's icq */
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(&q->queue_lock);
	radix_tree_preload_end();
	return icq;
}
Heo 44413341598SAdrian Bunk static int __init blk_ioc_init(void) 44586db1e29SJens Axboe { 44686db1e29SJens Axboe iocontext_cachep = kmem_cache_create("blkdev_ioc", 44786db1e29SJens Axboe sizeof(struct io_context), 0, SLAB_PANIC, NULL); 44886db1e29SJens Axboe return 0; 44986db1e29SJens Axboe } 45086db1e29SJens Axboe subsys_initcall(blk_ioc_init); 451