/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);

static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with both ioc and q locked for sq, only ioc locked for
 * mq.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->uses_mq && et->ops.mq.exit_icq)
		et->ops.mq.exit_icq(icq);
	else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)
		et->ops.sq.elevator_exit_icq_fn(icq);

	icq->flags |= ICQ_EXITED;
}

/* Release an icq. Called with both ioc and q locked. */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(q->queue_lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock. If it's not pointing to @icq now, it never
	 * will. Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache. Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context(). Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting icq may call into put_io_context() through elevator
	 * which will trigger lockdep warning. The ioc's are guaranteed to
	 * be different, use a different locking subclass here. Use
	 * irqsave variant as there's no spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock. Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
				   &ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active(). If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
void put_io_context_active(struct io_context *ioc)
{
	struct elevator_type *et;
	unsigned long flags;
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need ioc lock to walk icq_list and q lock to exit icq. Perform
	 * reverse double locking. Read comment in ioc_release_fn() for
	 * explanation on the nested locking annotation.
	 */
retry:
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		et = icq->q->elevator->type;
		if (et->uses_mq) {
			ioc_exit_icq(icq);
		} else {
			if (spin_trylock(icq->q->queue_lock)) {
				ioc_exit_icq(icq);
				spin_unlock(icq->q->queue_lock);
			} else {
				spin_unlock_irqrestore(&ioc->lock, flags);
				cpu_relax();
				goto retry;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's. Must be called with @q locked.
 */
void ioc_clear_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	while (!list_empty(&q->icq_list)) {
		struct io_cq *icq = list_entry(q->icq_list.next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock(&ioc->lock);
		ioc_destroy_icq(icq);
		spin_unlock(&ioc->lock);
	}
}

int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install. ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting. Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files(). The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task. If it doesn't exist, it is created with
 * @gfp_flags and @node. The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc. Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU. All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking @ioc and @q exists. If the icq doesn't exist, a
 * new one will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->uses_mq && et->ops.mq.init_icq)
			et->ops.mq.init_icq(icq);
		else if (!et->uses_mq && et->ops.sq.elevator_init_icq_fn)
			et->ops.sq.elevator_init_icq_fn(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
					     sizeof(struct io_context), 0,
					     SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);
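/*
 * Illustrative sketch (not part of the original file): a minimal example of
 * the expected reference-counting pattern for callers of this API. The
 * caller name example_ioc_user() is hypothetical. get_task_io_context()
 * returns the task's io_context with its refcount elevated, creating and
 * installing one if necessary, and the reference is dropped again with
 * put_io_context(). Guarded by #if 0 so it is documentation only.
 */
#if 0
static void example_ioc_user(struct task_struct *task)
{
	struct io_context *ioc;

	/* may allocate and install a new io_context for @task */
	ioc = get_task_io_context(task, GFP_KERNEL, NUMA_NO_NODE);
	if (!ioc)
		return;

	/* ... associate or issue IO on behalf of @ioc here ... */

	put_io_context(ioc);	/* drop the reference taken above */
}
#endif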