/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */

#include "blk.h"

/*
 * Slab cache backing all io_context allocations; created once at boot
 * by blk_ioc_init() below.
 */
static struct kmem_cache *iocontext_cachep;

/*
 * Run the CFQ destructor hook for @ioc's per-queue state: invoke ->dtor()
 * on the first cfq_io_context hanging off the cic_list, passing the whole
 * ioc. Called on the final reference drop, from put_io_context().
 */
static void cfq_dtor(struct io_context *ioc)
{
	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		/* container_of on the hlist head's first node */
		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
								cic_list);
		cic->dtor(ioc);
	}
}

/*
 * IO Context helper functions. put_io_context() returns 1 if there are no
 * more users of this io context, 0 otherwise.
 *
 * Drops one reference on @ioc (NULL is tolerated and counts as "last
 * user"). On the final drop, the CFQ destructor runs under
 * rcu_read_lock() and the context is freed back to the slab cache.
 */
int put_io_context(struct io_context *ioc)
{
	if (ioc == NULL)
		return 1;

	/* refcount 0 here means someone already did the final put */
	BUG_ON(atomic_long_read(&ioc->refcount) == 0);

	if (atomic_long_dec_and_test(&ioc->refcount)) {
		rcu_read_lock();
		cfq_dtor(ioc);
		rcu_read_unlock();

		kmem_cache_free(iocontext_cachep, ioc);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(put_io_context);

/*
 * Run the CFQ exit hook for @ioc, mirroring cfq_dtor() but calling
 * ->exit() instead of ->dtor(). Invoked when the last task using the
 * context exits (see exit_io_context()); the list walk is protected by
 * rcu_read_lock().
 */
static void cfq_exit(struct io_context *ioc)
{
	rcu_read_lock();

	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
								cic_list);
		cic->exit(ioc);
	}
	rcu_read_unlock();
}

/*
 * Called by the exiting task: detach its io_context under task_lock(),
 * run the CFQ exit hook if this was the last task sharing the context,
 * then drop the task's reference.
 *
 * NOTE(review): @ioc is dereferenced without a NULL check — presumably
 * callers only invoke this when task->io_context is non-NULL; confirm at
 * the call sites (not visible in this file).
 */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	/* detach under task_lock so concurrent users see a clean switch */
	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	if (atomic_dec_and_test(&ioc->nr_tasks)) {
		cfq_exit(ioc);
	}
	put_io_context(ioc);
}

/*
 * Allocate and initialise a fresh io_context from the slab cache on the
 * given NUMA @node. Returns NULL on allocation failure. The new context
 * starts with refcount 1 and nr_tasks 1 (owned by its creator).
 */
struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ret;

	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
	if (ret) {
		atomic_long_set(&ret->refcount, 1);
		atomic_set(&ret->nr_tasks, 1);
		spin_lock_init(&ret->lock);
		ret->ioprio_changed = 0;
		ret->ioprio = 0;
		ret->last_waited = jiffies; /* doesn't matter... */
		ret->nr_batch_requests = 0; /* because this is 0 */
		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
		INIT_HLIST_HEAD(&ret->cic_list);
		ret->ioc_data = NULL;
	}

	return ret;
}

/*
 * If the current task has no IO context then create one and initialise it.
 * Otherwise, return its existing IO context.
 *
 * This returned IO context doesn't have a specifically elevated refcount,
 * but since the current task itself holds a reference, the context can be
 * used in general code, so long as it stays within `current` context.
 *
 * Returns NULL only if a fresh allocation was needed and failed.
 */
struct io_context *current_io_context(gfp_t gfp_flags, int node)
{
	struct task_struct *tsk = current;
	struct io_context *ret;

	ret = tsk->io_context;
	if (likely(ret))
		return ret;

	ret = alloc_io_context(gfp_flags, node);
	if (ret) {
		/* make sure set_task_ioprio() sees the settings above */
		smp_wmb();
		tsk->io_context = ret;
	}

	return ret;
}

/*
 * If the current task has no IO context then create one and initialise it.
 * If it does have a context, take a ref on it.
 *
 * This is always called in the context of the task which submitted the I/O.
 * Returns the context with an extra reference held, or NULL if allocation
 * failed.
 */
struct io_context *get_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ret = NULL;

	/*
	 * Check for unlikely race with exiting task. ioc ref count is
	 * zero when ioc is being detached; inc_not_zero fails then and we
	 * retry, which allocates a new context on the next pass.
	 */
	do {
		ret = current_io_context(gfp_flags, node);
		if (unlikely(!ret))
			break;
	} while (!atomic_long_inc_not_zero(&ret->refcount));

	return ret;
}
EXPORT_SYMBOL(get_io_context);

/*
 * Replace *@pdst with *@psrc, taking a new reference on the source and
 * dropping the old destination's reference. A NULL source is a no-op.
 */
void copy_io_context(struct io_context **pdst, struct io_context **psrc)
{
	struct io_context *src = *psrc;
	struct io_context *dst = *pdst;

	if (src) {
		/* source must still be live to be shared */
		BUG_ON(atomic_long_read(&src->refcount) == 0);
		atomic_long_inc(&src->refcount);
		put_io_context(dst);
		*pdst = src;
	}
}
EXPORT_SYMBOL(copy_io_context);

/*
 * Boot-time initialisation: create the io_context slab cache.
 * SLAB_PANIC means failure here panics the kernel, so no error path.
 */
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);