blk-ioc.c: old version (42ec57a8f68311bbbf4ff96a5d33c8a2e90b9d05) vs new version (6e736be7f282fff705db7c34a15313281b372a76)
--- unchanged in both versions ---
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;
--- added in new version ---
/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);
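Illustrative only, not part of either revision: a minimal sketch of how a caller that already holds a valid io_context pointer could take and later drop an extra reference with the helper added above; the wrapper name example_hold_ioc() is a hypothetical placeholder, and put_io_context() is the existing release counterpart used by exit_io_context() below.

static void example_hold_ioc(struct io_context *ioc)
{
	get_io_context(ioc);	/* refcount must already be non-zero */
	/* ... dereference ioc safely while the extra reference is held ... */
	put_io_context(ioc);	/* drop the reference taken above */
}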
--- unchanged in both versions ---
static void cfq_dtor(struct io_context *ioc)
{
	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
								cic_list);
		cic->dtor(ioc);

--- 39 unchanged lines hidden ---

	rcu_read_unlock();
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;
--- added in new version ---
	/* PF_EXITING prevents new io_context from being attached to @task */
	WARN_ON_ONCE(!(current->flags & PF_EXITING));
--- unchanged in both versions ---
	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	if (atomic_dec_and_test(&ioc->nr_tasks))
		cfq_exit(ioc);

	put_io_context(ioc);
}
--- old version ---
struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
--- new version ---
static struct io_context *create_task_io_context(struct task_struct *task,
						 gfp_t gfp_flags, int node,
						 bool take_ref)
--- unchanged in both versions ---
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return NULL;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->cic_list);
--- added in new version ---
	/* try to install, somebody might already have beaten us to it */
	task_lock(task);

	if (!task->io_context && !(task->flags & PF_EXITING)) {
		task->io_context = ioc;
	} else {
		kmem_cache_free(iocontext_cachep, ioc);
		ioc = task->io_context;
	}

	if (ioc && take_ref)
		get_io_context(ioc);

	task_unlock(task);
--- unchanged in both versions ---
	return ioc;
}
/**
 * current_io_context - get io_context of %current
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of %current. If it doesn't exist, it is created with
 * @gfp_flags and @node. The returned io_context does NOT have its
 * reference count incremented. Because io_context is exited only on task
 * exit, %current can be sure that the returned io_context is valid and
 * alive as long as it is executing.
 */
struct io_context *current_io_context(gfp_t gfp_flags, int node)
{
--- old version ---
	struct task_struct *tsk = current;
	struct io_context *ret;

	ret = tsk->io_context;
	if (likely(ret))
		return ret;

	ret = alloc_io_context(gfp_flags, node);
	if (ret) {
		/* make sure set_task_ioprio() sees the settings above */
		smp_wmb();
		tsk->io_context = ret;
	}

	return ret;
}
--- new version ---
	might_sleep_if(gfp_flags & __GFP_WAIT);

	if (current->io_context)
		return current->io_context;

	return create_task_io_context(current, gfp_flags, node, false);
}
EXPORT_SYMBOL(current_io_context);
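Illustrative only, not part of either revision: as the kernel-doc of get_task_io_context() below suggests, a caller running as %current that needs to keep the context pinned beyond its own execution can pair current_io_context() with the new one-argument get_io_context() instead of paying for task_lock(); the wrapper name example_get_current_ioc() is a hypothetical placeholder.

static struct io_context *example_get_current_ioc(gfp_t gfp_mask, int node)
{
	struct io_context *ioc;

	ioc = current_io_context(gfp_mask, node);	/* no reference taken */
	if (ioc)
		get_io_context(ioc);	/* pin it explicitly */
	return ioc;	/* caller releases with put_io_context() */
}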
--- old version ---
/*
 * If the current task has no IO context then create one and initialise it.
 * If it does have a context, take a ref on it.
 *
 * This is always called in the context of the task which submitted the I/O.
 */
struct io_context *get_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ioc = NULL;

	/*
	 * Check for unlikely race with exiting task. ioc ref count is
	 * zero when ioc is being detached.
	 */
	do {
		ioc = current_io_context(gfp_flags, node);
		if (unlikely(!ioc))
			break;
	} while (!atomic_long_inc_not_zero(&ioc->refcount));

	return ioc;
}
EXPORT_SYMBOL(get_io_context);

--- new version ---
/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task. If it doesn't exist, it is created with
 * @gfp_flags and @node. The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * current_io_context() + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	task_lock(task);
	ioc = task->io_context;
	if (likely(ioc)) {
		get_io_context(ioc);
		task_unlock(task);
		return ioc;
	}
	task_unlock(task);

	return create_task_io_context(task, gfp_flags, node, true);
}
EXPORT_SYMBOL(get_task_io_context);
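Illustrative only, not part of either revision: a minimal sketch of the new task-aware lookup, showing that the io_context comes back with its reference count already incremented and is released with put_io_context(); the wrapper name example_peek_task_ioc() and the GFP_KERNEL/numa_node_id() choices are assumptions for illustration.

static void example_peek_task_ioc(struct task_struct *task)
{
	struct io_context *ioc;

	ioc = get_task_io_context(task, GFP_KERNEL, numa_node_id());
	if (!ioc)
		return;		/* allocation failed or @task is exiting */
	/* ... inspect the context while holding the reference ... */
	put_io_context(ioc);	/* drop the reference handed back above */
}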
--- unchanged in both versions ---
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);