xref: /openbmc/linux/block/blk-ioc.c (revision d78c317f)
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);
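
/*
 * A minimal usage sketch (illustration only, not lifted from a real call
 * site): a caller that wants to hold on to a task's io_context beyond the
 * current critical section takes a reference and pairs it with
 * put_io_context() once done.
 *
 *	struct io_context *ioc = current->io_context;
 *
 *	if (ioc) {
 *		get_io_context(ioc);
 *		...
 *		put_io_context(ioc);
 *	}
 */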

static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit and free an icq.  Called with both ioc and q locked.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(q->queue_lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	if (et->ops.elevator_exit_icq_fn)
		et->ops.elevator_exit_icq_fn(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}
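
/*
 * Note that freeing is deferred to icq_free_icq_rcu() above: a lookup on
 * another queue may still be dereferencing a stale pointer to this icq
 * (via ioc->icq_tree or ioc->icq_hint) under rcu_read_lock(), and the
 * grace period keeps that dereference safe.
 */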

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.  Unlinking an icq needs
 * both its queue's lock and ioc->lock, but the queues are only discovered
 * by walking the icq list under ioc->lock, so each time the queue changes
 * we have to pin the new queue, drop the locks and re-acquire them in the
 * proper order before continuing.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	struct request_queue *last_q = NULL;
	unsigned long flags;

	/*
	 * Exiting icq may call into put_io_context() through elevator
	 * which will trigger lockdep warning.  The ioc's are guaranteed to
	 * be different, use a different locking subclass here.  Use
	 * irqsave variant as there's no spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *this_q = icq->q;

		if (this_q != last_q) {
			/*
			 * Need to switch to @this_q.  Once we release
			 * @ioc->lock, it can go away along with @icq.
			 * Hold on to it.
			 */
			__blk_get_queue(this_q);

			/*
			 * blk_put_queue() might sleep thanks to kobject
			 * idiocy.  Always release both locks, put and
			 * restart.
			 */
			if (last_q) {
				spin_unlock(last_q->queue_lock);
				spin_unlock_irqrestore(&ioc->lock, flags);
				blk_put_queue(last_q);
			} else {
				spin_unlock_irqrestore(&ioc->lock, flags);
			}

			last_q = this_q;
			spin_lock_irqsave(this_q->queue_lock, flags);
			spin_lock_nested(&ioc->lock, 1);
			continue;
		}
		ioc_exit_icq(icq);
	}

	if (last_q) {
		spin_unlock(last_q->queue_lock);
		spin_unlock_irqrestore(&ioc->lock, flags);
		blk_put_queue(last_q);
	} else {
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			schedule_work(&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	/* no icqs to unlink - free the ioc here instead of bouncing to wq */
	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);
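
/*
 * Because the final release only schedules release_work, put_io_context()
 * is safe to call with a queue_lock held or from atomic context; the icq
 * teardown, which has to take queue_locks itself, never runs here.
 */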

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context(ioc);
}
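
/*
 * A sketch of the expected call site, for orientation only (the real one
 * lives in the task exit path, e.g. do_exit()):
 *
 *	if (tsk->io_context)
 *		exit_io_context(tsk);
 */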

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
 */
void ioc_clear_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	while (!list_empty(&q->icq_list)) {
		struct io_cq *icq = list_entry(q->icq_list.next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock(&ioc->lock);
		ioc_exit_icq(icq);
		spin_unlock(&ioc->lock);
	}
}
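
/*
 * Expected usage, e.g. from queue teardown or an elevator switch (sketch
 * only, not a specific call site):
 *
 *	spin_lock_irq(q->queue_lock);
 *	ioc_clear_queue(q);
 *	spin_unlock_irq(q->queue_lock);
 */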

void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
				int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  The ioc shouldn't be installed if someone else
	 * already did, or if @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on exiting %current as the exit
	 * path may issue IOs from e.g. exit_files().  The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);
	task_unlock(task);
}
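
/*
 * Callers normally go through the create_io_context() fast path in blk.h,
 * which only falls back to this slowpath when @task has no io_context yet.
 * Roughly (see blk.h for the authoritative version):
 *
 *	static inline struct io_context *create_io_context(
 *			struct task_struct *task, gfp_t gfp_mask, int node)
 *	{
 *		WARN_ON_ONCE(irqs_disabled());
 *		if (unlikely(!task->io_context))
 *			create_io_context_slowpath(task, gfp_mask, node);
 *		return task->io_context;
 *	}
 */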

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (create_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);
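
/*
 * A usage sketch modelled on how the ioprio code attaches a new priority
 * to another task (approximate; see fs/ioprio.c for the real thing):
 *
 *	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
 *	if (ioc) {
 *		ioc_ioprio_changed(ioc, ioprio);
 *		put_io_context(ioc);
 *	}
 */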

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);
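
/*
 * The hint is purely an optimization: assignments to ioc->icq_hint may
 * race, but every reader re-checks icq->q against the queue it cares
 * about before trusting it, so a lost or stale hint only costs an extra
 * radix tree lookup.
 */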

/**
 * ioc_create_icq - create and link io_cq
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure an io_cq linking %current->io_context and @q exists.  If the
 * io_context and/or the icq don't exist, they will be created using
 * @gfp_mask.
 *
 * The caller is responsible for ensuring that %current's io_context won't
 * go away and that @q is alive and will stay alive until this function
 * returns.
 */
struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc;
	struct io_cq *icq;

	/* allocate stuff */
	ioc = create_io_context(current, gfp_mask, q->node);
	if (!ioc)
		return NULL;

	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.elevator_init_icq_fn)
			et->ops.elevator_init_icq_fn(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}
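
/*
 * The expected caller pattern on the request allocation path is "look up
 * first, create only on a miss" (sketch only; the real caller lives in
 * the elevator/request allocation code):
 *
 *	spin_lock_irq(q->queue_lock);
 *	icq = ioc_lookup_icq(ioc, q);
 *	spin_unlock_irq(q->queue_lock);
 *
 *	if (!icq)
 *		icq = ioc_create_icq(q, gfp_mask);
 */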

void ioc_set_changed(struct io_context *ioc, int which)
{
	struct io_cq *icq;
	struct hlist_node *n;

	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
		set_bit(which, &icq->changed);
}

/**
 * ioc_ioprio_changed - notify ioprio change
 * @ioc: io_context of interest
 * @ioprio: new ioprio
 *
 * @ioc's ioprio has changed to @ioprio.  Set %ICQ_IOPRIO_CHANGED for all
 * icq's.  iosched is responsible for checking the bit and applying it on
 * request issue path.
 */
void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	ioc->ioprio = ioprio;
	ioc_set_changed(ioc, ICQ_IOPRIO_CHANGED);
	spin_unlock_irqrestore(&ioc->lock, flags);
}

/**
 * ioc_cgroup_changed - notify cgroup change
 * @ioc: io_context of interest
 *
 * @ioc's cgroup has changed.  Set %ICQ_CGROUP_CHANGED for all icq's.
 * iosched is responsible for checking the bit and applying it on request
 * issue path.
 */
void ioc_cgroup_changed(struct io_context *ioc)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	ioc_set_changed(ioc, ICQ_CGROUP_CHANGED);
	spin_unlock_irqrestore(&ioc->lock, flags);
}
EXPORT_SYMBOL(ioc_cgroup_changed);
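
/*
 * A sketch of how an I/O scheduler is expected to consume these bits on
 * the request issue path (illustrative; the exact handling lives in the
 * elevator, e.g. CFQ):
 *
 *	if (test_and_clear_bit(ICQ_IOPRIO_CHANGED, &icq->changed))
 *		reapply icq->ioc->ioprio to the scheduler's per-icq state;
 *	if (test_and_clear_bit(ICQ_CGROUP_CHANGED, &icq->changed))
 *		drop and re-lookup the cached cgroup association;
 */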

static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);
419