xref: /openbmc/linux/block/blk-ioc.c (revision 4f6cce39)
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);

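/*
 * RCU callback to free an icq.  Uses the slab cache recorded in
 * ->__rcu_icq_cache by ioc_destroy_icq(), as the queue (and thus the
 * elevator's icq_cache) may already be gone by the time this runs.
 */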
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->uses_mq && et->ops.mq.exit_icq)
		et->ops.mq.exit_icq(icq);
	else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)
		et->ops.sq.elevator_exit_icq_fn(icq);

	icq->flags |= ICQ_EXITED;
}

/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting icq may call into put_io_context() through elevator
	 * which will trigger lockdep warning.  The ioc's are guaranteed to
	 * be different, use a different locking subclass here.  Use
	 * irqsave variant as there's no spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

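		/*
		 * Normal lock order is queue_lock -> ioc->lock, but we
		 * already hold ioc->lock here, so only trylock the queue
		 * lock.  On failure, drop ioc->lock and retry so the
		 * other side can make progress.
		 */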
		if (spin_trylock(q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference to io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
					&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active().  If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
void put_io_context_active(struct io_context *ioc)
{
	struct elevator_type *et;
	unsigned long flags;
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need ioc lock to walk icq_list and q lock to exit icq.  Perform
	 * reverse double locking.  Read comment in ioc_release_fn() for
	 * explanation on the nested locking annotation.
	 */
retry:
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		et = icq->q->elevator->type;
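		/*
		 * blk-mq exits icqs with only the ioc lock held; legacy
		 * elevators also need the queue lock (see ioc_exit_icq()).
		 */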
		if (et->uses_mq) {
			ioc_exit_icq(icq);
		} else {
			if (spin_trylock(icq->q->queue_lock)) {
				ioc_exit_icq(icq);
				spin_unlock(icq->q->queue_lock);
			} else {
				spin_unlock_irqrestore(&ioc->lock, flags);
				cpu_relax();
				goto retry;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}

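/*
 * Destroy all icqs on @icq_list.  Each icq is unlinked and freed with its
 * owning ioc's lock held; any queue locking is up to the caller.
 */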
static void __ioc_clear_queue(struct list_head *icq_list)
{
	unsigned long flags;

	while (!list_empty(icq_list)) {
		struct io_cq *icq = list_entry(icq_list->next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock_irqsave(&ioc->lock, flags);
		ioc_destroy_icq(icq);
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);

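	/*
	 * For blk-mq, icqs can be exited with only the ioc lock, so drop
	 * the queue lock first.  Legacy elevators need the queue lock held
	 * across ioc_destroy_icq() as well.
	 */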
	if (q->mq_ops) {
		spin_unlock_irq(q->queue_lock);
		__ioc_clear_queue(&icq_list);
	} else {
		__ioc_clear_queue(&icq_list);
		spin_unlock_irq(q->queue_lock);
	}
}

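/**
 * create_task_io_context - allocate and install an io_context for a task
 * @task: task to set up an io_context for
 * @gfp_flags: allocation flags
 * @node: allocation node
 *
 * Returns 0 if @task ends up with an io_context (either the new one or
 * one installed by a racing creator), -ENOMEM if allocation fails, and
 * -EBUSY if @task is exiting and no io_context may be installed.
 */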
int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
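	/* icq_tree is modified under spinlocks; radix tree nodes must be allocated atomically */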
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files().  The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

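	/*
	 * Fast path: grab the existing io_context.  Otherwise try to
	 * create one and redo the lookup; give up only if creation fails
	 * (e.g. -ENOMEM, or @task is exiting).
	 */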
	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, it
 * will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->uses_mq && et->ops.mq.init_icq)
			et->ops.mq.init_icq(icq);
		else if (!et->uses_mq && et->ops.sq.elevator_init_icq_fn)
			et->ops.sq.elevator_init_icq_fn(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

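/* Set up the slab cache used for io_context allocations at boot. */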
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);