xref: /openbmc/linux/block/blk-ioc.c (revision d8c66c5d)
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);

static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit and free an icq.  Called with both ioc and q locked.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(q->queue_lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	if (et->ops.elevator_exit_icq_fn)
		et->ops.elevator_exit_icq_fn(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	struct request_queue *last_q = NULL;
	unsigned long flags;

	/*
	 * Exiting an icq may call into put_io_context() through the
	 * elevator, which can trigger a lockdep warning.  The ioc's are
	 * guaranteed to be different, so use a different locking subclass
	 * here.  Use the irqsave variant as there's no
	 * spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *this_q = icq->q;

		if (this_q != last_q) {
			/*
			 * Need to switch to @this_q.  Once we release
			 * @ioc->lock, it can go away along with @icq.
			 * Hold on to it.
			 */
			__blk_get_queue(this_q);

			/*
			 * blk_put_queue() might sleep thanks to kobject
			 * idiocy.  Always release both locks, put and
			 * restart.
			 */
			if (last_q) {
				spin_unlock(last_q->queue_lock);
				spin_unlock_irqrestore(&ioc->lock, flags);
				blk_put_queue(last_q);
			} else {
				spin_unlock_irqrestore(&ioc->lock, flags);
			}

			last_q = this_q;
			spin_lock_irqsave(this_q->queue_lock, flags);
			spin_lock_nested(&ioc->lock, 1);
			continue;
		}
		ioc_exit_icq(icq);
	}

	if (last_q) {
		spin_unlock(last_q->queue_lock);
		spin_unlock_irqrestore(&ioc->lock, flags);
		blk_put_queue(last_q);
	} else {
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	kmem_cache_free(iocontext_cachep, ioc);
}
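
/*
 * Lock-order note for the release path above (inferred from this file,
 * not a separately documented rule): ioc_create_icq() and
 * ioc_clear_queue() nest ioc->lock inside q->queue_lock, so the loop in
 * ioc_release_fn() re-establishes that order each time it hops to a new
 * queue:
 *
 *	spin_lock_irqsave(this_q->queue_lock, flags);
 *	spin_lock_nested(&ioc->lock, 1);
 *
 * rather than grabbing queue_lock while still holding ioc->lock, which
 * would invert the order used everywhere else.
 */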

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			schedule_work(&ioc->release_work);
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
}
EXPORT_SYMBOL(put_io_context);
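
/*
 * Usage sketch (illustrative, not taken from a specific caller): every
 * get_io_context() must be balanced by a put_io_context() once the
 * holder is done with the ioc, e.g.
 *
 *	get_io_context(ioc);
 *	hold->ioc = ioc;	('hold' is a hypothetical holder struct)
 *	...
 *	put_io_context(hold->ioc);
 *
 * When icq's are still linked, the final put only schedules release_work
 * and the actual teardown happens asynchronously in ioc_release_fn().
 */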

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context(ioc);
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
 */
void ioc_clear_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	while (!list_empty(&q->icq_list)) {
		struct io_cq *icq = list_entry(q->icq_list.next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock(&ioc->lock);
		ioc_exit_icq(icq);
		spin_unlock(&ioc->lock);
	}
}
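
/*
 * Caller sketch (illustrative; e.g. an elevator switch or queue teardown
 * path), following the "must be called with @q locked" rule above:
 *
 *	spin_lock_irq(q->queue_lock);
 *	ioc_clear_queue(q);
 *	spin_unlock_irq(q->queue_lock);
 */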

void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
				int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  The ioc shouldn't be installed if someone else
	 * already did, or if @task, which isn't %current, is exiting.
	 * Note that we need to allow ioc creation on exiting %current as
	 * the exit path may issue IOs from e.g. exit_files().  The exit
	 * path is responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);
	task_unlock(task);
}
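
/*
 * The matching fast path is create_io_context() in blk.h, which only
 * drops into the slowpath above when @task has no io_context yet.
 * Roughly (a sketch, not copied verbatim from blk.h):
 *
 *	static inline struct io_context *
 *	create_io_context(struct task_struct *task, gfp_t gfp_mask, int node)
 *	{
 *		if (unlikely(!task->io_context))
 *			create_io_context_slowpath(task, gfp_mask, node);
 *		return task->io_context;
 *	}
 */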

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (create_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);
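
/*
 * Usage sketch (simplified from a set_task_ioprio()-style caller): the
 * returned reference is the caller's to drop with put_io_context().
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
 *	if (ioc) {
 *		ioc_ioprio_changed(ioc, ioprio);
 *		put_io_context(ioc);
 *	}
 */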

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using a radix tree and a hint
	 * pointer, both of which are protected with RCU.  All removals
	 * are done holding both the q and ioc locks, and we're holding
	 * the q lock - if we find an icq which points to us, it's
	 * guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure an io_cq linking %current->io_context and @q exists.  If
 * either the io_context or the icq doesn't exist, it is created using
 * @gfp_mask.
 *
 * The caller is responsible for ensuring that the io_context won't go
 * away and that @q is alive and will stay alive until this function
 * returns.
 */
struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc;
	struct io_cq *icq;

	/* allocate stuff */
	ioc = create_io_context(current, gfp_mask, q->node);
	if (!ioc)
		return NULL;

	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.elevator_init_icq_fn)
			et->ops.elevator_init_icq_fn(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}
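
/*
 * Caller flow sketch (simplified): look up first under queue_lock and
 * only fall back to this creation path, which takes queue_lock itself
 * and may sleep, with the lock dropped:
 *
 *	icq = ioc_lookup_icq(ioc, q);		(queue_lock held)
 *	if (!icq) {
 *		spin_unlock_irq(q->queue_lock);
 *		icq = ioc_create_icq(q, gfp_mask);
 *		spin_lock_irq(q->queue_lock);
 *	}
 */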

void ioc_set_changed(struct io_context *ioc, int which)
{
	struct io_cq *icq;
	struct hlist_node *n;

	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
		set_bit(which, &icq->changed);
}

/**
 * ioc_ioprio_changed - notify ioprio change
 * @ioc: io_context of interest
 * @ioprio: new ioprio
 *
 * @ioc's ioprio has changed to @ioprio.  Set %ICQ_IOPRIO_CHANGED for all
 * icq's.  iosched is responsible for checking the bit and applying it on
 * the request issue path.
 */
void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	ioc->ioprio = ioprio;
	ioc_set_changed(ioc, ICQ_IOPRIO_CHANGED);
	spin_unlock_irqrestore(&ioc->lock, flags);
}

/**
 * ioc_cgroup_changed - notify cgroup change
 * @ioc: io_context of interest
 *
 * @ioc's cgroup has changed.  Set %ICQ_CGROUP_CHANGED for all icq's.
 * iosched is responsible for checking the bit and applying it on the
 * request issue path.
 */
void ioc_cgroup_changed(struct io_context *ioc)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	ioc_set_changed(ioc, ICQ_CGROUP_CHANGED);
	spin_unlock_irqrestore(&ioc->lock, flags);
}
EXPORT_SYMBOL(ioc_cgroup_changed);
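
/*
 * Consumer sketch: on the request issue path the io scheduler is expected
 * to test-and-clear the bits set above and react, e.g.
 *
 *	if (test_and_clear_bit(ICQ_IOPRIO_CHANGED, &icq->changed))
 *		... reload the ioprio for this icq ...
 *	if (test_and_clear_bit(ICQ_CGROUP_CHANGED, &icq->changed))
 *		... drop the cached cgroup association ...
 */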

static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);