/* SPDX-License-Identifier: GPL-2.0 */
#ifndef IOCONTEXT_H
#define IOCONTEXT_H

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

enum {
	ICQ_EXITED		= 1 << 2,
};

/*
 * An io_cq (icq) is an association between an io_context (ioc) and a
 * request_queue (q).  It is used by elevators which need to track
 * information per ioc-q pair.
 *
 * An elevator can request use of icqs by setting elevator_type->icq_size
 * and ->icq_align.  Both size and align must be no smaller than those of
 * struct io_cq, and the elevator can use the tail area for private
 * information.  The recommended way to do this is defining a struct
 * which contains io_cq as the first member followed by private members
 * and using its size and align.  For example,
 *
 *	struct snail_io_cq {
 *		struct io_cq	icq;
 *		int		poke_snail;
 *		int		feed_snail;
 *	};
 *
 *	struct elevator_type snail_elv_type = {
 *		.ops =		{ ... },
 *		.icq_size =	sizeof(struct snail_io_cq),
 *		.icq_align =	__alignof__(struct snail_io_cq),
 *		...
 *	};
 *
 * If icq_size is set, block core will manage icqs.  All requests will
 * have their ->elv.icq field set before elevator_ops->elevator_set_req_fn()
 * is called, and will hold a reference to the associated io_context.
 *
 * Whenever a new icq is created, elevator_ops->elevator_init_icq_fn() is
 * called and, on destruction, ->elevator_exit_icq_fn().  Both functions
 * are called with both the associated io_context and queue locks held.
 *
 * An elevator may look up an icq using ioc_lookup_icq() while holding the
 * queue lock, but the returned icq is valid only until the queue lock is
 * released.  Elevators cannot and should not try to create or destroy
 * icqs.
 *
 * As icqs are linked from both ioc and q, the locking rules are a bit
 * complex.
 *
 * - ioc lock nests inside q lock.
 *
 * - ioc->icq_list and icq->ioc_node are protected by ioc lock.
 *   q->icq_list and icq->q_node by q lock.
 *
 * - ioc->icq_tree and ioc->icq_hint are protected by ioc lock, while icq
 *   itself is protected by q lock.  However, both the indexes and icq
 *   itself are also RCU managed and lookup can be performed holding only
 *   the q lock.
 *
 * - icqs are not reference counted.  They are destroyed when either the
 *   ioc or q goes away.  Each request with icq set holds an extra
 *   reference to ioc to ensure it stays until the request is completed.
 *
 * - Linking and unlinking icqs are performed while holding both ioc and q
 *   locks.  Due to the lock ordering, q exit is simple but ioc exit
 *   requires a reverse-order double lock dance.
 */
struct io_cq {
	struct request_queue	*q;
	struct io_context	*ioc;

	/*
	 * q_node and ioc_node link io_cq through the icq_lists of q and
	 * ioc respectively.  Both fields are unused once ioc_exit_icq()
	 * is called, and they share storage with __rcu_icq_cache and
	 * __rcu_head, which are used for RCU freeing of io_cq.
	 */
	union {
		struct list_head	q_node;
		struct kmem_cache	*__rcu_icq_cache;
	};
	union {
		struct hlist_node	ioc_node;
		struct rcu_head		__rcu_head;
	};

	unsigned int		flags;
};
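
/*
 * For example (illustration only, not part of this header's API), the
 * snail elevator above could recover its private area from an icq with
 * container_of(); sic_from_icq is a hypothetical helper name:
 *
 *	static struct snail_io_cq *sic_from_icq(struct io_cq *icq)
 *	{
 *		return container_of(icq, struct snail_io_cq, icq);
 *	}
 */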

/*
 * I/O subsystem state of the associated processes.  It is refcounted
 * and kmalloc'ed.  It may be shared between processes.
 */
struct io_context {
	atomic_long_t refcount;
	atomic_t active_ref;
	atomic_t nr_tasks;

	/* all the fields below are protected by this lock */
	spinlock_t lock;

	unsigned short ioprio;

	/*
	 * For request batching
	 */
	int nr_batch_requests;     /* Number of requests left in the batch */
	unsigned long last_waited; /* Time last woken after wait for request */

	struct radix_tree_root	icq_tree;
	struct io_cq __rcu	*icq_hint;
	struct hlist_head	icq_list;

	struct work_struct release_work;
};
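
/*
 * A simplified sketch of how icq_hint and icq_tree cooperate, loosely
 * modeled on ioc_lookup_icq() in block/blk-ioc.c (hint updating is
 * omitted, and the real lookup additionally requires the queue lock,
 * per the rules above): the hint is tried first under RCU, and the
 * radix tree is consulted only on a miss.
 *
 *	struct io_cq *icq;
 *
 *	rcu_read_lock();
 *	icq = rcu_dereference(ioc->icq_hint);
 *	if (!icq || icq->q != q)
 *		icq = radix_tree_lookup(&ioc->icq_tree, q->id);
 *	rcu_read_unlock();
 */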

/**
 * get_io_context_active - get active reference on ioc
 * @ioc: ioc of interest
 *
 * Only iocs with an active reference can issue new IOs.  This function
 * acquires an active reference on @ioc.  The caller must already have an
 * active reference on @ioc.
 */
static inline void get_io_context_active(struct io_context *ioc)
{
	WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
	WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
	atomic_long_inc(&ioc->refcount);
	atomic_inc(&ioc->active_ref);
}
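
/*
 * get_io_context_active() takes both a plain reference and an active
 * reference; put_io_context_active(), declared below under CONFIG_BLOCK,
 * is its counterpart and releases both.  A minimal usage sketch,
 * assuming the caller already holds an active reference:
 *
 *	get_io_context_active(ioc);
 *	... issue IO on behalf of ioc ...
 *	put_io_context_active(ioc);
 */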

static inline void ioc_task_link(struct io_context *ioc)
{
	get_io_context_active(ioc);

	WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
	atomic_inc(&ioc->nr_tasks);
}
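
/*
 * ioc_task_link() attaches an additional task to an existing ioc, as
 * happens when a task is cloned with CLONE_IO.  A minimal sketch,
 * loosely modeled on copy_io() in kernel/fork.c (error handling
 * omitted):
 *
 *	if (clone_flags & CLONE_IO) {
 *		ioc_task_link(ioc);
 *		tsk->io_context = ioc;
 *	}
 */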

struct task_struct;
#ifdef CONFIG_BLOCK
void put_io_context(struct io_context *ioc);
void put_io_context_active(struct io_context *ioc);
void exit_io_context(struct task_struct *task);
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node);
#else
struct io_context;
static inline void put_io_context(struct io_context *ioc) { }
static inline void exit_io_context(struct task_struct *task) { }
#endif
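
/*
 * A hedged example of the API above: code in the style of
 * set_task_ioprio() in block/ioprio.c obtains a task's ioc, creating it
 * if necessary, updates it, and drops the reference (locking omitted):
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
 *	if (ioc) {
 *		ioc->ioprio = ioprio;
 *		put_io_context(ioc);
 *	}
 */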

#endif