/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_QUEUE_H
#define RXE_QUEUE_H

/* for definition of shared struct rxe_queue_buf */
#include <uapi/rdma/rdma_user_rxe.h>

/* Implements a simple circular buffer that is shared between the user
 * and the driver and can be resized. The requested element size is
 * rounded up to a power of 2, and the number of elements in the buffer
 * is also rounded up to a power of 2. Since the queue is empty when
 * the producer and consumer indices match, the maximum capacity of the
 * queue is one less than the number of element slots.
 *
 * Notes:
 *   - The driver indices are always masked with q->index_mask
 *     before being stored, so they do not need to be checked on reads.
 *   - The user, whether user space or a kernel client, is generally
 *     not trusted, so the indices it supplies are masked to make sure
 *     they do not access the queue out of bounds on reads.
 *   - The driver indices for queues must not be written by the
 *     user, so a local copy is used and the shared copy is only
 *     updated when the local copy changes.
 *   - By passing the queue type in the parameter list, separately
 *     from q, the compiler can eliminate the switch statement when
 *     the actual queue type is known at compile time.
 *   - These queues are lock free. The user and the driver must protect
 *     changes to their end of a queue with locks if more than one
 *     CPU can access it at the same time.
 */
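
/* Illustrative sketch of the index arithmetic (example values only, not
 * part of the API): if the requested element count is rounded up to 8
 * slots, index_mask is 7 and at most 7 elements can be held at once:
 *
 *	((prod - cons) & 7) == 0	queue empty (see queue_empty())
 *	((prod + 1 - cons) & 7) == 0	queue full (see queue_full())
 *	(prod - cons) & 7		current element count (queue_count())
 */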

/**
 * enum queue_type - type of queue
 * @QUEUE_TYPE_TO_CLIENT:	Queue is written by rxe driver and
 *				read by client. Used by rxe driver only.
 * @QUEUE_TYPE_FROM_CLIENT:	Queue is written by client and
 *				read by rxe driver. Used by rxe driver only.
 * @QUEUE_TYPE_TO_DRIVER:	Queue is written by client and
 *				read by rxe driver. Used by kernel client only.
 * @QUEUE_TYPE_FROM_DRIVER:	Queue is written by rxe driver and
 *				read by client. Used by kernel client only.
 */
enum queue_type {
	QUEUE_TYPE_TO_CLIENT,
	QUEUE_TYPE_FROM_CLIENT,
	QUEUE_TYPE_TO_DRIVER,
	QUEUE_TYPE_FROM_DRIVER,
};
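
/* Because the type is normally passed as a compile-time constant, the
 * switch statements in the helpers below collapse to a single arm.
 * For example (an illustrative sketch; "cq" stands for a hypothetical
 * completion queue object holding a struct rxe_queue pointer):
 *
 *	queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);
 */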

struct rxe_queue {
	struct rxe_dev		*rxe;
	struct rxe_queue_buf	*buf;
	struct rxe_mmap_info	*ip;
	size_t			buf_size;
	size_t			elem_size;
	unsigned int		log2_elem_size;
	u32			index_mask;
	enum queue_type		type;
	/* private copy of index for shared queues between
	 * kernel space and user space. The kernel reads and writes
	 * this copy and then replicates it to rxe_queue_buf
	 * for read access by user space.
	 */
	u32			index;
};

int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
		 struct ib_udata *udata, struct rxe_queue_buf *buf,
		 size_t buf_size, struct rxe_mmap_info **ip_p);

void rxe_queue_reset(struct rxe_queue *q);

struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
			unsigned int elem_size, enum queue_type type);

int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
		     unsigned int elem_size, struct ib_udata *udata,
		     struct mminfo __user *outbuf,
		     spinlock_t *producer_lock, spinlock_t *consumer_lock);

void rxe_queue_cleanup(struct rxe_queue *queue);
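
/* Illustrative usage sketch (assumptions: "attr" is a hypothetical
 * ib_cq_init_attr, error unwinding and the surrounding verbs object are
 * omitted; the element count is passed by pointer because the
 * implementation may round it up and write the final value back):
 *
 *	int num_elem = attr->cqe;
 *	struct rxe_queue *q;
 *
 *	q = rxe_queue_init(rxe, &num_elem, sizeof(struct rxe_cqe),
 *			   QUEUE_TYPE_TO_CLIENT);
 *	if (!q)
 *		return -ENOMEM;
 *	...
 *	rxe_queue_cleanup(q);
 */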

static inline u32 queue_next_index(struct rxe_queue *q, int index)
{
	return (index + 1) & q->index_mask;
}

static inline u32 queue_get_producer(const struct rxe_queue *q,
				     enum queue_type type)
{
	u32 prod;

	switch (type) {
	case QUEUE_TYPE_FROM_CLIENT:
		/* protect user index */
		prod = smp_load_acquire(&q->buf->producer_index);
		break;
	case QUEUE_TYPE_TO_CLIENT:
		prod = q->index;
		break;
	case QUEUE_TYPE_FROM_DRIVER:
		/* protect driver index */
		prod = smp_load_acquire(&q->buf->producer_index);
		break;
	case QUEUE_TYPE_TO_DRIVER:
		prod = q->buf->producer_index;
		break;
	}

	return prod;
}

static inline u32 queue_get_consumer(const struct rxe_queue *q,
				     enum queue_type type)
{
	u32 cons;

	switch (type) {
	case QUEUE_TYPE_FROM_CLIENT:
		cons = q->index;
		break;
	case QUEUE_TYPE_TO_CLIENT:
		/* protect user index */
		cons = smp_load_acquire(&q->buf->consumer_index);
		break;
	case QUEUE_TYPE_FROM_DRIVER:
		cons = q->buf->consumer_index;
		break;
	case QUEUE_TYPE_TO_DRIVER:
		/* protect driver index */
		cons = smp_load_acquire(&q->buf->consumer_index);
		break;
	}

	return cons;
}

static inline int queue_empty(struct rxe_queue *q, enum queue_type type)
{
	u32 prod = queue_get_producer(q, type);
	u32 cons = queue_get_consumer(q, type);

	return ((prod - cons) & q->index_mask) == 0;
}

static inline int queue_full(struct rxe_queue *q, enum queue_type type)
{
	u32 prod = queue_get_producer(q, type);
	u32 cons = queue_get_consumer(q, type);

	return ((prod + 1 - cons) & q->index_mask) == 0;
}

static inline u32 queue_count(const struct rxe_queue *q,
					enum queue_type type)
{
	u32 prod = queue_get_producer(q, type);
	u32 cons = queue_get_consumer(q, type);

	return (prod - cons) & q->index_mask;
}

static inline void queue_advance_producer(struct rxe_queue *q,
					  enum queue_type type)
{
	u32 prod;

	switch (type) {
	case QUEUE_TYPE_FROM_CLIENT:
		pr_warn("%s: attempt to advance client index\n",
			__func__);
		break;
	case QUEUE_TYPE_TO_CLIENT:
		prod = q->index;
		prod = (prod + 1) & q->index_mask;
		q->index = prod;
		/* protect user index */
		smp_store_release(&q->buf->producer_index, prod);
		break;
	case QUEUE_TYPE_FROM_DRIVER:
		pr_warn("%s: attempt to advance driver index\n",
			__func__);
		break;
	case QUEUE_TYPE_TO_DRIVER:
		prod = q->buf->producer_index;
		prod = (prod + 1) & q->index_mask;
		q->buf->producer_index = prod;
		break;
	}
}
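
/* Illustrative producer-side pattern for a driver-to-client queue
 * (a sketch only; "wqe" and how it is filled in are hypothetical):
 *
 *	if (queue_full(q, QUEUE_TYPE_TO_CLIENT))
 *		return -ENOMEM;
 *	wqe = queue_producer_addr(q, QUEUE_TYPE_TO_CLIENT);
 *	... fill in *wqe ...
 *	queue_advance_producer(q, QUEUE_TYPE_TO_CLIENT);
 */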

static inline void queue_advance_consumer(struct rxe_queue *q,
					  enum queue_type type)
{
	u32 cons;

	switch (type) {
	case QUEUE_TYPE_FROM_CLIENT:
		cons = q->index;
		cons = (cons + 1) & q->index_mask;
		q->index = cons;
		/* protect user index */
		smp_store_release(&q->buf->consumer_index, cons);
		break;
	case QUEUE_TYPE_TO_CLIENT:
		pr_warn("%s: attempt to advance client index\n",
			__func__);
		break;
	case QUEUE_TYPE_FROM_DRIVER:
		cons = q->buf->consumer_index;
		cons = (cons + 1) & q->index_mask;
		q->buf->consumer_index = cons;
		break;
	case QUEUE_TYPE_TO_DRIVER:
		pr_warn("%s: attempt to advance driver index\n",
			__func__);
		break;
	}
}

static inline void *queue_producer_addr(struct rxe_queue *q,
					enum queue_type type)
{
	u32 prod = queue_get_producer(q, type);

	return q->buf->data + (prod << q->log2_elem_size);
}

static inline void *queue_consumer_addr(struct rxe_queue *q,
					enum queue_type type)
{
	u32 cons = queue_get_consumer(q, type);

	return q->buf->data + (cons << q->log2_elem_size);
}

static inline void *queue_addr_from_index(struct rxe_queue *q, u32 index)
{
	return q->buf->data + ((index & q->index_mask)
				<< q->log2_elem_size);
}

static inline u32 queue_index_from_addr(const struct rxe_queue *q,
				const void *addr)
{
	return (((u8 *)addr - q->buf->data) >> q->log2_elem_size)
				& q->index_mask;
}
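
/* For any in-range index the two helpers above are inverses, e.g.
 * (illustrative only):
 *
 *	addr = queue_addr_from_index(q, index);
 *	queue_index_from_addr(q, addr) == (index & q->index_mask)
 */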

static inline void *queue_head(struct rxe_queue *q, enum queue_type type)
{
	return queue_empty(q, type) ? NULL : queue_consumer_addr(q, type);
}
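
/* Illustrative consumer-side pattern for a client-to-driver queue
 * (a sketch only; handle_wqe() is a hypothetical helper):
 *
 *	while ((wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT))) {
 *		handle_wqe(wqe);
 *		queue_advance_consumer(q, QUEUE_TYPE_FROM_CLIENT);
 *	}
 */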

#endif /* RXE_QUEUE_H */