/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

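/* Share the mmap information for a queue buffer with userspace.  When
 * is_req is set the response buffer starts with a struct mminfo, so the
 * info is copied just past it; otherwise it is copied at offset zero.
 * On success the new entry is added to rxe->pending_mmaps, where a later
 * mmap() call on the rxe device will look it up.  Returns 0 on success
 * or -EINVAL on failure.
 */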
int do_mmap_info(struct rxe_dev *rxe,
		 struct ib_udata *udata,
		 bool is_req,
		 struct ib_ucontext *context,
		 struct rxe_queue_buf *buf,
		 size_t buf_size,
		 struct rxe_mmap_info **ip_p)
{
	int err;
	u32 len, offset;
	struct rxe_mmap_info *ip = NULL;

	if (udata) {
		if (is_req) {
			len = udata->outlen - sizeof(struct mminfo);
			offset = sizeof(struct mminfo);
		} else {
			len = udata->outlen;
			offset = 0;
		}

		if (len < sizeof(ip->info))
			goto err1;

		ip = rxe_create_mmap_info(rxe, buf_size, context, buf);
		if (!ip)
			goto err1;

		err = copy_to_user(udata->outbuf + offset, &ip->info,
				   sizeof(ip->info));
		if (err)
			goto err2;

		spin_lock_bh(&rxe->pending_lock);
		list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
		spin_unlock_bh(&rxe->pending_lock);
	}

	*ip_p = ip;

	return 0;

err2:
	kfree(ip);
err1:
	return -EINVAL;
}

inline void rxe_queue_reset(struct rxe_queue *q)
{
	/* the queue buffer consists of a management header followed by
	 * the memory holding the actual queue elements; see struct
	 * rxe_queue_buf in rxe_queue.h. Reset only the element area,
	 * not the header.
	 */
	memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
}

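/* rxe_queue_init - allocate and initialize a queue
 *
 * The requested element count is rounded up: one extra slot is added so
 * that a full queue can be told apart from an empty one (the queue is
 * empty when the producer and consumer indices are equal), and the slot
 * count is then rounded up to a power of two. *num_elem is updated with
 * the capacity actually provided, e.g. a request for 100 elements yields
 * 128 slots and *num_elem == 127.
 *
 * Returns the new queue or NULL on failure.
 */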
struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
				 int *num_elem,
				 unsigned int elem_size)
{
	struct rxe_queue *q;
	size_t buf_size;
	unsigned int num_slots;

	/* num_elem == 0 is allowed, but uninteresting */
	if (*num_elem < 0)
		goto err1;

	q = kmalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		goto err1;

	q->rxe = rxe;

	/* used in resize, only need to copy used part of queue */
	q->elem_size = elem_size;

	/* pad element up to at least a cacheline and always a power of 2 */
	if (elem_size < cache_line_size())
		elem_size = cache_line_size();
	elem_size = roundup_pow_of_two(elem_size);

	q->log2_elem_size = order_base_2(elem_size);

	num_slots = *num_elem + 1;
	num_slots = roundup_pow_of_two(num_slots);
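	/* a power-of-two slot count lets the producer and consumer
	 * indices wrap with a mask instead of a modulus, e.g.
	 *	next = (prod + 1) & q->index_mask;
	 */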
	q->index_mask = num_slots - 1;

	buf_size = sizeof(struct rxe_queue_buf) + num_slots * elem_size;

	q->buf = vmalloc_user(buf_size);
	if (!q->buf)
		goto err2;

	q->buf->log2_elem_size = q->log2_elem_size;
	q->buf->index_mask = q->index_mask;

	q->buf_size = buf_size;

	*num_elem = num_slots - 1;
	return q;

err2:
	kfree(q);
err1:
	return NULL;
}

/* copy the elements from the original q to the new q, then swap the
 * contents of the two queue headers so that any pointers still held
 * to q remain valid
 */
static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
			 unsigned int num_elem)
{
	if (!queue_empty(q) && (num_elem < queue_count(q)))
		return -EINVAL;

	while (!queue_empty(q)) {
		memcpy(producer_addr(new_q), consumer_addr(q),
		       new_q->elem_size);
		advance_producer(new_q);
		advance_consumer(q);
	}

	swap(*q, *new_q);

	return 0;
}

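/* rxe_queue_resize - grow or shrink a queue that may be in active use
 *
 * Allocates a replacement queue, copies the queued elements into it under
 * the producer and consumer locks, and swaps the two queue headers so that
 * pointers held to q remain valid.  Shrinking below the number of elements
 * currently queued fails with -EINVAL.  On success *num_elem_p is updated
 * with the capacity actually provided.
 */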
int rxe_queue_resize(struct rxe_queue *q,
		     unsigned int *num_elem_p,
		     unsigned int elem_size,
		     struct ib_ucontext *context,
		     struct ib_udata *udata,
		     spinlock_t *producer_lock,
		     spinlock_t *consumer_lock)
{
	struct rxe_queue *new_q;
	unsigned int num_elem = *num_elem_p;
	int err;
	unsigned long flags = 0, flags1;

	new_q = rxe_queue_init(q->rxe, &num_elem, elem_size);
	if (!new_q)
		return -ENOMEM;

	err = do_mmap_info(new_q->rxe, udata, false, context, new_q->buf,
			   new_q->buf_size, &new_q->ip);
	if (err) {
		vfree(new_q->buf);
		kfree(new_q);
		goto err1;
	}

	spin_lock_irqsave(consumer_lock, flags1);

	if (producer_lock) {
		spin_lock_irqsave(producer_lock, flags);
		err = resize_finish(q, new_q, num_elem);
		spin_unlock_irqrestore(producer_lock, flags);
	} else {
		err = resize_finish(q, new_q, num_elem);
	}

	spin_unlock_irqrestore(consumer_lock, flags1);

	/* on success resize_finish() swapped the queue headers, so new_q
	 * now holds the old buffer; on error it still holds the unused
	 * new one. Either way it is the one to free.
	 */
	rxe_queue_cleanup(new_q);
	if (err)
		goto err1;

	*num_elem_p = num_elem;
	return 0;

err1:
	return err;
}

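/* Free a queue.  If the buffer was exported to userspace through mmap
 * info, drop the reference and let rxe_mmap_release() free it once the
 * last mapping is gone; otherwise free the buffer directly.
 */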
void rxe_queue_cleanup(struct rxe_queue *q)
{
	if (q->ip)
		kref_put(&q->ip->ref, rxe_mmap_release);
	else
		vfree(q->buf);

	kfree(q);
}
227