/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

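/*
 * do_mmap_info - publish a queue buffer so userspace can mmap() it
 *
 * For user verbs (outbuf != NULL), create an rxe_mmap_info entry for
 * the buffer, copy the resulting mminfo out to userspace and add the
 * entry to rxe->pending_mmaps, where the driver's mmap path looks it
 * up. For kernel verbs (outbuf == NULL), *ip_p is simply set to NULL.
 * Any failure is reported as -EINVAL.
 */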
int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
		 struct ib_udata *udata, struct rxe_queue_buf *buf,
		 size_t buf_size, struct rxe_mmap_info **ip_p)
{
	int err;
	struct rxe_mmap_info *ip = NULL;

	if (outbuf) {
		ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
		if (!ip)
			goto err1;

		err = copy_to_user(outbuf, &ip->info, sizeof(ip->info));
		if (err)
			goto err2;

		spin_lock_bh(&rxe->pending_lock);
		list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
		spin_unlock_bh(&rxe->pending_lock);
	}

	*ip_p = ip;

	return 0;

err2:
	kfree(ip);
err1:
	return -EINVAL;
}

inline void rxe_queue_reset(struct rxe_queue *q)
{
	/* the queue consists of a management header followed by the
	 * memory holding the actual elements; see "struct rxe_queue_buf"
	 * in rxe_queue.h. Reset only the element memory, not the header.
	 */
	memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
}

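/*
 * rxe_queue_init - allocate and initialize a queue
 *
 * The queue header is kmalloc'ed, while the ring itself comes from
 * vmalloc_user() so that it can later be mmap'ed into userspace once
 * published by do_mmap_info(). Both the element size and the slot
 * count are rounded up to powers of two, so on success *num_elem is
 * updated to the actual (possibly larger) usable capacity. Returns
 * NULL on any failure.
 */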
struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
				 int *num_elem,
				 unsigned int elem_size)
{
	struct rxe_queue *q;
	size_t buf_size;
	unsigned int num_slots;

	/* num_elem == 0 is allowed, but uninteresting */
	if (*num_elem < 0)
		goto err1;

	q = kmalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		goto err1;

	q->rxe = rxe;

	/* used in resize, only need to copy used part of queue */
	q->elem_size = elem_size;

	/* pad element up to at least a cacheline and always a power of 2 */
	if (elem_size < cache_line_size())
		elem_size = cache_line_size();
	elem_size = roundup_pow_of_two(elem_size);

	q->log2_elem_size = order_base_2(elem_size);

	num_slots = *num_elem + 1;
	num_slots = roundup_pow_of_two(num_slots);
	q->index_mask = num_slots - 1;
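	/* e.g. a request for 64 elements needs 65 slots, since one slot
	 * is always left empty so that producer == consumer can only
	 * mean "empty"; 65 rounds up to 128 slots, giving an index_mask
	 * of 0x7f and a usable capacity of 127 reported to the caller.
	 */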

	buf_size = sizeof(struct rxe_queue_buf) + num_slots * elem_size;

	q->buf = vmalloc_user(buf_size);
	if (!q->buf)
		goto err2;

	q->buf->log2_elem_size = q->log2_elem_size;
	q->buf->index_mask = q->index_mask;

	q->buf_size = buf_size;

	*num_elem = num_slots - 1;
	return q;

err2:
	kfree(q);
err1:
	return NULL;
}

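/*
 * Usage sketch (illustrative only; the locals, the wqe_size value and
 * the uresp layout below are hypothetical, loosely following how
 * callers such as rxe_qp.c pair rxe_queue_init() with do_mmap_info()):
 *
 *	int num_elem = attr->max_wr;
 *	struct rxe_queue *q;
 *	int err;
 *
 *	q = rxe_queue_init(rxe, &num_elem, wqe_size);
 *	if (!q)
 *		return -ENOMEM;
 *
 *	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
 *			   q->buf, q->buf_size, &q->ip);
 *	if (err) {
 *		vfree(q->buf);
 *		kfree(q);
 *		return err;
 *	}
 *
 *	attr->max_wr = num_elem;
 *
 * On return num_elem may have grown, since the slot count is rounded
 * up to a power of two with one slot left unused.
 */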
/* Copies the elements from the original queue to the new queue and then
 * swaps the contents of the two queue headers, so that anyone holding a
 * pointer to q keeps a valid queue.
 */
static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
			 unsigned int num_elem)
{
	if (!queue_empty(q) && (num_elem < queue_count(q)))
		return -EINVAL;

	while (!queue_empty(q)) {
		memcpy(producer_addr(new_q), consumer_addr(q),
		       new_q->elem_size);
		advance_producer(new_q);
		advance_consumer(q);
	}

	swap(*q, *new_q);

	return 0;
}

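/*
 * rxe_queue_resize - replace a queue's ring with one of a new capacity
 *
 * Builds a complete replacement queue (including its mmap info for user
 * queues), then migrates the live elements under the consumer lock and,
 * when one is supplied, the producer lock (producer_lock may be NULL if
 * the caller is the only producer). Because resize_finish() swaps the
 * two headers in place, existing pointers to q remain valid and
 * rxe_queue_cleanup(new_q) disposes of whichever ring is no longer
 * needed.
 */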
int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
		     unsigned int elem_size, struct ib_udata *udata,
		     struct mminfo __user *outbuf, spinlock_t *producer_lock,
		     spinlock_t *consumer_lock)
{
	struct rxe_queue *new_q;
	unsigned int num_elem = *num_elem_p;
	int err;
	unsigned long flags = 0, flags1;

	new_q = rxe_queue_init(q->rxe, &num_elem, elem_size);
	if (!new_q)
		return -ENOMEM;

	err = do_mmap_info(new_q->rxe, outbuf, udata, new_q->buf,
			   new_q->buf_size, &new_q->ip);
	if (err) {
		vfree(new_q->buf);
		kfree(new_q);
		goto err1;
	}

	spin_lock_irqsave(consumer_lock, flags1);

	if (producer_lock) {
		spin_lock_irqsave(producer_lock, flags);
		err = resize_finish(q, new_q, num_elem);
		spin_unlock_irqrestore(producer_lock, flags);
	} else {
		err = resize_finish(q, new_q, num_elem);
	}

	spin_unlock_irqrestore(consumer_lock, flags1);

	/* after a successful resize_finish() the headers have been
	 * swapped, so this frees the old ring; on error it frees the
	 * never-used new one
	 */
	rxe_queue_cleanup(new_q);
	if (err)
		goto err1;

	*num_elem_p = num_elem;
	return 0;

err1:
	return err;
}

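/*
 * rxe_queue_cleanup - free a queue and, when safe, its buffer
 *
 * If the buffer was published to userspace via an rxe_mmap_info entry,
 * drop that entry's reference instead of freeing the buffer directly;
 * rxe_mmap_release() takes care of the buffer once the last reference
 * is gone. Otherwise the buffer is vfree'd here. The queue header is
 * freed in either case.
 */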
void rxe_queue_cleanup(struct rxe_queue *q)
{
	if (q->ip)
		kref_put(&q->ip->ref, rxe_mmap_release);
	else
		vfree(q->buf);

	kfree(q);
}