/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include "hns_roce_device.h"
#include <rdma/ib_umem.h>

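/*
 * Allocate one free index from the bitmap. The search starts at the
 * position after the most recent allocation ('last') and wraps to the
 * start of the table if needed; on each wrap 'top' is advanced (masked
 * by 'mask') and ORed into the returned value so the high bits of
 * handles vary between reuse cycles.
 * Returns 0 and stores the index in *obj on success, -EINVAL when the
 * bitmap is exhausted.
 */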
int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
{
	int ret = 0;

	spin_lock(&bitmap->lock);
	*obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
	if (*obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
			       & bitmap->mask;
		*obj = find_first_zero_bit(bitmap->table, bitmap->max);
	}

	if (*obj < bitmap->max) {
		set_bit(*obj, bitmap->table);
		bitmap->last = (*obj + 1);
		if (bitmap->last == bitmap->max)
			bitmap->last = 0;
		*obj |= bitmap->top;
	} else {
		ret = -EINVAL;
	}

	spin_unlock(&bitmap->lock);

	return ret;
}

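/* Release a single index previously returned by hns_roce_bitmap_alloc(). */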
void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
			  int rr)
{
	hns_roce_bitmap_free_range(bitmap, obj, 1, rr);
}

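/*
 * Allocate a contiguous, aligned run of 'cnt' indexes. The cnt == 1,
 * align == 1 case falls back to the single-bit fast path; otherwise the
 * search starts at 'last' and retries from the beginning of the table
 * after advancing 'top', as in hns_roce_bitmap_alloc().
 */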
int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
				int align, unsigned long *obj)
{
	int ret = 0;
	int i;

	if (likely(cnt == 1 && align == 1))
		return hns_roce_bitmap_alloc(bitmap, obj);

	spin_lock(&bitmap->lock);

	*obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
					  bitmap->last, cnt, align - 1);
	if (*obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
			       & bitmap->mask;
		*obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, 0,
						  cnt, align - 1);
	}

	if (*obj < bitmap->max) {
		for (i = 0; i < cnt; i++)
			set_bit(*obj + i, bitmap->table);

		if (*obj == bitmap->last) {
			bitmap->last = (*obj + cnt);
			if (bitmap->last >= bitmap->max)
				bitmap->last = 0;
		}
		*obj |= bitmap->top;
	} else {
		ret = -EINVAL;
	}

	spin_unlock(&bitmap->lock);

	return ret;
}

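/*
 * Clear 'cnt' bits starting at 'obj'. When round-robin reuse (rr) is not
 * requested, 'last' is pulled back so freed indexes are found again soon.
 */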
void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
				unsigned long obj, int cnt,
				int rr)
{
	int i;

	obj &= bitmap->max + bitmap->reserved_top - 1;

	spin_lock(&bitmap->lock);
	for (i = 0; i < cnt; i++)
		clear_bit(obj + i, bitmap->table);

	if (!rr)
		bitmap->last = min(bitmap->last, obj);
	bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
		       & bitmap->mask;
	spin_unlock(&bitmap->lock);
}

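/*
 * Initialize a bitmap of 'num' entries; 'num' must be a power of two.
 * The lowest 'reserved_bot' entries are marked busy up front and the top
 * 'reserved_top' entries are excluded from the allocatable range.
 */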
int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
			 u32 reserved_bot, u32 reserved_top)
{
	u32 i;

	if (num != roundup_pow_of_two(num))
		return -EINVAL;

	bitmap->last = 0;
	bitmap->top = 0;
	bitmap->max = num - reserved_top;
	bitmap->mask = mask;
	bitmap->reserved_top = reserved_top;
	spin_lock_init(&bitmap->lock);
	bitmap->table = kcalloc(BITS_TO_LONGS(bitmap->max), sizeof(long),
				GFP_KERNEL);
	if (!bitmap->table)
		return -ENOMEM;

	for (i = 0; i < reserved_bot; ++i)
		set_bit(i, bitmap->table);

	return 0;
}

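/* Free the bit table allocated by hns_roce_bitmap_init(). */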
void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap)
{
	kfree(bitmap->table);
}

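/*
 * Free a buffer allocated by hns_roce_buf_alloc(), releasing either the
 * single coherent block or the per-page list, matching how it was built.
 */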
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
		       struct hns_roce_buf *buf)
{
	int i;
	struct device *dev = hr_dev->dev;

	if (buf->nbufs == 1) {
		dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
	} else {
		for (i = 0; i < buf->nbufs; ++i)
			if (buf->page_list[i].buf)
				dma_free_coherent(dev, 1 << buf->page_shift,
						  buf->page_list[i].buf,
						  buf->page_list[i].map);
		kfree(buf->page_list);
	}
}

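/*
 * Allocate a kernel-owned buffer of 'size' bytes. Small buffers
 * (size <= max_direct) use a single physically contiguous DMA-coherent
 * allocation; larger ones are built from a list of page_size chunks.
 * page_shift selects the HW page size recorded for the MTT mapping.
 */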
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
		       struct hns_roce_buf *buf, u32 page_shift)
{
	int i = 0;
	dma_addr_t t;
	struct device *dev = hr_dev->dev;
	u32 page_size = 1 << page_shift;
	u32 order;

	/* SQ/RQ buf is less than one page, SQ + RQ = 8K */
	if (size <= max_direct) {
		buf->nbufs = 1;
		/* npages is counted in units of the HW page size */
		order = get_order(size);
		if (order <= page_shift - PAGE_SHIFT)
			order = 0;
		else
			order -= page_shift - PAGE_SHIFT;
		buf->npages = 1 << order;
		buf->page_shift = page_shift;
		/* MTT PA must be recorded with 4K alignment, t is 4K aligned */
		buf->direct.buf = dma_alloc_coherent(dev, size, &t,
						     GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;

		buf->direct.map = t;

		while (t & ((1 << buf->page_shift) - 1)) {
			--buf->page_shift;
			buf->npages *= 2;
		}
	} else {
		buf->nbufs = DIV_ROUND_UP(size, page_size);
		buf->npages = buf->nbufs;
		buf->page_shift = page_shift;
		buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
					 GFP_KERNEL);
		if (!buf->page_list)
			return -ENOMEM;

		for (i = 0; i < buf->nbufs; ++i) {
			buf->page_list[i].buf = dma_alloc_coherent(dev,
								   page_size,
								   &t,
								   GFP_KERNEL);
			if (!buf->page_list[i].buf)
				goto err_free;

			buf->page_list[i].map = t;
		}
	}

	return 0;

err_free:
	hns_roce_buf_free(hr_dev, size, buf);
	return -ENOMEM;
}

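/*
 * Collect up to 'buf_cnt' DMA addresses of a kernel buffer into 'bufs',
 * starting at page index 'start'. Returns the number of addresses
 * written, or -EINVAL if the requested range exceeds the buffer.
 */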
int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, int start, struct hns_roce_buf *buf)
{
	int i, end;
	int total;

	end = start + buf_cnt;
	if (end > buf->npages) {
		dev_err(hr_dev->dev,
			"invalid kmem region, offset %d, buf_cnt %d, total %d!\n",
			start, buf_cnt, buf->npages);
		return -EINVAL;
	}

	total = 0;
	for (i = start; i < end; i++) {
		if (buf->nbufs == 1)
			bufs[total++] = buf->direct.map +
					((dma_addr_t)i << buf->page_shift);
		else
			bufs[total++] = buf->page_list[i].map;
	}

	return total;
}

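/*
 * Collect up to 'buf_cnt' DMA addresses of a user-memory region, walking
 * the umem in HW-page-sized blocks and skipping the first 'start' blocks.
 * page_shift must not be smaller than PAGE_SHIFT. Returns the number of
 * addresses written.
 */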
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, int start, struct ib_umem *umem,
			   int page_shift)
{
	struct ib_block_iter biter;
	int total = 0;
	int idx = 0;
	u64 addr;

	if (page_shift < PAGE_SHIFT) {
		dev_err(hr_dev->dev, "invalid page shift %d!\n", page_shift);
		return -EINVAL;
	}

	/* convert system page cnt to hw page cnt */
	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
			    1 << page_shift) {
		addr = rdma_block_iter_dma_address(&biter);
		if (idx >= start) {
			bufs[total++] = addr;
			if (total >= buf_cnt)
				goto done;
		}
		idx++;
	}

done:
	return total;
}

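/*
 * Describe one buffer region: its page offset within the buffer, the
 * number of pages it covers and the hop number used for address
 * translation (HNS_ROCE_HOP_NUM_0 maps to a hop number of 0).
 */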
void hns_roce_init_buf_region(struct hns_roce_buf_region *region, int hopnum,
			      int offset, int buf_cnt)
{
	if (hopnum == HNS_ROCE_HOP_NUM_0)
		region->hopnum = 0;
	else
		region->hopnum = hopnum;

	region->offset = offset;
	region->count = buf_cnt;
}

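/*
 * Free the per-region DMA address arrays allocated by
 * hns_roce_alloc_buf_list().
 */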
void hns_roce_free_buf_list(dma_addr_t **bufs, int region_cnt)
{
	int i;

	for (i = 0; i < region_cnt; i++) {
		kfree(bufs[i]);
		bufs[i] = NULL;
	}
}

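/*
 * Allocate one DMA address array per region, sized to the region's page
 * count. On failure, the arrays allocated so far are released.
 */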
int hns_roce_alloc_buf_list(struct hns_roce_buf_region *regions,
			    dma_addr_t **bufs, int region_cnt)
{
	struct hns_roce_buf_region *r;
	int i;

	for (i = 0; i < region_cnt; i++) {
		r = &regions[i];
		bufs[i] = kcalloc(r->count, sizeof(dma_addr_t), GFP_KERNEL);
		if (!bufs[i])
			goto err_alloc;
	}

	return 0;

err_alloc:
	hns_roce_free_buf_list(bufs, i);

	return -ENOMEM;
}

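/* Tear down the resource tables set up during device initialization. */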
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
{
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		hns_roce_cleanup_srq_table(hr_dev);
	hns_roce_cleanup_qp_table(hr_dev);
	hns_roce_cleanup_cq_table(hr_dev);
	hns_roce_cleanup_mr_table(hr_dev);
	hns_roce_cleanup_pd_table(hr_dev);
	hns_roce_cleanup_uar_table(hr_dev);
}