1 /*
2  * Copyright (c) 2016 Hisilicon Limited.
3  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
35 #include "hns_roce_device.h"
36 
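/*
 * Round-robin bitmap allocator, derived from the Mellanox mlx4 code
 * (see the copyright above).  The search starts at 'last' so indices
 * rotate through the table; each time the search wraps, 'top' is
 * advanced by the full table size and masked by 'mask', so returned
 * handles carry a rotating high-bits prefix and a freed index is not
 * reissued with the same full handle straight away.
 *
 * Returns 0 and stores the handle in *obj, or -EINVAL when the bitmap
 * is exhausted.
 */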
37 int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
38 {
39 	int ret = 0;
40 
41 	spin_lock(&bitmap->lock);
42 	*obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
43 	if (*obj >= bitmap->max) {
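		/* Wrapped past the end: rotate the prefix, retry from bit 0 */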
44 		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
45 			       & bitmap->mask;
46 		*obj = find_first_zero_bit(bitmap->table, bitmap->max);
47 	}
48 
49 	if (*obj < bitmap->max) {
50 		set_bit(*obj, bitmap->table);
51 		bitmap->last = (*obj + 1);
52 		if (bitmap->last == bitmap->max)
53 			bitmap->last = 0;
54 		*obj |= bitmap->top;
55 	} else {
		ret = -EINVAL;
57 	}
58 
59 	spin_unlock(&bitmap->lock);
60 
61 	return ret;
62 }
63 
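/* Free a single object; thin wrapper around the range variant below. */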
64 void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj)
65 {
66 	hns_roce_bitmap_free_range(bitmap, obj, 1);
67 }
68 
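/*
 * Allocate 'cnt' contiguous bits aligned to 'align'; align - 1 is used
 * as an alignment mask, so 'align' is expected to be a power of two.
 * The common single-bit case is handed to the plain allocator.
 * Returns 0 and stores the first handle in *obj, or -EINVAL when no
 * sufficiently large free run exists.
 */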
69 int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
70 				int align, unsigned long *obj)
71 {
72 	int ret = 0;
73 	int i;
74 
75 	if (likely(cnt == 1 && align == 1))
76 		return hns_roce_bitmap_alloc(bitmap, obj);
77 
78 	spin_lock(&bitmap->lock);
79 
80 	*obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
81 					  bitmap->last, cnt, align - 1);
82 	if (*obj >= bitmap->max) {
83 		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
84 			       & bitmap->mask;
85 		*obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, 0,
86 						  cnt, align - 1);
87 	}
88 
89 	if (*obj < bitmap->max) {
90 		for (i = 0; i < cnt; i++)
91 			set_bit(*obj + i, bitmap->table);
92 
93 		if (*obj == bitmap->last) {
94 			bitmap->last = (*obj + cnt);
95 			if (bitmap->last >= bitmap->max)
96 				bitmap->last = 0;
97 		}
98 		*obj |= bitmap->top;
99 	} else {
		ret = -EINVAL;
101 	}
102 
103 	spin_unlock(&bitmap->lock);
104 
105 	return ret;
106 }
107 
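/*
 * Release 'cnt' bits starting at handle 'obj'.  The rotating prefix is
 * stripped off first; 'last' is pulled back so the next search can
 * reuse the freed range, and 'top' is rotated as on an allocation wrap.
 */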
108 void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
109 				unsigned long obj, int cnt)
110 {
111 	int i;
112 
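	/* Strip the rotating high-bits prefix to recover the raw bit index */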
113 	obj &= bitmap->max + bitmap->reserved_top - 1;
114 
115 	spin_lock(&bitmap->lock);
116 	for (i = 0; i < cnt; i++)
117 		clear_bit(obj + i, bitmap->table);
118 
119 	bitmap->last = min(bitmap->last, obj);
120 	bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
121 		       & bitmap->mask;
122 	spin_unlock(&bitmap->lock);
123 }
124 
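/*
 * Initialize a bitmap of 'num' bits; 'num' must be a power of two.
 * The lowest 'reserved_bot' bits are pre-set and never handed out,
 * and the top 'reserved_top' bits are excluded from the search range.
 */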
125 int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
126 			 u32 reserved_bot, u32 reserved_top)
127 {
128 	u32 i;
129 
	if (!is_power_of_2(num))
131 		return -EINVAL;
132 
133 	bitmap->last = 0;
134 	bitmap->top = 0;
135 	bitmap->max = num - reserved_top;
136 	bitmap->mask = mask;
137 	bitmap->reserved_top = reserved_top;
138 	spin_lock_init(&bitmap->lock);
139 	bitmap->table = kcalloc(BITS_TO_LONGS(bitmap->max), sizeof(long),
140 				GFP_KERNEL);
141 	if (!bitmap->table)
142 		return -ENOMEM;
143 
144 	for (i = 0; i < reserved_bot; ++i)
145 		set_bit(i, bitmap->table);
146 
147 	return 0;
148 }
149 
150 void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap)
151 {
152 	kfree(bitmap->table);
153 }
154 
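/*
 * Release a buffer from hns_roce_buf_alloc().  A direct buffer
 * (nbufs == 1) is a single coherent allocation; otherwise every page
 * was allocated separately and, on 64-bit kernels, also vmap'ed into
 * one contiguous kernel mapping that must be torn down first.
 */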
155 void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
156 		       struct hns_roce_buf *buf)
157 {
158 	int i;
159 	struct device *dev = &hr_dev->pdev->dev;
161 
162 	if (buf->nbufs == 1) {
163 		dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
164 	} else {
		if (BITS_PER_LONG == 64)
166 			vunmap(buf->direct.buf);
167 
168 		for (i = 0; i < buf->nbufs; ++i)
169 			if (buf->page_list[i].buf)
				dma_free_coherent(dev, PAGE_SIZE,
						  buf->page_list[i].buf,
						  buf->page_list[i].map);
173 		kfree(buf->page_list);
174 	}
175 }
176 
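/*
 * Allocate a buffer of 'size' bytes.  Sizes up to 'max_direct' use a
 * single coherent allocation; larger buffers are built from per-page
 * coherent allocations and, on 64-bit kernels, are additionally
 * vmap'ed so the driver can address them contiguously as well.
 */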
177 int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
178 		       struct hns_roce_buf *buf)
179 {
180 	int i = 0;
181 	dma_addr_t t;
182 	struct page **pages;
183 	struct device *dev = &hr_dev->pdev->dev;
185 
	/* SQ/RQ buffer no larger than max_direct, e.g. SQ + RQ = 8K */
187 	if (size <= max_direct) {
188 		buf->nbufs = 1;
		/* Number of pages, derived from the allocation order */
190 		buf->npages = 1 << get_order(size);
191 		buf->page_shift = PAGE_SHIFT;
		/* MTT PAs must be recorded with 4K alignment; t is 4K aligned */
193 		buf->direct.buf = dma_alloc_coherent(dev, size, &t, GFP_KERNEL);
194 		if (!buf->direct.buf)
195 			return -ENOMEM;
196 
197 		buf->direct.map = t;
198 
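		/*
		 * The recorded page size must evenly divide the DMA
		 * address: shrink page_shift (doubling npages each
		 * step) until the mapping is aligned.
		 */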
199 		while (t & ((1 << buf->page_shift) - 1)) {
200 			--buf->page_shift;
201 			buf->npages *= 2;
202 		}
203 
204 		memset(buf->direct.buf, 0, size);
205 	} else {
		buf->nbufs = DIV_ROUND_UP(size, PAGE_SIZE);
207 		buf->npages = buf->nbufs;
208 		buf->page_shift = PAGE_SHIFT;
209 		buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
210 					 GFP_KERNEL);
211 
212 		if (!buf->page_list)
213 			return -ENOMEM;
214 
215 		for (i = 0; i < buf->nbufs; ++i) {
216 			buf->page_list[i].buf = dma_alloc_coherent(dev,
217 								  PAGE_SIZE, &t,
218 								  GFP_KERNEL);
219 
220 			if (!buf->page_list[i].buf)
221 				goto err_free;
222 
223 			buf->page_list[i].map = t;
224 			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
225 		}
		if (BITS_PER_LONG == 64) {
227 			pages = kmalloc_array(buf->nbufs, sizeof(*pages),
228 					      GFP_KERNEL);
229 			if (!pages)
230 				goto err_free;
231 
232 			for (i = 0; i < buf->nbufs; ++i)
233 				pages[i] = virt_to_page(buf->page_list[i].buf);
234 
235 			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP,
236 					       PAGE_KERNEL);
237 			kfree(pages);
238 			if (!buf->direct.buf)
239 				goto err_free;
240 		}
241 	}
242 
243 	return 0;
244 
245 err_free:
246 	hns_roce_buf_free(hr_dev, size, buf);
247 	return -ENOMEM;
248 }
249 
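/* Tear down the resource tables in the reverse order of their setup. */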
250 void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
251 {
252 	hns_roce_cleanup_qp_table(hr_dev);
253 	hns_roce_cleanup_cq_table(hr_dev);
254 	hns_roce_cleanup_mr_table(hr_dev);
255 	hns_roce_cleanup_pd_table(hr_dev);
256 	hns_roce_cleanup_uar_table(hr_dev);
257 }
258