/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/* Crude resource management */
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/genalloc.h>
#include "iw_cxgb4.h"

#define RANDOM_SIZE 16

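/*
 * Load @fifo with the ids [skip_low, nr - skip_high).  If @random is
 * set, the ids are pushed in a shuffled order: RANDOM_SIZE ids are held
 * back in rarray[], and each subsequent id evicts a slot picked by a
 * 4-bit window of random32() output.  The skip_low + skip_high
 * placeholder entries queued first are drained again before returning,
 * so the fifo ends up holding exactly the usable ids.
 */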
static int __c4iw_init_resource_fifo(struct kfifo *fifo,
				   spinlock_t *fifo_lock,
				   u32 nr, u32 skip_low,
				   u32 skip_high,
				   int random)
{
	u32 i, j, entry = 0, idx;
	u32 random_bytes;
	u32 rarray[RANDOM_SIZE];

	spin_lock_init(fifo_lock);

	if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
		return -ENOMEM;

	for (i = 0; i < skip_low + skip_high; i++)
		kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
	if (random) {
		j = 0;
		random_bytes = random32();
		for (i = 0; i < RANDOM_SIZE; i++)
			rarray[i] = i + skip_low;
		for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
			if (j >= RANDOM_SIZE) {
				j = 0;
				random_bytes = random32();
			}
			idx = (random_bytes >> (j * 2)) & 0xF;
			kfifo_in(fifo,
				(unsigned char *) &rarray[idx],
				sizeof(u32));
			rarray[idx] = i;
			j++;
		}
		for (i = 0; i < RANDOM_SIZE; i++)
			kfifo_in(fifo,
				(unsigned char *) &rarray[i],
				sizeof(u32));
	} else
		for (i = skip_low; i < nr - skip_high; i++)
			kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));

	for (i = 0; i < skip_low + skip_high; i++)
		if (kfifo_out_locked(fifo, (unsigned char *) &entry,
				     sizeof(u32), fifo_lock))
			break;
	return 0;
}

static int c4iw_init_resource_fifo(struct kfifo *fifo, spinlock_t *fifo_lock,
				   u32 nr, u32 skip_low, u32 skip_high)
{
	return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
					 skip_high, 0);
}

static int c4iw_init_resource_fifo_random(struct kfifo *fifo,
				   spinlock_t *fifo_lock,
				   u32 nr, u32 skip_low, u32 skip_high)
{
	return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
					 skip_high, 1);
}

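/*
 * Seed the qid fifo from the qid range handed down by the LLD.  Only
 * qids aligned to the doorbell/GTS page granularity (!(qid & qpmask))
 * go into the fifo; the intervening qids are handed out later from the
 * per-ucontext caches in c4iw_get_cqid()/c4iw_get_qpid().
 */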
static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev)
{
	u32 i;

	spin_lock_init(&rdev->resource.qid_fifo_lock);

	if (kfifo_alloc(&rdev->resource.qid_fifo, rdev->lldi.vr->qp.size *
			sizeof(u32), GFP_KERNEL))
		return -ENOMEM;

	for (i = rdev->lldi.vr->qp.start;
	     i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
		if (!(i & rdev->qpmask))
			kfifo_in(&rdev->resource.qid_fifo,
				    (unsigned char *) &i, sizeof(u32));
	return 0;
}

/* nr_* must be power of 2 */
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
{
	int err = 0;

	err = c4iw_init_resource_fifo_random(&rdev->resource.tpt_fifo,
					     &rdev->resource.tpt_fifo_lock,
					     nr_tpt, 1, 0);
	if (err)
		goto tpt_err;
	err = c4iw_init_qid_fifo(rdev);
	if (err)
		goto qid_err;
	err = c4iw_init_resource_fifo(&rdev->resource.pdid_fifo,
				      &rdev->resource.pdid_fifo_lock,
				      nr_pdid, 1, 0);
	if (err)
		goto pdid_err;
	return 0;
pdid_err:
	kfifo_free(&rdev->resource.qid_fifo);
qid_err:
	kfifo_free(&rdev->resource.tpt_fifo);
tpt_err:
	return -ENOMEM;
}
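
/*
 * Illustrative call sequence (a sketch, not this driver's actual init
 * path): the device-attach code would size the fifos from the
 * adapter's stag and pdid limits, e.g.
 *
 *	err = c4iw_init_resource(rdev, T4_MAX_NUM_STAG, T4_MAX_NUM_PD);
 *	if (err)
 *		goto err_out;
 *
 * T4_MAX_NUM_STAG and T4_MAX_NUM_PD are assumed names for those
 * limits; both counts must be powers of 2, per the comment above.
 */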

/*
 * Returns 0 if no resource is available.  0 works as the failure
 * sentinel because the tpt/pdid fifos are initialized with
 * skip_low = 1 above, and the qid range handed down by the LLD is
 * expected not to include 0.
 */
u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock)
{
	u32 entry;

	if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
		return entry;
	else
		return 0;
}

void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock)
{
	PDBG("%s entry 0x%x\n", __func__, entry);
	kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock);
}
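
/*
 * Example pairing (illustrative sketch): a PD allocation would pull an
 * id from the pdid fifo and return it on teardown:
 *
 *	pdid = c4iw_get_resource(&rdev->resource.pdid_fifo,
 *				 &rdev->resource.pdid_fifo_lock);
 *	if (!pdid)
 *		return -ENOMEM;
 *	...
 *	c4iw_put_resource(&rdev->resource.pdid_fifo, pdid,
 *			  &rdev->resource.pdid_fifo_lock);
 */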
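
/*
 * cqids and qpids are allocated in blocks of (qpmask + 1) ids because
 * every id in a block maps to the same doorbell/GTS page.  When the
 * fifo hands out a new aligned qid, the remaining ids of the block are
 * stashed on the per-ucontext cqids and qpids lists and are preferred
 * on subsequent allocations.
 */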
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->cqids)) {
		entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_fifo,
					&rdev->resource.qid_fifo_lock);
		if (!qid)
			goto out;
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}

		/*
		 * now put the same ids on the qp list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof *entry, GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->qpids);
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	PDBG("%s qid 0x%x\n", __func__, qid);
	return qid;
}

void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
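
	/*
	 * If the list node cannot be allocated, the qid is quietly
	 * dropped rather than returned to the fifo, and stays unusable
	 * for the life of the device.
	 */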
	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	PDBG("%s qid 0x%x\n", __func__, qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->cqids);
	mutex_unlock(&uctx->lock);
}

u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->qpids)) {
		entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_fifo,
					&rdev->resource.qid_fifo_lock);
		if (!qid)
			goto out;
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}

		/*
		 * now put the same ids on the cq list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof *entry, GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->cqids);
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	PDBG("%s qid 0x%x\n", __func__, qid);
	return qid;
}

void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
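
	/* As in c4iw_put_cqid(), an allocation failure here quietly
	 * leaks the qid. */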
	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	PDBG("%s qid 0x%x\n", __func__, qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->qpids);
	mutex_unlock(&uctx->lock);
}

void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
	kfifo_free(&rscp->tpt_fifo);
	kfifo_free(&rscp->qid_fifo);
	kfifo_free(&rscp->pdid_fifo);
}

/*
 * PBL Memory Manager.  Uses Linux generic allocator.
 */

#define MIN_PBL_SHIFT 8			/* 256B == min PBL size (32 entries) */

u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);

	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
	if (!addr && printk_ratelimit())
		printk(KERN_WARNING MOD "%s: Out of PBL memory\n",
		       pci_name(rdev->lldi.pdev));
	return (u32)addr;
}

void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
	gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
}
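
/*
 * Typical use (illustrative): a memory-region setup path would reserve
 * room for npages worth of 8-byte PBL entries, e.g.
 *
 *	pbl_addr = c4iw_pblpool_alloc(rdev, npages * 8);
 *	if (!pbl_addr)
 *		return -ENOMEM;
 *	...
 *	c4iw_pblpool_free(rdev, pbl_addr, npages * 8);
 *
 * npages is a hypothetical caller variable; the 8-byte entry size
 * follows from the MIN_PBL_SHIFT comment above (256B == 32 entries).
 */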
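
/*
 * Carve the LLD's PBL range into gen_pool chunks.  gen_pool_add()
 * kmallocs a bitmap proportional to the chunk size, so a very large
 * chunk can fail on a memory-tight system; on failure the chunk size
 * is halved and retried, giving up once chunks shrink to
 * 1024 << MIN_PBL_SHIFT.
 */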
int c4iw_pblpool_create(struct c4iw_rdev *rdev)
{
	unsigned pbl_start, pbl_chunk, pbl_top;

	rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
	if (!rdev->pbl_pool)
		return -ENOMEM;

	pbl_start = rdev->lldi.vr->pbl.start;
	pbl_chunk = rdev->lldi.vr->pbl.size;
	pbl_top = pbl_start + pbl_chunk;

	while (pbl_start < pbl_top) {
		pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
		if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
			PDBG("%s failed to add PBL chunk (%x/%x)\n",
			     __func__, pbl_start, pbl_chunk);
			if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
				printk(KERN_WARNING MOD
				       "Failed to add all PBL chunks (%x/%x)\n",
				       pbl_start,
				       pbl_top - pbl_start);
				return 0;
			}
			pbl_chunk >>= 1;
		} else {
			PDBG("%s added PBL chunk (%x/%x)\n",
			     __func__, pbl_start, pbl_chunk);
			pbl_start += pbl_chunk;
		}
	}

	return 0;
}

void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
{
	gen_pool_destroy(rdev->pbl_pool);
}

/*
 * RQT Memory Manager.  Uses Linux generic allocator.
 */

#define MIN_RQT_SHIFT 10	/* 1KB == min RQT size (16 entries) */
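
/*
 * @size is in RQT entries; each entry is 64 bytes, hence the << 6 when
 * talking to the pool (1KB == 16 entries, per MIN_RQT_SHIFT above).
 */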
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);

	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
	if (!addr && printk_ratelimit())
		printk(KERN_WARNING MOD "%s: Out of RQT memory\n",
		       pci_name(rdev->lldi.pdev));
	return (u32)addr;
}

void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
	gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
}
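
/* Same chunk-halving insertion scheme as c4iw_pblpool_create(). */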
int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
	unsigned rqt_start, rqt_chunk, rqt_top;

	rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
	if (!rdev->rqt_pool)
		return -ENOMEM;

	rqt_start = rdev->lldi.vr->rq.start;
	rqt_chunk = rdev->lldi.vr->rq.size;
	rqt_top = rqt_start + rqt_chunk;

	while (rqt_start < rqt_top) {
		rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
		if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
			PDBG("%s failed to add RQT chunk (%x/%x)\n",
			     __func__, rqt_start, rqt_chunk);
			if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
				printk(KERN_WARNING MOD
				       "Failed to add all RQT chunks (%x/%x)\n",
				       rqt_start, rqt_top - rqt_start);
				return 0;
			}
			rqt_chunk >>= 1;
		} else {
			PDBG("%s added RQT chunk (%x/%x)\n",
			     __func__, rqt_start, rqt_chunk);
			rqt_start += rqt_chunk;
		}
	}
	return 0;
}

void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
	gen_pool_destroy(rdev->rqt_pool);
}

/*
 * On-Chip QP Memory.
 */
#define MIN_OCQP_SHIFT 12	/* 4KB == min ocqp size */
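
/*
 * Unlike the PBL/RQT pools, allocation failure here is not logged;
 * on-chip queue memory is a best-effort optimization and callers are
 * expected to fall back to host memory.
 */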
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);

	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
	return (u32)addr;
}

void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
	gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size);
}

int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
{
	unsigned start, chunk, top;

	rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1);
	if (!rdev->ocqp_pool)
		return -ENOMEM;

	start = rdev->lldi.vr->ocq.start;
	chunk = rdev->lldi.vr->ocq.size;
	top = start + chunk;

	while (start < top) {
		chunk = min(top - start + 1, chunk);
		if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
			PDBG("%s failed to add OCQP chunk (%x/%x)\n",
			     __func__, start, chunk);
			if (chunk <= 1024 << MIN_OCQP_SHIFT) {
				printk(KERN_WARNING MOD
				       "Failed to add all OCQP chunks (%x/%x)\n",
				       start, top - start);
				return 0;
			}
			chunk >>= 1;
		} else {
			PDBG("%s added OCQP chunk (%x/%x)\n",
			     __func__, start, chunk);
			start += chunk;
		}
	}
	return 0;
}

void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev)
{
	gen_pool_destroy(rdev->ocqp_pool);
}