/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/* Crude resource management */
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/ratelimit.h>
#include "iw_cxgb4.h"

#define RANDOM_SIZE 16

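/*
 * Fill @fifo with the resource ids [skip_low, nr - skip_high).  The fifo is
 * first padded with skip_low + skip_high placeholder entries, the usable ids
 * are pushed behind them, and the placeholders are then drained off the
 * front, leaving only the usable ids queued.  When @random is set, the ids
 * are pushed in a pseudo-random order by shuffling them through a small
 * RANDOM_SIZE-entry window using bits from random32().
 */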
static int __c4iw_init_resource_fifo(struct kfifo *fifo,
				   spinlock_t *fifo_lock,
				   u32 nr, u32 skip_low,
				   u32 skip_high,
				   int random)
{
	u32 i, j, entry = 0, idx;
	u32 random_bytes;
	u32 rarray[RANDOM_SIZE];

	spin_lock_init(fifo_lock);

	if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
		return -ENOMEM;

	for (i = 0; i < skip_low + skip_high; i++)
		kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
	if (random) {
		j = 0;
		random_bytes = random32();
		for (i = 0; i < RANDOM_SIZE; i++)
			rarray[i] = i + skip_low;
		for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
			if (j >= RANDOM_SIZE) {
				j = 0;
				random_bytes = random32();
			}
			idx = (random_bytes >> (j * 2)) & 0xF;
			kfifo_in(fifo,
				(unsigned char *) &rarray[idx],
				sizeof(u32));
			rarray[idx] = i;
			j++;
		}
		for (i = 0; i < RANDOM_SIZE; i++)
			kfifo_in(fifo,
				(unsigned char *) &rarray[i],
				sizeof(u32));
	} else {
		for (i = skip_low; i < nr - skip_high; i++)
			kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));
	}

	for (i = 0; i < skip_low + skip_high; i++)
		if (kfifo_out_locked(fifo, (unsigned char *) &entry,
				     sizeof(u32), fifo_lock))
			break;
	return 0;
}

static int c4iw_init_resource_fifo(struct kfifo *fifo, spinlock_t *fifo_lock,
				   u32 nr, u32 skip_low, u32 skip_high)
{
	return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
					  skip_high, 0);
}

static int c4iw_init_resource_fifo_random(struct kfifo *fifo,
				   spinlock_t *fifo_lock,
				   u32 nr, u32 skip_low, u32 skip_high)
{
	return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
					  skip_high, 1);
}

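/*
 * Seed the qid fifo with the hardware qid range.  Only qids aligned to the
 * qid block size (i.e. !(qid & rdev->qpmask)) are queued; the unaligned qids
 * inside each block are handed out later from the per-ucontext lists in
 * c4iw_get_cqid() and c4iw_get_qpid(), since a whole block maps to the same
 * doorbell/GTS page.
 */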
static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev)
{
	u32 i;

	spin_lock_init(&rdev->resource.qid_fifo_lock);

	if (kfifo_alloc(&rdev->resource.qid_fifo, rdev->lldi.vr->qp.size *
			sizeof(u32), GFP_KERNEL))
		return -ENOMEM;

	for (i = rdev->lldi.vr->qp.start;
	     i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
		if (!(i & rdev->qpmask))
			kfifo_in(&rdev->resource.qid_fifo,
				    (unsigned char *) &i, sizeof(u32));
	return 0;
}

/*
 * Set up the tpt, qid and pdid resource fifos.
 * nr_tpt and nr_pdid must be powers of 2.
 */
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
{
	int err = 0;

	err = c4iw_init_resource_fifo_random(&rdev->resource.tpt_fifo,
					     &rdev->resource.tpt_fifo_lock,
					     nr_tpt, 1, 0);
	if (err)
		goto tpt_err;
	err = c4iw_init_qid_fifo(rdev);
	if (err)
		goto qid_err;
	err = c4iw_init_resource_fifo(&rdev->resource.pdid_fifo,
				      &rdev->resource.pdid_fifo_lock,
				      nr_pdid, 1, 0);
	if (err)
		goto pdid_err;
	return 0;
pdid_err:
	kfifo_free(&rdev->resource.qid_fifo);
qid_err:
	kfifo_free(&rdev->resource.tpt_fifo);
tpt_err:
	return -ENOMEM;
}

/*
 * Returns an allocated resource id, or 0 if no resource is available.
 */
u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock)
{
	u32 entry;

	if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
		return entry;
	else
		return 0;
}
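
/*
 * Hypothetical caller sketch (not a function in this file): a PD allocation
 * would typically pull an id from the pdid fifo and return it on teardown:
 *
 *	pdid = c4iw_get_resource(&rdev->resource.pdid_fifo,
 *				 &rdev->resource.pdid_fifo_lock);
 *	if (!pdid)
 *		return -ENOMEM;
 *	...
 *	c4iw_put_resource(&rdev->resource.pdid_fifo, pdid,
 *			  &rdev->resource.pdid_fifo_lock);
 */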

void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock)
{
	PDBG("%s entry 0x%x\n", __func__, entry);
	kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock);
}

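/*
 * Allocate a cqid for @uctx.  Qids are consumed a whole block (qpmask + 1
 * consecutive ids, one doorbell/GTS page) at a time from the qid fifo: the
 * first id of the block is returned and the remaining ids are cached on the
 * per-context cqid and qpid lists for later allocations.  For example, with
 * qpmask == 3 and qid 8 pulled from the fifo, 8 is returned, 9-11 are queued
 * on uctx->cqids, and 8-11 are queued on uctx->qpids.
 */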
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->cqids)) {
		entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_fifo,
					&rdev->resource.qid_fifo_lock);
		if (!qid)
			goto out;
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}

		/*
		 * now put the same ids on the qp list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof *entry, GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->qpids);
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	PDBG("%s qid 0x%x\n", __func__, qid);
	return qid;
}

void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	PDBG("%s qid 0x%x\n", __func__, qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->cqids);
	mutex_unlock(&uctx->lock);
}

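/*
 * Allocate a qpid for @uctx.  Mirror image of c4iw_get_cqid(): a cached id is
 * reused from uctx->qpids if possible, otherwise a fresh block is pulled from
 * the qid fifo, its spare ids are queued on uctx->qpids, and the whole block
 * is also queued on uctx->cqids since it maps to the same db/gts page.
 */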
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->qpids)) {
		entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_fifo,
					&rdev->resource.qid_fifo_lock);
		if (!qid)
			goto out;
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}

		/*
		 * now put the same ids on the cq list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof *entry, GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->cqids);
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	PDBG("%s qid 0x%x\n", __func__, qid);
	return qid;
}

void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	PDBG("%s qid 0x%x\n", __func__, qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->qpids);
	mutex_unlock(&uctx->lock);
}

void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
	kfifo_free(&rscp->tpt_fifo);
	kfifo_free(&rscp->qid_fifo);
	kfifo_free(&rscp->pdid_fifo);
}

/*
 * PBL Memory Manager.  Uses Linux generic allocator.
 */

#define MIN_PBL_SHIFT 8			/* 256B == min PBL size (32 entries) */

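/*
 * Allocate @size bytes of adapter PBL memory.  Returns the adapter address of
 * the allocation, or 0 if the pool is exhausted.
 */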
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);

	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
	if (!addr)
		printk_ratelimited(KERN_WARNING MOD "%s: Out of PBL memory\n",
		       pci_name(rdev->lldi.pdev));
	return (u32)addr;
}

void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
	gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
}

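/*
 * Seed the PBL gen_pool from the adapter's PBL memory region.  The whole
 * region is first tried as a single gen_pool chunk; each time gen_pool_add()
 * fails the chunk size is halved and the remaining range retried, giving up
 * (with a warning) once the chunk drops to 1024 minimum-sized blocks or less.
 */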
int c4iw_pblpool_create(struct c4iw_rdev *rdev)
{
	unsigned pbl_start, pbl_chunk, pbl_top;

	rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
	if (!rdev->pbl_pool)
		return -ENOMEM;

	pbl_start = rdev->lldi.vr->pbl.start;
	pbl_chunk = rdev->lldi.vr->pbl.size;
	pbl_top = pbl_start + pbl_chunk;

	while (pbl_start < pbl_top) {
		pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
		if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
			PDBG("%s failed to add PBL chunk (%x/%x)\n",
			     __func__, pbl_start, pbl_chunk);
			if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
				printk(KERN_WARNING MOD
				       "Failed to add all PBL chunks (%x/%x)\n",
				       pbl_start,
				       pbl_top - pbl_start);
				return 0;
			}
			pbl_chunk >>= 1;
		} else {
			PDBG("%s added PBL chunk (%x/%x)\n",
			     __func__, pbl_start, pbl_chunk);
			pbl_start += pbl_chunk;
		}
	}

	return 0;
}

void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
{
	gen_pool_destroy(rdev->pbl_pool);
}

/*
 * RQT Memory Manager.  Uses Linux generic allocator.
 */

#define MIN_RQT_SHIFT 10	/* 1KB == min RQT size (16 entries) */

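/*
 * Allocate adapter RQT memory.  @size is in RQT entries; each entry is
 * 64 bytes (cf. MIN_RQT_SHIFT above: 1KB == 16 entries), hence the size << 6
 * when talking to the gen_pool.  Returns the adapter address, or 0 on failure.
 */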
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);

	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
	if (!addr)
		printk_ratelimited(KERN_WARNING MOD "%s: Out of RQT memory\n",
		       pci_name(rdev->lldi.pdev));
	return (u32)addr;
}

void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
	gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
}

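/*
 * Seed the RQT gen_pool from the adapter's RQ memory region, using the same
 * halve-on-failure chunking strategy as c4iw_pblpool_create().
 */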
int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
	unsigned rqt_start, rqt_chunk, rqt_top;

	rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
	if (!rdev->rqt_pool)
		return -ENOMEM;

	rqt_start = rdev->lldi.vr->rq.start;
	rqt_chunk = rdev->lldi.vr->rq.size;
	rqt_top = rqt_start + rqt_chunk;

	while (rqt_start < rqt_top) {
		rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
		if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
			PDBG("%s failed to add RQT chunk (%x/%x)\n",
			     __func__, rqt_start, rqt_chunk);
			if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
				printk(KERN_WARNING MOD
				       "Failed to add all RQT chunks (%x/%x)\n",
				       rqt_start, rqt_top - rqt_start);
				return 0;
			}
			rqt_chunk >>= 1;
		} else {
			PDBG("%s added RQT chunk (%x/%x)\n",
			     __func__, rqt_start, rqt_chunk);
			rqt_start += rqt_chunk;
		}
	}
	return 0;
}

void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
	gen_pool_destroy(rdev->rqt_pool);
}

/*
 * On-Chip QP Memory.
 */
#define MIN_OCQP_SHIFT 12	/* 4KB == min ocqp size */

u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);

	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
	return (u32)addr;
}

void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
	gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size);
}

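/*
 * Seed the on-chip QP memory gen_pool from the adapter's OCQP region, again
 * using the same halve-on-failure chunking strategy as c4iw_pblpool_create().
 */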
int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
{
	unsigned start, chunk, top;

	rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1);
	if (!rdev->ocqp_pool)
		return -ENOMEM;

	start = rdev->lldi.vr->ocq.start;
	chunk = rdev->lldi.vr->ocq.size;
	top = start + chunk;

	while (start < top) {
		chunk = min(top - start + 1, chunk);
		if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
			PDBG("%s failed to add OCQP chunk (%x/%x)\n",
			     __func__, start, chunk);
			if (chunk <= 1024 << MIN_OCQP_SHIFT) {
				printk(KERN_WARNING MOD
				       "Failed to add all OCQP chunks (%x/%x)\n",
				       start, top - start);
				return 0;
			}
			chunk >>= 1;
		} else {
			PDBG("%s added OCQP chunk (%x/%x)\n",
			     __func__, start, chunk);
			start += chunk;
		}
	}
	return 0;
}

void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev)
{
	gen_pool_destroy(rdev->ocqp_pool);
}