Lines matching refs:rdev — references to the struct c4iw_rdev device handle in the cxgb4 iWARP driver's resource-management code (drivers/infiniband/hw/cxgb4/resource.c).

38 static int c4iw_init_qid_table(struct c4iw_rdev *rdev)  in c4iw_init_qid_table()  argument
42 if (c4iw_id_table_alloc(&rdev->resource.qid_table, in c4iw_init_qid_table()
43 rdev->lldi.vr->qp.start, in c4iw_init_qid_table()
44 rdev->lldi.vr->qp.size, in c4iw_init_qid_table()
45 rdev->lldi.vr->qp.size, 0)) in c4iw_init_qid_table()
48 for (i = rdev->lldi.vr->qp.start; in c4iw_init_qid_table()
49 i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++) in c4iw_init_qid_table()
50 if (!(i & rdev->qpmask)) in c4iw_init_qid_table()
51 c4iw_id_free(&rdev->resource.qid_table, i); in c4iw_init_qid_table()
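
The seeding loop at lines 48-51 puts only the queue IDs that are aligned to rdev->qpmask + 1 into the qid table, so one table entry stands for a whole block of qpmask + 1 consecutive hardware qids; that matches the "cur += rdev->qpmask + 1" accounting later in c4iw_get_cqid() and c4iw_get_qpid(). A standalone illustration of the filter, with invented start/size/qpmask values (not driver code):

    /* Illustration only: which IDs the alignment filter from lines 48-51
     * would seed, and which block each seeded ID represents. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int start = 1024, size = 32, qpmask = 3;  /* assumed values */
            unsigned int i;

            for (i = start; i < start + size; i++)
                    if (!(i & qpmask))
                            printf("seed qid %u (covers %u..%u)\n",
                                   i, i, i + qpmask);
            return 0;
    }
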
56 int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, in c4iw_init_resource() argument
60 err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1, in c4iw_init_resource()
64 err = c4iw_init_qid_table(rdev); in c4iw_init_resource()
67 err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0, in c4iw_init_resource()
72 err = c4iw_id_table_alloc(&rdev->resource.srq_table, 0, in c4iw_init_resource()
75 err = c4iw_id_table_alloc(&rdev->resource.srq_table, 0, in c4iw_init_resource()
81 c4iw_id_table_free(&rdev->resource.pdid_table); in c4iw_init_resource()
83 c4iw_id_table_free(&rdev->resource.qid_table); in c4iw_init_resource()
85 c4iw_id_table_free(&rdev->resource.tpt_table); in c4iw_init_resource()
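
c4iw_init_resource() sets up the tpt, qid, pdid and srq ID tables in that order and, as the frees at lines 81, 83 and 85 show, tears them down in reverse when a later step fails (the listing shows two alternative srq_table allocations at lines 72 and 75; the sketch below keeps only one). A schematic reconstruction of that allocate-then-unwind shape follows; the goto labels, the final flag arguments and the return value are assumptions, not taken from the listing:

    /* Schematic sketch, not the verbatim driver function. */
    int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt,
                           u32 nr_pdid, u32 nr_srqt)
    {
            int err;

            err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1, 0);
            if (err)
                    goto tpt_err;
            err = c4iw_init_qid_table(rdev);
            if (err)
                    goto qid_err;
            err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0, nr_pdid, 1, 0);
            if (err)
                    goto pdid_err;
            err = c4iw_id_table_alloc(&rdev->resource.srq_table, 0, nr_srqt, 0, 0);
            if (err)
                    goto srq_err;
            return 0;

    srq_err:
            c4iw_id_table_free(&rdev->resource.pdid_table);   /* line 81 */
    pdid_err:
            c4iw_id_table_free(&rdev->resource.qid_table);    /* line 83 */
    qid_err:
            c4iw_id_table_free(&rdev->resource.tpt_table);    /* line 85 */
    tpt_err:
            return err;
    }
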
108 u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) in c4iw_get_cqid() argument
122 qid = c4iw_get_resource(&rdev->resource.qid_table); in c4iw_get_cqid()
125 mutex_lock(&rdev->stats.lock); in c4iw_get_cqid()
126 rdev->stats.qid.cur += rdev->qpmask + 1; in c4iw_get_cqid()
127 mutex_unlock(&rdev->stats.lock); in c4iw_get_cqid()
128 for (i = qid+1; i & rdev->qpmask; i++) { in c4iw_get_cqid()
145 for (i = qid+1; i & rdev->qpmask; i++) { in c4iw_get_cqid()
156 mutex_lock(&rdev->stats.lock); in c4iw_get_cqid()
157 if (rdev->stats.qid.cur > rdev->stats.qid.max) in c4iw_get_cqid()
158 rdev->stats.qid.max = rdev->stats.qid.cur; in c4iw_get_cqid()
159 mutex_unlock(&rdev->stats.lock); in c4iw_get_cqid()
163 void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid, in c4iw_put_cqid() argument
178 u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) in c4iw_get_qpid() argument
192 qid = c4iw_get_resource(&rdev->resource.qid_table); in c4iw_get_qpid()
194 mutex_lock(&rdev->stats.lock); in c4iw_get_qpid()
195 rdev->stats.qid.fail++; in c4iw_get_qpid()
196 mutex_unlock(&rdev->stats.lock); in c4iw_get_qpid()
199 mutex_lock(&rdev->stats.lock); in c4iw_get_qpid()
200 rdev->stats.qid.cur += rdev->qpmask + 1; in c4iw_get_qpid()
201 mutex_unlock(&rdev->stats.lock); in c4iw_get_qpid()
202 for (i = qid+1; i & rdev->qpmask; i++) { in c4iw_get_qpid()
219 for (i = qid + 1; i & rdev->qpmask; i++) { in c4iw_get_qpid()
230 mutex_lock(&rdev->stats.lock); in c4iw_get_qpid()
231 if (rdev->stats.qid.cur > rdev->stats.qid.max) in c4iw_get_qpid()
232 rdev->stats.qid.max = rdev->stats.qid.cur; in c4iw_get_qpid()
233 mutex_unlock(&rdev->stats.lock); in c4iw_get_qpid()
237 void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid, in c4iw_put_qpid() argument
265 u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size) in c4iw_pblpool_alloc() argument
267 unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size); in c4iw_pblpool_alloc()
269 mutex_lock(&rdev->stats.lock); in c4iw_pblpool_alloc()
271 rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT); in c4iw_pblpool_alloc()
272 if (rdev->stats.pbl.cur > rdev->stats.pbl.max) in c4iw_pblpool_alloc()
273 rdev->stats.pbl.max = rdev->stats.pbl.cur; in c4iw_pblpool_alloc()
274 kref_get(&rdev->pbl_kref); in c4iw_pblpool_alloc()
276 rdev->stats.pbl.fail++; in c4iw_pblpool_alloc()
277 mutex_unlock(&rdev->stats.lock); in c4iw_pblpool_alloc()
283 struct c4iw_rdev *rdev; in destroy_pblpool() local
285 rdev = container_of(kref, struct c4iw_rdev, pbl_kref); in destroy_pblpool()
286 gen_pool_destroy(rdev->pbl_pool); in destroy_pblpool()
287 complete(&rdev->pbl_compl); in destroy_pblpool()
290 void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size) in c4iw_pblpool_free() argument
293 mutex_lock(&rdev->stats.lock); in c4iw_pblpool_free()
294 rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT); in c4iw_pblpool_free()
295 mutex_unlock(&rdev->stats.lock); in c4iw_pblpool_free()
296 gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size); in c4iw_pblpool_free()
297 kref_put(&rdev->pbl_kref, destroy_pblpool); in c4iw_pblpool_free()
300 int c4iw_pblpool_create(struct c4iw_rdev *rdev) in c4iw_pblpool_create() argument
304 rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1); in c4iw_pblpool_create()
305 if (!rdev->pbl_pool) in c4iw_pblpool_create()
308 pbl_start = rdev->lldi.vr->pbl.start; in c4iw_pblpool_create()
309 pbl_chunk = rdev->lldi.vr->pbl.size; in c4iw_pblpool_create()
314 if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) { in c4iw_pblpool_create()
333 void c4iw_pblpool_destroy(struct c4iw_rdev *rdev) in c4iw_pblpool_destroy() argument
335 kref_put(&rdev->pbl_kref, destroy_pblpool); in c4iw_pblpool_destroy()
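
The PBL pool ties a genalloc pool to a kref: every successful c4iw_pblpool_alloc() takes a reference (line 274), every c4iw_pblpool_free() drops one (line 297), and c4iw_pblpool_destroy() drops the base reference (line 335), so destroy_pblpool() only destroys the gen_pool and completes rdev->pbl_compl once the last outstanding PBL allocation has been returned. A minimal sketch of that lifetime pattern with hypothetical names (my_rdev, my_pool, my_kref, my_compl); the kernel APIs used are the same ones in the listing:

    #include <linux/genalloc.h>
    #include <linux/kref.h>
    #include <linux/completion.h>

    struct my_rdev {
            struct gen_pool *my_pool;
            struct kref my_kref;            /* pins the pool while allocations exist */
            struct completion my_compl;     /* signalled once the pool is really gone */
    };

    static void my_destroy(struct kref *kref)
    {
            struct my_rdev *r = container_of(kref, struct my_rdev, my_kref);

            gen_pool_destroy(r->my_pool);   /* safe: no allocations remain */
            complete(&r->my_compl);         /* wake whoever waits for teardown */
    }

    static unsigned long my_alloc(struct my_rdev *r, size_t size)
    {
            unsigned long addr = gen_pool_alloc(r->my_pool, size);

            if (addr)
                    kref_get(&r->my_kref);  /* each live allocation holds a reference */
            return addr;
    }

    static void my_free(struct my_rdev *r, unsigned long addr, size_t size)
    {
            gen_pool_free(r->my_pool, addr, size);
            kref_put(&r->my_kref, my_destroy);      /* last put runs my_destroy() */
    }

The RQT pool below (destroy_rqtpool(), rqt_kref, rqt_compl) follows the same shape.
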
344 u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size) in c4iw_rqtpool_alloc() argument
346 unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6); in c4iw_rqtpool_alloc()
350 pci_name(rdev->lldi.pdev)); in c4iw_rqtpool_alloc()
351 mutex_lock(&rdev->stats.lock); in c4iw_rqtpool_alloc()
353 rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT); in c4iw_rqtpool_alloc()
354 if (rdev->stats.rqt.cur > rdev->stats.rqt.max) in c4iw_rqtpool_alloc()
355 rdev->stats.rqt.max = rdev->stats.rqt.cur; in c4iw_rqtpool_alloc()
356 kref_get(&rdev->rqt_kref); in c4iw_rqtpool_alloc()
358 rdev->stats.rqt.fail++; in c4iw_rqtpool_alloc()
359 mutex_unlock(&rdev->stats.lock); in c4iw_rqtpool_alloc()
365 struct c4iw_rdev *rdev; in destroy_rqtpool() local
367 rdev = container_of(kref, struct c4iw_rdev, rqt_kref); in destroy_rqtpool()
368 gen_pool_destroy(rdev->rqt_pool); in destroy_rqtpool()
369 complete(&rdev->rqt_compl); in destroy_rqtpool()
372 void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size) in c4iw_rqtpool_free() argument
375 mutex_lock(&rdev->stats.lock); in c4iw_rqtpool_free()
376 rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT); in c4iw_rqtpool_free()
377 mutex_unlock(&rdev->stats.lock); in c4iw_rqtpool_free()
378 gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6); in c4iw_rqtpool_free()
379 kref_put(&rdev->rqt_kref, destroy_rqtpool); in c4iw_rqtpool_free()
382 int c4iw_rqtpool_create(struct c4iw_rdev *rdev) in c4iw_rqtpool_create() argument
387 rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1); in c4iw_rqtpool_create()
388 if (!rdev->rqt_pool) in c4iw_rqtpool_create()
395 if (rdev->lldi.vr->srq.size) in c4iw_rqtpool_create()
398 rqt_start = rdev->lldi.vr->rq.start + skip; in c4iw_rqtpool_create()
399 rqt_chunk = rdev->lldi.vr->rq.size - skip; in c4iw_rqtpool_create()
404 if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) { in c4iw_rqtpool_create()
422 void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev) in c4iw_rqtpool_destroy() argument
424 kref_put(&rdev->rqt_kref, destroy_rqtpool); in c4iw_rqtpool_destroy()
427 int c4iw_alloc_srq_idx(struct c4iw_rdev *rdev) in c4iw_alloc_srq_idx() argument
431 idx = c4iw_id_alloc(&rdev->resource.srq_table); in c4iw_alloc_srq_idx()
432 mutex_lock(&rdev->stats.lock); in c4iw_alloc_srq_idx()
434 rdev->stats.srqt.fail++; in c4iw_alloc_srq_idx()
435 mutex_unlock(&rdev->stats.lock); in c4iw_alloc_srq_idx()
438 rdev->stats.srqt.cur++; in c4iw_alloc_srq_idx()
439 if (rdev->stats.srqt.cur > rdev->stats.srqt.max) in c4iw_alloc_srq_idx()
440 rdev->stats.srqt.max = rdev->stats.srqt.cur; in c4iw_alloc_srq_idx()
441 mutex_unlock(&rdev->stats.lock); in c4iw_alloc_srq_idx()
445 void c4iw_free_srq_idx(struct c4iw_rdev *rdev, int idx) in c4iw_free_srq_idx() argument
447 c4iw_id_free(&rdev->resource.srq_table, idx); in c4iw_free_srq_idx()
448 mutex_lock(&rdev->stats.lock); in c4iw_free_srq_idx()
449 rdev->stats.srqt.cur--; in c4iw_free_srq_idx()
450 mutex_unlock(&rdev->stats.lock); in c4iw_free_srq_idx()
458 u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size) in c4iw_ocqp_pool_alloc() argument
460 unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size); in c4iw_ocqp_pool_alloc()
463 mutex_lock(&rdev->stats.lock); in c4iw_ocqp_pool_alloc()
464 rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT); in c4iw_ocqp_pool_alloc()
465 if (rdev->stats.ocqp.cur > rdev->stats.ocqp.max) in c4iw_ocqp_pool_alloc()
466 rdev->stats.ocqp.max = rdev->stats.ocqp.cur; in c4iw_ocqp_pool_alloc()
467 mutex_unlock(&rdev->stats.lock); in c4iw_ocqp_pool_alloc()
472 void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size) in c4iw_ocqp_pool_free() argument
475 mutex_lock(&rdev->stats.lock); in c4iw_ocqp_pool_free()
476 rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT); in c4iw_ocqp_pool_free()
477 mutex_unlock(&rdev->stats.lock); in c4iw_ocqp_pool_free()
478 gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size); in c4iw_ocqp_pool_free()
481 int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev) in c4iw_ocqp_pool_create() argument
485 rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1); in c4iw_ocqp_pool_create()
486 if (!rdev->ocqp_pool) in c4iw_ocqp_pool_create()
489 start = rdev->lldi.vr->ocq.start; in c4iw_ocqp_pool_create()
490 chunk = rdev->lldi.vr->ocq.size; in c4iw_ocqp_pool_create()
495 if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) { in c4iw_ocqp_pool_create()
513 void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev) in c4iw_ocqp_pool_destroy() argument
515 gen_pool_destroy(rdev->ocqp_pool); in c4iw_ocqp_pool_destroy()
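
The on-chip queue (OCQP) pool uses the same gen_pool create/add/alloc/free quartet as the PBL and RQT pools, but without the kref: c4iw_ocqp_pool_destroy() calls gen_pool_destroy() directly (line 515). A stripped-down paraphrase of the calls above, not the driver's actual bodies (the stats accounting, any chunked gen_pool_add handling and the exact error returns are omitted or assumed):

    int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
    {
            rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1);      /* line 485 */
            if (!rdev->ocqp_pool)
                    return -ENOMEM;                                     /* assumed */
            return gen_pool_add(rdev->ocqp_pool, rdev->lldi.vr->ocq.start,
                                rdev->lldi.vr->ocq.size, -1);           /* line 495 */
    }

    u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
    {
            return (u32)gen_pool_alloc(rdev->ocqp_pool, size);          /* line 460 */
    }

    void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
    {
            gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size);  /* line 478 */
    }

    void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev)
    {
            gen_pool_destroy(rdev->ocqp_pool);      /* line 515 -- no kref here */
    }
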