Lines matching refs: mr

788 			wr->wr.reg.mr = reg_wr(ibwr)->mr;  in init_send_wr()
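The init_send_wr() hit is where a fast-registration (IB_WR_REG_MR) work request is translated into rxe's internal send WR and the MR pointer is carried along. A hedged sketch of the surrounding switch case, assuming the standard ib_reg_wr fields; the key and access lines are not in the listing:

	case IB_WR_REG_MR:
		/* copy the MR, rkey and access flags from the posted
		 * ib_reg_wr into rxe's internal send WR
		 */
		wr->wr.reg.mr = reg_wr(ibwr)->mr;
		wr->wr.reg.key = reg_wr(ibwr)->key;
		wr->wr.reg.access = reg_wr(ibwr)->access;
		break;
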
1229 struct rxe_mr *mr; in rxe_get_dma_mr() local
1232 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in rxe_get_dma_mr()
1233 if (!mr) in rxe_get_dma_mr()
1236 err = rxe_add_to_pool(&rxe->mr_pool, mr); in rxe_get_dma_mr()
1243 mr->ibmr.pd = ibpd; in rxe_get_dma_mr()
1244 mr->ibmr.device = ibpd->device; in rxe_get_dma_mr()
1246 rxe_mr_init_dma(access, mr); in rxe_get_dma_mr()
1247 rxe_finalize(mr); in rxe_get_dma_mr()
1248 return &mr->ibmr; in rxe_get_dma_mr()
1251 kfree(mr); in rxe_get_dma_mr()
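Taken together, the rxe_get_dma_mr() hits show the DMA MR path: allocate the rxe_mr, add it to the device's mr pool, attach the PD and device, initialize it as a DMA MR and finalize it so it becomes visible to lookups. A hedged sketch of that flow, assuming rxe's internal headers; the to_rdev() helper, the err_free label and the ERR_PTR returns are not in the listing:

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);	/* assumed helper */
	struct rxe_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = rxe_add_to_pool(&rxe->mr_pool, mr);
	if (err)
		goto err_free;

	mr->ibmr.pd = ibpd;
	mr->ibmr.device = ibpd->device;

	rxe_mr_init_dma(access, mr);	/* DMA MR: no page map needed */
	rxe_finalize(mr);		/* make the pool entry usable */
	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}
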
1262 struct rxe_mr *mr; in rxe_reg_user_mr() local
1271 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in rxe_reg_user_mr()
1272 if (!mr) in rxe_reg_user_mr()
1275 err = rxe_add_to_pool(&rxe->mr_pool, mr); in rxe_reg_user_mr()
1282 mr->ibmr.pd = ibpd; in rxe_reg_user_mr()
1283 mr->ibmr.device = ibpd->device; in rxe_reg_user_mr()
1285 err = rxe_mr_init_user(rxe, start, length, iova, access, mr); in rxe_reg_user_mr()
1287 rxe_dbg_mr(mr, "reg_user_mr failed, err = %d", err); in rxe_reg_user_mr()
1291 rxe_finalize(mr); in rxe_reg_user_mr()
1292 return &mr->ibmr; in rxe_reg_user_mr()
1295 cleanup_err = rxe_cleanup(mr); in rxe_reg_user_mr()
1297 rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err); in rxe_reg_user_mr()
1299 kfree(mr); in rxe_reg_user_mr()
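The rxe_reg_user_mr() hits follow the same allocate / add-to-pool / attach pattern, but rxe_mr_init_user() (which pins the user pages and builds the page map) can fail, which is why rxe_cleanup() appears on the error path before kfree(). A hedged sketch; the error labels and to_rdev() are assumptions not present in the listing:

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
				     u64 iova, int access,
				     struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);	/* assumed helper */
	struct rxe_mr *mr;
	int err, cleanup_err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = rxe_add_to_pool(&rxe->mr_pool, mr);
	if (err)
		goto err_free;

	mr->ibmr.pd = ibpd;
	mr->ibmr.device = ibpd->device;

	/* pin user pages and build the page map; this can fail */
	err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
	if (err) {
		rxe_dbg_mr(mr, "reg_user_mr failed, err = %d", err);
		goto err_cleanup;
	}

	rxe_finalize(mr);
	return &mr->ibmr;

err_cleanup:
	cleanup_err = rxe_cleanup(mr);	/* undo rxe_add_to_pool() */
	if (cleanup_err)
		rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}
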
1309 struct rxe_mr *mr = to_rmr(ibmr); in rxe_rereg_user_mr() local
1317 rxe_err_mr(mr, "flags = %#x not supported", flags); in rxe_rereg_user_mr()
1324 mr->ibmr.pd = ibpd; in rxe_rereg_user_mr()
1329 rxe_err_mr(mr, "access = %#x not supported", access); in rxe_rereg_user_mr()
1332 mr->access = access; in rxe_rereg_user_mr()
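rxe_rereg_user_mr() does not rebuild the mapping: the hits show it only repointing the PD and replacing the access flags, and rejecting any other rereg request. A hedged sketch; the IB_MR_REREG_* flag tests and the RXE_ACCESS_SUPPORTED_MR mask are assumptions inferred from the messages in the listing:

static struct ib_mr *rxe_rereg_user_mr(struct ib_mr *ibmr, int flags,
					u64 start, u64 length, u64 iova,
					int access, struct ib_pd *ibpd,
					struct ib_udata *udata)
{
	struct rxe_mr *mr = to_rmr(ibmr);

	/* only PD and access-flag changes are handled */
	if (flags & ~(IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) {
		rxe_err_mr(mr, "flags = %#x not supported", flags);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (flags & IB_MR_REREG_PD)
		mr->ibmr.pd = ibpd;

	if (flags & IB_MR_REREG_ACCESS) {
		if (access & ~RXE_ACCESS_SUPPORTED_MR) {	/* assumed mask */
			rxe_err_mr(mr, "access = %#x not supported", access);
			return ERR_PTR(-EOPNOTSUPP);
		}
		mr->access = access;
	}

	return NULL;	/* NULL tells the core to keep using the same ib_mr */
}
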
1343 struct rxe_mr *mr; in rxe_alloc_mr() local
1353 mr = kzalloc(sizeof(*mr), GFP_KERNEL); in rxe_alloc_mr()
1354 if (!mr) in rxe_alloc_mr()
1357 err = rxe_add_to_pool(&rxe->mr_pool, mr); in rxe_alloc_mr()
1362 mr->ibmr.pd = ibpd; in rxe_alloc_mr()
1363 mr->ibmr.device = ibpd->device; in rxe_alloc_mr()
1365 err = rxe_mr_init_fast(max_num_sg, mr); in rxe_alloc_mr()
1367 rxe_dbg_mr(mr, "alloc_mr failed, err = %d", err); in rxe_alloc_mr()
1371 rxe_finalize(mr); in rxe_alloc_mr()
1372 return &mr->ibmr; in rxe_alloc_mr()
1375 cleanup_err = rxe_cleanup(mr); in rxe_alloc_mr()
1377 rxe_err_mr(mr, "cleanup failed, err = %d", err); in rxe_alloc_mr()
1379 kfree(mr); in rxe_alloc_mr()
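rxe_alloc_mr() creates a fast-registration MR with room for max_num_sg pages; the flow mirrors rxe_reg_user_mr(), with rxe_mr_init_fast() doing the type-specific setup. A hedged sketch; the IB_MR_TYPE_MEM_REG check, the labels and to_rdev() are assumptions:

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);	/* assumed helper */
	struct rxe_mr *mr;
	int err, cleanup_err;

	if (mr_type != IB_MR_TYPE_MEM_REG)	/* assumed: only fast-reg MRs */
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = rxe_add_to_pool(&rxe->mr_pool, mr);
	if (err)
		goto err_free;

	mr->ibmr.pd = ibpd;
	mr->ibmr.device = ibpd->device;

	/* allocate the page list filled in later by map_mr_sg / reg WRs */
	err = rxe_mr_init_fast(max_num_sg, mr);
	if (err) {
		rxe_dbg_mr(mr, "alloc_mr failed, err = %d", err);
		goto err_cleanup;
	}

	rxe_finalize(mr);
	return &mr->ibmr;

err_cleanup:
	cleanup_err = rxe_cleanup(mr);
	if (cleanup_err)
		rxe_err_mr(mr, "cleanup failed, err = %d", err);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}
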
1387 struct rxe_mr *mr = to_rmr(ibmr); in rxe_dereg_mr() local
1391 if (atomic_read(&mr->num_mw) > 0) { in rxe_dereg_mr()
1393 rxe_dbg_mr(mr, "mr has mw's bound"); in rxe_dereg_mr()
1397 cleanup_err = rxe_cleanup(mr); in rxe_dereg_mr()
1399 rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err); in rxe_dereg_mr()
1401 kfree_rcu_mightsleep(mr); in rxe_dereg_mr()
1405 rxe_err_mr(mr, "returned err = %d", err); in rxe_dereg_mr()
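rxe_dereg_mr() refuses to destroy an MR that still has memory windows bound to it (the num_mw check), then tears down the pool entry and frees the MR only after an RCU grace period, so lockless readers of the pool cannot touch freed memory. A hedged sketch; the err_out label and the return values are assumptions:

static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct rxe_mr *mr = to_rmr(ibmr);
	int err, cleanup_err;

	/* an MR with bound MWs must not be deregistered */
	if (atomic_read(&mr->num_mw) > 0) {
		err = -EINVAL;
		rxe_dbg_mr(mr, "mr has mw's bound");
		goto err_out;
	}

	cleanup_err = rxe_cleanup(mr);
	if (cleanup_err)
		rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err);

	/* defer the free past an RCU grace period; may sleep */
	kfree_rcu_mightsleep(mr);
	return 0;

err_out:
	rxe_err_mr(mr, "returned err = %d", err);
	return err;
}
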