// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 HGST, a Western Digital Company.
 */
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/pci-p2pdma.h>
#include <rdma/mr_pool.h>
#include <rdma/rw.h>

enum {
	RDMA_RW_SINGLE_WR,
	RDMA_RW_MULTI_WR,
	RDMA_RW_MR,
	RDMA_RW_SIG_MR,
};

static bool rdma_rw_force_mr;
module_param_named(force_mr, rdma_rw_force_mr, bool, 0);
MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");

/*
 * Report whether memory registration should be used. Memory registration must
 * be used for iWarp devices because of iWARP-specific limitations. Memory
 * registration is also enabled if registering memory might yield better
 * performance than using multiple SGE entries, see rdma_rw_io_needs_mr().
 */
static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
{
	if (rdma_protocol_iwarp(dev, port_num))
		return true;
	if (dev->attrs.max_sgl_rd)
		return true;
	if (unlikely(rdma_rw_force_mr))
		return true;
	return false;
}

/*
 * Check if the device will use memory registration for this RW operation.
 * For RDMA READs we must use MRs on iWarp and can optionally use them as an
 * optimization otherwise. Additionally we have a debug option to force usage
 * of MRs to help testing this code path.
 */
static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
		enum dma_data_direction dir, int dma_nents)
{
	if (dir == DMA_FROM_DEVICE) {
		if (rdma_protocol_iwarp(dev, port_num))
			return true;
		if (dev->attrs.max_sgl_rd && dma_nents > dev->attrs.max_sgl_rd)
			return true;
	}
	if (unlikely(rdma_rw_force_mr))
		return true;
	return false;
}

static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev,
					   bool pi_support)
{
	u32 max_pages;

	if (pi_support)
		max_pages = dev->attrs.max_pi_fast_reg_page_list_len;
	else
		max_pages = dev->attrs.max_fast_reg_page_list_len;

	/* arbitrary limit to avoid allocating gigantic resources */
	return min_t(u32, max_pages, 256);
}

static inline int rdma_rw_inv_key(struct rdma_rw_reg_ctx *reg)
{
	int count = 0;

	if (reg->mr->need_inval) {
		reg->inv_wr.opcode = IB_WR_LOCAL_INV;
		reg->inv_wr.ex.invalidate_rkey = reg->mr->lkey;
		reg->inv_wr.next = &reg->reg_wr.wr;
		count++;
	} else {
		reg->inv_wr.next = NULL;
	}

	return count;
}
/* Caller must have zero-initialized *reg. */
static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
		struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
		u32 sg_cnt, u32 offset)
{
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
						    qp->integrity_en);
	u32 nents = min(sg_cnt, pages_per_mr);
	int count = 0, ret;

	reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
	if (!reg->mr)
		return -EAGAIN;

	count += rdma_rw_inv_key(reg);

	ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
	if (ret < 0 || ret < nents) {
		ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
		return -EINVAL;
	}

	reg->reg_wr.wr.opcode = IB_WR_REG_MR;
	reg->reg_wr.mr = reg->mr;
	reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
	if (rdma_protocol_iwarp(qp->device, port_num))
		reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
	count++;

	reg->sge.addr = reg->mr->iova;
	reg->sge.length = reg->mr->length;
	return count;
}

static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct rdma_rw_reg_ctx *prev = NULL;
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
						    qp->integrity_en);
	int i, j, ret = 0, count = 0;

	ctx->nr_ops = (sg_cnt + pages_per_mr - 1) / pages_per_mr;
	ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL);
	if (!ctx->reg) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < ctx->nr_ops; i++) {
		struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
		u32 nents = min(sg_cnt, pages_per_mr);

		ret = rdma_rw_init_one_mr(qp, port_num, reg, sg, sg_cnt,
				offset);
		if (ret < 0)
			goto out_free;
		count += ret;

		if (prev) {
			if (reg->mr->need_inval)
				prev->wr.wr.next = &reg->inv_wr;
			else
				prev->wr.wr.next = &reg->reg_wr.wr;
		}

		reg->reg_wr.wr.next = &reg->wr.wr;

		reg->wr.wr.sg_list = &reg->sge;
		reg->wr.wr.num_sge = 1;
		reg->wr.remote_addr = remote_addr;
		reg->wr.rkey = rkey;
		if (dir == DMA_TO_DEVICE) {
			reg->wr.wr.opcode = IB_WR_RDMA_WRITE;
		} else if (!rdma_cap_read_inv(qp->device, port_num)) {
			reg->wr.wr.opcode = IB_WR_RDMA_READ;
		} else {
			reg->wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
			reg->wr.wr.ex.invalidate_rkey = reg->mr->lkey;
		}
		count++;

		remote_addr += reg->sge.length;
		sg_cnt -= nents;
		for (j = 0; j < nents; j++)
			sg = sg_next(sg);
		prev = reg;
		offset = 0;
	}

	if (prev)
		prev->wr.wr.next = NULL;

	ctx->type = RDMA_RW_MR;
	return count;

out_free:
	while (--i >= 0)
		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
	kfree(ctx->reg);
out:
	return ret;
}
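/*
 * Editor's note (illustrative summary, not part of the original source): in
 * the RDMA_RW_MR case built above, each rdma_rw_reg_ctx contributes a short
 * WR chain of the form
 *
 *	[IB_WR_LOCAL_INV] -> IB_WR_REG_MR -> IB_WR_RDMA_READ/WRITE
 *
 * where the LOCAL_INV is only present when the MR taken from the pool still
 * needs to be invalidated.  Successive ops are linked by pointing the
 * previous RDMA WR at the next op's invalidate (or registration) WR, so the
 * whole context can later be posted as a single chain by rdma_rw_ctx_wrs().
 */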
static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		struct scatterlist *sg, u32 sg_cnt, u32 offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
					     qp->max_read_sge;
	struct ib_sge *sge;
	u32 total_len = 0, i, j;

	ctx->nr_ops = DIV_ROUND_UP(sg_cnt, max_sge);

	ctx->map.sges = sge = kcalloc(sg_cnt, sizeof(*sge), GFP_KERNEL);
	if (!ctx->map.sges)
		goto out;

	ctx->map.wrs = kcalloc(ctx->nr_ops, sizeof(*ctx->map.wrs), GFP_KERNEL);
	if (!ctx->map.wrs)
		goto out_free_sges;

	for (i = 0; i < ctx->nr_ops; i++) {
		struct ib_rdma_wr *rdma_wr = &ctx->map.wrs[i];
		u32 nr_sge = min(sg_cnt, max_sge);

		if (dir == DMA_TO_DEVICE)
			rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
		else
			rdma_wr->wr.opcode = IB_WR_RDMA_READ;
		rdma_wr->remote_addr = remote_addr + total_len;
		rdma_wr->rkey = rkey;
		rdma_wr->wr.num_sge = nr_sge;
		rdma_wr->wr.sg_list = sge;

		for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
			sge->addr = sg_dma_address(sg) + offset;
			sge->length = sg_dma_len(sg) - offset;
			sge->lkey = qp->pd->local_dma_lkey;

			total_len += sge->length;
			sge++;
			sg_cnt--;
			offset = 0;
		}

		rdma_wr->wr.next = i + 1 < ctx->nr_ops ?
			&ctx->map.wrs[i + 1].wr : NULL;
	}

	ctx->type = RDMA_RW_MULTI_WR;
	return ctx->nr_ops;

out_free_sges:
	kfree(ctx->map.sges);
out:
	return -ENOMEM;
}

static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey,
		enum dma_data_direction dir)
{
	struct ib_rdma_wr *rdma_wr = &ctx->single.wr;

	ctx->nr_ops = 1;

	ctx->single.sge.lkey = qp->pd->local_dma_lkey;
	ctx->single.sge.addr = sg_dma_address(sg) + offset;
	ctx->single.sge.length = sg_dma_len(sg) - offset;

	memset(rdma_wr, 0, sizeof(*rdma_wr));
	if (dir == DMA_TO_DEVICE)
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
	else
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
	rdma_wr->wr.sg_list = &ctx->single.sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->remote_addr = remote_addr;
	rdma_wr->rkey = rkey;

	ctx->type = RDMA_RW_SINGLE_WR;
	return 1;
}
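/*
 * Editor's summary (not part of the original file): rdma_rw_ctx_init() below
 * picks one of the three helpers above based on the mapped S/G list:
 *
 *	- MR required (iWarp, long READs, or force_mr set)
 *		-> rdma_rw_init_mr_wrs()	(ctx->type == RDMA_RW_MR)
 *	- more than one DMA entry
 *		-> rdma_rw_init_map_wrs()	(ctx->type == RDMA_RW_MULTI_WR)
 *	- exactly one DMA entry
 *		-> rdma_rw_init_single_wr()	(ctx->type == RDMA_RW_SINGLE_WR)
 */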
/**
 * rdma_rw_ctx_init - initialize a RDMA READ/WRITE context
 * @ctx:	context to initialize
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist to READ/WRITE from/to
 * @sg_cnt:	number of entries in @sg
 * @sg_offset:	current byte offset into @sg
 * @remote_addr:remote address to read/write (relative to @rkey)
 * @rkey:	remote key to operate on
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the workqueue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	int ret;

	if (is_pci_p2pdma_page(sg_page(sg)))
		ret = pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
	else
		ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);

	if (!ret)
		return -ENOMEM;
	sg_cnt = ret;

	/*
	 * Skip to the S/G entry that sg_offset falls into:
	 */
	for (;;) {
		u32 len = sg_dma_len(sg);

		if (sg_offset < len)
			break;

		sg = sg_next(sg);
		sg_offset -= len;
		sg_cnt--;
	}

	ret = -EIO;
	if (WARN_ON_ONCE(sg_cnt == 0))
		goto out_unmap_sg;

	if (rdma_rw_io_needs_mr(qp->device, port_num, dir, sg_cnt)) {
		ret = rdma_rw_init_mr_wrs(ctx, qp, port_num, sg, sg_cnt,
				sg_offset, remote_addr, rkey, dir);
	} else if (sg_cnt > 1) {
		ret = rdma_rw_init_map_wrs(ctx, qp, sg, sg_cnt, sg_offset,
				remote_addr, rkey, dir);
	} else {
		ret = rdma_rw_init_single_wr(ctx, qp, sg, sg_offset,
				remote_addr, rkey, dir);
	}

	if (ret < 0)
		goto out_unmap_sg;
	return ret;

out_unmap_sg:
	ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
	return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_init);
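/*
 * Editor's usage sketch (illustrative only, not part of the original file;
 * the qp, cqe, sg, sg_cnt, remote_addr and rkey used below are assumed to be
 * set up by the calling ULP).  A typical caller drives one context per I/O:
 *
 *	struct rdma_rw_ctx ctx;
 *	int ret;
 *
 *	ret = rdma_rw_ctx_init(&ctx, qp, port_num, sg, sg_cnt, 0,
 *			       remote_addr, rkey, DMA_FROM_DEVICE);
 *	if (ret < 0)
 *		return ret;
 *	ret = rdma_rw_ctx_post(&ctx, qp, port_num, cqe, NULL);
 *
 * and, once the completion for the last WR has been observed in the send
 * completion handler:
 *
 *	rdma_rw_ctx_destroy(&ctx, qp, port_num, sg, sg_cnt, DMA_FROM_DEVICE);
 */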
/**
 * rdma_rw_ctx_signature_init - initialize a RW context with signature offload
 * @ctx:	context to initialize
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist to READ/WRITE from/to
 * @sg_cnt:	number of entries in @sg
 * @prot_sg:	scatterlist to READ/WRITE protection information from/to
 * @prot_sg_cnt: number of entries in @prot_sg
 * @sig_attrs:	signature offloading algorithms
 * @remote_addr:remote address to read/write (relative to @rkey)
 * @rkey:	remote key to operate on
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the workqueue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		struct ib_sig_attrs *sig_attrs,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
						    qp->integrity_en);
	struct ib_rdma_wr *rdma_wr;
	int count = 0, ret;

	if (sg_cnt > pages_per_mr || prot_sg_cnt > pages_per_mr) {
		pr_err("SG count too large: sg_cnt=%d, prot_sg_cnt=%d, pages_per_mr=%d\n",
		       sg_cnt, prot_sg_cnt, pages_per_mr);
		return -EINVAL;
	}

	ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
	if (!ret)
		return -ENOMEM;
	sg_cnt = ret;

	if (prot_sg_cnt) {
		ret = ib_dma_map_sg(dev, prot_sg, prot_sg_cnt, dir);
		if (!ret) {
			ret = -ENOMEM;
			goto out_unmap_sg;
		}
		prot_sg_cnt = ret;
	}

	ctx->type = RDMA_RW_SIG_MR;
	ctx->nr_ops = 1;
	ctx->reg = kcalloc(1, sizeof(*ctx->reg), GFP_KERNEL);
	if (!ctx->reg) {
		ret = -ENOMEM;
		goto out_unmap_prot_sg;
	}

	ctx->reg->mr = ib_mr_pool_get(qp, &qp->sig_mrs);
	if (!ctx->reg->mr) {
		ret = -EAGAIN;
		goto out_free_ctx;
	}

	count += rdma_rw_inv_key(ctx->reg);

	memcpy(ctx->reg->mr->sig_attrs, sig_attrs, sizeof(struct ib_sig_attrs));

	ret = ib_map_mr_sg_pi(ctx->reg->mr, sg, sg_cnt, NULL, prot_sg,
			      prot_sg_cnt, NULL, SZ_4K);
	if (unlikely(ret)) {
		pr_err("failed to map PI sg (%d)\n", sg_cnt + prot_sg_cnt);
		goto out_destroy_sig_mr;
	}

	ctx->reg->reg_wr.wr.opcode = IB_WR_REG_MR_INTEGRITY;
	ctx->reg->reg_wr.wr.wr_cqe = NULL;
	ctx->reg->reg_wr.wr.num_sge = 0;
	ctx->reg->reg_wr.wr.send_flags = 0;
	ctx->reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
	if (rdma_protocol_iwarp(qp->device, port_num))
		ctx->reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
	ctx->reg->reg_wr.mr = ctx->reg->mr;
	ctx->reg->reg_wr.key = ctx->reg->mr->lkey;
	count++;

	ctx->reg->sge.addr = ctx->reg->mr->iova;
	ctx->reg->sge.length = ctx->reg->mr->length;
	if (sig_attrs->wire.sig_type == IB_SIG_TYPE_NONE)
		ctx->reg->sge.length -= ctx->reg->mr->sig_attrs->meta_length;

	rdma_wr = &ctx->reg->wr;
	rdma_wr->wr.sg_list = &ctx->reg->sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->remote_addr = remote_addr;
	rdma_wr->rkey = rkey;
	if (dir == DMA_TO_DEVICE)
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
	else
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
	ctx->reg->reg_wr.wr.next = &rdma_wr->wr;
	count++;

	return count;

out_destroy_sig_mr:
	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);
out_free_ctx:
	kfree(ctx->reg);
out_unmap_prot_sg:
	if (prot_sg_cnt)
		ib_dma_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
out_unmap_sg:
	ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
	return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
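/*
 * Editor's note (illustrative, not from the original source): the signature
 * variant is driven the same way as the plain context, except that the ULP
 * also supplies a protection-information scatterlist and an ib_sig_attrs
 * describing the DIF layout the HCA should generate or verify, e.g.
 *
 *	ret = rdma_rw_ctx_signature_init(&ctx, qp, port_num, sg, sg_cnt,
 *					 prot_sg, prot_sg_cnt, &sig_attrs,
 *					 remote_addr, rkey, dir);
 *
 * and releases the context with rdma_rw_ctx_destroy_signature() instead of
 * rdma_rw_ctx_destroy().
 */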
/*
 * Now that we are going to post the WRs we can update the lkey and need_inval
 * state on the MRs. If we were doing this at init time, we would get double
 * or missing invalidations if a context was initialized but not actually
 * posted.
 */
static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval)
{
	reg->mr->need_inval = need_inval;
	ib_update_fast_reg_key(reg->mr, ib_inc_rkey(reg->mr->lkey));
	reg->reg_wr.key = reg->mr->lkey;
	reg->sge.lkey = reg->mr->lkey;
}

/**
 * rdma_rw_ctx_wrs - return chain of WRs for a RDMA READ or WRITE operation
 * @ctx:	context to operate on
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @cqe:	completion queue entry for the last WR
 * @chain_wr:	WR to append to the posted chain
 *
 * Return the WR chain for the set of RDMA READ/WRITE operations described by
 * @ctx, as well as any memory registration operations needed. If @chain_wr
 * is non-NULL the WR it points to will be appended to the chain of WRs posted.
 * If @chain_wr is not set @cqe must be set so that the caller gets a
 * completion notification.
 */
struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct ib_send_wr *first_wr, *last_wr;
	int i;

	switch (ctx->type) {
	case RDMA_RW_SIG_MR:
	case RDMA_RW_MR:
		for (i = 0; i < ctx->nr_ops; i++) {
			rdma_rw_update_lkey(&ctx->reg[i],
				ctx->reg[i].wr.wr.opcode !=
					IB_WR_RDMA_READ_WITH_INV);
		}

		if (ctx->reg[0].inv_wr.next)
			first_wr = &ctx->reg[0].inv_wr;
		else
			first_wr = &ctx->reg[0].reg_wr.wr;
		last_wr = &ctx->reg[ctx->nr_ops - 1].wr.wr;
		break;
	case RDMA_RW_MULTI_WR:
		first_wr = &ctx->map.wrs[0].wr;
		last_wr = &ctx->map.wrs[ctx->nr_ops - 1].wr;
		break;
	case RDMA_RW_SINGLE_WR:
		first_wr = &ctx->single.wr.wr;
		last_wr = &ctx->single.wr.wr;
		break;
	default:
		BUG();
	}

	if (chain_wr) {
		last_wr->next = chain_wr;
	} else {
		last_wr->wr_cqe = cqe;
		last_wr->send_flags |= IB_SEND_SIGNALED;
	}

	return first_wr;
}
EXPORT_SYMBOL(rdma_rw_ctx_wrs);
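/*
 * Editor's sketch (illustrative only, not part of the original file): target
 * ULPs commonly use rdma_rw_ctx_wrs() to chain their own final WR, e.g. a
 * SEND carrying the response, behind the READ/WRITE chain so that everything
 * is posted with a single ib_post_send() call.  The send_wr and resp_cqe
 * below are assumed to be set up by the caller:
 *
 *	struct ib_send_wr *first_wr;
 *
 *	send_wr.wr_cqe = &resp_cqe;
 *	send_wr.send_flags = IB_SEND_SIGNALED;
 *	first_wr = rdma_rw_ctx_wrs(&ctx, qp, port_num, NULL, &send_wr);
 *	ret = ib_post_send(qp, first_wr, NULL);
 *
 * When @chain_wr is supplied like this, @cqe may be NULL because the chained
 * WR carries the completion for the whole chain.
 */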
/**
 * rdma_rw_ctx_post - post a RDMA READ or RDMA WRITE operation
 * @ctx:	context to operate on
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @cqe:	completion queue entry for the last WR
 * @chain_wr:	WR to append to the posted chain
 *
 * Post the set of RDMA READ/WRITE operations described by @ctx, as well as
 * any memory registration operations needed. If @chain_wr is non-NULL the
 * WR it points to will be appended to the chain of WRs posted. If @chain_wr
 * is not set @cqe must be set so that the caller gets a completion
 * notification.
 */
int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct ib_send_wr *first_wr;

	first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
	return ib_post_send(qp, first_wr, NULL);
}
EXPORT_SYMBOL(rdma_rw_ctx_post);

/**
 * rdma_rw_ctx_destroy - release all resources allocated by rdma_rw_ctx_init
 * @ctx:	context to release
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist that was used for the READ/WRITE
 * @sg_cnt:	number of entries in @sg
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct scatterlist *sg, u32 sg_cnt, enum dma_data_direction dir)
{
	int i;

	switch (ctx->type) {
	case RDMA_RW_MR:
		for (i = 0; i < ctx->nr_ops; i++)
			ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
		kfree(ctx->reg);
		break;
	case RDMA_RW_MULTI_WR:
		kfree(ctx->map.wrs);
		kfree(ctx->map.sges);
		break;
	case RDMA_RW_SINGLE_WR:
		break;
	default:
		BUG();
		break;
	}

	if (is_pci_p2pdma_page(sg_page(sg)))
		pci_p2pdma_unmap_sg(qp->pd->device->dma_device, sg,
				    sg_cnt, dir);
	else
		ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);

/**
 * rdma_rw_ctx_destroy_signature - release all resources allocated by
 *	rdma_rw_ctx_signature_init
 * @ctx:	context to release
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist that was used for the READ/WRITE
 * @sg_cnt:	number of entries in @sg
 * @prot_sg:	scatterlist that was used for the READ/WRITE of the PI
 * @prot_sg_cnt: number of entries in @prot_sg
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		enum dma_data_direction dir)
{
	if (WARN_ON_ONCE(ctx->type != RDMA_RW_SIG_MR))
		return;

	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);
	kfree(ctx->reg);

	ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
	if (prot_sg_cnt)
		ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);
/**
 * rdma_rw_mr_factor - return number of MRs required for a payload
 * @device:	device handling the connection
 * @port_num:	port num to which the connection is bound
 * @maxpages:	maximum payload pages per rdma_rw_ctx
 *
 * Returns the number of MRs the device requires to move a payload of
 * @maxpages pages. The returned value is used during transport creation to
 * compute max_rdma_ctxs and the size of the transport's Send and
 * Send Completion Queues.
 */
unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num,
			       unsigned int maxpages)
{
	unsigned int mr_pages;

	if (rdma_rw_can_use_mr(device, port_num))
		mr_pages = rdma_rw_fr_page_list_len(device, false);
	else
		mr_pages = device->attrs.max_sge_rd;
	return DIV_ROUND_UP(maxpages, mr_pages);
}
EXPORT_SYMBOL(rdma_rw_mr_factor);

void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
{
	u32 factor;

	WARN_ON_ONCE(attr->port_num == 0);

	/*
	 * Each context needs at least one RDMA READ or WRITE WR.
	 *
	 * For some hardware we might need more, eventually we should ask the
	 * HCA driver for a multiplier here.
	 */
	factor = 1;

	/*
	 * If the device needs MRs to perform RDMA READ or WRITE operations,
	 * we'll need two additional WRs per context for the registration and
	 * the invalidation.
	 */
	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN ||
	    rdma_rw_can_use_mr(dev, attr->port_num))
		factor += 2;	/* inv + reg */

	attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs;

	/*
	 * But maybe we were just too high in the sky and the device doesn't
	 * even support all we need, and we'll have to live with what we get..
	 */
	attr->cap.max_send_wr =
		min_t(u32, attr->cap.max_send_wr, dev->attrs.max_qp_wr);
}

int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
{
	struct ib_device *dev = qp->pd->device;
	u32 nr_mrs = 0, nr_sig_mrs = 0, max_num_sg = 0;
	int ret = 0;

	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) {
		nr_sig_mrs = attr->cap.max_rdma_ctxs;
		nr_mrs = attr->cap.max_rdma_ctxs;
		max_num_sg = rdma_rw_fr_page_list_len(dev, true);
	} else if (rdma_rw_can_use_mr(dev, attr->port_num)) {
		nr_mrs = attr->cap.max_rdma_ctxs;
		max_num_sg = rdma_rw_fr_page_list_len(dev, false);
	}

	if (nr_mrs) {
		ret = ib_mr_pool_init(qp, &qp->rdma_mrs, nr_mrs,
				IB_MR_TYPE_MEM_REG,
				max_num_sg, 0);
		if (ret) {
			pr_err("%s: failed to allocate %d MRs\n",
				__func__, nr_mrs);
			return ret;
		}
	}

	if (nr_sig_mrs) {
		ret = ib_mr_pool_init(qp, &qp->sig_mrs, nr_sig_mrs,
				IB_MR_TYPE_INTEGRITY, max_num_sg, max_num_sg);
		if (ret) {
			pr_err("%s: failed to allocate %d SIG MRs\n",
				__func__, nr_sig_mrs);
			goto out_free_rdma_mrs;
		}
	}

	return 0;

out_free_rdma_mrs:
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
	return ret;
}

void rdma_rw_cleanup_mrs(struct ib_qp *qp)
{
	ib_mr_pool_destroy(qp, &qp->sig_mrs);
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
}
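/*
 * Editor's sketch (illustrative only, not part of the original file): a
 * transport typically sizes its per-QP RDMA context budget with
 * rdma_rw_mr_factor() before creating the QP; QP creation then calls
 * rdma_rw_init_qp() and rdma_rw_init_mrs() above to reserve the extra send
 * WRs and MR pools.  The queue_depth and max_data_pages values below are
 * assumptions of the example:
 *
 *	struct ib_qp_init_attr attr = { };
 *
 *	attr.cap.max_rdma_ctxs = queue_depth *
 *			rdma_rw_mr_factor(dev, port_num, max_data_pages);
 *	attr.port_num = port_num;
 *	ret = rdma_create_qp(cm_id, pd, &attr);
 */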