/*
 * Copyright (c) 2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/pci-p2pdma.h>
#include <rdma/mr_pool.h>
#include <rdma/rw.h>

enum {
	RDMA_RW_SINGLE_WR,
	RDMA_RW_MULTI_WR,
	RDMA_RW_MR,
	RDMA_RW_SIG_MR,
};

static bool rdma_rw_force_mr;
module_param_named(force_mr, rdma_rw_force_mr, bool, 0);
MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");

/*
 * Check if the device might use memory registration. This is currently only
 * true for iWarp devices. In the future we can hopefully fine tune this based
 * on HCA driver input.
 */
static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
{
	if (rdma_protocol_iwarp(dev, port_num))
		return true;
	if (unlikely(rdma_rw_force_mr))
		return true;
	return false;
}

/*
 * Check if the device will use memory registration for this RW operation.
 * We currently always use memory registrations for iWarp RDMA READs, and
 * have a debug option to force usage of MRs.
 *
 * XXX: In the future we can hopefully fine tune this based on HCA driver
 * input.
 */
static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
		enum dma_data_direction dir, int dma_nents)
{
	if (rdma_protocol_iwarp(dev, port_num) && dir == DMA_FROM_DEVICE)
		return true;
	if (unlikely(rdma_rw_force_mr))
		return true;
	return false;
}

static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev,
					   bool pi_support)
{
	u32 max_pages;

	if (pi_support)
		max_pages = dev->attrs.max_pi_fast_reg_page_list_len;
	else
		max_pages = dev->attrs.max_fast_reg_page_list_len;

	/* arbitrary limit to avoid allocating gigantic resources */
	return min_t(u32, max_pages, 256);
}

static inline int rdma_rw_inv_key(struct rdma_rw_reg_ctx *reg)
{
	int count = 0;

	if (reg->mr->need_inval) {
		reg->inv_wr.opcode = IB_WR_LOCAL_INV;
		reg->inv_wr.ex.invalidate_rkey = reg->mr->lkey;
		reg->inv_wr.next = &reg->reg_wr.wr;
		count++;
	} else {
		reg->inv_wr.next = NULL;
	}

	return count;
}
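
/*
 * Rough shape of the WR chain built by the MR-based path below, per
 * registered MR (the leading LOCAL_INV is only chained in when the MR taken
 * from the pool still has need_inval set, see rdma_rw_inv_key() above):
 *
 *	[IB_WR_LOCAL_INV] -> IB_WR_REG_MR -> IB_WR_RDMA_READ/WRITE -> next MR ...
 */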

/* Caller must have zero-initialized *reg. */
static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
		struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
		u32 sg_cnt, u32 offset)
{
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
						    qp->integrity_en);
	u32 nents = min(sg_cnt, pages_per_mr);
	int count = 0, ret;

	reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
	if (!reg->mr)
		return -EAGAIN;

	count += rdma_rw_inv_key(reg);

	ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
	if (ret < 0 || ret < nents) {
		ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
		return -EINVAL;
	}

	reg->reg_wr.wr.opcode = IB_WR_REG_MR;
	reg->reg_wr.mr = reg->mr;
	reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
	if (rdma_protocol_iwarp(qp->device, port_num))
		reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
	count++;

	reg->sge.addr = reg->mr->iova;
	reg->sge.length = reg->mr->length;
	return count;
}

static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct rdma_rw_reg_ctx *prev = NULL;
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
						    qp->integrity_en);
	int i, j, ret = 0, count = 0;

	ctx->nr_ops = (sg_cnt + pages_per_mr - 1) / pages_per_mr;
	ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL);
	if (!ctx->reg) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < ctx->nr_ops; i++) {
		struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
		u32 nents = min(sg_cnt, pages_per_mr);

		ret = rdma_rw_init_one_mr(qp, port_num, reg, sg, sg_cnt,
				offset);
		if (ret < 0)
			goto out_free;
		count += ret;

		if (prev) {
			if (reg->mr->need_inval)
				prev->wr.wr.next = &reg->inv_wr;
			else
				prev->wr.wr.next = &reg->reg_wr.wr;
		}

		reg->reg_wr.wr.next = &reg->wr.wr;

		reg->wr.wr.sg_list = &reg->sge;
		reg->wr.wr.num_sge = 1;
		reg->wr.remote_addr = remote_addr;
		reg->wr.rkey = rkey;
		if (dir == DMA_TO_DEVICE) {
			reg->wr.wr.opcode = IB_WR_RDMA_WRITE;
		} else if (!rdma_cap_read_inv(qp->device, port_num)) {
			reg->wr.wr.opcode = IB_WR_RDMA_READ;
		} else {
			reg->wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
			reg->wr.wr.ex.invalidate_rkey = reg->mr->lkey;
		}
		count++;

		remote_addr += reg->sge.length;
		sg_cnt -= nents;
		for (j = 0; j < nents; j++)
			sg = sg_next(sg);
		prev = reg;
		offset = 0;
	}

	if (prev)
		prev->wr.wr.next = NULL;

	ctx->type = RDMA_RW_MR;
	return count;

out_free:
	while (--i >= 0)
		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
	kfree(ctx->reg);
out:
	return ret;
}
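
/*
 * The non-MR path below maps the scatterlist onto a chain of plain RDMA
 * READ/WRITE WRs, splitting it into at most max_read/write_sge SGEs per WR.
 * No memory registration is needed; every SGE simply uses the PD's
 * local_dma_lkey.
 */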
static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		struct scatterlist *sg, u32 sg_cnt, u32 offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
		      qp->max_read_sge;
	struct ib_sge *sge;
	u32 total_len = 0, i, j;

	ctx->nr_ops = DIV_ROUND_UP(sg_cnt, max_sge);

	ctx->map.sges = sge = kcalloc(sg_cnt, sizeof(*sge), GFP_KERNEL);
	if (!ctx->map.sges)
		goto out;

	ctx->map.wrs = kcalloc(ctx->nr_ops, sizeof(*ctx->map.wrs), GFP_KERNEL);
	if (!ctx->map.wrs)
		goto out_free_sges;

	for (i = 0; i < ctx->nr_ops; i++) {
		struct ib_rdma_wr *rdma_wr = &ctx->map.wrs[i];
		u32 nr_sge = min(sg_cnt, max_sge);

		if (dir == DMA_TO_DEVICE)
			rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
		else
			rdma_wr->wr.opcode = IB_WR_RDMA_READ;
		rdma_wr->remote_addr = remote_addr + total_len;
		rdma_wr->rkey = rkey;
		rdma_wr->wr.num_sge = nr_sge;
		rdma_wr->wr.sg_list = sge;

		for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
			sge->addr = sg_dma_address(sg) + offset;
			sge->length = sg_dma_len(sg) - offset;
			sge->lkey = qp->pd->local_dma_lkey;

			total_len += sge->length;
			sge++;
			sg_cnt--;
			offset = 0;
		}

		rdma_wr->wr.next = i + 1 < ctx->nr_ops ?
			&ctx->map.wrs[i + 1].wr : NULL;
	}

	ctx->type = RDMA_RW_MULTI_WR;
	return ctx->nr_ops;

out_free_sges:
	kfree(ctx->map.sges);
out:
	return -ENOMEM;
}
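
/*
 * Fast path for a scatterlist that collapsed to a single DMA entry: one RDMA
 * READ/WRITE WR with a single SGE, again using the PD's local_dma_lkey.
 */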
static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey,
		enum dma_data_direction dir)
{
	struct ib_rdma_wr *rdma_wr = &ctx->single.wr;

	ctx->nr_ops = 1;

	ctx->single.sge.lkey = qp->pd->local_dma_lkey;
	ctx->single.sge.addr = sg_dma_address(sg) + offset;
	ctx->single.sge.length = sg_dma_len(sg) - offset;

	memset(rdma_wr, 0, sizeof(*rdma_wr));
	if (dir == DMA_TO_DEVICE)
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
	else
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
	rdma_wr->wr.sg_list = &ctx->single.sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->remote_addr = remote_addr;
	rdma_wr->rkey = rkey;

	ctx->type = RDMA_RW_SINGLE_WR;
	return 1;
}

/**
 * rdma_rw_ctx_init - initialize a RDMA READ/WRITE context
 * @ctx:	context to initialize
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist to READ/WRITE from/to
 * @sg_cnt:	number of entries in @sg
 * @sg_offset:	current byte offset into @sg
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey:	remote key to operate on
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the workqueue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	int ret;

	if (is_pci_p2pdma_page(sg_page(sg)))
		ret = pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
	else
		ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);

	if (!ret)
		return -ENOMEM;
	sg_cnt = ret;

	/*
	 * Skip to the S/G entry that sg_offset falls into:
	 */
	for (;;) {
		u32 len = sg_dma_len(sg);

		if (sg_offset < len)
			break;

		sg = sg_next(sg);
		sg_offset -= len;
		sg_cnt--;
	}

	ret = -EIO;
	if (WARN_ON_ONCE(sg_cnt == 0))
		goto out_unmap_sg;

	if (rdma_rw_io_needs_mr(qp->device, port_num, dir, sg_cnt)) {
		ret = rdma_rw_init_mr_wrs(ctx, qp, port_num, sg, sg_cnt,
				sg_offset, remote_addr, rkey, dir);
	} else if (sg_cnt > 1) {
		ret = rdma_rw_init_map_wrs(ctx, qp, sg, sg_cnt, sg_offset,
				remote_addr, rkey, dir);
	} else {
		ret = rdma_rw_init_single_wr(ctx, qp, sg, sg_offset,
				remote_addr, rkey, dir);
	}

	if (ret < 0)
		goto out_unmap_sg;
	return ret;

out_unmap_sg:
	ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
	return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_init);
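
/*
 * Typical caller flow, as a rough sketch only (error handling, completion
 * handling and the surrounding transport code are omitted, and the variable
 * names are made up for illustration):
 *
 *	struct rdma_rw_ctx ctx;
 *	int nr_wqe;
 *
 *	nr_wqe = rdma_rw_ctx_init(&ctx, qp, port_num, sgl, sg_cnt, 0,
 *				  remote_addr, rkey, DMA_FROM_DEVICE);
 *	if (nr_wqe < 0)
 *		return nr_wqe;
 *	ret = rdma_rw_ctx_post(&ctx, qp, port_num, &done_cqe, NULL);
 *	...
 *	rdma_rw_ctx_destroy(&ctx, qp, port_num, sgl, sg_cnt, DMA_FROM_DEVICE);
 */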

/**
 * rdma_rw_ctx_signature_init - initialize a RW context with signature offload
 * @ctx:	context to initialize
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist to READ/WRITE from/to
 * @sg_cnt:	number of entries in @sg
 * @prot_sg:	scatterlist to READ/WRITE protection information from/to
 * @prot_sg_cnt: number of entries in @prot_sg
 * @sig_attrs:	signature offloading algorithms
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey:	remote key to operate on
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the workqueue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		struct ib_sig_attrs *sig_attrs,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
						    qp->integrity_en);
	struct ib_rdma_wr *rdma_wr;
	int count = 0, ret;

	if (sg_cnt > pages_per_mr || prot_sg_cnt > pages_per_mr) {
		pr_err("SG count too large: sg_cnt=%d, prot_sg_cnt=%d, pages_per_mr=%d\n",
		       sg_cnt, prot_sg_cnt, pages_per_mr);
		return -EINVAL;
	}

	ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
	if (!ret)
		return -ENOMEM;
	sg_cnt = ret;

	if (prot_sg_cnt) {
		ret = ib_dma_map_sg(dev, prot_sg, prot_sg_cnt, dir);
		if (!ret) {
			ret = -ENOMEM;
			goto out_unmap_sg;
		}
		prot_sg_cnt = ret;
	}

	ctx->type = RDMA_RW_SIG_MR;
	ctx->nr_ops = 1;
	ctx->reg = kcalloc(1, sizeof(*ctx->reg), GFP_KERNEL);
	if (!ctx->reg) {
		ret = -ENOMEM;
		goto out_unmap_prot_sg;
	}

	ctx->reg->mr = ib_mr_pool_get(qp, &qp->sig_mrs);
	if (!ctx->reg->mr) {
		ret = -EAGAIN;
		goto out_free_ctx;
	}

	count += rdma_rw_inv_key(ctx->reg);

	memcpy(ctx->reg->mr->sig_attrs, sig_attrs, sizeof(struct ib_sig_attrs));

	ret = ib_map_mr_sg_pi(ctx->reg->mr, sg, sg_cnt, NULL, prot_sg,
			      prot_sg_cnt, NULL, SZ_4K);
	if (unlikely(ret)) {
		pr_err("failed to map PI sg (%d)\n", sg_cnt + prot_sg_cnt);
		goto out_destroy_sig_mr;
	}

	ctx->reg->reg_wr.wr.opcode = IB_WR_REG_MR_INTEGRITY;
	ctx->reg->reg_wr.wr.wr_cqe = NULL;
	ctx->reg->reg_wr.wr.num_sge = 0;
	ctx->reg->reg_wr.wr.send_flags = 0;
	ctx->reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
	if (rdma_protocol_iwarp(qp->device, port_num))
		ctx->reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
	ctx->reg->reg_wr.mr = ctx->reg->mr;
	ctx->reg->reg_wr.key = ctx->reg->mr->lkey;
	count++;

	ctx->reg->sge.addr = ctx->reg->mr->iova;
	ctx->reg->sge.length = ctx->reg->mr->length;
	if (sig_attrs->wire.sig_type == IB_SIG_TYPE_NONE)
		ctx->reg->sge.length -= ctx->reg->mr->sig_attrs->meta_length;

	rdma_wr = &ctx->reg->wr;
	rdma_wr->wr.sg_list = &ctx->reg->sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->remote_addr = remote_addr;
	rdma_wr->rkey = rkey;
	if (dir == DMA_TO_DEVICE)
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
	else
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
	ctx->reg->reg_wr.wr.next = &rdma_wr->wr;
	count++;

	return count;

out_destroy_sig_mr:
	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);
out_free_ctx:
	kfree(ctx->reg);
out_unmap_prot_sg:
	if (prot_sg_cnt)
		ib_dma_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
out_unmap_sg:
	ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
	return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
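
/*
 * The signature variant above follows the same lifecycle as rdma_rw_ctx_init():
 * build the WR chain with rdma_rw_ctx_wrs() or post it with rdma_rw_ctx_post(),
 * then release the context with rdma_rw_ctx_destroy_signature() rather than
 * rdma_rw_ctx_destroy().
 */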

/*
 * Now that we are going to post the WRs we can update the lkey and need_inval
 * state on the MRs. If we were doing this at init time, we would get double
 * or missing invalidations if a context was initialized but not actually
 * posted.
 */
static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval)
{
	reg->mr->need_inval = need_inval;
	ib_update_fast_reg_key(reg->mr, ib_inc_rkey(reg->mr->lkey));
	reg->reg_wr.key = reg->mr->lkey;
	reg->sge.lkey = reg->mr->lkey;
}

/**
 * rdma_rw_ctx_wrs - return chain of WRs for a RDMA READ or WRITE operation
 * @ctx:	context to operate on
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @cqe:	completion queue entry for the last WR
 * @chain_wr:	WR to append to the posted chain
 *
 * Return the WR chain for the set of RDMA READ/WRITE operations described by
 * @ctx, as well as any memory registration operations needed. If @chain_wr
 * is non-NULL the WR it points to will be appended to the chain of WRs posted.
 * If @chain_wr is not set @cqe must be set so that the caller gets a
 * completion notification.
 */
struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct ib_send_wr *first_wr, *last_wr;
	int i;

	switch (ctx->type) {
	case RDMA_RW_SIG_MR:
	case RDMA_RW_MR:
		for (i = 0; i < ctx->nr_ops; i++) {
			rdma_rw_update_lkey(&ctx->reg[i],
				ctx->reg[i].wr.wr.opcode !=
					IB_WR_RDMA_READ_WITH_INV);
		}

		if (ctx->reg[0].inv_wr.next)
			first_wr = &ctx->reg[0].inv_wr;
		else
			first_wr = &ctx->reg[0].reg_wr.wr;
		last_wr = &ctx->reg[ctx->nr_ops - 1].wr.wr;
		break;
	case RDMA_RW_MULTI_WR:
		first_wr = &ctx->map.wrs[0].wr;
		last_wr = &ctx->map.wrs[ctx->nr_ops - 1].wr;
		break;
	case RDMA_RW_SINGLE_WR:
		first_wr = &ctx->single.wr.wr;
		last_wr = &ctx->single.wr.wr;
		break;
	default:
		BUG();
	}

	if (chain_wr) {
		last_wr->next = chain_wr;
	} else {
		last_wr->wr_cqe = cqe;
		last_wr->send_flags |= IB_SEND_SIGNALED;
	}

	return first_wr;
}
EXPORT_SYMBOL(rdma_rw_ctx_wrs);
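
/*
 * Callers that need to chain additional WRs behind the RDMA operations (for
 * example a SEND that carries the final completion) can pass them through
 * @chain_wr to rdma_rw_ctx_wrs() and post the returned chain themselves,
 * instead of going through rdma_rw_ctx_post() below.
 */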

/**
 * rdma_rw_ctx_post - post a RDMA READ or RDMA WRITE operation
 * @ctx:	context to operate on
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @cqe:	completion queue entry for the last WR
 * @chain_wr:	WR to append to the posted chain
 *
 * Post the set of RDMA READ/WRITE operations described by @ctx, as well as
 * any memory registration operations needed. If @chain_wr is non-NULL the
 * WR it points to will be appended to the chain of WRs posted. If @chain_wr
 * is not set @cqe must be set so that the caller gets a completion
 * notification.
 */
int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct ib_send_wr *first_wr;

	first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
	return ib_post_send(qp, first_wr, NULL);
}
EXPORT_SYMBOL(rdma_rw_ctx_post);

/**
 * rdma_rw_ctx_destroy - release all resources allocated by rdma_rw_ctx_init
 * @ctx:	context to release
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist that was used for the READ/WRITE
 * @sg_cnt:	number of entries in @sg
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct scatterlist *sg, u32 sg_cnt, enum dma_data_direction dir)
{
	int i;

	switch (ctx->type) {
	case RDMA_RW_MR:
		for (i = 0; i < ctx->nr_ops; i++)
			ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
		kfree(ctx->reg);
		break;
	case RDMA_RW_MULTI_WR:
		kfree(ctx->map.wrs);
		kfree(ctx->map.sges);
		break;
	case RDMA_RW_SINGLE_WR:
		break;
	default:
		BUG();
		break;
	}

	/* P2PDMA contexts do not need to be unmapped */
	if (!is_pci_p2pdma_page(sg_page(sg)))
		ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);

/**
 * rdma_rw_ctx_destroy_signature - release all resources allocated by
 *	rdma_rw_ctx_signature_init
 * @ctx:	context to release
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist that was used for the READ/WRITE
 * @sg_cnt:	number of entries in @sg
 * @prot_sg:	scatterlist that was used for the READ/WRITE of the PI
 * @prot_sg_cnt: number of entries in @prot_sg
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		enum dma_data_direction dir)
{
	if (WARN_ON_ONCE(ctx->type != RDMA_RW_SIG_MR))
		return;

	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);
	kfree(ctx->reg);

	ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
	if (prot_sg_cnt)
		ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);

/**
 * rdma_rw_mr_factor - return number of MRs required for a payload
 * @device:	device handling the connection
 * @port_num:	port num to which the connection is bound
 * @maxpages:	maximum payload pages per rdma_rw_ctx
 *
 * Returns the number of MRs the device requires to move @maxpages pages of
 * payload. The returned value is used during transport creation to compute
 * max_rdma_ctxs and the size of the transport's Send and Send Completion
 * Queues.
 */
unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num,
			       unsigned int maxpages)
{
	unsigned int mr_pages;

	if (rdma_rw_can_use_mr(device, port_num))
		mr_pages = rdma_rw_fr_page_list_len(device, false);
	else
		mr_pages = device->attrs.max_sge_rd;
	return DIV_ROUND_UP(maxpages, mr_pages);
}
EXPORT_SYMBOL(rdma_rw_mr_factor);
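
/*
 * The remaining helpers size and set up the QP for RDMA R/W contexts; they
 * are expected to be called from the core QP create/destroy path (when
 * attr->cap.max_rdma_ctxs is set), not by the individual ULPs.
 */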
void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
{
	u32 factor;

	WARN_ON_ONCE(attr->port_num == 0);

	/*
	 * Each context needs at least one RDMA READ or WRITE WR.
	 *
	 * For some hardware we might need more, eventually we should ask the
	 * HCA driver for a multiplier here.
	 */
	factor = 1;

	/*
	 * If the device needs MRs to perform RDMA READ or WRITE operations,
	 * we'll need two additional WRs per context for the registration and
	 * the invalidation.
	 */
	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN ||
	    rdma_rw_can_use_mr(dev, attr->port_num))
		factor += 2;	/* inv + reg */

	attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs;

	/*
	 * But maybe we were just too high in the sky and the device doesn't
	 * even support all we need, and we'll have to live with what we get..
	 */
	attr->cap.max_send_wr =
		min_t(u32, attr->cap.max_send_wr, dev->attrs.max_qp_wr);
}

int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
{
	struct ib_device *dev = qp->pd->device;
	u32 nr_mrs = 0, nr_sig_mrs = 0, max_num_sg = 0;
	int ret = 0;

	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) {
		nr_sig_mrs = attr->cap.max_rdma_ctxs;
		nr_mrs = attr->cap.max_rdma_ctxs;
		max_num_sg = rdma_rw_fr_page_list_len(dev, true);
	} else if (rdma_rw_can_use_mr(dev, attr->port_num)) {
		nr_mrs = attr->cap.max_rdma_ctxs;
		max_num_sg = rdma_rw_fr_page_list_len(dev, false);
	}

	if (nr_mrs) {
		ret = ib_mr_pool_init(qp, &qp->rdma_mrs, nr_mrs,
				IB_MR_TYPE_MEM_REG,
				max_num_sg, 0);
		if (ret) {
			pr_err("%s: failed to allocate %d MRs\n",
				__func__, nr_mrs);
			return ret;
		}
	}

	if (nr_sig_mrs) {
		ret = ib_mr_pool_init(qp, &qp->sig_mrs, nr_sig_mrs,
				IB_MR_TYPE_INTEGRITY, max_num_sg, max_num_sg);
		if (ret) {
			pr_err("%s: failed to allocate %d SIG MRs\n",
				__func__, nr_sig_mrs);
			goto out_free_rdma_mrs;
		}
	}

	return 0;

out_free_rdma_mrs:
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
	return ret;
}

void rdma_rw_cleanup_mrs(struct ib_qp *qp)
{
	ib_mr_pool_destroy(qp, &qp->sig_mrs);
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
}