// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 HGST, a Western Digital Company.
 */
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/pci-p2pdma.h>
#include <rdma/mr_pool.h>
#include <rdma/rw.h>

enum {
        RDMA_RW_SINGLE_WR,
        RDMA_RW_MULTI_WR,
        RDMA_RW_MR,
        RDMA_RW_SIG_MR,
};

static bool rdma_rw_force_mr;
module_param_named(force_mr, rdma_rw_force_mr, bool, 0);
MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");

/*
 * Report whether memory registration should be used. Memory registration must
 * be used for iWARP devices because of iWARP-specific limitations. Memory
 * registration is also enabled if registering memory might yield better
 * performance than using multiple SGE entries, see rdma_rw_io_needs_mr().
 */
static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u32 port_num)
{
        if (rdma_protocol_iwarp(dev, port_num))
                return true;
        if (dev->attrs.max_sgl_rd)
                return true;
        if (unlikely(rdma_rw_force_mr))
                return true;
        return false;
}

/*
 * Check if the device will use memory registration for this RW operation.
 * For RDMA READs we must use MRs on iWARP and can optionally use them as an
 * optimization otherwise.  Additionally we have a debug option to force usage
 * of MRs to help testing this code path.
 */
static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u32 port_num,
                enum dma_data_direction dir, int dma_nents)
{
        if (dir == DMA_FROM_DEVICE) {
                if (rdma_protocol_iwarp(dev, port_num))
                        return true;
                if (dev->attrs.max_sgl_rd && dma_nents > dev->attrs.max_sgl_rd)
                        return true;
        }
        if (unlikely(rdma_rw_force_mr))
                return true;
        return false;
}

static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev,
                                           bool pi_support)
{
        u32 max_pages;

        if (pi_support)
                max_pages = dev->attrs.max_pi_fast_reg_page_list_len;
        else
                max_pages = dev->attrs.max_fast_reg_page_list_len;

        /* arbitrary limit to avoid allocating gigantic resources */
        return min_t(u32, max_pages, 256);
}

static inline int rdma_rw_inv_key(struct rdma_rw_reg_ctx *reg)
{
        int count = 0;

        if (reg->mr->need_inval) {
                reg->inv_wr.opcode = IB_WR_LOCAL_INV;
                reg->inv_wr.ex.invalidate_rkey = reg->mr->lkey;
                reg->inv_wr.next = &reg->reg_wr.wr;
                count++;
        } else {
                reg->inv_wr.next = NULL;
        }

        return count;
}

/* Caller must have zero-initialized *reg. */
static int rdma_rw_init_one_mr(struct ib_qp *qp, u32 port_num,
                struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
                u32 sg_cnt, u32 offset)
{
        u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
                                                    qp->integrity_en);
        u32 nents = min(sg_cnt, pages_per_mr);
        int count = 0, ret;

        reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
        if (!reg->mr)
                return -EAGAIN;

        count += rdma_rw_inv_key(reg);

        ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
        if (ret < 0 || ret < nents) {
                ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
                return -EINVAL;
        }

        reg->reg_wr.wr.opcode = IB_WR_REG_MR;
        reg->reg_wr.mr = reg->mr;
        reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
        if (rdma_protocol_iwarp(qp->device, port_num))
                reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
        count++;

        reg->sge.addr = reg->mr->iova;
        reg->sge.length = reg->mr->length;
        return count;
}

static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                u32 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
                u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
        struct rdma_rw_reg_ctx *prev = NULL;
        u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
                                                    qp->integrity_en);
        int i, j, ret = 0, count = 0;

        ctx->nr_ops = DIV_ROUND_UP(sg_cnt, pages_per_mr);
        ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL);
        if (!ctx->reg) {
                ret = -ENOMEM;
                goto out;
        }

        for (i = 0; i < ctx->nr_ops; i++) {
                struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
                u32 nents = min(sg_cnt, pages_per_mr);

                ret = rdma_rw_init_one_mr(qp, port_num, reg, sg, sg_cnt,
                                offset);
                if (ret < 0)
                        goto out_free;
                count += ret;

                if (prev) {
                        if (reg->mr->need_inval)
                                prev->wr.wr.next = &reg->inv_wr;
                        else
                                prev->wr.wr.next = &reg->reg_wr.wr;
                }

                reg->reg_wr.wr.next = &reg->wr.wr;

                reg->wr.wr.sg_list = &reg->sge;
                reg->wr.wr.num_sge = 1;
                reg->wr.remote_addr = remote_addr;
                reg->wr.rkey = rkey;
                if (dir == DMA_TO_DEVICE) {
                        reg->wr.wr.opcode = IB_WR_RDMA_WRITE;
                } else if (!rdma_cap_read_inv(qp->device, port_num)) {
                        reg->wr.wr.opcode = IB_WR_RDMA_READ;
                } else {
                        reg->wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
                        reg->wr.wr.ex.invalidate_rkey = reg->mr->lkey;
                }
                count++;

                remote_addr += reg->sge.length;
                sg_cnt -= nents;
                for (j = 0; j < nents; j++)
                        sg = sg_next(sg);
                prev = reg;
                offset = 0;
        }

        if (prev)
                prev->wr.wr.next = NULL;

        ctx->type = RDMA_RW_MR;
        return count;

out_free:
        while (--i >= 0)
                ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
        kfree(ctx->reg);
out:
        return ret;
}

static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                struct scatterlist *sg, u32 sg_cnt, u32 offset,
                u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
        u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
                      qp->max_read_sge;
        struct ib_sge *sge;
        u32 total_len = 0, i, j;

        ctx->nr_ops = DIV_ROUND_UP(sg_cnt, max_sge);

        ctx->map.sges = sge = kcalloc(sg_cnt, sizeof(*sge), GFP_KERNEL);
        if (!ctx->map.sges)
                goto out;

        ctx->map.wrs = kcalloc(ctx->nr_ops, sizeof(*ctx->map.wrs), GFP_KERNEL);
        if (!ctx->map.wrs)
                goto out_free_sges;

        for (i = 0; i < ctx->nr_ops; i++) {
                struct ib_rdma_wr *rdma_wr = &ctx->map.wrs[i];
                u32 nr_sge = min(sg_cnt, max_sge);

                if (dir == DMA_TO_DEVICE)
                        rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
                else
                        rdma_wr->wr.opcode = IB_WR_RDMA_READ;
                rdma_wr->remote_addr = remote_addr + total_len;
                rdma_wr->rkey = rkey;
                rdma_wr->wr.num_sge = nr_sge;
                rdma_wr->wr.sg_list = sge;

                for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
                        sge->addr = sg_dma_address(sg) + offset;
                        sge->length = sg_dma_len(sg) - offset;
                        sge->lkey = qp->pd->local_dma_lkey;

                        total_len += sge->length;
                        sge++;
                        sg_cnt--;
                        offset = 0;
                }

                rdma_wr->wr.next = i + 1 < ctx->nr_ops ?
                        &ctx->map.wrs[i + 1].wr : NULL;
        }

        ctx->type = RDMA_RW_MULTI_WR;
        return ctx->nr_ops;

out_free_sges:
        kfree(ctx->map.sges);
out:
        return -ENOMEM;
}

static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey,
                enum dma_data_direction dir)
{
        struct ib_rdma_wr *rdma_wr = &ctx->single.wr;

        ctx->nr_ops = 1;

        ctx->single.sge.lkey = qp->pd->local_dma_lkey;
        ctx->single.sge.addr = sg_dma_address(sg) + offset;
        ctx->single.sge.length = sg_dma_len(sg) - offset;

        memset(rdma_wr, 0, sizeof(*rdma_wr));
        if (dir == DMA_TO_DEVICE)
                rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
        else
                rdma_wr->wr.opcode = IB_WR_RDMA_READ;
        rdma_wr->wr.sg_list = &ctx->single.sge;
        rdma_wr->wr.num_sge = 1;
        rdma_wr->remote_addr = remote_addr;
        rdma_wr->rkey = rkey;

        ctx->type = RDMA_RW_SINGLE_WR;
        return 1;
}

static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
                             u32 sg_cnt, enum dma_data_direction dir)
{
        if (is_pci_p2pdma_page(sg_page(sg)))
                pci_p2pdma_unmap_sg(dev->dma_device, sg, sg_cnt, dir);
        else
                ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
}

static int rdma_rw_map_sgtable(struct ib_device *dev, struct sg_table *sgt,
                               enum dma_data_direction dir)
{
        int nents;

        if (is_pci_p2pdma_page(sg_page(sgt->sgl))) {
                if (WARN_ON_ONCE(ib_uses_virt_dma(dev)))
                        return 0;
                nents = pci_p2pdma_map_sg(dev->dma_device, sgt->sgl,
                                          sgt->orig_nents, dir);
                if (!nents)
                        return -EIO;
                sgt->nents = nents;
                return 0;
        }
        return ib_dma_map_sgtable_attrs(dev, sgt, dir, 0);
}

/**
 * rdma_rw_ctx_init - initialize an RDMA READ/WRITE context
 * @ctx:        context to initialize
 * @qp:         queue pair to operate on
 * @port_num:   port num to which the connection is bound
 * @sg:         scatterlist to READ/WRITE from/to
 * @sg_cnt:     number of entries in @sg
 * @sg_offset:  current byte offset into @sg
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey:       remote key to operate on
 * @dir:        %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the send queue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
                struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
                u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
        struct ib_device *dev = qp->pd->device;
        struct sg_table sgt = {
                .sgl = sg,
                .orig_nents = sg_cnt,
        };
        int ret;

        ret = rdma_rw_map_sgtable(dev, &sgt, dir);
        if (ret)
                return ret;
        sg_cnt = sgt.nents;

        /*
         * Skip to the S/G entry that sg_offset falls into:
         */
        for (;;) {
                u32 len = sg_dma_len(sg);

                if (sg_offset < len)
                        break;

                sg = sg_next(sg);
                sg_offset -= len;
                sg_cnt--;
        }

        ret = -EIO;
        if (WARN_ON_ONCE(sg_cnt == 0))
                goto out_unmap_sg;

        if (rdma_rw_io_needs_mr(qp->device, port_num, dir, sg_cnt)) {
                ret = rdma_rw_init_mr_wrs(ctx, qp, port_num, sg, sg_cnt,
                                sg_offset, remote_addr, rkey, dir);
        } else if (sg_cnt > 1) {
                ret = rdma_rw_init_map_wrs(ctx, qp, sg, sg_cnt, sg_offset,
                                remote_addr, rkey, dir);
        } else {
                ret = rdma_rw_init_single_wr(ctx, qp, sg, sg_offset,
                                remote_addr, rkey, dir);
        }

        if (ret < 0)
                goto out_unmap_sg;
        return ret;

out_unmap_sg:
        rdma_rw_unmap_sg(dev, sgt.sgl, sgt.orig_nents, dir);
        return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_init);
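
/*
 * Illustrative usage sketch, not part of this file's API: the structure and
 * callbacks below ("my_io", "my_read_done", etc.) are hypothetical, but the
 * init -> post -> destroy sequence shows the expected way for an upper layer
 * to drive a single RDMA READ through this interface:
 *
 *	struct my_io {
 *		struct rdma_rw_ctx rw;
 *		struct ib_cqe cqe;
 *		struct ib_qp *qp;
 *		u32 port_num;
 *		struct scatterlist *sgl;
 *		u32 sg_cnt;
 *	};
 *
 *	static void my_read_done(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct my_io *io = container_of(wc->wr_cqe, struct my_io, cqe);
 *
 *		rdma_rw_ctx_destroy(&io->rw, io->qp, io->port_num, io->sgl,
 *				    io->sg_cnt, DMA_FROM_DEVICE);
 *		(complete the upper-layer I/O here)
 *	}
 *
 *	static int my_read(struct my_io *io, u64 remote_addr, u32 rkey)
 *	{
 *		int ret;
 *
 *		io->cqe.done = my_read_done;
 *		ret = rdma_rw_ctx_init(&io->rw, io->qp, io->port_num, io->sgl,
 *				       io->sg_cnt, 0, remote_addr, rkey,
 *				       DMA_FROM_DEVICE);
 *		if (ret < 0)
 *			return ret;
 *		return rdma_rw_ctx_post(&io->rw, io->qp, io->port_num,
 *					&io->cqe, NULL);
 *	}
 */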

/**
 * rdma_rw_ctx_signature_init - initialize a RW context with signature offload
 * @ctx:        context to initialize
 * @qp:         queue pair to operate on
 * @port_num:   port num to which the connection is bound
 * @sg:         scatterlist to READ/WRITE from/to
 * @sg_cnt:     number of entries in @sg
 * @prot_sg:    scatterlist to READ/WRITE protection information from/to
 * @prot_sg_cnt: number of entries in @prot_sg
 * @sig_attrs:  signature offloading algorithms
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey:       remote key to operate on
 * @dir:        %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the send queue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                u32 port_num, struct scatterlist *sg, u32 sg_cnt,
                struct scatterlist *prot_sg, u32 prot_sg_cnt,
                struct ib_sig_attrs *sig_attrs,
                u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
        struct ib_device *dev = qp->pd->device;
        u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
                                                    qp->integrity_en);
        struct sg_table sgt = {
                .sgl = sg,
                .orig_nents = sg_cnt,
        };
        struct sg_table prot_sgt = {
                .sgl = prot_sg,
                .orig_nents = prot_sg_cnt,
        };
        struct ib_rdma_wr *rdma_wr;
        int count = 0, ret;

        if (sg_cnt > pages_per_mr || prot_sg_cnt > pages_per_mr) {
                pr_err("SG count too large: sg_cnt=%u, prot_sg_cnt=%u, pages_per_mr=%u\n",
                       sg_cnt, prot_sg_cnt, pages_per_mr);
                return -EINVAL;
        }

        ret = rdma_rw_map_sgtable(dev, &sgt, dir);
        if (ret)
                return ret;

        if (prot_sg_cnt) {
                ret = rdma_rw_map_sgtable(dev, &prot_sgt, dir);
                if (ret)
                        goto out_unmap_sg;
        }

        ctx->type = RDMA_RW_SIG_MR;
        ctx->nr_ops = 1;
        ctx->reg = kzalloc(sizeof(*ctx->reg), GFP_KERNEL);
        if (!ctx->reg) {
                ret = -ENOMEM;
                goto out_unmap_prot_sg;
        }

        ctx->reg->mr = ib_mr_pool_get(qp, &qp->sig_mrs);
        if (!ctx->reg->mr) {
                ret = -EAGAIN;
                goto out_free_ctx;
        }

        count += rdma_rw_inv_key(ctx->reg);

        memcpy(ctx->reg->mr->sig_attrs, sig_attrs, sizeof(struct ib_sig_attrs));

        ret = ib_map_mr_sg_pi(ctx->reg->mr, sg, sgt.nents, NULL, prot_sg,
                              prot_sgt.nents, NULL, SZ_4K);
        if (unlikely(ret)) {
                pr_err("failed to map PI sg (%u)\n",
                       sgt.nents + prot_sgt.nents);
                goto out_destroy_sig_mr;
        }

        ctx->reg->reg_wr.wr.opcode = IB_WR_REG_MR_INTEGRITY;
        ctx->reg->reg_wr.wr.wr_cqe = NULL;
        ctx->reg->reg_wr.wr.num_sge = 0;
        ctx->reg->reg_wr.wr.send_flags = 0;
        ctx->reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
        if (rdma_protocol_iwarp(qp->device, port_num))
                ctx->reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
        ctx->reg->reg_wr.mr = ctx->reg->mr;
        ctx->reg->reg_wr.key = ctx->reg->mr->lkey;
        count++;

        ctx->reg->sge.addr = ctx->reg->mr->iova;
        ctx->reg->sge.length = ctx->reg->mr->length;
        if (sig_attrs->wire.sig_type == IB_SIG_TYPE_NONE)
                ctx->reg->sge.length -= ctx->reg->mr->sig_attrs->meta_length;

        rdma_wr = &ctx->reg->wr;
        rdma_wr->wr.sg_list = &ctx->reg->sge;
        rdma_wr->wr.num_sge = 1;
        rdma_wr->remote_addr = remote_addr;
        rdma_wr->rkey = rkey;
        if (dir == DMA_TO_DEVICE)
                rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
        else
                rdma_wr->wr.opcode = IB_WR_RDMA_READ;
        ctx->reg->reg_wr.wr.next = &rdma_wr->wr;
        count++;

        return count;

out_destroy_sig_mr:
        ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);
out_free_ctx:
        kfree(ctx->reg);
out_unmap_prot_sg:
        if (prot_sgt.nents)
                rdma_rw_unmap_sg(dev, prot_sgt.sgl, prot_sgt.orig_nents, dir);
out_unmap_sg:
        rdma_rw_unmap_sg(dev, sgt.sgl, sgt.orig_nents, dir);
        return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
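
/*
 * Illustrative sketch only: one plausible way an upper layer could fill in
 * the &struct ib_sig_attrs handed to rdma_rw_ctx_signature_init() for
 * T10-DIF protection on the wire and no protection in local memory.  The
 * concrete values (512-byte protection interval, CRC guard, reference tag
 * seeded from a hypothetical "first_lba") are examples, not requirements of
 * this API:
 *
 *	struct ib_sig_attrs sig_attrs = {
 *		.mem.sig_type		= IB_SIG_TYPE_NONE,
 *		.wire.sig_type		= IB_SIG_TYPE_T10_DIF,
 *		.wire.sig.dif.bg_type	= IB_T10DIF_CRC,
 *		.wire.sig.dif.pi_interval = 512,
 *		.wire.sig.dif.ref_tag	= first_lba,
 *		.wire.sig.dif.ref_remap	= true,
 *		.check_mask		= IB_SIG_CHECK_GUARD |
 *					  IB_SIG_CHECK_REFTAG,
 *	};
 */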

/*
 * Now that we are going to post the WRs we can update the lkey and need_inval
 * state on the MRs.  If we were doing this at init time, we would get double
 * or missing invalidations if a context was initialized but not actually
 * posted.
 */
static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval)
{
        reg->mr->need_inval = need_inval;
        ib_update_fast_reg_key(reg->mr, ib_inc_rkey(reg->mr->lkey));
        reg->reg_wr.key = reg->mr->lkey;
        reg->sge.lkey = reg->mr->lkey;
}

/**
 * rdma_rw_ctx_wrs - return chain of WRs for an RDMA READ or WRITE operation
 * @ctx:        context to operate on
 * @qp:         queue pair to operate on
 * @port_num:   port num to which the connection is bound
 * @cqe:        completion queue entry for the last WR
 * @chain_wr:   WR to append to the posted chain
 *
 * Return the WR chain for the set of RDMA READ/WRITE operations described by
 * @ctx, as well as any memory registration operations needed.  If @chain_wr
 * is non-NULL the WR it points to will be appended to the chain of WRs posted.
 * If @chain_wr is not set @cqe must be set so that the caller gets a
 * completion notification.
 */
struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
        struct ib_send_wr *first_wr, *last_wr;
        int i;

        switch (ctx->type) {
        case RDMA_RW_SIG_MR:
        case RDMA_RW_MR:
                for (i = 0; i < ctx->nr_ops; i++) {
                        rdma_rw_update_lkey(&ctx->reg[i],
                                ctx->reg[i].wr.wr.opcode !=
                                        IB_WR_RDMA_READ_WITH_INV);
                }

                if (ctx->reg[0].inv_wr.next)
                        first_wr = &ctx->reg[0].inv_wr;
                else
                        first_wr = &ctx->reg[0].reg_wr.wr;
                last_wr = &ctx->reg[ctx->nr_ops - 1].wr.wr;
                break;
        case RDMA_RW_MULTI_WR:
                first_wr = &ctx->map.wrs[0].wr;
                last_wr = &ctx->map.wrs[ctx->nr_ops - 1].wr;
                break;
        case RDMA_RW_SINGLE_WR:
                first_wr = &ctx->single.wr.wr;
                last_wr = &ctx->single.wr.wr;
                break;
        default:
                BUG();
        }

        if (chain_wr) {
                last_wr->next = chain_wr;
        } else {
                last_wr->wr_cqe = cqe;
                last_wr->send_flags |= IB_SEND_SIGNALED;
        }

        return first_wr;
}
EXPORT_SYMBOL(rdma_rw_ctx_wrs);
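
/*
 * Illustrative sketch only ("rsp" and its members are hypothetical): a
 * typical target-side pattern is to chain the ULP's own response send WR
 * behind the RDMA WRITE chain, so a single ib_post_send() covers both and
 * the completion comes from the chained WR rather than from @cqe:
 *
 *	struct ib_send_wr *first_wr;
 *
 *	rsp->send_wr.wr_cqe = &rsp->send_cqe;
 *	rsp->send_wr.send_flags = IB_SEND_SIGNALED;
 *	first_wr = rdma_rw_ctx_wrs(&rsp->rw, qp, port_num, NULL,
 *				   &rsp->send_wr);
 *	ret = ib_post_send(qp, first_wr, NULL);
 */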

/**
 * rdma_rw_ctx_post - post an RDMA READ or RDMA WRITE operation
 * @ctx:        context to operate on
 * @qp:         queue pair to operate on
 * @port_num:   port num to which the connection is bound
 * @cqe:        completion queue entry for the last WR
 * @chain_wr:   WR to append to the posted chain
 *
 * Post the set of RDMA READ/WRITE operations described by @ctx, as well as
 * any memory registration operations needed.  If @chain_wr is non-NULL the
 * WR it points to will be appended to the chain of WRs posted.  If @chain_wr
 * is not set @cqe must be set so that the caller gets a completion
 * notification.
 */
int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
                struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
        struct ib_send_wr *first_wr;

        first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
        return ib_post_send(qp, first_wr, NULL);
}
EXPORT_SYMBOL(rdma_rw_ctx_post);

/**
 * rdma_rw_ctx_destroy - release all resources allocated by rdma_rw_ctx_init
 * @ctx:        context to release
 * @qp:         queue pair to operate on
 * @port_num:   port num to which the connection is bound
 * @sg:         scatterlist that was used for the READ/WRITE
 * @sg_cnt:     number of entries in @sg
 * @dir:        %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                         u32 port_num, struct scatterlist *sg, u32 sg_cnt,
                         enum dma_data_direction dir)
{
        int i;

        switch (ctx->type) {
        case RDMA_RW_MR:
                for (i = 0; i < ctx->nr_ops; i++)
                        ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
                kfree(ctx->reg);
                break;
        case RDMA_RW_MULTI_WR:
                kfree(ctx->map.wrs);
                kfree(ctx->map.sges);
                break;
        case RDMA_RW_SINGLE_WR:
                break;
        default:
                BUG();
                break;
        }

        rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);

/**
 * rdma_rw_ctx_destroy_signature - release all resources allocated by
 *      rdma_rw_ctx_signature_init
 * @ctx:        context to release
 * @qp:         queue pair to operate on
 * @port_num:   port num to which the connection is bound
 * @sg:         scatterlist that was used for the READ/WRITE
 * @sg_cnt:     number of entries in @sg
 * @prot_sg:    scatterlist that was used for the READ/WRITE of the PI
 * @prot_sg_cnt: number of entries in @prot_sg
 * @dir:        %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                u32 port_num, struct scatterlist *sg, u32 sg_cnt,
                struct scatterlist *prot_sg, u32 prot_sg_cnt,
                enum dma_data_direction dir)
{
        if (WARN_ON_ONCE(ctx->type != RDMA_RW_SIG_MR))
                return;

        ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);
        kfree(ctx->reg);

        if (prot_sg_cnt)
                rdma_rw_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
        rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);

/**
 * rdma_rw_mr_factor - return number of MRs required for a payload
 * @device:     device handling the connection
 * @port_num:   port num to which the connection is bound
 * @maxpages:   maximum payload pages per rdma_rw_ctx
 *
 * Returns the number of MRs the device requires to move @maxpages worth of
 * payload. The returned value is used during transport creation to compute
 * max_rdma_ctxs and the size of the transport's Send and Send Completion
 * Queues.
 */
unsigned int rdma_rw_mr_factor(struct ib_device *device, u32 port_num,
                               unsigned int maxpages)
{
        unsigned int mr_pages;

        if (rdma_rw_can_use_mr(device, port_num))
                mr_pages = rdma_rw_fr_page_list_len(device, false);
        else
                mr_pages = device->attrs.max_sge_rd;
        return DIV_ROUND_UP(maxpages, mr_pages);
}
EXPORT_SYMBOL(rdma_rw_mr_factor);
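
/*
 * Illustrative sketch only: how a transport might use rdma_rw_mr_factor()
 * when sizing its queue pair.  "queue_depth" and "max_payload_pages" are
 * hypothetical per-transport values; ib_create_qp() takes care of calling
 * rdma_rw_init_qp() below once cap.max_rdma_ctxs is set:
 *
 *	attr.cap.max_rdma_ctxs = queue_depth *
 *		rdma_rw_mr_factor(dev, port_num, max_payload_pages);
 *	qp = ib_create_qp(pd, &attr);
 */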

void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
{
        u32 factor;

        WARN_ON_ONCE(attr->port_num == 0);

        /*
         * Each context needs at least one RDMA READ or WRITE WR.
         *
         * For some hardware we might need more, eventually we should ask the
         * HCA driver for a multiplier here.
         */
        factor = 1;

        /*
         * If the device needs MRs to perform RDMA READ or WRITE operations,
         * we'll need two additional WRs per context for the registration and
         * the invalidation.
         */
        if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN ||
            rdma_rw_can_use_mr(dev, attr->port_num))
                factor += 2;    /* inv + reg */

        attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs;

        /*
         * But maybe we were just too high in the sky and the device doesn't
         * even support all we need, and we'll have to live with what we get..
         */
        attr->cap.max_send_wr =
                min_t(u32, attr->cap.max_send_wr, dev->attrs.max_qp_wr);
}

int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
{
        struct ib_device *dev = qp->pd->device;
        u32 nr_mrs = 0, nr_sig_mrs = 0, max_num_sg = 0;
        int ret = 0;

        if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) {
                nr_sig_mrs = attr->cap.max_rdma_ctxs;
                nr_mrs = attr->cap.max_rdma_ctxs;
                max_num_sg = rdma_rw_fr_page_list_len(dev, true);
        } else if (rdma_rw_can_use_mr(dev, attr->port_num)) {
                nr_mrs = attr->cap.max_rdma_ctxs;
                max_num_sg = rdma_rw_fr_page_list_len(dev, false);
        }

        if (nr_mrs) {
                ret = ib_mr_pool_init(qp, &qp->rdma_mrs, nr_mrs,
                                IB_MR_TYPE_MEM_REG,
                                max_num_sg, 0);
                if (ret) {
                        pr_err("%s: failed to allocate %u MRs\n",
                                __func__, nr_mrs);
                        return ret;
                }
        }

        if (nr_sig_mrs) {
                ret = ib_mr_pool_init(qp, &qp->sig_mrs, nr_sig_mrs,
                                IB_MR_TYPE_INTEGRITY, max_num_sg, max_num_sg);
                if (ret) {
                        pr_err("%s: failed to allocate %u SIG MRs\n",
                                __func__, nr_sig_mrs);
                        goto out_free_rdma_mrs;
                }
        }

        return 0;

out_free_rdma_mrs:
        ib_mr_pool_destroy(qp, &qp->rdma_mrs);
        return ret;
}

void rdma_rw_cleanup_mrs(struct ib_qp *qp)
{
        ib_mr_pool_destroy(qp, &qp->sig_mrs);
        ib_mr_pool_destroy(qp, &qp->rdma_mrs);
}