xref: /openbmc/linux/drivers/infiniband/core/rw.c (revision 1e97af7f)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 HGST, a Western Digital Company.
 */
#include <linux/memremap.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/pci-p2pdma.h>
#include <rdma/mr_pool.h>
#include <rdma/rw.h>

enum {
	RDMA_RW_SINGLE_WR,
	RDMA_RW_MULTI_WR,
	RDMA_RW_MR,
	RDMA_RW_SIG_MR,
};

static bool rdma_rw_force_mr;
module_param_named(force_mr, rdma_rw_force_mr, bool, 0);
MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");

/*
 * Report whether memory registration should be used. Memory registration must
 * be used for iWARP devices because of iWARP-specific limitations. Memory
 * registration is also enabled if registering memory might yield better
 * performance than using multiple SGE entries; see rdma_rw_io_needs_mr().
 */
static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u32 port_num)
{
	if (rdma_protocol_iwarp(dev, port_num))
		return true;
	if (dev->attrs.max_sgl_rd)
		return true;
	if (unlikely(rdma_rw_force_mr))
		return true;
	return false;
}

/*
 * Check if the device will use memory registration for this RW operation.
 * For RDMA READs we must use MRs on iWARP and can optionally use them as an
 * optimization otherwise.  Additionally we have a debug option to force usage
 * of MRs to help testing this code path.
 */
static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u32 port_num,
		enum dma_data_direction dir, int dma_nents)
{
	if (dir == DMA_FROM_DEVICE) {
		if (rdma_protocol_iwarp(dev, port_num))
			return true;
		if (dev->attrs.max_sgl_rd && dma_nents > dev->attrs.max_sgl_rd)
			return true;
	}
	if (unlikely(rdma_rw_force_mr))
		return true;
	return false;
}
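
/*
 * Worked example of the decision above (illustrative numbers, not taken from
 * this file): an RDMA READ whose payload maps to 8 DMA segments is registered
 * through an MR when the device reports max_sgl_rd == 4, but is posted as
 * plain SGEs when max_sgl_rd >= 8, or when max_sgl_rd == 0 on a non-iWARP
 * device.
 */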

static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev,
					   bool pi_support)
{
	u32 max_pages;

	if (pi_support)
		max_pages = dev->attrs.max_pi_fast_reg_page_list_len;
	else
		max_pages = dev->attrs.max_fast_reg_page_list_len;

	/* arbitrary limit to avoid allocating gigantic resources */
	return min_t(u32, max_pages, 256);
}

static inline int rdma_rw_inv_key(struct rdma_rw_reg_ctx *reg)
{
	int count = 0;

	if (reg->mr->need_inval) {
		reg->inv_wr.opcode = IB_WR_LOCAL_INV;
		reg->inv_wr.ex.invalidate_rkey = reg->mr->lkey;
		reg->inv_wr.next = &reg->reg_wr.wr;
		count++;
	} else {
		reg->inv_wr.next = NULL;
	}

	return count;
}

/* Caller must have zero-initialized *reg. */
static int rdma_rw_init_one_mr(struct ib_qp *qp, u32 port_num,
		struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
		u32 sg_cnt, u32 offset)
{
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
						    qp->integrity_en);
	u32 nents = min(sg_cnt, pages_per_mr);
	int count = 0, ret;

	reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
	if (!reg->mr)
		return -EAGAIN;

	count += rdma_rw_inv_key(reg);

	ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
	if (ret < 0 || ret < nents) {
		ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
		return -EINVAL;
	}

	reg->reg_wr.wr.opcode = IB_WR_REG_MR;
	reg->reg_wr.mr = reg->mr;
	reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
	if (rdma_protocol_iwarp(qp->device, port_num))
		reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
	count++;

	reg->sge.addr = reg->mr->iova;
	reg->sge.length = reg->mr->length;
	return count;
}

static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u32 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct rdma_rw_reg_ctx *prev = NULL;
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
						    qp->integrity_en);
	int i, j, ret = 0, count = 0;

	ctx->nr_ops = DIV_ROUND_UP(sg_cnt, pages_per_mr);
	ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL);
	if (!ctx->reg) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < ctx->nr_ops; i++) {
		struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
		u32 nents = min(sg_cnt, pages_per_mr);

		ret = rdma_rw_init_one_mr(qp, port_num, reg, sg, sg_cnt,
				offset);
		if (ret < 0)
			goto out_free;
		count += ret;

		if (prev) {
			if (reg->mr->need_inval)
				prev->wr.wr.next = &reg->inv_wr;
			else
				prev->wr.wr.next = &reg->reg_wr.wr;
		}

		reg->reg_wr.wr.next = &reg->wr.wr;

		reg->wr.wr.sg_list = &reg->sge;
		reg->wr.wr.num_sge = 1;
		reg->wr.remote_addr = remote_addr;
		reg->wr.rkey = rkey;
		if (dir == DMA_TO_DEVICE) {
			reg->wr.wr.opcode = IB_WR_RDMA_WRITE;
		} else if (!rdma_cap_read_inv(qp->device, port_num)) {
			reg->wr.wr.opcode = IB_WR_RDMA_READ;
		} else {
			reg->wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
			reg->wr.wr.ex.invalidate_rkey = reg->mr->lkey;
		}
		count++;

		remote_addr += reg->sge.length;
		sg_cnt -= nents;
		for (j = 0; j < nents; j++)
			sg = sg_next(sg);
		prev = reg;
		offset = 0;
	}

	if (prev)
		prev->wr.wr.next = NULL;

	ctx->type = RDMA_RW_MR;
	return count;

out_free:
	while (--i >= 0)
		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
	kfree(ctx->reg);
out:
	return ret;
}

static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		struct scatterlist *sg, u32 sg_cnt, u32 offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
		      qp->max_read_sge;
	struct ib_sge *sge;
	u32 total_len = 0, i, j;

	ctx->nr_ops = DIV_ROUND_UP(sg_cnt, max_sge);

	ctx->map.sges = sge = kcalloc(sg_cnt, sizeof(*sge), GFP_KERNEL);
	if (!ctx->map.sges)
		goto out;

	ctx->map.wrs = kcalloc(ctx->nr_ops, sizeof(*ctx->map.wrs), GFP_KERNEL);
	if (!ctx->map.wrs)
		goto out_free_sges;

	for (i = 0; i < ctx->nr_ops; i++) {
		struct ib_rdma_wr *rdma_wr = &ctx->map.wrs[i];
		u32 nr_sge = min(sg_cnt, max_sge);

		if (dir == DMA_TO_DEVICE)
			rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
		else
			rdma_wr->wr.opcode = IB_WR_RDMA_READ;
		rdma_wr->remote_addr = remote_addr + total_len;
		rdma_wr->rkey = rkey;
		rdma_wr->wr.num_sge = nr_sge;
		rdma_wr->wr.sg_list = sge;

		for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
			sge->addr = sg_dma_address(sg) + offset;
			sge->length = sg_dma_len(sg) - offset;
			sge->lkey = qp->pd->local_dma_lkey;

			total_len += sge->length;
			sge++;
			sg_cnt--;
			offset = 0;
		}

		rdma_wr->wr.next = i + 1 < ctx->nr_ops ?
			&ctx->map.wrs[i + 1].wr : NULL;
	}

	ctx->type = RDMA_RW_MULTI_WR;
	return ctx->nr_ops;

out_free_sges:
	kfree(ctx->map.sges);
out:
	return -ENOMEM;
}

static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey,
		enum dma_data_direction dir)
{
	struct ib_rdma_wr *rdma_wr = &ctx->single.wr;

	ctx->nr_ops = 1;

	ctx->single.sge.lkey = qp->pd->local_dma_lkey;
	ctx->single.sge.addr = sg_dma_address(sg) + offset;
	ctx->single.sge.length = sg_dma_len(sg) - offset;

	memset(rdma_wr, 0, sizeof(*rdma_wr));
	if (dir == DMA_TO_DEVICE)
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
	else
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
	rdma_wr->wr.sg_list = &ctx->single.sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->remote_addr = remote_addr;
	rdma_wr->rkey = rkey;

	ctx->type = RDMA_RW_SINGLE_WR;
	return 1;
}

/**
 * rdma_rw_ctx_init - initialize an RDMA READ/WRITE context
 * @ctx:	context to initialize
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist to READ/WRITE from/to
 * @sg_cnt:	number of entries in @sg
 * @sg_offset:	current byte offset into @sg
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey:	remote key to operate on
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the send queue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
		struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	struct sg_table sgt = {
		.sgl = sg,
		.orig_nents = sg_cnt,
	};
	int ret;

	ret = ib_dma_map_sgtable_attrs(dev, &sgt, dir, 0);
	if (ret)
		return ret;
	sg_cnt = sgt.nents;

	/*
	 * Skip to the S/G entry that sg_offset falls into:
	 */
	for (;;) {
		u32 len = sg_dma_len(sg);

		if (sg_offset < len)
			break;

		sg = sg_next(sg);
		sg_offset -= len;
		sg_cnt--;
	}

	ret = -EIO;
	if (WARN_ON_ONCE(sg_cnt == 0))
		goto out_unmap_sg;

	if (rdma_rw_io_needs_mr(qp->device, port_num, dir, sg_cnt)) {
		ret = rdma_rw_init_mr_wrs(ctx, qp, port_num, sg, sg_cnt,
				sg_offset, remote_addr, rkey, dir);
	} else if (sg_cnt > 1) {
		ret = rdma_rw_init_map_wrs(ctx, qp, sg, sg_cnt, sg_offset,
				remote_addr, rkey, dir);
	} else {
		ret = rdma_rw_init_single_wr(ctx, qp, sg, sg_offset,
				remote_addr, rkey, dir);
	}

	if (ret < 0)
		goto out_unmap_sg;
	return ret;

out_unmap_sg:
	ib_dma_unmap_sgtable_attrs(dev, &sgt, dir, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_init);
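
/*
 * Typical calling sequence, as an illustrative sketch only (placeholder names
 * such as "sgl", "nents" and "done_cqe", and the DMA direction, are
 * assumptions, not taken from this file):
 *
 *	struct rdma_rw_ctx ctx;
 *	int ret;
 *
 *	ret = rdma_rw_ctx_init(&ctx, qp, port_num, sgl, nents, 0,
 *			       remote_addr, rkey, DMA_TO_DEVICE);
 *	if (ret < 0)
 *		return ret;
 *	ret = rdma_rw_ctx_post(&ctx, qp, port_num, &done_cqe, NULL);
 *	... wait for the completion that carries &done_cqe ...
 *	rdma_rw_ctx_destroy(&ctx, qp, port_num, sgl, nents, DMA_TO_DEVICE);
 */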

/**
 * rdma_rw_ctx_signature_init - initialize a RW context with signature offload
 * @ctx:	context to initialize
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist to READ/WRITE from/to
 * @sg_cnt:	number of entries in @sg
 * @prot_sg:	scatterlist to READ/WRITE protection information from/to
 * @prot_sg_cnt: number of entries in @prot_sg
 * @sig_attrs:	signature offloading algorithms
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey:	remote key to operate on
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the send queue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u32 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		struct ib_sig_attrs *sig_attrs,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
						    qp->integrity_en);
	struct sg_table sgt = {
		.sgl = sg,
		.orig_nents = sg_cnt,
	};
	struct sg_table prot_sgt = {
		.sgl = prot_sg,
		.orig_nents = prot_sg_cnt,
	};
	struct ib_rdma_wr *rdma_wr;
	int count = 0, ret;

	if (sg_cnt > pages_per_mr || prot_sg_cnt > pages_per_mr) {
		pr_err("SG count too large: sg_cnt=%u, prot_sg_cnt=%u, pages_per_mr=%u\n",
		       sg_cnt, prot_sg_cnt, pages_per_mr);
		return -EINVAL;
	}

	ret = ib_dma_map_sgtable_attrs(dev, &sgt, dir, 0);
	if (ret)
		return ret;

	if (prot_sg_cnt) {
		ret = ib_dma_map_sgtable_attrs(dev, &prot_sgt, dir, 0);
		if (ret)
			goto out_unmap_sg;
	}

	ctx->type = RDMA_RW_SIG_MR;
	ctx->nr_ops = 1;
	ctx->reg = kzalloc(sizeof(*ctx->reg), GFP_KERNEL);
	if (!ctx->reg) {
		ret = -ENOMEM;
		goto out_unmap_prot_sg;
	}

	ctx->reg->mr = ib_mr_pool_get(qp, &qp->sig_mrs);
	if (!ctx->reg->mr) {
		ret = -EAGAIN;
		goto out_free_ctx;
	}

	count += rdma_rw_inv_key(ctx->reg);

	memcpy(ctx->reg->mr->sig_attrs, sig_attrs, sizeof(struct ib_sig_attrs));

	ret = ib_map_mr_sg_pi(ctx->reg->mr, sg, sgt.nents, NULL, prot_sg,
			      prot_sgt.nents, NULL, SZ_4K);
	if (unlikely(ret)) {
		pr_err("failed to map PI sg (%u)\n",
		       sgt.nents + prot_sgt.nents);
		goto out_destroy_sig_mr;
	}

	ctx->reg->reg_wr.wr.opcode = IB_WR_REG_MR_INTEGRITY;
	ctx->reg->reg_wr.wr.wr_cqe = NULL;
	ctx->reg->reg_wr.wr.num_sge = 0;
	ctx->reg->reg_wr.wr.send_flags = 0;
	ctx->reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
	if (rdma_protocol_iwarp(qp->device, port_num))
		ctx->reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
	ctx->reg->reg_wr.mr = ctx->reg->mr;
	ctx->reg->reg_wr.key = ctx->reg->mr->lkey;
	count++;

	ctx->reg->sge.addr = ctx->reg->mr->iova;
	ctx->reg->sge.length = ctx->reg->mr->length;
	if (sig_attrs->wire.sig_type == IB_SIG_TYPE_NONE)
		ctx->reg->sge.length -= ctx->reg->mr->sig_attrs->meta_length;

	rdma_wr = &ctx->reg->wr;
	rdma_wr->wr.sg_list = &ctx->reg->sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->remote_addr = remote_addr;
	rdma_wr->rkey = rkey;
	if (dir == DMA_TO_DEVICE)
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
	else
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
	ctx->reg->reg_wr.wr.next = &rdma_wr->wr;
	count++;

	return count;

out_destroy_sig_mr:
	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);
out_free_ctx:
	kfree(ctx->reg);
out_unmap_prot_sg:
	if (prot_sgt.nents)
		ib_dma_unmap_sgtable_attrs(dev, &prot_sgt, dir, 0);
out_unmap_sg:
	ib_dma_unmap_sgtable_attrs(dev, &sgt, dir, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
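
/*
 * Note on the teardown path (a summary mirroring the non-PI lifecycle): a
 * signature context is turned into WRs and posted with rdma_rw_ctx_wrs() or
 * rdma_rw_ctx_post() like any other context, but must be released with
 * rdma_rw_ctx_destroy_signature() so that the MR goes back to qp->sig_mrs
 * and both the data and protection scatterlists are unmapped.
 */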

/*
 * Now that we are going to post the WRs we can update the lkey and need_inval
 * state on the MRs.  If we were doing this at init time, we would get double
 * or missing invalidations if a context was initialized but not actually
 * posted.
 */
static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval)
{
	reg->mr->need_inval = need_inval;
	ib_update_fast_reg_key(reg->mr, ib_inc_rkey(reg->mr->lkey));
	reg->reg_wr.key = reg->mr->lkey;
	reg->sge.lkey = reg->mr->lkey;
}

/**
 * rdma_rw_ctx_wrs - return chain of WRs for an RDMA READ or WRITE operation
 * @ctx:	context to operate on
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @cqe:	completion queue entry for the last WR
 * @chain_wr:	WR to append to the posted chain
 *
 * Return the WR chain for the set of RDMA READ/WRITE operations described by
 * @ctx, as well as any memory registration operations needed.  If @chain_wr
 * is non-NULL the WR it points to will be appended to the chain of WRs posted.
 * If @chain_wr is not set @cqe must be set so that the caller gets a
 * completion notification.
 */
struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct ib_send_wr *first_wr, *last_wr;
	int i;

	switch (ctx->type) {
	case RDMA_RW_SIG_MR:
	case RDMA_RW_MR:
		for (i = 0; i < ctx->nr_ops; i++) {
			rdma_rw_update_lkey(&ctx->reg[i],
				ctx->reg[i].wr.wr.opcode !=
					IB_WR_RDMA_READ_WITH_INV);
		}

		if (ctx->reg[0].inv_wr.next)
			first_wr = &ctx->reg[0].inv_wr;
		else
			first_wr = &ctx->reg[0].reg_wr.wr;
		last_wr = &ctx->reg[ctx->nr_ops - 1].wr.wr;
		break;
	case RDMA_RW_MULTI_WR:
		first_wr = &ctx->map.wrs[0].wr;
		last_wr = &ctx->map.wrs[ctx->nr_ops - 1].wr;
		break;
	case RDMA_RW_SINGLE_WR:
		first_wr = &ctx->single.wr.wr;
		last_wr = &ctx->single.wr.wr;
		break;
	default:
		BUG();
	}

	if (chain_wr) {
		last_wr->next = chain_wr;
	} else {
		last_wr->wr_cqe = cqe;
		last_wr->send_flags |= IB_SEND_SIGNALED;
	}

	return first_wr;
}
EXPORT_SYMBOL(rdma_rw_ctx_wrs);
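
/*
 * Illustrative use of @chain_wr (a sketch with assumed caller state, not code
 * from this file): an upper layer that wants to send a reply once the RDMA
 * WRITEs complete can hang its own Send WR, carrying its own wr_cqe, off the
 * chain and post everything in one call:
 *
 *	struct ib_send_wr *first;
 *
 *	first = rdma_rw_ctx_wrs(&ctx, qp, port_num, NULL, &send_wr);
 *	ret = ib_post_send(qp, first, NULL);
 */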

/**
 * rdma_rw_ctx_post - post an RDMA READ or RDMA WRITE operation
 * @ctx:	context to operate on
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @cqe:	completion queue entry for the last WR
 * @chain_wr:	WR to append to the posted chain
 *
 * Post the set of RDMA READ/WRITE operations described by @ctx, as well as
 * any memory registration operations needed.  If @chain_wr is non-NULL the
 * WR it points to will be appended to the chain of WRs posted.  If @chain_wr
 * is not set @cqe must be set so that the caller gets a completion
 * notification.
 */
int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
		struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct ib_send_wr *first_wr;

	first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
	return ib_post_send(qp, first_wr, NULL);
}
EXPORT_SYMBOL(rdma_rw_ctx_post);

/**
 * rdma_rw_ctx_destroy - release all resources allocated by rdma_rw_ctx_init
 * @ctx:	context to release
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist that was used for the READ/WRITE
 * @sg_cnt:	number of entries in @sg
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
			 u32 port_num, struct scatterlist *sg, u32 sg_cnt,
			 enum dma_data_direction dir)
{
	int i;

	switch (ctx->type) {
	case RDMA_RW_MR:
		for (i = 0; i < ctx->nr_ops; i++)
			ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
		kfree(ctx->reg);
		break;
	case RDMA_RW_MULTI_WR:
		kfree(ctx->map.wrs);
		kfree(ctx->map.sges);
		break;
	case RDMA_RW_SINGLE_WR:
		break;
	default:
		BUG();
		break;
	}

	ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);

/**
 * rdma_rw_ctx_destroy_signature - release all resources allocated by
 *	rdma_rw_ctx_signature_init
 * @ctx:	context to release
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist that was used for the READ/WRITE
 * @sg_cnt:	number of entries in @sg
 * @prot_sg:	scatterlist that was used for the READ/WRITE of the PI
 * @prot_sg_cnt: number of entries in @prot_sg
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u32 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		enum dma_data_direction dir)
{
	if (WARN_ON_ONCE(ctx->type != RDMA_RW_SIG_MR))
		return;

	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);
	kfree(ctx->reg);

	if (prot_sg_cnt)
		ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
	ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);

/**
 * rdma_rw_mr_factor - return number of MRs required for a payload
 * @device:	device handling the connection
 * @port_num:	port num to which the connection is bound
 * @maxpages:	maximum payload pages per rdma_rw_ctx
 *
 * Returns the number of MRs the device requires to move @maxpages
 * payload pages. The returned value is used during transport creation
 * to compute max_rdma_ctxs and the size of the transport's Send and
 * Send Completion Queues.
 */
unsigned int rdma_rw_mr_factor(struct ib_device *device, u32 port_num,
			       unsigned int maxpages)
{
	unsigned int mr_pages;

	if (rdma_rw_can_use_mr(device, port_num))
		mr_pages = rdma_rw_fr_page_list_len(device, false);
	else
		mr_pages = device->attrs.max_sge_rd;
	return DIV_ROUND_UP(maxpages, mr_pages);
}
EXPORT_SYMBOL(rdma_rw_mr_factor);
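
/*
 * Worked example with assumed numbers (not taken from a real device): with
 * the 256-page cap applied by rdma_rw_fr_page_list_len(), a transport moving
 * at most 1 MiB (256 pages of 4 KiB) per rdma_rw_ctx needs
 * DIV_ROUND_UP(256, 256) = 1 MR per context, while a device limited to 128
 * pages per MR would need DIV_ROUND_UP(256, 128) = 2.
 */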

void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
{
	u32 factor;

	WARN_ON_ONCE(attr->port_num == 0);

	/*
	 * Each context needs at least one RDMA READ or WRITE WR.
	 *
	 * For some hardware we might need more; eventually we should ask the
	 * HCA driver for a multiplier here.
	 */
	factor = 1;

	/*
	 * If the device needs MRs to perform RDMA READ or WRITE operations,
	 * we'll need two additional WRs for the registration and the
	 * invalidation.
	 */
	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN ||
	    rdma_rw_can_use_mr(dev, attr->port_num))
		factor += 2;	/* inv + reg */

	attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs;

	/*
	 * But maybe we were just too high in the sky and the device doesn't
	 * even support all we need, and we'll have to live with what we get.
	 */
	attr->cap.max_send_wr =
		min_t(u32, attr->cap.max_send_wr, dev->attrs.max_qp_wr);
}
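
/*
 * Accounting example with assumed numbers: a ULP asking for
 * attr->cap.max_rdma_ctxs = 128 on a device that needs MRs gets factor = 3
 * (READ/WRITE + reg + inv), so max_send_wr grows by 384, subject to the
 * dev->attrs.max_qp_wr clamp above.
 */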

int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
{
	struct ib_device *dev = qp->pd->device;
	u32 nr_mrs = 0, nr_sig_mrs = 0, max_num_sg = 0;
	int ret = 0;

	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) {
		nr_sig_mrs = attr->cap.max_rdma_ctxs;
		nr_mrs = attr->cap.max_rdma_ctxs;
		max_num_sg = rdma_rw_fr_page_list_len(dev, true);
	} else if (rdma_rw_can_use_mr(dev, attr->port_num)) {
		nr_mrs = attr->cap.max_rdma_ctxs;
		max_num_sg = rdma_rw_fr_page_list_len(dev, false);
	}

	if (nr_mrs) {
		ret = ib_mr_pool_init(qp, &qp->rdma_mrs, nr_mrs,
				IB_MR_TYPE_MEM_REG,
				max_num_sg, 0);
		if (ret) {
			pr_err("%s: failed to allocate %u MRs\n",
				__func__, nr_mrs);
			return ret;
		}
	}

	if (nr_sig_mrs) {
		ret = ib_mr_pool_init(qp, &qp->sig_mrs, nr_sig_mrs,
				IB_MR_TYPE_INTEGRITY, max_num_sg, max_num_sg);
		if (ret) {
			pr_err("%s: failed to allocate %u SIG MRs\n",
				__func__, nr_sig_mrs);
			goto out_free_rdma_mrs;
		}
	}

	return 0;

out_free_rdma_mrs:
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
	return ret;
}

void rdma_rw_cleanup_mrs(struct ib_qp *qp)
{
	ib_mr_pool_destroy(qp, &qp->sig_mrs);
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
}