// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018 Oracle.  All rights reserved.
 *
 * Use the core R/W API to move RPC-over-RDMA Read and Write chunks.
 */

#include <rdma/rw.h>

#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);

/* Each R/W context contains state for one chain of RDMA Read or
 * Write Work Requests.
 *
 * Each WR chain handles a single contiguous server-side buffer,
 * because scatterlist entries after the first have to start on
 * page alignment. xdr_buf iovecs cannot guarantee alignment.
 *
 * Each WR chain handles only one R_key. Each RPC-over-RDMA segment
 * from a client may contain a unique R_key, so each WR chain moves
 * up to one segment at a time.
 *
 * The scatterlist makes this data structure over 4KB in size. To
 * make it less likely to fail, and to handle the allocation for
 * smaller I/O requests without disabling bottom-halves, these
 * contexts are created on demand, but cached and reused until the
 * controlling svcxprt_rdma is destroyed.
 */
struct svc_rdma_rw_ctxt {
	struct list_head	rw_list;
	struct rdma_rw_ctx	rw_ctx;
	unsigned int		rw_nents;
	struct sg_table		rw_sg_table;
	struct scatterlist	rw_first_sgl[];
};

static inline struct svc_rdma_rw_ctxt *
svc_rdma_next_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt,
					rw_list);
}

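/* Get an R/W context from the transport's cache, or allocate a new
 * one if the cache is empty. @sges is the number of scatterlist
 * entries the caller needs to describe its payload.
 */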
static struct svc_rdma_rw_ctxt *
svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
{
	struct svc_rdma_rw_ctxt *ctxt;

	spin_lock(&rdma->sc_rw_ctxt_lock);

	ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
	if (ctxt) {
		list_del(&ctxt->rw_list);
		spin_unlock(&rdma->sc_rw_ctxt_lock);
	} else {
		spin_unlock(&rdma->sc_rw_ctxt_lock);
		ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
			       GFP_KERNEL);
		if (!ctxt)
			goto out_noctx;
		INIT_LIST_HEAD(&ctxt->rw_list);
	}

	ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
	if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
				   ctxt->rw_sg_table.sgl,
				   SG_CHUNK_SIZE))
		goto out_free;
	return ctxt;

out_free:
	kfree(ctxt);
out_noctx:
	trace_svcrdma_no_rwctx_err(rdma, sges);
	return NULL;
}

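/* Return an R/W context to the transport's cache. The chained
 * portion of the scatterlist is released; the context itself is
 * kept for reuse by a later I/O.
 */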
static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
				 struct svc_rdma_rw_ctxt *ctxt)
{
	sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE);

	spin_lock(&rdma->sc_rw_ctxt_lock);
	list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
	spin_unlock(&rdma->sc_rw_ctxt_lock);
}

/**
 * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts
 * @rdma: transport about to be destroyed
 *
 */
void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
		list_del(&ctxt->rw_list);
		kfree(ctxt);
	}
}

/**
 * svc_rdma_rw_ctx_init - Prepare a R/W context for I/O
 * @rdma: controlling transport instance
 * @ctxt: R/W context to prepare
 * @offset: RDMA offset
 * @handle: RDMA tag/handle
 * @direction: I/O direction
 *
 * Returns, on success, the number of WQEs that will be needed
 * on the Send Queue, or a negative errno.
 */
static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma,
				struct svc_rdma_rw_ctxt *ctxt,
				u64 offset, u32 handle,
				enum dma_data_direction direction)
{
	int ret;

	ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num,
			       ctxt->rw_sg_table.sgl, ctxt->rw_nents,
			       0, offset, handle, direction);
	if (unlikely(ret < 0)) {
		svc_rdma_put_rw_ctxt(rdma, ctxt);
		trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret);
	}
	return ret;
}

/* A chunk context tracks all I/O for moving one Read or Write
 * chunk. This is a set of rdma_rw's that handle data movement
 * for all segments of one chunk.
 *
 * These are small, acquired with a single allocator call, and
 * no more than one is needed per chunk. They are allocated on
 * demand, and not cached.
 */
struct svc_rdma_chunk_ctxt {
	struct ib_cqe		cc_cqe;
	struct svcxprt_rdma	*cc_rdma;
	struct list_head	cc_rwctxts;
	int			cc_sqecount;
};

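/* Initialize a chunk context and pin the transport. The reference
 * taken here keeps the svcxprt_rdma alive until the chunk's WR chain
 * has completed and svc_rdma_cc_release() is called.
 */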
static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
			     struct svc_rdma_chunk_ctxt *cc)
{
	cc->cc_rdma = rdma;
	svc_xprt_get(&rdma->sc_xprt);

	INIT_LIST_HEAD(&cc->cc_rwctxts);
	cc->cc_sqecount = 0;
}

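/* Unmap and recycle every R/W context attached to this chunk
 * context, then drop the transport reference taken in
 * svc_rdma_cc_init().
 */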
static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
				enum dma_data_direction dir)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
		list_del(&ctxt->rw_list);

		rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
				    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				    ctxt->rw_nents, dir);
		svc_rdma_put_rw_ctxt(rdma, ctxt);
	}
	svc_xprt_put(&rdma->sc_xprt);
}

/* State for sending a Write or Reply chunk.
 *  - Tracks progress of writing one chunk over all its segments
 *  - Stores arguments for the SGL constructor functions
 */
struct svc_rdma_write_info {
	/* write state of this chunk */
	unsigned int		wi_seg_off;
	unsigned int		wi_seg_no;
	unsigned int		wi_nsegs;
	__be32			*wi_segs;

	/* SGL constructor arguments */
	struct xdr_buf		*wi_xdr;
	unsigned char		*wi_base;
	unsigned int		wi_next_off;

	struct svc_rdma_chunk_ctxt	wi_cc;
};

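/* Allocate per-chunk write state. @chunk points into the Write or
 * Reply chunk in the transport header: the XDR word following it is
 * the segment count, and the segment array begins after that.
 */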
static struct svc_rdma_write_info *
svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk)
{
	struct svc_rdma_write_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	info->wi_seg_off = 0;
	info->wi_seg_no = 0;
	info->wi_nsegs = be32_to_cpup(++chunk);
	info->wi_segs = ++chunk;
	svc_rdma_cc_init(rdma, &info->wi_cc);
	info->wi_cc.cc_cqe.done = svc_rdma_write_done;
	return info;
}

static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
{
	svc_rdma_cc_release(&info->wi_cc, DMA_TO_DEVICE);
	kfree(info);
}

/**
 * svc_rdma_write_done - Write chunk completion
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 * Pages under I/O are freed by a subsequent Send completion.
 */
static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_write_info *info =
			container_of(cc, struct svc_rdma_write_info, wi_cc);

	trace_svcrdma_wc_write(wc);

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS))
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);

	svc_rdma_write_info_free(info);
}

/* State for pulling a Read chunk.
 */
struct svc_rdma_read_info {
	struct svc_rdma_recv_ctxt	*ri_readctxt;
	unsigned int			ri_position;
	unsigned int			ri_pageno;
	unsigned int			ri_pageoff;
	unsigned int			ri_chunklen;

	struct svc_rdma_chunk_ctxt	ri_cc;
};

static struct svc_rdma_read_info *
svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_read_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	svc_rdma_cc_init(rdma, &info->ri_cc);
	info->ri_cc.cc_cqe.done = svc_rdma_wc_read_done;
	return info;
}

static void svc_rdma_read_info_free(struct svc_rdma_read_info *info)
{
	svc_rdma_cc_release(&info->ri_cc, DMA_FROM_DEVICE);
	kfree(info);
}

/**
 * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 */
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_read_info *info =
			container_of(cc, struct svc_rdma_read_info, ri_cc);

	trace_svcrdma_wc_read(wc);

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt);
	} else {
		spin_lock(&rdma->sc_rq_dto_lock);
		list_add_tail(&info->ri_readctxt->rc_list,
			      &rdma->sc_read_complete_q);
		/* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
		set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
		spin_unlock(&rdma->sc_rq_dto_lock);

		svc_xprt_enqueue(&rdma->sc_xprt);
	}

	svc_rdma_read_info_free(info);
}

/* This function sleeps when the transport's Send Queue is congested.
 *
 * Assumptions:
 * - If ib_post_send() succeeds, only one completion is expected,
 *   even if one or more WRs are flushed. This is true when posting
 *   an rdma_rw_ctx or when posting a single signaled WR.
 */
static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_xprt *xprt = &rdma->sc_xprt;
	struct ib_send_wr *first_wr;
	const struct ib_send_wr *bad_wr;
	struct list_head *tmp;
	struct ib_cqe *cqe;
	int ret;

	if (cc->cc_sqecount > rdma->sc_sq_depth)
		return -EINVAL;

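	/* Chain together the WRs from every rdma_rw ctx in this chunk.
	 * Only the final WR in the chain is signaled; it carries the
	 * chunk's CQE, so a single completion covers the whole chain.
	 */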
	first_wr = NULL;
	cqe = &cc->cc_cqe;
	list_for_each(tmp, &cc->cc_rwctxts) {
		struct svc_rdma_rw_ctxt *ctxt;

		ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list);
		first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
					   rdma->sc_port_num, cqe, first_wr);
		cqe = NULL;
	}

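	/* Reserve Send Queue space for the whole chain up front. If
	 * the SQ is too congested, back out the reservation and sleep
	 * until enough entries have been released by completions.
	 */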
	do {
		if (atomic_sub_return(cc->cc_sqecount,
				      &rdma->sc_sq_avail) > 0) {
			ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
			if (ret)
				break;
			return 0;
		}

		trace_svcrdma_sq_full(rdma);
		atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
		wait_event(rdma->sc_send_wait,
			   atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
		trace_svcrdma_sq_retry(rdma);
	} while (1);

	trace_svcrdma_sq_post_err(rdma, ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);

	/* If even one was posted, there will be a completion. */
	if (bad_wr != first_wr)
		return 0;

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);
	return -ENOTCONN;
}

/* Build and DMA-map an SGL that covers one kvec in an xdr_buf
 */
static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info,
			       unsigned int len,
			       struct svc_rdma_rw_ctxt *ctxt)
{
	struct scatterlist *sg = ctxt->rw_sg_table.sgl;

	sg_set_buf(&sg[0], info->wi_base, len);
	info->wi_base += len;

	ctxt->rw_nents = 1;
}

/* Build and DMA-map an SGL that covers part of an xdr_buf's pagelist.
 */
static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info,
				    unsigned int remaining,
				    struct svc_rdma_rw_ctxt *ctxt)
{
	unsigned int sge_no, sge_bytes, page_off, page_no;
	struct xdr_buf *xdr = info->wi_xdr;
	struct scatterlist *sg;
	struct page **page;

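	/* Find the page and byte offset where this portion of the
	 * pagelist begins, accounting for the xdr_buf's page_base.
	 */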
	page_off = info->wi_next_off + xdr->page_base;
	page_no = page_off >> PAGE_SHIFT;
	page_off = offset_in_page(page_off);
	page = xdr->pages + page_no;
	info->wi_next_off += remaining;
	sg = ctxt->rw_sg_table.sgl;
	sge_no = 0;
	do {
		sge_bytes = min_t(unsigned int, remaining,
				  PAGE_SIZE - page_off);
		sg_set_page(sg, *page, sge_bytes, page_off);

		remaining -= sge_bytes;
		sg = sg_next(sg);
		page_off = 0;
		sge_no++;
		page++;
	} while (remaining);

	ctxt->rw_nents = sge_no;
}

/* Construct RDMA Write WRs to send a portion of an xdr_buf containing
 * an RPC Reply.
 */
static int
svc_rdma_build_writes(struct svc_rdma_write_info *info,
		      void (*constructor)(struct svc_rdma_write_info *info,
					  unsigned int len,
					  struct svc_rdma_rw_ctxt *ctxt),
		      unsigned int remaining)
{
	struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;
	__be32 *seg;
	int ret;

	seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz;
	do {
		unsigned int write_len;
		u32 seg_length, seg_handle;
		u64 seg_offset;

		if (info->wi_seg_no >= info->wi_nsegs)
			goto out_overflow;

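		/* Decode the next Write segment (handle, length, 64-bit
		 * offset), then advance the offset past any part of this
		 * segment that earlier calls have already consumed.
		 */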
		seg_handle = be32_to_cpup(seg);
		seg_length = be32_to_cpup(seg + 1);
		xdr_decode_hyper(seg + 2, &seg_offset);
		seg_offset += info->wi_seg_off;

		write_len = min(remaining, seg_length - info->wi_seg_off);
		ctxt = svc_rdma_get_rw_ctxt(rdma,
					    (write_len >> PAGE_SHIFT) + 2);
		if (!ctxt)
			return -ENOMEM;

		constructor(info, write_len, ctxt);
		ret = svc_rdma_rw_ctx_init(rdma, ctxt, seg_offset, seg_handle,
					   DMA_TO_DEVICE);
		if (ret < 0)
			return -EIO;

		trace_svcrdma_send_wseg(seg_handle, write_len, seg_offset);

		list_add(&ctxt->rw_list, &cc->cc_rwctxts);
		cc->cc_sqecount += ret;
		if (write_len == seg_length - info->wi_seg_off) {
			seg += 4;
			info->wi_seg_no++;
			info->wi_seg_off = 0;
		} else {
			info->wi_seg_off += write_len;
		}
		remaining -= write_len;
	} while (remaining);

	return 0;

out_overflow:
	trace_svcrdma_small_wrch_err(rdma, remaining, info->wi_seg_no,
				     info->wi_nsegs);
	return -E2BIG;
}

/* Send one of an xdr_buf's kvecs by itself. To send a Reply
 * chunk, the whole RPC Reply is written back to the client.
 * This function writes either the head or tail of the xdr_buf
 * containing the Reply.
 */
static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info,
				  struct kvec *vec)
{
	info->wi_base = vec->iov_base;
	return svc_rdma_build_writes(info, svc_rdma_vec_to_sg,
				     vec->iov_len);
}

/* Send an xdr_buf's page list by itself. A Write chunk is just
 * the page list. A Reply chunk is @xdr's head, page list, and
 * tail. This function is shared between the two types of chunk.
 */
static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
				      struct xdr_buf *xdr,
				      unsigned int offset,
				      unsigned long length)
{
	info->wi_xdr = xdr;
	info->wi_next_off = offset - xdr->head[0].iov_len;
	return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
				     length);
}

/**
 * svc_rdma_send_write_chunk - Write all segments in a Write chunk
 * @rdma: controlling RDMA transport
 * @wr_ch: Write chunk provided by client
 * @xdr: xdr_buf containing the data payload
 * @offset: payload's byte offset in @xdr
 * @length: size of payload, in bytes
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Write chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
			      struct xdr_buf *xdr,
			      unsigned int offset, unsigned long length)
{
	struct svc_rdma_write_info *info;
	int ret;

	if (!length)
		return 0;

	info = svc_rdma_write_info_alloc(rdma, wr_ch);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_pagelist(info, xdr, offset, length);
	if (ret < 0)
		goto out_err;

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;

	trace_svcrdma_send_write_chunk(xdr->page_len);
	return length;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}

/**
 * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
 * @rdma: controlling RDMA transport
 * @rctxt: Write and Reply chunks from client
 * @xdr: xdr_buf containing an RPC Reply
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Reply chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
			      const struct svc_rdma_recv_ctxt *rctxt,
			      struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	int consumed, ret;

	info = svc_rdma_write_info_alloc(rdma, rctxt->rc_reply_chunk);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_kvec(info, &xdr->head[0]);
	if (ret < 0)
		goto out_err;
	consumed = xdr->head[0].iov_len;

	/* Send the page list in the Reply chunk only if the
	 * client did not provide Write chunks.
	 */
	if (!rctxt->rc_write_list && xdr->page_len) {
		ret = svc_rdma_send_xdr_pagelist(info, xdr,
						 xdr->head[0].iov_len,
						 xdr->page_len);
		if (ret < 0)
			goto out_err;
		consumed += xdr->page_len;
	}

	if (xdr->tail[0].iov_len) {
		ret = svc_rdma_send_xdr_kvec(info, &xdr->tail[0]);
		if (ret < 0)
			goto out_err;
		consumed += xdr->tail[0].iov_len;
	}

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;

	trace_svcrdma_send_reply_chunk(consumed);
	return consumed;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}

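/* Build and DMA-map an rdma_rw context that pulls one Read segment
 * from the client. The sink is the svc_rqst's page array; pages under
 * I/O are also recorded in the recv ctxt so they outlive the request.
 */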
static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
				       struct svc_rqst *rqstp,
				       u32 rkey, u32 len, u64 offset)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	struct svc_rdma_chunk_ctxt *cc = &info->ri_cc;
	struct svc_rdma_rw_ctxt *ctxt;
	unsigned int sge_no, seg_len;
	struct scatterlist *sg;
	int ret;

	sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
	ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
	if (!ctxt)
		return -ENOMEM;
	ctxt->rw_nents = sge_no;

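	/* Populate the SGL with the rqstp's sink pages. Each page is
	 * also saved in head->rc_arg.pages so it remains available
	 * after this svc_rqst is released.
	 */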
	sg = ctxt->rw_sg_table.sgl;
	for (sge_no = 0; sge_no < ctxt->rw_nents; sge_no++) {
		seg_len = min_t(unsigned int, len,
				PAGE_SIZE - info->ri_pageoff);

		head->rc_arg.pages[info->ri_pageno] =
			rqstp->rq_pages[info->ri_pageno];
		if (!info->ri_pageoff)
			head->rc_page_count++;

		sg_set_page(sg, rqstp->rq_pages[info->ri_pageno],
			    seg_len, info->ri_pageoff);
		sg = sg_next(sg);

		info->ri_pageoff += seg_len;
		if (info->ri_pageoff == PAGE_SIZE) {
			info->ri_pageno++;
			info->ri_pageoff = 0;
		}
		len -= seg_len;

		/* Safety check */
		if (len &&
		    &rqstp->rq_pages[info->ri_pageno + 1] > rqstp->rq_page_end)
			goto out_overrun;
	}

	ret = svc_rdma_rw_ctx_init(cc->cc_rdma, ctxt, offset, rkey,
				   DMA_FROM_DEVICE);
	if (ret < 0)
		return -EIO;

	list_add(&ctxt->rw_list, &cc->cc_rwctxts);
	cc->cc_sqecount += ret;
	return 0;

out_overrun:
	trace_svcrdma_page_overrun_err(cc->cc_rdma, rqstp, info->ri_pageno);
	return -EINVAL;
}

/* Walk the segments in the Read chunk starting at @p and construct
 * RDMA Read operations to pull the chunk to the server.
 */
static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
				     struct svc_rdma_read_info *info,
				     __be32 *p)
{
	unsigned int i;
	int ret;

	ret = -EINVAL;
	info->ri_chunklen = 0;
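	/* Walk the Read list until the terminator, or until a segment
	 * with a different Position value is found.
	 */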
	while (*p++ != xdr_zero && be32_to_cpup(p++) == info->ri_position) {
		u32 rs_handle, rs_length;
		u64 rs_offset;

		rs_handle = be32_to_cpup(p++);
		rs_length = be32_to_cpup(p++);
		p = xdr_decode_hyper(p, &rs_offset);

		ret = svc_rdma_build_read_segment(info, rqstp,
						  rs_handle, rs_length,
						  rs_offset);
		if (ret < 0)
			break;

		trace_svcrdma_send_rseg(rs_handle, rs_length, rs_offset);
		info->ri_chunklen += rs_length;
	}

	/* Pages under I/O have been copied to head->rc_pages.
	 * Prevent their premature release by svc_xprt_release().
	 */
	for (i = 0; i < info->ri_readctxt->rc_page_count; i++)
		rqstp->rq_pages[i] = NULL;

	return ret;
}

/* Construct RDMA Reads to pull over a normal Read chunk. The chunk
 * data lands in the page list of head->rc_arg.pages.
 *
 * Currently NFSD does not look at the head->rc_arg.tail[0] iovec.
 * Therefore, XDR round-up of the Read chunk and trailing
 * inline content must both be added at the end of the pagelist.
 */
static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
					    struct svc_rdma_read_info *info,
					    __be32 *p)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	int ret;

	ret = svc_rdma_build_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out;

	trace_svcrdma_send_read_chunk(info->ri_chunklen, info->ri_position);

	head->rc_hdr_count = 0;

	/* Split the Receive buffer between the head and tail
	 * buffers at Read chunk's position. XDR roundup of the
	 * chunk is not included in either the pagelist or in
	 * the tail.
	 */
	head->rc_arg.tail[0].iov_base =
		head->rc_arg.head[0].iov_base + info->ri_position;
	head->rc_arg.tail[0].iov_len =
		head->rc_arg.head[0].iov_len - info->ri_position;
	head->rc_arg.head[0].iov_len = info->ri_position;

	/* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
	 *
	 * If the client already rounded up the chunk length, the
	 * length does not change. Otherwise, the length of the page
	 * list is increased to include XDR round-up.
	 *
	 * Currently these chunks always start at page offset 0,
	 * thus the rounded-up length never crosses a page boundary.
	 */
	info->ri_chunklen = XDR_QUADLEN(info->ri_chunklen) << 2;

	head->rc_arg.page_len = info->ri_chunklen;
	head->rc_arg.len += info->ri_chunklen;
	head->rc_arg.buflen += info->ri_chunklen;

out:
	return ret;
}

/* Construct RDMA Reads to pull over a Position Zero Read chunk.
 * The start of the data lands in the first page just after
 * the Transport header, and the rest lands in the page list of
 * head->rc_arg.pages.
 *
 * Assumptions:
 *	- A PZRC has an XDR-aligned length (no implicit round-up).
 *	- There can be no trailing inline content (IOW, we assume
 *	  a PZRC is never sent in an RDMA_MSG message, though it's
 *	  allowed by spec).
 */
static int svc_rdma_build_pz_read_chunk(struct svc_rqst *rqstp,
					struct svc_rdma_read_info *info,
					__be32 *p)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	int ret;

	ret = svc_rdma_build_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out;

	trace_svcrdma_send_pzr(info->ri_chunklen);

	head->rc_arg.len += info->ri_chunklen;
	head->rc_arg.buflen += info->ri_chunklen;

	head->rc_hdr_count = 1;
	head->rc_arg.head[0].iov_base = page_address(head->rc_pages[0]);
	head->rc_arg.head[0].iov_len = min_t(size_t, PAGE_SIZE,
					     info->ri_chunklen);

	head->rc_arg.page_len = info->ri_chunklen -
				head->rc_arg.head[0].iov_len;

out:
	return ret;
}

/**
 * svc_rdma_recv_read_chunk - Pull a Read chunk from the client
 * @rdma: controlling RDMA transport
 * @rqstp: set of pages to use as Read sink buffers
 * @head: pages under I/O collect here
 * @p: pointer to start of Read chunk
 *
 * Returns:
 *	%0 if all needed RDMA Reads were posted successfully,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Assumptions:
 * - All Read segments in @p have the same Position value.
 */
int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
			     struct svc_rdma_recv_ctxt *head, __be32 *p)
{
	struct svc_rdma_read_info *info;
	int ret;

	/* The request (with page list) is constructed in
	 * head->rc_arg. Pages involved with RDMA Read I/O are
	 * transferred there.
	 */
	head->rc_arg.head[0] = rqstp->rq_arg.head[0];
	head->rc_arg.tail[0] = rqstp->rq_arg.tail[0];
	head->rc_arg.pages = head->rc_pages;
	head->rc_arg.page_base = 0;
	head->rc_arg.page_len = 0;
	head->rc_arg.len = rqstp->rq_arg.len;
	head->rc_arg.buflen = rqstp->rq_arg.buflen;

	info = svc_rdma_read_info_alloc(rdma);
	if (!info)
		return -ENOMEM;
	info->ri_readctxt = head;
	info->ri_pageno = 0;
	info->ri_pageoff = 0;

	info->ri_position = be32_to_cpup(p + 1);
	if (info->ri_position)
		ret = svc_rdma_build_normal_read_chunk(rqstp, info, p);
	else
		ret = svc_rdma_build_pz_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out_err;

	ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);
	if (ret < 0)
		goto out_err;
	return 0;

out_err:
	svc_rdma_read_info_free(info);
	return ret;
}