// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018 Oracle.  All rights reserved.
 *
 * Use the core R/W API to move RPC-over-RDMA Read and Write chunks.
 */

#include <rdma/rw.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);

/* Each R/W context contains state for one chain of RDMA Read or
 * Write Work Requests.
 *
 * Each WR chain handles a single contiguous server-side buffer,
 * because scatterlist entries after the first have to start on
 * page alignment. xdr_buf iovecs cannot guarantee alignment.
 *
 * Each WR chain handles only one R_key. Each RPC-over-RDMA segment
 * from a client may contain a unique R_key, so each WR chain moves
 * up to one segment at a time.
 *
 * The scatterlist makes this data structure over 4KB in size. To
 * make it less likely to fail, and to handle the allocation for
 * smaller I/O requests without disabling bottom-halves, these
 * contexts are created on demand, but cached and reused until the
 * controlling svcxprt_rdma is destroyed.
 */
struct svc_rdma_rw_ctxt {
	struct llist_node	rw_node;
	struct list_head	rw_list;
	struct rdma_rw_ctx	rw_ctx;
	unsigned int		rw_nents;
	struct sg_table		rw_sg_table;
	struct scatterlist	rw_first_sgl[];
};

static inline struct svc_rdma_rw_ctxt *
svc_rdma_next_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt,
					rw_list);
}

static struct svc_rdma_rw_ctxt *
svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
{
	struct svc_rdma_rw_ctxt *ctxt;
	struct llist_node *node;

	spin_lock(&rdma->sc_rw_ctxt_lock);
	node = llist_del_first(&rdma->sc_rw_ctxts);
	spin_unlock(&rdma->sc_rw_ctxt_lock);
	if (node) {
		ctxt = llist_entry(node, struct svc_rdma_rw_ctxt, rw_node);
	} else {
		ctxt = kmalloc_node(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
				    GFP_KERNEL, ibdev_to_node(rdma->sc_cm_id->device));
		if (!ctxt)
			goto out_noctx;

		INIT_LIST_HEAD(&ctxt->rw_list);
	}

	ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
	if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
				   ctxt->rw_sg_table.sgl,
				   SG_CHUNK_SIZE))
		goto out_free;
	return ctxt;

out_free:
	kfree(ctxt);
out_noctx:
	trace_svcrdma_no_rwctx_err(rdma, sges);
	return NULL;
}

static void __svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
				   struct svc_rdma_rw_ctxt *ctxt,
				   struct llist_head *list)
{
	sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE);
	llist_add(&ctxt->rw_node, list);
}

static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
				 struct svc_rdma_rw_ctxt *ctxt)
{
	__svc_rdma_put_rw_ctxt(rdma, ctxt, &rdma->sc_rw_ctxts);
}

/**
 * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts
 * @rdma: transport about to be destroyed
 *
 */
void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_rw_ctxt *ctxt;
	struct llist_node *node;

	while ((node = llist_del_first(&rdma->sc_rw_ctxts)) != NULL) {
		ctxt = llist_entry(node, struct svc_rdma_rw_ctxt, rw_node);
		kfree(ctxt);
	}
}

/**
 * svc_rdma_rw_ctx_init - Prepare an R/W context for I/O
 * @rdma: controlling transport instance
 * @ctxt: R/W context to prepare
 * @offset: RDMA offset
 * @handle: RDMA tag/handle
 * @direction: I/O direction
 *
 * Returns, on success, the number of WQEs that will be needed
 * on the send queue, or a negative errno.
 */
static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma,
				struct svc_rdma_rw_ctxt *ctxt,
				u64 offset, u32 handle,
				enum dma_data_direction direction)
{
	int ret;

	ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num,
			       ctxt->rw_sg_table.sgl, ctxt->rw_nents,
			       0, offset, handle, direction);
	if (unlikely(ret < 0)) {
		svc_rdma_put_rw_ctxt(rdma, ctxt);
		trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret);
	}
	return ret;
}

/* A chunk context tracks all I/O for moving one Read or Write
 * chunk. This is a set of rdma_rw's that handle data movement
 * for all segments of one chunk.
 *
 * These are small, acquired with a single allocator call, and
 * no more than one is needed per chunk. They are allocated on
 * demand, and not cached.
 */
struct svc_rdma_chunk_ctxt {
	struct rpc_rdma_cid	cc_cid;
	struct ib_cqe		cc_cqe;
	struct svcxprt_rdma	*cc_rdma;
	struct list_head	cc_rwctxts;
	ktime_t			cc_posttime;
	int			cc_sqecount;
	enum ib_wc_status	cc_status;
	struct completion	cc_done;
};
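
/* Typical lifetime of a chunk context (an illustrative sketch only;
 * the real call sites are in the Write and Read paths below):
 *
 *	svc_rdma_cc_init(rdma, cc);
 *	for each segment in the chunk:
 *		ctxt = svc_rdma_get_rw_ctxt(rdma, nr_sges);
 *		... build the SGL for this segment ...
 *		ret = svc_rdma_rw_ctx_init(rdma, ctxt, offset, handle, dir);
 *		list_add(&ctxt->rw_list, &cc->cc_rwctxts);
 *		cc->cc_sqecount += ret;
 *	svc_rdma_post_chunk_ctxt(cc);
 *	... Write or Read completion handler runs ...
 *	svc_rdma_cc_release(cc, dir);
 */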

static void svc_rdma_cc_cid_init(struct svcxprt_rdma *rdma,
				 struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = rdma->sc_sq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
			     struct svc_rdma_chunk_ctxt *cc)
{
	svc_rdma_cc_cid_init(rdma, &cc->cc_cid);
	cc->cc_rdma = rdma;

	INIT_LIST_HEAD(&cc->cc_rwctxts);
	cc->cc_sqecount = 0;
}

/*
 * The consumed rw_ctx's are cleaned and placed on a local llist so
 * that only one atomic llist operation is needed to put them all
 * back on the free list.
 */
static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
				enum dma_data_direction dir)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct llist_node *first, *last;
	struct svc_rdma_rw_ctxt *ctxt;
	LLIST_HEAD(free);

	first = last = NULL;
	while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
		list_del(&ctxt->rw_list);

		rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
				    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				    ctxt->rw_nents, dir);
		__svc_rdma_put_rw_ctxt(rdma, ctxt, &free);

		ctxt->rw_node.next = first;
		first = &ctxt->rw_node;
		if (!last)
			last = first;
	}
	if (first)
		llist_add_batch(first, last, &rdma->sc_rw_ctxts);
}
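
/* Note: llist_add_batch() splices the whole locally-built chain,
 * from @first (newest) to @last (oldest), onto sc_rw_ctxts with a
 * single atomic operation, which is the "one atomic llist operation"
 * mentioned above.
 */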

/* State for sending a Write or Reply chunk.
 *  - Tracks progress of writing one chunk over all its segments
 *  - Stores arguments for the SGL constructor functions
 */
struct svc_rdma_write_info {
	const struct svc_rdma_chunk	*wi_chunk;

	/* write state of this chunk */
	unsigned int		wi_seg_off;
	unsigned int		wi_seg_no;

	/* SGL constructor arguments */
	const struct xdr_buf	*wi_xdr;
	unsigned char		*wi_base;
	unsigned int		wi_next_off;

	struct svc_rdma_chunk_ctxt	wi_cc;
};

static struct svc_rdma_write_info *
svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma,
			  const struct svc_rdma_chunk *chunk)
{
	struct svc_rdma_write_info *info;

	info = kmalloc_node(sizeof(*info), GFP_KERNEL,
			    ibdev_to_node(rdma->sc_cm_id->device));
	if (!info)
		return info;

	info->wi_chunk = chunk;
	info->wi_seg_off = 0;
	info->wi_seg_no = 0;
	svc_rdma_cc_init(rdma, &info->wi_cc);
	info->wi_cc.cc_cqe.done = svc_rdma_write_done;
	return info;
}

static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
{
	svc_rdma_cc_release(&info->wi_cc, DMA_TO_DEVICE);
	kfree(info);
}

/**
 * svc_rdma_write_done - Write chunk completion
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 * Pages under I/O are freed by a subsequent Send completion.
 */
static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_write_info *info =
			container_of(cc, struct svc_rdma_write_info, wi_cc);

	switch (wc->status) {
	case IB_WC_SUCCESS:
		trace_svcrdma_wc_write(wc, &cc->cc_cid);
		break;
	case IB_WC_WR_FLUSH_ERR:
		trace_svcrdma_wc_write_flush(wc, &cc->cc_cid);
		break;
	default:
		trace_svcrdma_wc_write_err(wc, &cc->cc_cid);
	}

	svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);

	if (unlikely(wc->status != IB_WC_SUCCESS))
		svc_xprt_deferred_close(&rdma->sc_xprt);

	svc_rdma_write_info_free(info);
}

/* State for pulling a Read chunk.
 */
struct svc_rdma_read_info {
	struct svc_rqst			*ri_rqst;
	struct svc_rdma_recv_ctxt	*ri_readctxt;
	unsigned int			ri_pageno;
	unsigned int			ri_pageoff;
	unsigned int			ri_totalbytes;

	struct svc_rdma_chunk_ctxt	ri_cc;
};

static struct svc_rdma_read_info *
svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_read_info *info;

	info = kmalloc_node(sizeof(*info), GFP_KERNEL,
			    ibdev_to_node(rdma->sc_cm_id->device));
	if (!info)
		return info;

	svc_rdma_cc_init(rdma, &info->ri_cc);
	info->ri_cc.cc_cqe.done = svc_rdma_wc_read_done;
	return info;
}

static void svc_rdma_read_info_free(struct svc_rdma_read_info *info)
{
	svc_rdma_cc_release(&info->ri_cc, DMA_FROM_DEVICE);
	kfree(info);
}

/**
 * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 */
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svc_rdma_read_info *info;

	switch (wc->status) {
	case IB_WC_SUCCESS:
		info = container_of(cc, struct svc_rdma_read_info, ri_cc);
		trace_svcrdma_wc_read(wc, &cc->cc_cid, info->ri_totalbytes,
				      cc->cc_posttime);
		break;
	case IB_WC_WR_FLUSH_ERR:
		trace_svcrdma_wc_read_flush(wc, &cc->cc_cid);
		break;
	default:
		trace_svcrdma_wc_read_err(wc, &cc->cc_cid);
	}

	svc_rdma_wake_send_waiters(cc->cc_rdma, cc->cc_sqecount);
	cc->cc_status = wc->status;
	complete(&cc->cc_done);
}

/*
 * Assumptions:
 * - If ib_post_send() succeeds, only one completion is expected,
 *   even if one or more WRs are flushed. This is true when posting
 *   an rdma_rw_ctx or when posting a single signaled WR.
 */
static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct ib_send_wr *first_wr;
	const struct ib_send_wr *bad_wr;
	struct list_head *tmp;
	struct ib_cqe *cqe;
	int ret;

	might_sleep();

	if (cc->cc_sqecount > rdma->sc_sq_depth)
		return -EINVAL;

	first_wr = NULL;
	cqe = &cc->cc_cqe;
	list_for_each(tmp, &cc->cc_rwctxts) {
		struct svc_rdma_rw_ctxt *ctxt;

		ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list);
		first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
					   rdma->sc_port_num, cqe, first_wr);
		cqe = NULL;
	}

	do {
		if (atomic_sub_return(cc->cc_sqecount,
				      &rdma->sc_sq_avail) > 0) {
			cc->cc_posttime = ktime_get();
			ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
			if (ret)
				break;
			return 0;
		}

		percpu_counter_inc(&svcrdma_stat_sq_starve);
		trace_svcrdma_sq_full(rdma);
		atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
		wait_event(rdma->sc_send_wait,
			   atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
		trace_svcrdma_sq_retry(rdma);
	} while (1);

	trace_svcrdma_sq_post_err(rdma, ret);
	svc_xprt_deferred_close(&rdma->sc_xprt);

	/* If even one was posted, there will be a completion. */
	if (bad_wr != first_wr)
		return 0;

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);
	return -ENOTCONN;
}
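
/* Illustration of the Send Queue accounting above (values assumed):
 * if sc_sq_avail is 8 and cc_sqecount is 3, atomic_sub_return()
 * yields 5, so the WR chain is posted right away. If sc_sq_avail is
 * only 2, the result is -1: the three entries are credited back,
 * svcrdma_stat_sq_starve is bumped, and the task sleeps on
 * sc_send_wait until completions release enough SQ entries.
 */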

/* Build and DMA-map an SGL that covers one kvec in an xdr_buf
 */
static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info,
			       unsigned int len,
			       struct svc_rdma_rw_ctxt *ctxt)
{
	struct scatterlist *sg = ctxt->rw_sg_table.sgl;

	sg_set_buf(&sg[0], info->wi_base, len);
	info->wi_base += len;

	ctxt->rw_nents = 1;
}

/* Build and DMA-map an SGL that covers part of an xdr_buf's pagelist.
 */
static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info,
				    unsigned int remaining,
				    struct svc_rdma_rw_ctxt *ctxt)
{
	unsigned int sge_no, sge_bytes, page_off, page_no;
	const struct xdr_buf *xdr = info->wi_xdr;
	struct scatterlist *sg;
	struct page **page;

	page_off = info->wi_next_off + xdr->page_base;
	page_no = page_off >> PAGE_SHIFT;
	page_off = offset_in_page(page_off);
	page = xdr->pages + page_no;
	info->wi_next_off += remaining;
	sg = ctxt->rw_sg_table.sgl;
	sge_no = 0;
	do {
		sge_bytes = min_t(unsigned int, remaining,
				  PAGE_SIZE - page_off);
		sg_set_page(sg, *page, sge_bytes, page_off);

		remaining -= sge_bytes;
		sg = sg_next(sg);
		page_off = 0;
		sge_no++;
		page++;
	} while (remaining);

	ctxt->rw_nents = sge_no;
}
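
/* Worked example for the loop above (sizes assumed): with PAGE_SIZE
 * of 4096, writing remaining = 9000 bytes starting at page_off = 3000
 * produces three SGEs: 1096 bytes at offset 3000 of the first page,
 * 4096 bytes of the second page, and 3808 bytes of the third page,
 * leaving ctxt->rw_nents set to 3.
 */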

/* Construct RDMA Write WRs to send a portion of an xdr_buf containing
 * an RPC Reply.
 */
static int
svc_rdma_build_writes(struct svc_rdma_write_info *info,
		      void (*constructor)(struct svc_rdma_write_info *info,
					  unsigned int len,
					  struct svc_rdma_rw_ctxt *ctxt),
		      unsigned int remaining)
{
	struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	const struct svc_rdma_segment *seg;
	struct svc_rdma_rw_ctxt *ctxt;
	int ret;

	do {
		unsigned int write_len;
		u64 offset;

		if (info->wi_seg_no >= info->wi_chunk->ch_segcount)
			goto out_overflow;

		seg = &info->wi_chunk->ch_segments[info->wi_seg_no];
		write_len = min(remaining, seg->rs_length - info->wi_seg_off);
		if (!write_len)
			goto out_overflow;
		ctxt = svc_rdma_get_rw_ctxt(rdma,
					    (write_len >> PAGE_SHIFT) + 2);
		if (!ctxt)
			return -ENOMEM;

		constructor(info, write_len, ctxt);
		offset = seg->rs_offset + info->wi_seg_off;
		ret = svc_rdma_rw_ctx_init(rdma, ctxt, offset, seg->rs_handle,
					   DMA_TO_DEVICE);
		if (ret < 0)
			return -EIO;
		percpu_counter_inc(&svcrdma_stat_write);

		list_add(&ctxt->rw_list, &cc->cc_rwctxts);
		cc->cc_sqecount += ret;
		if (write_len == seg->rs_length - info->wi_seg_off) {
			info->wi_seg_no++;
			info->wi_seg_off = 0;
		} else {
			info->wi_seg_off += write_len;
		}
		remaining -= write_len;
	} while (remaining);

	return 0;

out_overflow:
	trace_svcrdma_small_wrch_err(rdma, remaining, info->wi_seg_no,
				     info->wi_chunk->ch_segcount);
	return -E2BIG;
}

/**
 * svc_rdma_iov_write - Construct RDMA Writes from an iov
 * @info: pointer to write arguments
 * @iov: kvec to write
 *
 * Returns:
 *   On success, returns zero
 *   %-E2BIG if the client-provided Write chunk is too small
 *   %-ENOMEM if a resource has been exhausted
 *   %-EIO if an rdma-rw error occurred
 */
static int svc_rdma_iov_write(struct svc_rdma_write_info *info,
			      const struct kvec *iov)
{
	info->wi_base = iov->iov_base;
	return svc_rdma_build_writes(info, svc_rdma_vec_to_sg,
				     iov->iov_len);
}

/**
 * svc_rdma_pages_write - Construct RDMA Writes from pages
 * @info: pointer to write arguments
 * @xdr: xdr_buf with pages to write
 * @offset: offset into the content of @xdr
 * @length: number of bytes to write
 *
 * Returns:
 *   On success, returns zero
 *   %-E2BIG if the client-provided Write chunk is too small
 *   %-ENOMEM if a resource has been exhausted
 *   %-EIO if an rdma-rw error occurred
 */
static int svc_rdma_pages_write(struct svc_rdma_write_info *info,
				const struct xdr_buf *xdr,
				unsigned int offset,
				unsigned long length)
{
	info->wi_xdr = xdr;
	info->wi_next_off = offset - xdr->head[0].iov_len;
	return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
				     length);
}

/**
 * svc_rdma_xb_write - Construct RDMA Writes to write an xdr_buf
 * @xdr: xdr_buf to write
 * @data: pointer to write arguments
 *
 * Returns:
 *   On success, returns the number of bytes written (xdr->len)
 *   %-E2BIG if the client-provided Write chunk is too small
 *   %-ENOMEM if a resource has been exhausted
 *   %-EIO if an rdma-rw error occurred
 */
static int svc_rdma_xb_write(const struct xdr_buf *xdr, void *data)
{
	struct svc_rdma_write_info *info = data;
	int ret;

	if (xdr->head[0].iov_len) {
		ret = svc_rdma_iov_write(info, &xdr->head[0]);
		if (ret < 0)
			return ret;
	}

	if (xdr->page_len) {
		ret = svc_rdma_pages_write(info, xdr, xdr->head[0].iov_len,
					   xdr->page_len);
		if (ret < 0)
			return ret;
	}

	if (xdr->tail[0].iov_len) {
		ret = svc_rdma_iov_write(info, &xdr->tail[0]);
		if (ret < 0)
			return ret;
	}

	return xdr->len;
}
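
/* For instance (sizes assumed), an xdr_buf with a 148-byte head, an
 * 8192-byte page list, and a 4-byte tail results in three calls above:
 * svc_rdma_iov_write() for the head, svc_rdma_pages_write() for bytes
 * 148 through 8339 of the message, then svc_rdma_iov_write() for the
 * tail, and the function returns xdr->len, which is 8344.
 */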

/**
 * svc_rdma_send_write_chunk - Write all segments in a Write chunk
 * @rdma: controlling RDMA transport
 * @chunk: Write chunk provided by the client
 * @xdr: xdr_buf containing the data payload
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Write chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
			      const struct svc_rdma_chunk *chunk,
			      const struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	struct svc_rdma_chunk_ctxt *cc;
	int ret;

	info = svc_rdma_write_info_alloc(rdma, chunk);
	if (!info)
		return -ENOMEM;
	cc = &info->wi_cc;

	ret = svc_rdma_xb_write(xdr, info);
	if (ret != xdr->len)
		goto out_err;

	trace_svcrdma_post_write_chunk(&cc->cc_cid, cc->cc_sqecount);
	ret = svc_rdma_post_chunk_ctxt(cc);
	if (ret < 0)
		goto out_err;
	return xdr->len;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}

/**
 * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
 * @rdma: controlling RDMA transport
 * @rctxt: Write and Reply chunks from client
 * @xdr: xdr_buf containing an RPC Reply
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Reply chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
			      const struct svc_rdma_recv_ctxt *rctxt,
			      const struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	struct svc_rdma_chunk_ctxt *cc;
	struct svc_rdma_chunk *chunk;
	int ret;

	if (pcl_is_empty(&rctxt->rc_reply_pcl))
		return 0;

	chunk = pcl_first_chunk(&rctxt->rc_reply_pcl);
	info = svc_rdma_write_info_alloc(rdma, chunk);
	if (!info)
		return -ENOMEM;
	cc = &info->wi_cc;

	ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				      svc_rdma_xb_write, info);
	if (ret < 0)
		goto out_err;

	trace_svcrdma_post_reply_chunk(&cc->cc_cid, cc->cc_sqecount);
	ret = svc_rdma_post_chunk_ctxt(cc);
	if (ret < 0)
		goto out_err;

	return xdr->len;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}

/**
 * svc_rdma_build_read_segment - Build RDMA Read WQEs to pull one RDMA segment
 * @info: context for ongoing I/O
 * @segment: co-ordinates of remote memory to be read
 *
 * Returns:
 *   %0: the Read WR chain was constructed successfully
 *   %-EINVAL: there were not enough rq_pages to finish
 *   %-ENOMEM: allocating local resources failed
 *   %-EIO: a DMA mapping error occurred
 */
static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
				       const struct svc_rdma_segment *segment)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	struct svc_rdma_chunk_ctxt *cc = &info->ri_cc;
	struct svc_rqst *rqstp = info->ri_rqst;
	unsigned int sge_no, seg_len, len;
	struct svc_rdma_rw_ctxt *ctxt;
	struct scatterlist *sg;
	int ret;

	len = segment->rs_length;
	sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
	ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
	if (!ctxt)
		return -ENOMEM;
	ctxt->rw_nents = sge_no;

	sg = ctxt->rw_sg_table.sgl;
	for (sge_no = 0; sge_no < ctxt->rw_nents; sge_no++) {
		seg_len = min_t(unsigned int, len,
				PAGE_SIZE - info->ri_pageoff);

		if (!info->ri_pageoff)
			head->rc_page_count++;

		sg_set_page(sg, rqstp->rq_pages[info->ri_pageno],
			    seg_len, info->ri_pageoff);
		sg = sg_next(sg);

		info->ri_pageoff += seg_len;
		if (info->ri_pageoff == PAGE_SIZE) {
			info->ri_pageno++;
			info->ri_pageoff = 0;
		}
		len -= seg_len;

		/* Safety check */
		if (len &&
		    &rqstp->rq_pages[info->ri_pageno + 1] > rqstp->rq_page_end)
			goto out_overrun;
	}

	ret = svc_rdma_rw_ctx_init(cc->cc_rdma, ctxt, segment->rs_offset,
				   segment->rs_handle, DMA_FROM_DEVICE);
	if (ret < 0)
		return -EIO;
	percpu_counter_inc(&svcrdma_stat_read);

	list_add(&ctxt->rw_list, &cc->cc_rwctxts);
	cc->cc_sqecount += ret;
	return 0;

out_overrun:
	trace_svcrdma_page_overrun_err(cc->cc_rdma, rqstp, info->ri_pageno);
	return -EINVAL;
}
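
/* Worked example (values assumed): with PAGE_SIZE of 4096, ri_pageoff
 * of 1000, and a segment rs_length of 10000, sge_no above is
 * PAGE_ALIGN(11000) >> PAGE_SHIFT = 3; the loop fills three SGEs of
 * 3096, 4096, and 2808 bytes and leaves ri_pageoff at 2808 so the
 * next segment continues in the same page.
 */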

/**
 * svc_rdma_build_read_chunk - Build RDMA Read WQEs to pull one RDMA chunk
 * @info: context for ongoing I/O
 * @chunk: Read chunk to pull
 *
 * Return values:
 *   %0: the Read WR chain was constructed successfully
 *   %-EINVAL: there were not enough resources to finish
 *   %-ENOMEM: allocating local resources failed
 *   %-EIO: a DMA mapping error occurred
 */
static int svc_rdma_build_read_chunk(struct svc_rdma_read_info *info,
				     const struct svc_rdma_chunk *chunk)
{
	const struct svc_rdma_segment *segment;
	int ret;

	ret = -EINVAL;
	pcl_for_each_segment(segment, chunk) {
		ret = svc_rdma_build_read_segment(info, segment);
		if (ret < 0)
			break;
		info->ri_totalbytes += segment->rs_length;
	}
	return ret;
}

/**
 * svc_rdma_copy_inline_range - Copy part of the inline content into pages
 * @info: context for RDMA Reads
 * @offset: offset into the Receive buffer of region to copy
 * @remaining: length of region to copy
 *
 * Take a page at a time from rqstp->rq_pages and copy the inline
 * content from the Receive buffer into that page. Update
 * info->ri_pageno and info->ri_pageoff so that the next RDMA Read
 * result will land contiguously with the copied content.
 *
 * Return values:
 *   %0: Inline content was successfully copied
 *   %-EINVAL: offset or length was incorrect
 */
static int svc_rdma_copy_inline_range(struct svc_rdma_read_info *info,
				      unsigned int offset,
				      unsigned int remaining)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	unsigned char *dst, *src = head->rc_recv_buf;
	struct svc_rqst *rqstp = info->ri_rqst;
	unsigned int page_no, numpages;

	numpages = PAGE_ALIGN(info->ri_pageoff + remaining) >> PAGE_SHIFT;
	for (page_no = 0; page_no < numpages; page_no++) {
		unsigned int page_len;

		page_len = min_t(unsigned int, remaining,
				 PAGE_SIZE - info->ri_pageoff);

		if (!info->ri_pageoff)
			head->rc_page_count++;

		dst = page_address(rqstp->rq_pages[info->ri_pageno]);
		memcpy(dst + info->ri_pageoff, src + offset, page_len);

		info->ri_totalbytes += page_len;
		info->ri_pageoff += page_len;
		if (info->ri_pageoff == PAGE_SIZE) {
			info->ri_pageno++;
			info->ri_pageoff = 0;
		}
		remaining -= page_len;
		offset += page_len;
	}

	return 0;
}
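
/* Worked example (values assumed): with PAGE_SIZE of 4096, ri_pageoff
 * of 0, and remaining = 5000 bytes of inline content, numpages is 2;
 * the loop copies 4096 bytes into the current rq_pages entry and the
 * final 904 bytes into the next one, leaving ri_pageoff at 904 so a
 * following RDMA Read lands contiguously after the copied data.
 */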

/**
 * svc_rdma_read_multiple_chunks - Construct RDMA Reads to pull data item Read chunks
 * @info: context for RDMA Reads
 *
 * The chunk data lands in rqstp->rq_arg as a series of contiguous pages,
 * like an incoming TCP call.
 *
 * Return values:
 *   %0: RDMA Read WQEs were successfully built
 *   %-EINVAL: client provided too many chunks or segments,
 *   %-ENOMEM: rdma_rw context pool was exhausted,
 *   %-ENOTCONN: posting failed (connection is lost),
 *   %-EIO: rdma_rw initialization failed (DMA mapping, etc).
 */
static noinline int svc_rdma_read_multiple_chunks(struct svc_rdma_read_info *info)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	const struct svc_rdma_pcl *pcl = &head->rc_read_pcl;
	struct xdr_buf *buf = &info->ri_rqst->rq_arg;
	struct svc_rdma_chunk *chunk, *next;
	unsigned int start, length;
	int ret;

	start = 0;
	chunk = pcl_first_chunk(pcl);
	length = chunk->ch_position;
	ret = svc_rdma_copy_inline_range(info, start, length);
	if (ret < 0)
		return ret;

	pcl_for_each_chunk(chunk, pcl) {
		ret = svc_rdma_build_read_chunk(info, chunk);
		if (ret < 0)
			return ret;

		next = pcl_next_chunk(pcl, chunk);
		if (!next)
			break;

		start += length;
		length = next->ch_position - info->ri_totalbytes;
		ret = svc_rdma_copy_inline_range(info, start, length);
		if (ret < 0)
			return ret;
	}

	start += length;
	length = head->rc_byte_len - start;
	ret = svc_rdma_copy_inline_range(info, start, length);
	if (ret < 0)
		return ret;

	buf->len += info->ri_totalbytes;
	buf->buflen += info->ri_totalbytes;

	buf->head[0].iov_base = page_address(info->ri_rqst->rq_pages[0]);
	buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, info->ri_totalbytes);
	buf->pages = &info->ri_rqst->rq_pages[1];
	buf->page_len = info->ri_totalbytes - buf->head[0].iov_len;
	return 0;
}
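
/* For example (chunk positions assumed), a Call with inline content
 * and Read chunks at ch_position 120 and 4216 is rebuilt above as:
 * copy inline bytes 0-119, pull the first chunk, copy the inline
 * bytes that lie between the two chunk positions, pull the second
 * chunk, then copy any trailing inline bytes, so that rq_arg ends up
 * as one contiguous stream of pages.
 */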

/**
 * svc_rdma_read_data_item - Construct RDMA Reads to pull data item Read chunks
 * @info: context for RDMA Reads
 *
 * The chunk data lands in the page list of rqstp->rq_arg.pages.
 *
 * Currently NFSD does not look at the rqstp->rq_arg.tail[0] kvec.
 * Therefore, XDR round-up of the Read chunk and trailing
 * inline content must both be added at the end of the pagelist.
 *
 * Return values:
 *   %0: RDMA Read WQEs were successfully built
 *   %-EINVAL: client provided too many chunks or segments,
 *   %-ENOMEM: rdma_rw context pool was exhausted,
 *   %-ENOTCONN: posting failed (connection is lost),
 *   %-EIO: rdma_rw initialization failed (DMA mapping, etc).
 */
static int svc_rdma_read_data_item(struct svc_rdma_read_info *info)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	struct xdr_buf *buf = &info->ri_rqst->rq_arg;
	struct svc_rdma_chunk *chunk;
	unsigned int length;
	int ret;

	chunk = pcl_first_chunk(&head->rc_read_pcl);
	ret = svc_rdma_build_read_chunk(info, chunk);
	if (ret < 0)
		goto out;

	/* Split the Receive buffer between the head and tail
	 * buffers at Read chunk's position. XDR roundup of the
	 * chunk is not included in either the pagelist or in
	 * the tail.
	 */
	buf->tail[0].iov_base = buf->head[0].iov_base + chunk->ch_position;
	buf->tail[0].iov_len = buf->head[0].iov_len - chunk->ch_position;
	buf->head[0].iov_len = chunk->ch_position;

	/* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
	 *
	 * If the client already rounded up the chunk length, the
	 * length does not change. Otherwise, the length of the page
	 * list is increased to include XDR round-up.
	 *
	 * Currently these chunks always start at page offset 0,
	 * thus the rounded-up length never crosses a page boundary.
	 */
	buf->pages = &info->ri_rqst->rq_pages[0];
	length = xdr_align_size(chunk->ch_length);
	buf->page_len = length;
	buf->len += length;
	buf->buflen += length;

out:
	return ret;
}
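
/* For example (sizes assumed), if the Receive buffer holds 248 bytes
 * of RPC message and the chunk's ch_position is 240, the head is
 * trimmed to 240 bytes and the final 8 bytes become the tail. A chunk
 * of ch_length 1005 then yields page_len = xdr_align_size(1005) =
 * 1008, so the three bytes of XDR round-up stay in the page list
 * rather than in the tail.
 */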

/**
 * svc_rdma_read_chunk_range - Build RDMA Read WQEs for portion of a chunk
 * @info: context for RDMA Reads
 * @chunk: parsed Call chunk to pull
 * @offset: offset of region to pull
 * @length: length of region to pull
 *
 * Return values:
 *   %0: RDMA Read WQEs were successfully built
 *   %-EINVAL: there were not enough resources to finish
 *   %-ENOMEM: rdma_rw context pool was exhausted,
 *   %-ENOTCONN: posting failed (connection is lost),
 *   %-EIO: rdma_rw initialization failed (DMA mapping, etc).
 */
static int svc_rdma_read_chunk_range(struct svc_rdma_read_info *info,
				     const struct svc_rdma_chunk *chunk,
				     unsigned int offset, unsigned int length)
{
	const struct svc_rdma_segment *segment;
	int ret;

	ret = -EINVAL;
	pcl_for_each_segment(segment, chunk) {
		struct svc_rdma_segment dummy;

		if (offset > segment->rs_length) {
			offset -= segment->rs_length;
			continue;
		}

		dummy.rs_handle = segment->rs_handle;
		dummy.rs_length = min_t(u32, length, segment->rs_length) - offset;
		dummy.rs_offset = segment->rs_offset + offset;

		ret = svc_rdma_build_read_segment(info, &dummy);
		if (ret < 0)
			break;

		info->ri_totalbytes += dummy.rs_length;
		length -= dummy.rs_length;
		offset = 0;
	}
	return ret;
}
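
/* For example (values assumed), when the region to pull starts 5000
 * bytes into a chunk whose first segment is 4096 bytes long, the loop
 * above skips that whole segment, reduces @offset to 904, and begins
 * issuing Reads part-way into the second segment via the temporary
 * "dummy" segment descriptor.
 */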

/**
 * svc_rdma_read_call_chunk - Build RDMA Read WQEs to pull a Long Message
 * @info: context for RDMA Reads
 *
 * Return values:
 *   %0: RDMA Read WQEs were successfully built
 *   %-EINVAL: there were not enough resources to finish
 *   %-ENOMEM: rdma_rw context pool was exhausted,
 *   %-ENOTCONN: posting failed (connection is lost),
 *   %-EIO: rdma_rw initialization failed (DMA mapping, etc).
 */
static int svc_rdma_read_call_chunk(struct svc_rdma_read_info *info)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	const struct svc_rdma_chunk *call_chunk =
			pcl_first_chunk(&head->rc_call_pcl);
	const struct svc_rdma_pcl *pcl = &head->rc_read_pcl;
	struct svc_rdma_chunk *chunk, *next;
	unsigned int start, length;
	int ret;

	if (pcl_is_empty(pcl))
		return svc_rdma_build_read_chunk(info, call_chunk);

	start = 0;
	chunk = pcl_first_chunk(pcl);
	length = chunk->ch_position;
	ret = svc_rdma_read_chunk_range(info, call_chunk, start, length);
	if (ret < 0)
		return ret;

	pcl_for_each_chunk(chunk, pcl) {
		ret = svc_rdma_build_read_chunk(info, chunk);
		if (ret < 0)
			return ret;

		next = pcl_next_chunk(pcl, chunk);
		if (!next)
			break;

		start += length;
		length = next->ch_position - info->ri_totalbytes;
		ret = svc_rdma_read_chunk_range(info, call_chunk,
						start, length);
		if (ret < 0)
			return ret;
	}

	start += length;
	length = call_chunk->ch_length - start;
	return svc_rdma_read_chunk_range(info, call_chunk, start, length);
}

/**
 * svc_rdma_read_special - Build RDMA Read WQEs to pull a Long Message
 * @info: context for RDMA Reads
 *
 * The start of the data lands in the first page just after the
 * Transport header, and the rest lands in rqstp->rq_arg.pages.
 *
 * Assumptions:
 *	- A PZRC (Position Zero Read Chunk) is never sent in an
 *	  RDMA_MSG message, though it is allowed by spec.
 *
 * Return values:
 *   %0: RDMA Read WQEs were successfully built
 *   %-EINVAL: client provided too many chunks or segments,
 *   %-ENOMEM: rdma_rw context pool was exhausted,
 *   %-ENOTCONN: posting failed (connection is lost),
 *   %-EIO: rdma_rw initialization failed (DMA mapping, etc).
 */
static noinline int svc_rdma_read_special(struct svc_rdma_read_info *info)
{
	struct xdr_buf *buf = &info->ri_rqst->rq_arg;
	int ret;

	ret = svc_rdma_read_call_chunk(info);
	if (ret < 0)
		goto out;

	buf->len += info->ri_totalbytes;
	buf->buflen += info->ri_totalbytes;

	buf->head[0].iov_base = page_address(info->ri_rqst->rq_pages[0]);
	buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, info->ri_totalbytes);
	buf->pages = &info->ri_rqst->rq_pages[1];
	buf->page_len = info->ri_totalbytes - buf->head[0].iov_len;

out:
	return ret;
}

/**
 * svc_rdma_process_read_list - Pull list of Read chunks from the client
 * @rdma: controlling RDMA transport
 * @rqstp: set of pages to use as Read sink buffers
 * @head: pages under I/O collect here
 *
 * The RPC/RDMA protocol assumes that the upper layer's XDR decoders
 * pull each Read chunk as they decode an incoming RPC message.
 *
 * On Linux, however, the server needs to have a fully-constructed RPC
 * message in rqstp->rq_arg when there is a positive return code from
 * ->xpo_recvfrom. So the Read list is safety-checked immediately when
 * it is received, then here the whole Read list is pulled all at once.
 * The ingress RPC message is fully reconstructed once all associated
 * RDMA Reads have completed.
 *
 * Return values:
 *   %1: all needed RDMA Reads were posted successfully,
 *   %-EINVAL: client provided too many chunks or segments,
 *   %-ENOMEM: rdma_rw context pool was exhausted,
 *   %-ENOTCONN: posting failed (connection is lost),
 *   %-EIO: rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_process_read_list(struct svcxprt_rdma *rdma,
			       struct svc_rqst *rqstp,
			       struct svc_rdma_recv_ctxt *head)
{
	struct svc_rdma_read_info *info;
	struct svc_rdma_chunk_ctxt *cc;
	int ret;

	info = svc_rdma_read_info_alloc(rdma);
	if (!info)
		return -ENOMEM;
	cc = &info->ri_cc;
	info->ri_rqst = rqstp;
	info->ri_readctxt = head;
	info->ri_pageno = 0;
	info->ri_pageoff = 0;
	info->ri_totalbytes = 0;

	if (pcl_is_empty(&head->rc_call_pcl)) {
		if (head->rc_read_pcl.cl_count == 1)
			ret = svc_rdma_read_data_item(info);
		else
			ret = svc_rdma_read_multiple_chunks(info);
	} else
		ret = svc_rdma_read_special(info);
	if (ret < 0)
		goto out_err;

	trace_svcrdma_post_read_chunk(&cc->cc_cid, cc->cc_sqecount);
	init_completion(&cc->cc_done);
	ret = svc_rdma_post_chunk_ctxt(cc);
	if (ret < 0)
		goto out_err;

	ret = 1;
	wait_for_completion(&cc->cc_done);
	if (cc->cc_status != IB_WC_SUCCESS)
		ret = -EIO;

	/* rq_respages starts after the last arg page */
	rqstp->rq_respages = &rqstp->rq_pages[head->rc_page_count];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* Ensure svc_rdma_recv_ctxt_put() does not try to release pages */
	head->rc_page_count = 0;

out_err:
	svc_rdma_read_info_free(info);
	return ret;
}