// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a client.
 *
 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 * transport header, post all Write WRs needed for this Reply, then post
 * a Send WR conveying the transport header and the RPC message itself to
 * the client.
 *
 * svc_rdma_sendto must fully transmit the Reply before returning, as
 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 * resources referred to by the svc_rqst are also recycled at that time.
 * Therefore any resources that must remain longer must be detached
 * from the svc_rqst and released later.
 *
 * Page Management
 *
 * The I/O that performs Reply transmission is asynchronous, and may
 * complete well after sendto returns. Thus pages under I/O must be
 * removed from the svc_rqst before sendto returns.
 *
 * The logic here depends on Send Queue and completion ordering. Since
 * the Send WR is always posted last, it will always complete last. Thus
 * when it completes, it is guaranteed that all previous Write WRs have
 * also completed.
 *
 * Write WRs are constructed and posted. Each Write segment gets its own
 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 * DMA-unmap the pages under I/O for that Write segment. The Write
 * completion handler does not release any pages.
 *
 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 * The ownership of all of the Reply's pages is transferred into that
 * ctxt, the Send WR is posted, and sendto returns.
 *
 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
 * Send completion handler finally releases the Reply's pages.
 *
 * This mechanism also assumes that completions on the transport's Send
 * Completion Queue do not run in parallel. Otherwise a Write completion
 * and Send completion running at the same time could release pages that
 * are still DMA-mapped.
 *
 * Error Handling
 *
 * - If the Send WR is posted successfully, it will either complete
 *   successfully, or get flushed. Either way, the Send completion
 *   handler releases the Reply's pages.
 * - If the Send WR cannot be posted, the forward path releases
 *   the Reply's pages.
 *
 * Without the use of page reference counting, this scheme handles
 * the case where two different Write segments send portions of the
 * same page.
 */
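
/* To make the ordering dependency above concrete, here is an
 * illustrative (not prescriptive) posting sequence for a Reply that
 * carries one two-segment Write chunk:
 *
 *	Write WR  (chunk segment 1)  - posted first
 *	Write WR  (chunk segment 2)  - posted next
 *	Send WR   (transport header + inline message) - posted last
 *
 * Because all three WRs go on the same Send Queue, the Send WR's
 * completion implies both Write WRs have already completed, so the
 * Send completion handler can safely release every Reply page.
 */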

#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_send_ctxt *
svc_rdma_next_send_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_send_ctxt,
					sc_list);
}

static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;
	size_t size;
	int i;

	size = sizeof(*ctxt);
	size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
	ctxt = kmalloc(size, GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	ctxt->sc_send_wr.next = NULL;
	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
	ctxt->sc_cqe.done = svc_rdma_wc_send;
	ctxt->sc_xprt_buf = buffer;
	xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
		     rdma->sc_max_req_size);
	ctxt->sc_sges[0].addr = addr;

	for (i = 0; i < rdma->sc_max_send_sges; i++)
		ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxts for an xprt
 * @rdma: svcxprt_rdma being torn down
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts))) {
		list_del(&ctxt->sc_list);
		ib_dma_unmap_single(rdma->sc_pd->device,
				    ctxt->sc_sges[0].addr,
				    rdma->sc_max_req_size,
				    DMA_TO_DEVICE);
		kfree(ctxt->sc_xprt_buf);
		kfree(ctxt);
	}
}

/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	spin_lock(&rdma->sc_send_lock);
	ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts);
	if (!ctxt)
		goto out_empty;
	list_del(&ctxt->sc_list);
	spin_unlock(&rdma->sc_send_lock);

out:
	rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
	xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf,
			ctxt->sc_xprt_buf, NULL);

	ctxt->sc_send_wr.num_sge = 0;
	ctxt->sc_cur_sge_no = 0;
	ctxt->sc_page_count = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_send_lock);
	ctxt = svc_rdma_send_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 * @ctxt's remaining SGEs are DMA-unmapped, and pages left in
 * sc_pages are released.
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_device *device = rdma->sc_cm_id->device;
	unsigned int i;

	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
		ib_dma_unmap_page(device,
				  ctxt->sc_sges[i].addr,
				  ctxt->sc_sges[i].length,
				  DMA_TO_DEVICE);
		trace_svcrdma_dma_unmap_page(rdma,
					     ctxt->sc_sges[i].addr,
					     ctxt->sc_sges[i].length);
	}

	for (i = 0; i < ctxt->sc_page_count; ++i)
		put_page(ctxt->sc_pages[i]);

	spin_lock(&rdma->sc_send_lock);
	list_add(&ctxt->sc_list, &rdma->sc_send_ctxts);
	spin_unlock(&rdma->sc_send_lock);
}
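
/* A minimal usage sketch of the send_ctxt lifecycle (illustrative
 * only; svc_rdma_sendto() below is the real consumer):
 *
 *	ctxt = svc_rdma_send_ctxt_get(rdma);
 *	if (!ctxt)
 *		return -ENOMEM;
 *	... encode the transport header via ctxt->sc_stream ...
 *	ret = svc_rdma_send(rdma, &ctxt->sc_send_wr);
 *	if (ret)
 *		svc_rdma_send_ctxt_put(rdma, ctxt);
 *
 * On successful posting, ownership of the ctxt passes to the Send
 * completion handler, which returns it to the free list.
 */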

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_send_ctxt *ctxt;

	trace_svcrdma_wc_send(wc);

	atomic_inc(&rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	ctxt = container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
	svc_rdma_send_ctxt_put(rdma, ctxt);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_xprt_enqueue(&rdma->sc_xprt);
	}

	svc_xprt_put(&rdma->sc_xprt);
}

/**
 * svc_rdma_send - Post a single Send WR
 * @rdma: transport on which to post the WR
 * @wr: prepared Send WR to post
 *
 * Returns zero if the Send WR was posted successfully. Otherwise, a
 * negative errno is returned.
 */
int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
{
	int ret;

	might_sleep();

	/* Sync the transport header buffer */
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      wr->sg_list[0].addr,
				      wr->sg_list[0].length,
				      DMA_TO_DEVICE);

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		if (atomic_dec_return(&rdma->sc_sq_avail) < 0) {
			atomic_inc(&rdma_stat_sq_starve);
			trace_svcrdma_sq_full(rdma);
			atomic_inc(&rdma->sc_sq_avail);
			wait_event(rdma->sc_send_wait,
				   atomic_read(&rdma->sc_sq_avail) > 1);
			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
				return -ENOTCONN;
			trace_svcrdma_sq_retry(rdma);
			continue;
		}

		svc_xprt_get(&rdma->sc_xprt);
		trace_svcrdma_post_send(wr);
		ret = ib_post_send(rdma->sc_qp, wr, NULL);
		if (ret)
			break;
		return 0;
	}

	trace_svcrdma_sq_post_err(rdma, ret);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	svc_xprt_put(&rdma->sc_xprt);
	wake_up(&rdma->sc_send_wait);
	return ret;
}

/**
 * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Read list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt)
{
	/* RPC-over-RDMA version 1 replies never have a Read list. */
	return xdr_stream_encode_item_absent(&sctxt->sc_stream);
}

/**
 * svc_rdma_encode_write_segment - Encode one Write segment
 * @src: matching Write chunk in the RPC Call header
 * @sctxt: Send context for the RPC Reply
 * @remaining: remaining bytes of the payload left in the Write chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write segment
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_segment(__be32 *src,
					     struct svc_rdma_send_ctxt *sctxt,
					     unsigned int *remaining)
{
	__be32 *p;
	const size_t len = rpcrdma_segment_maxsz * sizeof(*p);
	u32 handle, length;
	u64 offset;

	p = xdr_reserve_space(&sctxt->sc_stream, len);
	if (!p)
		return -EMSGSIZE;

	handle = be32_to_cpup(src++);
	length = be32_to_cpup(src++);
	xdr_decode_hyper(src, &offset);

	*p++ = cpu_to_be32(handle);
	if (*remaining < length) {
		/* segment only partly filled */
		length = *remaining;
		*remaining = 0;
	} else {
		/* entire segment was consumed */
		*remaining -= length;
	}
	*p++ = cpu_to_be32(length);
	xdr_encode_hyper(p, offset);

	trace_svcrdma_encode_wseg(handle, length, offset);
	return len;
}
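
/* For reference, the RDMA segment encoded above has the following
 * XDR layout on the wire (see RFC 8166, struct xdr_rdma_segment):
 *
 *	uint32 handle;	- R_key the client registered
 *	uint32 length;	- rewritten here to the bytes actually sent
 *	uint64 offset;	- remote virtual address, copied unchanged
 *
 * This is why rpcrdma_segment_maxsz is four XDR words.
 */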

/**
 * svc_rdma_encode_write_chunk - Encode one Write chunk
 * @src: matching Write chunk in the RPC Call header
 * @sctxt: Send context for the RPC Reply
 * @remaining: size in bytes of the payload in the Write chunk
 *
 * Copy a Write chunk from the Call transport header to the
 * Reply transport header. Update each segment's length field
 * to reflect the number of bytes written in that segment.
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_chunk(__be32 *src,
					   struct svc_rdma_send_ctxt *sctxt,
					   unsigned int remaining)
{
	unsigned int i, nsegs;
	ssize_t len, ret;

	len = 0;
	trace_svcrdma_encode_write_chunk(remaining);

	/* Skip the Call chunk's list discriminator */
	src++;
	ret = xdr_stream_encode_item_present(&sctxt->sc_stream);
	if (ret < 0)
		return -EMSGSIZE;
	len += ret;

	nsegs = be32_to_cpup(src++);
	ret = xdr_stream_encode_u32(&sctxt->sc_stream, nsegs);
	if (ret < 0)
		return -EMSGSIZE;
	len += ret;

	for (i = nsegs; i; i--) {
		ret = svc_rdma_encode_write_segment(src, sctxt, &remaining);
		if (ret < 0)
			return -EMSGSIZE;
		src += rpcrdma_segment_maxsz;
		len += ret;
	}

	return len;
}
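
/* Thus a complete Write chunk in the Reply header looks like this
 * (illustrative, per RFC 8166):
 *
 *	1		- list item present
 *	N		- segment count
 *	segment 1..N	- handle/length/offset triples, with each
 *			  length updated to the bytes consumed
 */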

/**
 * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the first Write chunk
 *
 * The client provides a Write chunk list in the Call message. Fill
 * in the segments in the first Write chunk in the Reply's transport
 * header with the number of bytes consumed in each segment.
 * Remaining chunks are returned unused.
 *
 * Assumptions:
 *  - Client has provided only one Write chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Write list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t
svc_rdma_encode_write_list(const struct svc_rdma_recv_ctxt *rctxt,
			   struct svc_rdma_send_ctxt *sctxt,
			   unsigned int length)
{
	ssize_t len, ret;

	ret = svc_rdma_encode_write_chunk(rctxt->rc_write_list, sctxt, length);
	if (ret < 0)
		return ret;
	len = ret;

	/* Terminate the Write list */
	ret = xdr_stream_encode_item_absent(&sctxt->sc_stream);
	if (ret < 0)
		return ret;

	return len + ret;
}

/**
 * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the Reply chunk
 *
 * Assumptions:
 * - Reply can always fit in the client-provided Reply chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Reply chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t
svc_rdma_encode_reply_chunk(const struct svc_rdma_recv_ctxt *rctxt,
			    struct svc_rdma_send_ctxt *sctxt,
			    unsigned int length)
{
	return svc_rdma_encode_write_chunk(rctxt->rc_reply_chunk, sctxt,
					   length);
}

static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
				 struct svc_rdma_send_ctxt *ctxt,
				 struct page *page,
				 unsigned long offset,
				 unsigned int len)
{
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	trace_svcrdma_dma_map_page(rdma, dma_addr, len);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

	ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
	ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
	ctxt->sc_send_wr.num_sge++;
	return 0;

out_maperr:
	return -EIO;
}

/* ib_dma_map_page() is used here because svc_rdma_send_ctxt_put()
 * handles DMA-unmap and it uses ib_dma_unmap_page() exclusively.
 */
static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma,
				struct svc_rdma_send_ctxt *ctxt,
				unsigned char *base,
				unsigned int len)
{
	return svc_rdma_dma_map_page(rdma, ctxt, virt_to_page(base),
				     offset_in_page(base), len);
}

/**
 * svc_rdma_pull_up_needed - Determine whether to use pull-up
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: xdr_buf containing RPC message to transmit
 *
 * Returns:
 *	%true if pull-up must be used
 *	%false otherwise
 */
static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
				    struct svc_rdma_send_ctxt *sctxt,
				    const struct svc_rdma_recv_ctxt *rctxt,
				    struct xdr_buf *xdr)
{
	int elements;

	/* For small messages, copying bytes is cheaper than DMA mapping.
	 */
	if (sctxt->sc_hdrbuf.len + xdr->len < RPCRDMA_PULLUP_THRESH)
		return true;

	/* Check whether the xdr_buf has more elements than can
	 * fit in a single RDMA Send.
	 */
	/* xdr->head */
	elements = 1;

	/* xdr->pages */
	if (!rctxt || !rctxt->rc_write_list) {
		unsigned int remaining;
		unsigned long pageoff;

		pageoff = xdr->page_base & ~PAGE_MASK;
		remaining = xdr->page_len;
		while (remaining) {
			++elements;
			remaining -= min_t(u32, PAGE_SIZE - pageoff,
					   remaining);
			pageoff = 0;
		}
	}

	/* xdr->tail */
	if (xdr->tail[0].iov_len)
		++elements;

	/* assume 1 SGE is needed for the transport header */
	return elements >= rdma->sc_max_send_sges;
}
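
/* A worked example (illustrative numbers): suppose the device supports
 * sc_max_send_sges = 4. A large Reply with no Write chunk whose
 * xdr_buf spans head (1 element) + two pages (2 elements) + tail
 * (1 element) needs 4 elements. Since one SGE is reserved for the
 * transport header, 4 >= 4 and svc_rdma_pull_up_needed() reports that
 * the message must be pulled up into a single buffer.
 */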

/**
 * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * The device is not capable of sending the reply directly.
 * Assemble the elements of @xdr into the transport header buffer.
 *
 * Returns zero on success, or a negative errno on failure.
 */
static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
				      struct svc_rdma_send_ctxt *sctxt,
				      const struct svc_rdma_recv_ctxt *rctxt,
				      const struct xdr_buf *xdr)
{
	unsigned char *dst, *tailbase;
	unsigned int taillen;

	dst = sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len;
	memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
	dst += xdr->head[0].iov_len;

	tailbase = xdr->tail[0].iov_base;
	taillen = xdr->tail[0].iov_len;
	if (rctxt && rctxt->rc_write_list) {
		u32 xdrpad;

		xdrpad = xdr_pad_size(xdr->page_len);
		if (taillen && xdrpad) {
			tailbase += xdrpad;
			taillen -= xdrpad;
		}
	} else {
		unsigned int len, remaining;
		unsigned long pageoff;
		struct page **ppages;

		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		pageoff = xdr->page_base & ~PAGE_MASK;
		remaining = xdr->page_len;
		while (remaining) {
			len = min_t(u32, PAGE_SIZE - pageoff, remaining);

			/* Copy from the current page, honoring the
			 * first page's offset, then advance to the
			 * next page in the list.
			 */
			memcpy(dst, page_address(*ppages) + pageoff, len);
			remaining -= len;
			dst += len;
			pageoff = 0;
			++ppages;
		}
	}

	if (taillen)
		memcpy(dst, tailbase, taillen);

	sctxt->sc_sges[0].length += xdr->len;
	trace_svcrdma_send_pullup(sctxt->sc_sges[0].length);
	return 0;
}

/**
 * svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * Load the xdr_buf into the ctxt's sge array, and DMA map each
 * element as it is added. The Send WR's num_sge field is set.
 *
 * Returns zero on success, or a negative errno on failure.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
			   struct svc_rdma_send_ctxt *sctxt,
			   const struct svc_rdma_recv_ctxt *rctxt,
			   struct xdr_buf *xdr)
{
	unsigned int len, remaining;
	unsigned long page_off;
	struct page **ppages;
	unsigned char *base;
	u32 xdr_pad;
	int ret;

	/* Set up the (persistently-mapped) transport header SGE. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;

	/* If there is a Reply chunk, nothing follows the transport
	 * header, and we're done here.
	 */
	if (rctxt && rctxt->rc_reply_chunk)
		return 0;

	/* For pull-up, svc_rdma_send() will sync the transport header.
	 * No additional DMA mapping is necessary.
	 */
	if (svc_rdma_pull_up_needed(rdma, sctxt, rctxt, xdr))
		return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr);

	++sctxt->sc_cur_sge_no;
	ret = svc_rdma_dma_map_buf(rdma, sctxt,
				   xdr->head[0].iov_base,
				   xdr->head[0].iov_len);
	if (ret < 0)
		return ret;

	/* If a Write chunk is present, the xdr_buf's page list
	 * is not included inline. However the Upper Layer may
	 * have added XDR padding in the tail buffer, and that
	 * should not be included inline.
	 */
	if (rctxt && rctxt->rc_write_list) {
		base = xdr->tail[0].iov_base;
		len = xdr->tail[0].iov_len;
		xdr_pad = xdr_pad_size(xdr->page_len);

		if (len && xdr_pad) {
			base += xdr_pad;
			len -= xdr_pad;
		}

		goto tail;
	}

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	page_off = xdr->page_base & ~PAGE_MASK;
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - page_off, remaining);

		++sctxt->sc_cur_sge_no;
		ret = svc_rdma_dma_map_page(rdma, sctxt, *ppages++,
					    page_off, len);
		if (ret < 0)
			return ret;

		remaining -= len;
		page_off = 0;
	}

	base = xdr->tail[0].iov_base;
	len = xdr->tail[0].iov_len;
tail:
	if (len) {
		++sctxt->sc_cur_sge_no;
		ret = svc_rdma_dma_map_buf(rdma, sctxt, base, len);
		if (ret < 0)
			return ret;
	}

	return 0;
}
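
/* The resulting SGE layout for a chunk-less Reply that is not
 * pulled up (illustrative):
 *
 *	sc_sges[0]	- transport header (persistently mapped)
 *	sc_sges[1]	- xdr->head
 *	sc_sges[2..n]	- xdr->pages, one SGE per page fragment
 *	sc_sges[n+1]	- xdr->tail, if present
 */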

/* The svc_rqst and all resources it owns are released as soon as
 * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
 * so they are released by the Send completion handler.
 */
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
				   struct svc_rdma_send_ctxt *ctxt)
{
	int i, pages = rqstp->rq_next_page - rqstp->rq_respages;

	ctxt->sc_page_count += pages;
	for (i = 0; i < pages; i++) {
		ctxt->sc_pages[i] = rqstp->rq_respages[i];
		rqstp->rq_respages[i] = NULL;
	}

	/* Prevent svc_xprt_release from releasing pages in rq_pages */
	rqstp->rq_next_page = rqstp->rq_respages;
}

/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * RDMA Send is the last step of transmitting an RPC reply. Pages
 * involved in the earlier RDMA Writes are here transferred out
 * of the rqstp and into the sctxt's page array. These pages are
 * DMA unmapped by each Write completion, but the subsequent Send
 * completion finally releases these pages.
 *
 * Assumptions:
 * - The Reply's transport header will never be larger than a page.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *sctxt,
				   const struct svc_rdma_recv_ctxt *rctxt,
				   struct svc_rqst *rqstp)
{
	int ret;

	ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqstp->rq_res);
	if (ret < 0)
		return ret;

	svc_rdma_save_io_pages(rqstp, sctxt);

	if (rctxt->rc_inv_rkey) {
		sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
		sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
	} else {
		sctxt->sc_send_wr.opcode = IB_WR_SEND;
	}
	return svc_rdma_send(rdma, &sctxt->sc_send_wr);
}

/* Given the client-provided Write and Reply chunks, the server was not
 * able to form a complete reply. Return an RDMA_ERROR message so the
 * client can retire this RPC transaction.
 *
 * Remote Invalidation is skipped for simplicity.
 */
static int svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *ctxt,
				   struct svc_rqst *rqstp)
{
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	__be32 *p;

	rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
	xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
			NULL);

	p = xdr_reserve_space(&ctxt->sc_stream, RPCRDMA_HDRLEN_ERR);
	if (!p)
		return -ENOMSG;

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p++ = rdma_error;
	*p   = err_chunk;
	trace_svcrdma_err_chunk(*rdma_argp);

	ctxt->sc_send_wr.num_sge = 1;
	ctxt->sc_send_wr.opcode = IB_WR_SEND;
	ctxt->sc_sges[0].length = ctxt->sc_hdrbuf.len;
	return svc_rdma_send(rdma, &ctxt->sc_send_wr);
}
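
/* The five-word ERR_CHUNK message built above (per RFC 8166):
 *
 *	rdma_xid	- copied from the Call's transport header
 *	rdma_vers	- likewise
 *	rdma_credit	- this connection's credit grant
 *	rdma_error	- procedure discriminator
 *	err_chunk	- the client's chunks could not be honored
 */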

/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *	%0 if an RPC reply has been successfully posted,
 *	%-ENOTCONN if the reply could not be constructed or posted
 *	(in which case the connection is closed).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	__be32 *wr_lst = rctxt->rc_write_list;
	__be32 *rp_ch = rctxt->rc_reply_chunk;
	struct xdr_buf *xdr = &rqstp->rq_res;
	struct svc_rdma_send_ctxt *sctxt;
	__be32 *p;
	int ret;

	ret = -ENOTCONN;
	if (svc_xprt_is_dead(xprt))
		goto err0;

	ret = -ENOMEM;
	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		goto err0;

	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto err1;
	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p   = rp_ch ? rdma_nomsg : rdma_msg;

	if (svc_rdma_encode_read_list(sctxt) < 0)
		goto err1;
	if (wr_lst) {
		/* XXX: Presume the client sent only one Write chunk */
		unsigned long offset;
		unsigned int length;

		if (rctxt->rc_read_payload_length) {
			offset = rctxt->rc_read_payload_offset;
			length = rctxt->rc_read_payload_length;
		} else {
			offset = xdr->head[0].iov_len;
			length = xdr->page_len;
		}
		ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr, offset,
						length);
		if (ret < 0)
			goto err2;
		if (svc_rdma_encode_write_list(rctxt, sctxt, length) < 0)
			goto err1;
	} else {
		if (xdr_stream_encode_item_absent(&sctxt->sc_stream) < 0)
			goto err1;
	}
	if (rp_ch) {
		ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res);
		if (ret < 0)
			goto err2;
		if (svc_rdma_encode_reply_chunk(rctxt, sctxt, ret) < 0)
			goto err1;
	} else {
		if (xdr_stream_encode_item_absent(&sctxt->sc_stream) < 0)
			goto err1;
	}

	ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
	if (ret < 0)
		goto err1;
	return 0;

 err2:
	if (ret != -E2BIG && ret != -EINVAL)
		goto err1;

	/* Send completion releases payload pages that were part
	 * of previously posted RDMA Writes.
	 */
	svc_rdma_save_io_pages(rqstp, sctxt);
	ret = svc_rdma_send_error_msg(rdma, sctxt, rqstp);
	if (ret < 0)
		goto err1;
	return 0;

 err1:
	svc_rdma_send_ctxt_put(rdma, sctxt);
 err0:
	trace_svcrdma_send_failed(rqstp, ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	return -ENOTCONN;
}
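
/* For reference, the Reply transport header constructed above takes
 * one of two forms (illustrative):
 *
 * RDMA_MSG (no Reply chunk):
 *	xid, vers, credits, RDMA_MSG,
 *	Read list (always absent), Write list (one chunk or absent),
 *	Reply chunk (absent), then the RPC message inline
 *
 * RDMA_NOMSG (Reply chunk present):
 *	xid, vers, credits, RDMA_NOMSG,
 *	Read list (absent), Write list (one chunk or absent),
 *	Reply chunk; the RPC message is conveyed entirely via
 *	RDMA Write into the Reply chunk
 */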

/**
 * svc_rdma_read_payload - special processing for a READ payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Returns zero on success.
 *
 * For the moment, just record the xdr_buf location of the READ
 * payload. svc_rdma_sendto will use that location later when
 * we actually send the payload.
 */
int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset,
			  unsigned int length)
{
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;

	/* XXX: Just one READ payload slot for now, since our
	 * transport implementation currently supports only one
	 * Write chunk.
	 */
	rctxt->rc_read_payload_offset = offset;
	rctxt->rc_read_payload_length = length;

	return 0;
}