// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a client.
 *
 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 * transport header, post all Write WRs needed for this Reply, then post
 * a Send WR conveying the transport header and the RPC message itself to
 * the client.
 *
 * svc_rdma_sendto must fully transmit the Reply before returning, as
 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 * resources referred to by the svc_rqst are also recycled at that time.
 * Therefore any resources that must remain longer must be detached
 * from the svc_rqst and released later.
 *
 * Page Management
 *
 * The I/O that performs Reply transmission is asynchronous, and may
 * complete well after sendto returns. Thus pages under I/O must be
 * removed from the svc_rqst before sendto returns.
 *
 * The logic here depends on Send Queue and completion ordering. Since
 * the Send WR is always posted last, it will always complete last. Thus
 * when it completes, it is guaranteed that all previous Write WRs have
 * also completed.
 *
 * Write WRs are constructed and posted. Each Write segment gets its own
 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 * DMA-unmap the pages under I/O for that Write segment. The Write
 * completion handler does not release any pages.
 *
 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 * Ownership of all of the Reply's pages is transferred into that
 * ctxt, the Send WR is posted, and sendto returns.
 *
 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
 * Send completion handler finally releases the Reply's pages.
 *
 * This mechanism also assumes that completions on the transport's Send
 * Completion Queue do not run in parallel. Otherwise a Write completion
 * and Send completion running at the same time could release pages that
 * are still DMA-mapped.
 *
 * Error Handling
 *
 * - If the Send WR is posted successfully, it will either complete
 *   successfully, or get flushed. Either way, the Send completion
 *   handler releases the Reply's pages.
 * - If the Send WR cannot be posted, the forward path releases
 *   the Reply's pages.
 *
 * This handles the case, without the use of page reference counting,
 * where two different Write segments send portions of the same page.
 */
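
/* A rough sketch of the ordering described above (illustrative only;
 * the real work is driven by svc_rdma_sendto() and the RDMA core):
 *
 *	svc_rdma_send_write_chunk(rdma, ...);  // Write WRs posted first
 *	svc_rdma_send_reply_msg(rdma, ...);    // Send WR posted last
 *	...
 *	svc_rdma_wc_send(cq, wc);              // fires after all Write
 *	                                       // completions; releases
 *	                                       // the Reply's pages
 *
 * Because the Send WR is posted last on the same Send Queue, its
 * completion implies that every earlier Write WR has also completed.
 */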

#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_send_ctxt *
svc_rdma_next_send_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_send_ctxt,
					sc_list);
}

static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
				   struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = rdma->sc_sq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;
	size_t size;
	int i;

	size = sizeof(*ctxt);
	size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
	ctxt = kmalloc(size, GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);

	ctxt->sc_send_wr.next = NULL;
	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
	ctxt->sc_cqe.done = svc_rdma_wc_send;
	ctxt->sc_xprt_buf = buffer;
	xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
		     rdma->sc_max_req_size);
	ctxt->sc_sges[0].addr = addr;

	for (i = 0; i < rdma->sc_max_send_sges; i++)
		ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts))) {
		list_del(&ctxt->sc_list);
		ib_dma_unmap_single(rdma->sc_pd->device,
				    ctxt->sc_sges[0].addr,
				    rdma->sc_max_req_size,
				    DMA_TO_DEVICE);
		kfree(ctxt->sc_xprt_buf);
		kfree(ctxt);
	}
}

/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	spin_lock(&rdma->sc_send_lock);
	ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts);
	if (!ctxt)
		goto out_empty;
	list_del(&ctxt->sc_list);
	spin_unlock(&rdma->sc_send_lock);

out:
	rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
	xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf,
			ctxt->sc_xprt_buf, NULL);

	ctxt->sc_send_wr.num_sge = 0;
	ctxt->sc_cur_sge_no = 0;
	ctxt->sc_page_count = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_send_lock);
	ctxt = svc_rdma_send_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}
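
/* Typical lifetime of a send_ctxt (illustrative sketch, not a
 * complete caller):
 *
 *	ctxt = svc_rdma_send_ctxt_get(rdma);
 *	if (!ctxt)
 *		return -ENOMEM;
 *	// ... encode the transport header via ctxt->sc_stream ...
 *	ret = svc_rdma_send(rdma, ctxt);
 *	if (ret)
 *		svc_rdma_send_ctxt_put(rdma, ctxt);
 *
 * Once the Send WR has been posted successfully, the Send completion
 * handler owns the ctxt and will return it to the free list; the
 * caller must not touch it again.
 */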

/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 * Pages left in sc_pages are DMA unmapped and released.
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_device *device = rdma->sc_cm_id->device;
	unsigned int i;

	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
		ib_dma_unmap_page(device,
				  ctxt->sc_sges[i].addr,
				  ctxt->sc_sges[i].length,
				  DMA_TO_DEVICE);
		trace_svcrdma_dma_unmap_page(rdma,
					     ctxt->sc_sges[i].addr,
					     ctxt->sc_sges[i].length);
	}

	for (i = 0; i < ctxt->sc_page_count; ++i)
		put_page(ctxt->sc_pages[i]);

	spin_lock(&rdma->sc_send_lock);
	list_add(&ctxt->sc_list, &rdma->sc_send_ctxts);
	spin_unlock(&rdma->sc_send_lock);
}

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_send_ctxt *ctxt =
		container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);

	trace_svcrdma_wc_send(wc, &ctxt->sc_cid);

	atomic_inc(&rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	svc_rdma_send_ctxt_put(rdma, ctxt);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_xprt_enqueue(&rdma->sc_xprt);
	}
}

/**
 * svc_rdma_send - Post a single Send WR
 * @rdma: transport on which to post the WR
 * @ctxt: send ctxt with a Send WR ready to post
 *
 * Returns zero if the Send WR was posted successfully. Otherwise, a
 * negative errno is returned.
 */
int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_send_wr *wr = &ctxt->sc_send_wr;
	int ret;

	might_sleep();

	/* Sync the transport header buffer */
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      wr->sg_list[0].addr,
				      wr->sg_list[0].length,
				      DMA_TO_DEVICE);

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
			atomic_inc(&rdma_stat_sq_starve);
			trace_svcrdma_sq_full(rdma);
			atomic_inc(&rdma->sc_sq_avail);
			wait_event(rdma->sc_send_wait,
				   atomic_read(&rdma->sc_sq_avail) > 1);
			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
				return -ENOTCONN;
			trace_svcrdma_sq_retry(rdma);
			continue;
		}

		trace_svcrdma_post_send(ctxt);
		ret = ib_post_send(rdma->sc_qp, wr, NULL);
		if (ret)
			break;
		return 0;
	}

	trace_svcrdma_sq_post_err(rdma, ret);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	wake_up(&rdma->sc_send_wait);
	return ret;
}
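
/* Send Queue accounting in brief (sketch of the pattern used above):
 * sc_sq_avail is a credit counter for SQ slots.
 *
 *	post:		atomic_dec_return(&rdma->sc_sq_avail) < 0 ?
 *			    return the credit, sleep, retry :
 *			    post the WR
 *	completion:	atomic_inc(&rdma->sc_sq_avail);
 *			wake_up(&rdma->sc_send_wait);
 *
 * A negative result from the decrement means the SQ was full; the
 * credit is handed back and the poster waits until a Send completion
 * frees a slot.
 */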

/**
 * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply Read list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt)
{
	/* RPC-over-RDMA version 1 replies never have a Read list. */
	return xdr_stream_encode_item_absent(&sctxt->sc_stream);
}

/**
 * svc_rdma_encode_write_segment - Encode one Write segment
 * @src: matching Write chunk in the RPC Call header
 * @sctxt: Send context for the RPC Reply
 * @remaining: remaining bytes of the payload left in the Write chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write segment
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_segment(__be32 *src,
					     struct svc_rdma_send_ctxt *sctxt,
					     unsigned int *remaining)
{
	__be32 *p;
	const size_t len = rpcrdma_segment_maxsz * sizeof(*p);
	u32 handle, length;
	u64 offset;

	p = xdr_reserve_space(&sctxt->sc_stream, len);
	if (!p)
		return -EMSGSIZE;

	xdr_decode_rdma_segment(src, &handle, &length, &offset);

	if (*remaining < length) {
		/* segment only partly filled */
		length = *remaining;
		*remaining = 0;
	} else {
		/* entire segment was consumed */
		*remaining -= length;
	}
	xdr_encode_rdma_segment(p, handle, length, offset);

	trace_svcrdma_encode_wseg(handle, length, offset);
	return len;
}

/**
 * svc_rdma_encode_write_chunk - Encode one Write chunk
 * @src: matching Write chunk in the RPC Call header
 * @sctxt: Send context for the RPC Reply
 * @remaining: size in bytes of the payload in the Write chunk
 *
 * Copy a Write chunk from the Call transport header to the
 * Reply transport header. Update each segment's length field
 * to reflect the number of bytes written in that segment.
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_chunk(__be32 *src,
					   struct svc_rdma_send_ctxt *sctxt,
					   unsigned int remaining)
{
	unsigned int i, nsegs;
	ssize_t len, ret;

	len = 0;
	trace_svcrdma_encode_write_chunk(remaining);

	src++;
	ret = xdr_stream_encode_item_present(&sctxt->sc_stream);
	if (ret < 0)
		return -EMSGSIZE;
	len += ret;

	nsegs = be32_to_cpup(src++);
	ret = xdr_stream_encode_u32(&sctxt->sc_stream, nsegs);
	if (ret < 0)
		return -EMSGSIZE;
	len += ret;

	for (i = nsegs; i; i--) {
		ret = svc_rdma_encode_write_segment(src, sctxt, &remaining);
		if (ret < 0)
			return -EMSGSIZE;
		src += rpcrdma_segment_maxsz;
		len += ret;
	}

	return len;
}
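
/* For reference, an encoded Write chunk looks like this on the wire
 * (one XDR word per line; layout per RFC 8166):
 *
 *	1			- list item present
 *	N			- segment count
 *	  handle		- segment 1: RDMA handle (rkey)
 *	  length		-            bytes actually written
 *	  offset (2 words)	-            64-bit remote offset
 *	  ...			- segments 2..N follow the same shape
 *
 * svc_rdma_encode_write_chunk() copies this structure verbatim from
 * the Call, updating only each segment's length field.
 */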

/**
 * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the first Write chunk
 *
 * The client provides a Write chunk list in the Call message. Fill
 * in the segments in the first Write chunk in the Reply's transport
 * header with the number of bytes consumed in each segment.
 * Remaining chunks are returned unused.
 *
 * Assumptions:
 *  - Client has provided only one Write chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Write list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t
svc_rdma_encode_write_list(const struct svc_rdma_recv_ctxt *rctxt,
			   struct svc_rdma_send_ctxt *sctxt,
			   unsigned int length)
{
	ssize_t len, ret;

	ret = svc_rdma_encode_write_chunk(rctxt->rc_write_list, sctxt, length);
	if (ret < 0)
		return ret;
	len = ret;

	/* Terminate the Write list */
	ret = xdr_stream_encode_item_absent(&sctxt->sc_stream);
	if (ret < 0)
		return ret;

	return len + ret;
}

/**
 * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the Reply chunk
 *
 * Assumptions:
 * - Reply can always fit in the client-provided Reply chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Reply chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t
svc_rdma_encode_reply_chunk(const struct svc_rdma_recv_ctxt *rctxt,
			    struct svc_rdma_send_ctxt *sctxt,
			    unsigned int length)
{
	return svc_rdma_encode_write_chunk(rctxt->rc_reply_chunk, sctxt,
					   length);
}

static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
				 struct svc_rdma_send_ctxt *ctxt,
				 struct page *page,
				 unsigned long offset,
				 unsigned int len)
{
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	trace_svcrdma_dma_map_page(rdma, dma_addr, len);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

	ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
	ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
	ctxt->sc_send_wr.num_sge++;
	return 0;

out_maperr:
	return -EIO;
}

/* ib_dma_map_page() is used here because svc_rdma_dma_unmap()
 * handles DMA-unmap and it uses ib_dma_unmap_page() exclusively.
 */
static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma,
				struct svc_rdma_send_ctxt *ctxt,
				unsigned char *base,
				unsigned int len)
{
	return svc_rdma_dma_map_page(rdma, ctxt, virt_to_page(base),
				     offset_in_page(base), len);
}

/**
 * svc_rdma_pull_up_needed - Determine whether to use pull-up
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: xdr_buf containing RPC message to transmit
 *
 * Returns:
 *	%true if pull-up must be used
 *	%false otherwise
 */
static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
				    struct svc_rdma_send_ctxt *sctxt,
				    const struct svc_rdma_recv_ctxt *rctxt,
				    struct xdr_buf *xdr)
{
	int elements;

	/* For small messages, copying bytes is cheaper than DMA mapping.
	 */
	if (sctxt->sc_hdrbuf.len + xdr->len < RPCRDMA_PULLUP_THRESH)
		return true;

	/* Check whether the xdr_buf has more elements than can
	 * fit in a single RDMA Send.
	 */
	/* xdr->head */
	elements = 1;

	/* xdr->pages */
	if (!rctxt || !rctxt->rc_write_list) {
		unsigned int remaining;
		unsigned long pageoff;

		pageoff = xdr->page_base & ~PAGE_MASK;
		remaining = xdr->page_len;
		while (remaining) {
			++elements;
			remaining -= min_t(u32, PAGE_SIZE - pageoff,
					   remaining);
			pageoff = 0;
		}
	}

	/* xdr->tail */
	if (xdr->tail[0].iov_len)
		++elements;

	/* assume 1 SGE is needed for the transport header */
	return elements >= rdma->sc_max_send_sges;
}
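
/* Worked example of the pull-up decision (illustrative numbers): with
 * 4KB pages and a device advertising sc_max_send_sges == 4, a large
 * reply whose xdr_buf has a head, a 12KB page list (3 SGEs), and a
 * tail counts 1 + 3 + 1 = 5 elements. Since the transport header
 * already occupies one SGE, 5 >= 4 and svc_rdma_pull_up_needed()
 * returns true: the message is copied into the ctxt's single
 * pre-mapped buffer instead of being DMA-mapped piecewise.
 */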

/**
 * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * The device is not capable of sending the reply directly.
 * Assemble the elements of @xdr into the transport header buffer.
 *
 * Returns zero on success, or a negative errno on failure.
 */
static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
				      struct svc_rdma_send_ctxt *sctxt,
				      const struct svc_rdma_recv_ctxt *rctxt,
				      const struct xdr_buf *xdr)
{
	unsigned char *dst, *tailbase;
	unsigned int taillen;

	dst = sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len;
	memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
	dst += xdr->head[0].iov_len;

	tailbase = xdr->tail[0].iov_base;
	taillen = xdr->tail[0].iov_len;
	if (rctxt && rctxt->rc_write_list) {
		u32 xdrpad;

		xdrpad = xdr_pad_size(xdr->page_len);
		if (taillen && xdrpad) {
			tailbase += xdrpad;
			taillen -= xdrpad;
		}
	} else {
		unsigned int len, remaining;
		unsigned long pageoff;
		struct page **ppages;

		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		pageoff = xdr->page_base & ~PAGE_MASK;
		remaining = xdr->page_len;
		while (remaining) {
			len = min_t(u32, PAGE_SIZE - pageoff, remaining);

			memcpy(dst, page_address(*ppages) + pageoff, len);
			remaining -= len;
			dst += len;
			pageoff = 0;
			ppages++;
		}
	}

	if (taillen)
		memcpy(dst, tailbase, taillen);

	sctxt->sc_sges[0].length += xdr->len;
	trace_svcrdma_send_pullup(sctxt->sc_sges[0].length);
	return 0;
}

/**
 * svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * Load the xdr_buf into the ctxt's sge array, and DMA map each
 * element as it is added. The Send WR's num_sge field is set.
 *
 * Returns zero on success, or a negative errno on failure.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
			   struct svc_rdma_send_ctxt *sctxt,
			   const struct svc_rdma_recv_ctxt *rctxt,
			   struct xdr_buf *xdr)
{
	unsigned int len, remaining;
	unsigned long page_off;
	struct page **ppages;
	unsigned char *base;
	u32 xdr_pad;
	int ret;

	/* Set up the (persistently-mapped) transport header SGE. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;

	/* If there is a Reply chunk, nothing follows the transport
	 * header, and we're done here.
	 */
	if (rctxt && rctxt->rc_reply_chunk)
		return 0;

	/* For pull-up, svc_rdma_send() will sync the transport header.
	 * No additional DMA mapping is necessary.
	 */
	if (svc_rdma_pull_up_needed(rdma, sctxt, rctxt, xdr))
		return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr);

	++sctxt->sc_cur_sge_no;
	ret = svc_rdma_dma_map_buf(rdma, sctxt,
				   xdr->head[0].iov_base,
				   xdr->head[0].iov_len);
	if (ret < 0)
		return ret;

	/* If a Write chunk is present, the xdr_buf's page list
	 * is not included inline. However the Upper Layer may
	 * have added XDR padding in the tail buffer, and that
	 * should not be included inline.
	 */
	if (rctxt && rctxt->rc_write_list) {
		base = xdr->tail[0].iov_base;
		len = xdr->tail[0].iov_len;
		xdr_pad = xdr_pad_size(xdr->page_len);

		if (len && xdr_pad) {
			base += xdr_pad;
			len -= xdr_pad;
		}

		goto tail;
	}

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	page_off = xdr->page_base & ~PAGE_MASK;
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - page_off, remaining);

		++sctxt->sc_cur_sge_no;
		ret = svc_rdma_dma_map_page(rdma, sctxt, *ppages++,
					    page_off, len);
		if (ret < 0)
			return ret;

		remaining -= len;
		page_off = 0;
	}

	base = xdr->tail[0].iov_base;
	len = xdr->tail[0].iov_len;
tail:
	if (len) {
		++sctxt->sc_cur_sge_no;
		ret = svc_rdma_dma_map_buf(rdma, sctxt, base, len);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* The svc_rqst and all resources it owns are released as soon as
 * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
 * so they are released by the Send completion handler.
 */
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
				   struct svc_rdma_send_ctxt *ctxt)
{
	int i, pages = rqstp->rq_next_page - rqstp->rq_respages;

	ctxt->sc_page_count += pages;
	for (i = 0; i < pages; i++) {
		ctxt->sc_pages[i] = rqstp->rq_respages[i];
		rqstp->rq_respages[i] = NULL;
	}

	/* Prevent svc_xprt_release from releasing pages in rq_pages */
	rqstp->rq_next_page = rqstp->rq_respages;
}
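
/* Illustrative before/after of the page handoff above (not real code):
 *
 *	before:	rqstp->rq_respages = { P0, P1, P2 }
 *		ctxt->sc_pages     = { }
 *	after:	rqstp->rq_respages = { NULL, NULL, NULL }
 *		ctxt->sc_pages     = { P0, P1, P2 }
 *
 * svc_xprt_release() then finds nothing to free in rq_pages, and
 * svc_rdma_send_ctxt_put(), run at Send completion, does put_page()
 * on each entry in sc_pages.
 */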

/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * RDMA Send is the last step of transmitting an RPC reply. Pages
 * involved in the earlier RDMA Writes are here transferred out
 * of the rqstp and into the sctxt's page array. These pages are
 * DMA unmapped by each Write completion, but the subsequent Send
 * completion finally releases these pages.
 *
 * Assumptions:
 * - The Reply's transport header will never be larger than a page.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *sctxt,
				   const struct svc_rdma_recv_ctxt *rctxt,
				   struct svc_rqst *rqstp)
{
	int ret;

	ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqstp->rq_res);
	if (ret < 0)
		return ret;

	svc_rdma_save_io_pages(rqstp, sctxt);

	if (rctxt->rc_inv_rkey) {
		sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
		sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
	} else {
		sctxt->sc_send_wr.opcode = IB_WR_SEND;
	}
	return svc_rdma_send(rdma, sctxt);
}

/**
 * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response
 * @rdma: controlling transport context
 * @sctxt: Send context for the response
 * @rctxt: Receive context for incoming bad message
 * @status: negative errno indicating error that occurred
 *
 * Given the client-provided Read, Write, and Reply chunks, the
 * server was not able to parse the Call or form a complete Reply.
 * Return an RDMA_ERROR message so the client can retire the RPC
 * transaction.
 *
 * The caller does not have to release @sctxt. It is released by
 * Send completion, or by this function on error.
 */
void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
			     struct svc_rdma_send_ctxt *sctxt,
			     struct svc_rdma_recv_ctxt *rctxt,
			     int status)
{
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	__be32 *p;

	rpcrdma_set_xdrlen(&sctxt->sc_hdrbuf, 0);
	xdr_init_encode(&sctxt->sc_stream, &sctxt->sc_hdrbuf,
			sctxt->sc_xprt_buf, NULL);

	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto put_ctxt;

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = rdma_error;

	switch (status) {
	case -EPROTONOSUPPORT:
		p = xdr_reserve_space(&sctxt->sc_stream, 3 * sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p++ = err_vers;
		*p++ = rpcrdma_version;
		*p = rpcrdma_version;
		trace_svcrdma_err_vers(*rdma_argp);
		break;
	default:
		p = xdr_reserve_space(&sctxt->sc_stream, sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p = err_chunk;
		trace_svcrdma_err_chunk(*rdma_argp);
	}

	/* Remote Invalidation is skipped for simplicity. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_send_wr.opcode = IB_WR_SEND;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
	if (svc_rdma_send(rdma, sctxt))
		goto put_ctxt;
	return;

put_ctxt:
	svc_rdma_send_ctxt_put(rdma, sctxt);
}
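
/* For reference, the wire layout of the two RDMA_ERROR variants built
 * above (one XDR word per line; per RFC 8166):
 *
 *	xid			- copied from the Call
 *	vers			- copied from the Call
 *	credits			- rdma->sc_fc_credits
 *	RDMA_ERROR		- proc field carries rdma_error
 *	ERR_VERS | ERR_CHUNK	- which error occurred
 *	[low, high]		- supported version range (ERR_VERS only;
 *				  both words are rpcrdma_version here)
 */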

/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *	%0 if an RPC reply has been successfully posted,
 *	%-ENOMEM if a resource shortage occurred (connection is lost),
 *	%-ENOTCONN if posting failed (connection is lost).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	__be32 *wr_lst = rctxt->rc_write_list;
	__be32 *rp_ch = rctxt->rc_reply_chunk;
	struct xdr_buf *xdr = &rqstp->rq_res;
	struct svc_rdma_send_ctxt *sctxt;
	__be32 *p;
	int ret;

	ret = -ENOTCONN;
	if (svc_xprt_is_dead(xprt))
		goto err0;

	ret = -ENOMEM;
	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		goto err0;

	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto err0;
	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p   = rp_ch ? rdma_nomsg : rdma_msg;

	if (svc_rdma_encode_read_list(sctxt) < 0)
		goto err0;
	if (wr_lst) {
		/* XXX: Presume the client sent only one Write chunk */
		unsigned long offset;
		unsigned int length;

		if (rctxt->rc_read_payload_length) {
			offset = rctxt->rc_read_payload_offset;
			length = rctxt->rc_read_payload_length;
		} else {
			offset = xdr->head[0].iov_len;
			length = xdr->page_len;
		}
		ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr, offset,
						length);
		if (ret < 0)
			goto err2;
		if (svc_rdma_encode_write_list(rctxt, sctxt, length) < 0)
			goto err0;
	} else {
		if (xdr_stream_encode_item_absent(&sctxt->sc_stream) < 0)
			goto err0;
	}
	if (rp_ch) {
		ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res);
		if (ret < 0)
			goto err2;
		if (svc_rdma_encode_reply_chunk(rctxt, sctxt, ret) < 0)
			goto err0;
	} else {
		if (xdr_stream_encode_item_absent(&sctxt->sc_stream) < 0)
			goto err0;
	}

	ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
	if (ret < 0)
		goto err1;
	return 0;

 err2:
	if (ret != -E2BIG && ret != -EINVAL)
		goto err1;

	/* Send completion releases payload pages that were part
	 * of previously posted RDMA Writes.
	 */
	svc_rdma_save_io_pages(rqstp, sctxt);
	svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret);
	return 0;

 err1:
	svc_rdma_send_ctxt_put(rdma, sctxt);
 err0:
	trace_svcrdma_send_err(rqstp, ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	return -ENOTCONN;
}

/**
 * svc_rdma_result_payload - special processing for a result payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Returns zero on success.
 *
 * For the moment, just record the xdr_buf location of the result
 * payload. svc_rdma_sendto will use that location later when
 * we actually send the payload.
 */
int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset,
			    unsigned int length)
{
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;

	/* XXX: Just one READ payload slot for now, since our
	 * transport implementation currently supports only one
	 * Write chunk.
	 */
	rctxt->rc_read_payload_offset = offset;
	rctxt->rc_read_payload_length = length;

	return 0;
}