/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC/RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where the
 * interfaces to the Linux RPC framework live.
 */

#include "xprt_rdma.h"

#include <linux/highmem.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

enum rpcrdma_chunktype {
	rpcrdma_noch = 0,
	rpcrdma_readch,
	rpcrdma_areadch,
	rpcrdma_writech,
	rpcrdma_replych
};

#ifdef RPC_DEBUG
static const char transfertypes[][12] = {
	"pure inline",	/* no chunks */
	" read chunk",	/* some argument via rdma read */
	"*read chunk",	/* entire request via rdma read */
	"write chunk",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};
#endif

/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Prepare the passed-in xdr_buf for representation as RPC/RDMA chunk
 * elements. Segments are then coalesced when registered, if possible
 * within the selected memreg mode.
 *
 * Note, this routine is never called if the connection's memory
 * registration strategy is 0 (bounce buffers).
 */
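
/*
 * Illustrative sketch (commentary added here, not in the original):
 * with pos == 0, an xdr_buf with a 100-byte head, two pages of page
 * data and a non-empty tail converts to something like
 *
 *   seg[0] = { mr_page = NULL,     mr_offset = head.iov_base, mr_len = 100 }
 *   seg[1] = { mr_page = pages[0], mr_offset = page_base,     mr_len <= PAGE_SIZE }
 *   seg[2] = { mr_page = pages[1], mr_offset = NULL,          mr_len <= PAGE_SIZE }
 *   seg[3] = { mr_page = NULL,     mr_offset = tail.iov_base, mr_len = tail.iov_len }
 *
 * When pos != 0 the head iovec is skipped, since it travels inline.
 * Head and tail are kva-addressed; page data is described by struct
 * page pointers so it can be registered without copying.
 */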

static int
rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, int pos,
	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
{
	int len, n = 0, p;

	if (pos == 0 && xdrbuf->head[0].iov_len) {
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->head[0].iov_base;
		seg[n].mr_len = xdrbuf->head[0].iov_len;
		pos += xdrbuf->head[0].iov_len;
		++n;
	}

	if (xdrbuf->page_len && (xdrbuf->pages[0] != NULL)) {
		if (n == nsegs)
			return 0;
		seg[n].mr_page = xdrbuf->pages[0];
		seg[n].mr_offset = (void *)(unsigned long) xdrbuf->page_base;
		seg[n].mr_len = min_t(u32,
			PAGE_SIZE - xdrbuf->page_base, xdrbuf->page_len);
		len = xdrbuf->page_len - seg[n].mr_len;
		pos += len;
		++n;
		p = 1;
		while (len > 0) {
			if (n == nsegs)
				return 0;
			seg[n].mr_page = xdrbuf->pages[p];
			seg[n].mr_offset = NULL;
			seg[n].mr_len = min_t(u32, PAGE_SIZE, len);
			len -= seg[n].mr_len;
			++n;
			++p;
		}
	}

	if (pos < xdrbuf->len && xdrbuf->tail[0].iov_len) {
		if (n == nsegs)
			return 0;
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->tail[0].iov_base;
		seg[n].mr_len = xdrbuf->tail[0].iov_len;
		pos += xdrbuf->tail[0].iov_len;
		++n;
	}

	if (pos < xdrbuf->len)
		dprintk("RPC:       %s: marshaled only %d of %d\n",
				__func__, pos, xdrbuf->len);

	return n;
}

/*
 * Create read/write chunk lists, and reply chunks, for RDMA.
 *
 *   Assume the check against THRESHOLD has been done, and chunks are
 *     required.
 *   Assume only one list entry is encoded for read|write chunks. The
 *     NFSv3 protocol is simple enough to allow this, as it has only a
 *     single "bulk result" in each procedure; complicated NFSv4
 *     COMPOUNDs are not. (The RDMA/Sessions NFSv4 proposal addresses
 *     this for future v4 revs.)
 *
 * A single reply chunk (a special write chunk used for the entire
 * reply, rather than just the data) is used primarily for READDIR and
 * READLINK, which would otherwise be severely size-limited by a small
 * rdma inline read max. The server response will come back as an
 * RDMA Write, followed by a message of type RDMA_NOMSG carrying the
 * xid and length. As a result, reply chunks do not provide data
 * alignment; however, they do not require "fixup" (moving the
 * response to the upper layer buffer) either.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 */
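
/*
 * Worked example (illustrative, not from the original source): a read
 * chunk list describing two registered segments at XDR position 20
 * would be encoded on the wire as
 *
 *   0x00000001                 discriminator (a chunk follows)
 *   0x00000014                 position 20 in the XDR stream
 *   handle0, length0, offset0  HLOO for the first segment
 *   0x00000001                 discriminator (another chunk follows)
 *   0x00000014                 same position for the same argument
 *   handle1, length1, offset1  HLOO for the second segment
 *   0x00000000                 list terminator
 *
 * followed by the NULL write chunk list and NULL reply chunk, each a
 * single zero word, as emitted at the end of rpcrdma_create_chunks().
 */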

static unsigned int
rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
		struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_task->tk_xprt);
	int nsegs, nchunks = 0;
	int pos;
	struct rpcrdma_mr_seg *seg = req->rl_segments;
	struct rpcrdma_read_chunk *cur_rchunk = NULL;
	struct rpcrdma_write_array *warray = NULL;
	struct rpcrdma_write_chunk *cur_wchunk = NULL;
	u32 *iptr = headerp->rm_body.rm_chunks;

	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
		/* a read chunk - server will RDMA Read our memory */
		cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
	} else {
		/* a write or reply chunk - server will RDMA Write our memory */
		*iptr++ = xdr_zero;	/* encode a NULL read chunk list */
		if (type == rpcrdma_replych)
			*iptr++ = xdr_zero;	/* a NULL write chunk list */
		warray = (struct rpcrdma_write_array *) iptr;
		cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
	}

	if (type == rpcrdma_replych || type == rpcrdma_areadch)
		pos = 0;
	else
		pos = target->head[0].iov_len;

	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
	if (nsegs == 0)
		return 0;

	do {
		/* bind/register the memory, then build chunk from result. */
		int n = rpcrdma_register_external(seg, nsegs,
						cur_wchunk != NULL, r_xprt);
		if (n <= 0)
			goto out;
		if (cur_rchunk) {	/* read */
			cur_rchunk->rc_discrim = xdr_one;
			/* all read chunks have the same "position" */
			cur_rchunk->rc_position = htonl(pos);
			cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey);
			cur_rchunk->rc_target.rs_length = htonl(seg->mr_len);
			xdr_encode_hyper(
					(u32 *)&cur_rchunk->rc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC:       %s: read chunk "
				"elem %d@0x%llx:0x%x pos %d (%s)\n", __func__,
				seg->mr_len, seg->mr_base, seg->mr_rkey, pos,
				n < nsegs ? "more" : "last");
			cur_rchunk++;
			r_xprt->rx_stats.read_chunk_count++;
		} else {		/* write/reply */
			cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey);
			cur_wchunk->wc_target.rs_length = htonl(seg->mr_len);
			xdr_encode_hyper(
					(u32 *)&cur_wchunk->wc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC:       %s: %s chunk "
				"elem %d@0x%llx:0x%x (%s)\n", __func__,
				(type == rpcrdma_replych) ? "reply" : "write",
				seg->mr_len, seg->mr_base, seg->mr_rkey,
				n < nsegs ? "more" : "last");
			cur_wchunk++;
			if (type == rpcrdma_replych)
				r_xprt->rx_stats.reply_chunk_count++;
			else
				r_xprt->rx_stats.write_chunk_count++;
			r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		}
		nchunks++;
		seg   += n;
		nsegs -= n;
	} while (nsegs);

	/* success. all failures return above */
	req->rl_nchunks = nchunks;

	BUG_ON(nchunks == 0);

	/*
	 * finish off header. If write, marshal discrim and nchunks.
	 */
	if (cur_rchunk) {
		iptr = (u32 *) cur_rchunk;
		*iptr++ = xdr_zero;	/* finish the read chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
	} else {
		warray->wc_discrim = xdr_one;
		warray->wc_nchunks = htonl(nchunks);
		iptr = (u32 *) cur_wchunk;
		if (type == rpcrdma_writech) {
			*iptr++ = xdr_zero; /* finish the write chunk list */
			*iptr++ = xdr_zero; /* encode a NULL reply chunk */
		}
	}

	/*
	 * Return header size.
	 */
	return (unsigned char *)iptr - (unsigned char *)headerp;

out:
	for (pos = 0; nchunks--;)
		pos += rpcrdma_deregister_external(
				&req->rl_segments[pos], r_xprt, NULL);
	return 0;
}

/*
 * Copy write data inline.
 * This function is used for "small" requests. Data that is passed
 * to RPC via iovecs (or a page list) is copied directly into the
 * pre-registered memory buffer for this request. For small amounts
 * of data, this is efficient. The cutoff value is tunable.
 */
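/*
 * Illustrative layout (commentary added here, not in the original):
 * before the pullup, the send buffer looks like
 *
 *   rq_svec[0]: [RPC header]      rq_snd_buf.pages: [data][data]...
 *
 * and afterwards the page data (and any tail) has been appended to
 * the single pre-registered iovec:
 *
 *   rq_svec[0]: [RPC header][data][data]...[tail]
 *
 * so the whole message can be sent inline as one contiguous buffer.
 */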
static int
rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
{
	int i, npages, curlen;
	int copy_len;
	unsigned char *srcp, *destp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);

	destp = rqst->rq_svec[0].iov_base;
	curlen = rqst->rq_svec[0].iov_len;
	destp += curlen;
	/*
	 * Do optional padding where it makes sense. Alignment of write
	 * payload can help the server, if our setting is accurate.
	 */
	pad -= (curlen + 36/*sizeof(struct rpcrdma_msg_padded)*/);
	if (pad < 0 || rqst->rq_slen - curlen < RPCRDMA_INLINE_PAD_THRESH)
		pad = 0;	/* don't pad this request */

	dprintk("RPC:       %s: pad %d destp 0x%p len %d hdrlen %d\n",
		__func__, pad, destp, rqst->rq_slen, curlen);

	copy_len = rqst->rq_snd_buf.page_len;
	r_xprt->rx_stats.pullup_copy_count += copy_len;
	npages = PAGE_ALIGN(rqst->rq_snd_buf.page_base+copy_len) >> PAGE_SHIFT;
	for (i = 0; copy_len && i < npages; i++) {
		if (i == 0)
			curlen = PAGE_SIZE - rqst->rq_snd_buf.page_base;
		else
			curlen = PAGE_SIZE;
		if (curlen > copy_len)
			curlen = copy_len;
		dprintk("RPC:       %s: page %d destp 0x%p len %d curlen %d\n",
			__func__, i, destp, copy_len, curlen);
		srcp = kmap_atomic(rqst->rq_snd_buf.pages[i],
					KM_SKB_SUNRPC_DATA);
		if (i == 0)
			memcpy(destp, srcp+rqst->rq_snd_buf.page_base, curlen);
		else
			memcpy(destp, srcp, curlen);
		kunmap_atomic(srcp, KM_SKB_SUNRPC_DATA);
		rqst->rq_svec[0].iov_len += curlen;
		destp += curlen;
		copy_len -= curlen;
	}
	if (rqst->rq_snd_buf.tail[0].iov_len) {
		curlen = rqst->rq_snd_buf.tail[0].iov_len;
		if (destp != rqst->rq_snd_buf.tail[0].iov_base) {
			memcpy(destp,
				rqst->rq_snd_buf.tail[0].iov_base, curlen);
			r_xprt->rx_stats.pullup_copy_count += curlen;
		}
		dprintk("RPC:       %s: tail destp 0x%p len %d curlen %d\n",
			__func__, destp, copy_len, curlen);
		rqst->rq_svec[0].iov_len += curlen;
	}
	/* header now contains entire send message */
	return pad;
}

/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Uses multiple RDMA IOVs for a request:
 *  [0] -- RPC RDMA header, which uses memory from the *start* of the
 *         preregistered buffer that already holds the RPC data in
 *         its middle.
 *  [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
 *  [2] -- optional padding.
 *  [3] -- if padded, header only in [1] and data here.
 */
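
/*
 * For illustration (commentary added here, not in the original): a
 * padded send ends up with four gather entries, mirroring the map
 * above:
 *
 *   rl_send_iov[0] = { rl_iov.addr,          hdrlen,           rl_iov.lkey  }
 *   rl_send_iov[1] = { addr of RPC hdr/data, rpclen,           rl_iov.lkey  }
 *   rl_send_iov[2] = { rep_pad.addr,         padlen,           rep_pad.lkey }
 *   rl_send_iov[3] = { [1].addr + rpclen,    rq_slen - rpclen, rl_iov.lkey  }
 *
 * The unpadded case stops after [1] with rl_niovs = 2. See the iov
 * initialization near the end of rpcrdma_marshal_req() below.
 */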

int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_task->tk_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	char *base;
	size_t hdrlen, rpclen, padlen;
	enum rpcrdma_chunktype rtype, wtype;
	struct rpcrdma_msg *headerp;

	/*
	 * rpclen gets the amount of data in the first buffer, which is
	 * the pre-registered buffer.
	 */
	base = rqst->rq_svec[0].iov_base;
	rpclen = rqst->rq_svec[0].iov_len;

	/* build RDMA header in private area at front */
	headerp = (struct rpcrdma_msg *) req->rl_base;
	/* don't htonl XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = xdr_one;
	headerp->rm_credit = htonl(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = __constant_htonl(RDMA_MSG);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline (but see later).
	 * o Large non-read ops return as a single reply chunk.
	 * o Large read ops return data as write chunk(s), header as inline.
	 *
	 * Note: the NFS code sending down multiple result segments implies
	 * the op is one of read, readdir[plus], readlink or NFSv4 getacl.
	 */

	/*
	 * This code can handle read chunks, write chunks OR reply
	 * chunks -- only one type. If the request is too big to fit
	 * inline, then we will choose read chunks. If the request is
	 * a READ, then use write chunks to separate the file data
	 * into pages; otherwise use reply chunks.
	 */
	if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
		wtype = rpcrdma_noch;
	else if (rqst->rq_rcv_buf.page_len == 0)
		wtype = rpcrdma_replych;
	else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 *
	 * Note: the NFS code sending down multiple argument segments
	 * implies the op is a write.
	 * TBD check NFSv4 setacl
	 */
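	/*
	 * Commentary added for clarity (not in the original): the two
	 * if-chains above and below reduce to
	 *
	 *   wtype: noch    - expected reply fits inline
	 *          replych - large reply with no page data
	 *          writech - large reply with page data, op is a READ
	 *          replych - large reply, anything else
	 *
	 *   rtype: noch    - request fits inline
	 *          areadch - large request with no page data
	 *          readch  - large request with page data
	 */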
	if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
		rtype = rpcrdma_noch;
	else if (rqst->rq_snd_buf.page_len == 0)
		rtype = rpcrdma_areadch;
	else
		rtype = rpcrdma_readch;

	/* The following simplification is not true forever */
	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
		wtype = rpcrdma_noch;
	BUG_ON(rtype != rpcrdma_noch && wtype != rpcrdma_noch);

	if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_BOUNCEBUFFERS &&
	    (rtype != rpcrdma_noch || wtype != rpcrdma_noch)) {
		/* forced to "pure inline"? */
		dprintk("RPC:       %s: too much data (%d/%d) for inline\n",
			__func__, rqst->rq_rcv_buf.len, rqst->rq_snd_buf.len);
		return -1;
	}

	hdrlen = 28; /*sizeof *headerp;*/
	padlen = 0;

	/*
	 * Pull up any extra send data into the preregistered buffer.
	 * When padding is in use and applies to the transfer, insert
	 * it and change the message type.
	 */
	if (rtype == rpcrdma_noch) {

		padlen = rpcrdma_inline_pullup(rqst,
						RPCRDMA_INLINE_PAD_VALUE(rqst));

		if (padlen) {
			headerp->rm_type = __constant_htonl(RDMA_MSGP);
			headerp->rm_body.rm_padded.rm_align =
				htonl(RPCRDMA_INLINE_PAD_VALUE(rqst));
			headerp->rm_body.rm_padded.rm_thresh =
				__constant_htonl(RPCRDMA_INLINE_PAD_THRESH);
			headerp->rm_body.rm_padded.rm_pempty[0] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
			hdrlen += 2 * sizeof(u32); /* extra words in padhdr */
			BUG_ON(wtype != rpcrdma_noch);

		} else {
			headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
			/* new length after pullup */
			rpclen = rqst->rq_svec[0].iov_len;
			/*
			 * Currently we try to not actually use read inline.
			 * Reply chunks have the desirable property that
			 * they land, packed, directly in the target buffers
			 * without headers, so they require no fixup. The
			 * additional RDMA Write op sends the same amount
			 * of data, streams on-the-wire and adds no overhead
			 * on receive. Therefore, we request a reply chunk
			 * for non-writes wherever feasible and efficient.
			 */
			if (wtype == rpcrdma_noch &&
			    r_xprt->rx_ia.ri_memreg_strategy > RPCRDMA_REGISTER)
				wtype = rpcrdma_replych;
		}
	}

	/*
	 * Marshal chunks. This routine will return the header length
	 * consumed by marshaling.
	 */
	if (rtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst,
					&rqst->rq_snd_buf, headerp, rtype);
		wtype = rtype;	/* simplify dprintk */

	} else if (wtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst,
					&rqst->rq_rcv_buf, headerp, wtype);
	}

	if (hdrlen == 0)
		return -1;

	dprintk("RPC:       %s: %s: hdrlen %zd rpclen %zd padlen %zd\n"
		"                   headerp 0x%p base 0x%p lkey 0x%x\n",
		__func__, transfertypes[wtype], hdrlen, rpclen, padlen,
		headerp, base, req->rl_iov.lkey);

	/*
	 * initialize send_iov's - normally only two: rdma chunk header and
	 * single preregistered RPC header buffer, but if padding is present,
	 * then use a preregistered (and zeroed) pad buffer between the RPC
	 * header and any write data. In all non-rdma cases, any following
	 * data has been copied into the RPC header buffer.
	 */
	req->rl_send_iov[0].addr = req->rl_iov.addr;
	req->rl_send_iov[0].length = hdrlen;
	req->rl_send_iov[0].lkey = req->rl_iov.lkey;

	req->rl_send_iov[1].addr = req->rl_iov.addr + (base - req->rl_base);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = req->rl_iov.lkey;

	req->rl_niovs = 2;

	if (padlen) {
		struct rpcrdma_ep *ep = &r_xprt->rx_ep;

		req->rl_send_iov[2].addr = ep->rep_pad.addr;
		req->rl_send_iov[2].length = padlen;
		req->rl_send_iov[2].lkey = ep->rep_pad.lkey;

		req->rl_send_iov[3].addr = req->rl_send_iov[1].addr + rpclen;
		req->rl_send_iov[3].length = rqst->rq_slen - rpclen;
		req->rl_send_iov[3].lkey = req->rl_iov.lkey;

		req->rl_niovs = 4;
	}

	return 0;
}

/*
 * Chase down a received write or reply chunklist to get the length
 * RDMA'd by the server. See the map at rpcrdma_create_chunks()! :-)
 */
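/*
 * Decoding sketch (commentary added here, not in the original): a
 * reply carrying one write chunk of two segments arrives with *iptrp
 * pointing at the array count:
 *
 *   0x00000002                 wc_nchunks == 2
 *   handle0, length0, offset0  first segment (HLOO)
 *   handle1, length1, offset1  second segment (HLOO)
 *   0x00000000                 list terminator (checked when wrchunk != 0)
 *
 * The routine returns length0 + length1 and advances *iptrp past the
 * terminator so the caller can continue decoding the header.
 */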
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp)
{
	unsigned int i, total_len;
	struct rpcrdma_write_chunk *cur_wchunk;

	i = ntohl(**iptrp);	/* get array count */
	if (i > max)
		return -1;
	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	total_len = 0;
	while (i--) {
		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((u32 *)&seg->rs_offset, &off);
			dprintk("RPC:       %s: chunk %d@0x%llx:0x%x\n",
				__func__,
				ntohl(seg->rs_length),
				off,
				ntohl(seg->rs_handle));
		}
		total_len += ntohl(seg->rs_length);
		++cur_wchunk;
	}
	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		u32 *w = (u32 *) cur_wchunk;
		if (*w++ != xdr_zero)
			return -1;
		cur_wchunk = (struct rpcrdma_write_chunk *) w;
	}
	if ((char *) cur_wchunk > rep->rr_base + rep->rr_len)
		return -1;

	*iptrp = (u32 *) cur_wchunk;
	return total_len;
}

/*
 * Scatter inline received data back into the provided iovs.
 */
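/*
 * Illustrative note (commentary added here, not in the original): the
 * inline reply arrives as one contiguous buffer at srcp and is
 * scattered back in xdr_buf order:
 *
 *   [head.iov_len bytes] -> rq_rcv_buf.head  (by pointer shift, no copy)
 *   [page_len bytes]     -> rq_rcv_buf.pages (copied page by page)
 *   [remaining bytes]    -> rq_rcv_buf.tail  (copied unless already in place)
 *
 * Only the head segment avoids a copy; page and tail data account for
 * the fixup_copy_count statistic below.
 */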
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len)
{
	int i, npages, curlen, olen;
	char *destp;

	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len) {	/* write chunk header fixup */
		curlen = copy_len;
		rqst->rq_rcv_buf.head[0].iov_len = curlen;
	}

	dprintk("RPC:       %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);

	/* Shift pointer for first receive segment only */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	srcp += curlen;
	copy_len -= curlen;

	olen = copy_len;
	i = 0;
	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		npages = PAGE_ALIGN(rqst->rq_rcv_buf.page_base +
			rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
		for (; i < npages; i++) {
			if (i == 0)
				curlen = PAGE_SIZE - rqst->rq_rcv_buf.page_base;
			else
				curlen = PAGE_SIZE;
			if (curlen > copy_len)
				curlen = copy_len;
			dprintk("RPC:       %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(rqst->rq_rcv_buf.pages[i],
						KM_SKB_SUNRPC_DATA);
			if (i == 0)
				memcpy(destp + rqst->rq_rcv_buf.page_base,
						srcp, curlen);
			else
				memcpy(destp, srcp, curlen);
			flush_dcache_page(rqst->rq_rcv_buf.pages[i]);
			kunmap_atomic(destp, KM_SKB_SUNRPC_DATA);
			srcp += curlen;
			copy_len -= curlen;
			if (copy_len == 0)
				break;
		}
		rqst->rq_rcv_buf.page_len = olen - copy_len;
	} else
		rqst->rq_rcv_buf.page_len = 0;

	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
		curlen = copy_len;
		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
			memcpy(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
		dprintk("RPC:       %s: tail srcp 0x%p len %d curlen %d\n",
			__func__, srcp, copy_len, curlen);
		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
		copy_len -= curlen; ++i;
	} else
		rqst->rq_rcv_buf.tail[0].iov_len = 0;

	if (copy_len)
		dprintk("RPC:       %s: %d bytes in"
			" %d extra segments (%d lost)\n",
			__func__, olen, i, copy_len);

	/* TBD avoid a warning from call_decode() */
	rqst->rq_private_buf = rqst->rq_rcv_buf;
}

/*
 * This function is called when an async event that changes the
 * connection state is posted to the connection. All it does at this
 * point is mark the connection up or down; the RPC timers do the rest.
 */
void
rpcrdma_conn_func(struct rpcrdma_ep *ep)
{
	struct rpc_xprt *xprt = ep->rep_xprt;

	spin_lock_bh(&xprt->transport_lock);
	if (ep->rep_connected > 0) {
		if (!xprt_test_and_set_connected(xprt))
			xprt_wake_pending_tasks(xprt, 0);
	} else {
		if (xprt_test_and_clear_connected(xprt))
			xprt_wake_pending_tasks(xprt, ep->rep_connected);
	}
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * This function is called when a memory window unbind we are waiting
 * for completes. Just use rr_func (zeroed by upcall) to signal completion.
 */
static void
rpcrdma_unbind_func(struct rpcrdma_rep *rep)
{
	wake_up(&rep->rr_unbind);
}

/*
 * Called as a tasklet to do req/reply matching and complete a request.
 * Errors must result in the RPC task either being awakened, or
 * allowed to time out, so that the errors are discovered at that time.
 */
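/*
 * Overview (commentary added for clarity, not in the original):
 *
 *   1. Validate the receive: length sane, RPC/RDMA version correct.
 *   2. Look up the matching rpc_rqst by XID; repost the receive
 *      buffer if no request matches.
 *   3. Decode by message type: RDMA_MSG replies are inline (possibly
 *      preceded by write chunks), RDMA_NOMSG replies arrive entirely
 *      via a reply chunk; anything else is a bad header.
 *   4. Start deregistration of any memory windows, then complete the
 *      RPC with xprt_complete_rqst().
 */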
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpc_xprt *xprt = rep->rr_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	u32 *iptr;
	int i, rdmalen, status;

	/* Check status. If bad, signal disconnect and return rep to pool */
	if (rep->rr_len == ~0U) {
		rpcrdma_recv_buffer_put(rep);
		if (r_xprt->rx_ep.rep_connected == 1) {
			r_xprt->rx_ep.rep_connected = -EIO;
			rpcrdma_conn_func(&r_xprt->rx_ep);
		}
		return;
	}
	if (rep->rr_len < 28) {
		dprintk("RPC:       %s: short/invalid reply\n", __func__);
		goto repost;
	}
	headerp = (struct rpcrdma_msg *) rep->rr_base;
	if (headerp->rm_vers != xdr_one) {
		dprintk("RPC:       %s: invalid version %d\n",
			__func__, ntohl(headerp->rm_vers));
		goto repost;
	}

	/* Get XID and try for a match. */
	spin_lock(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (rqst == NULL) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC:       %s: reply 0x%p failed "
			"to match any request xid 0x%08x len %d\n",
			__func__, rep, headerp->rm_xid, rep->rr_len);
repost:
		r_xprt->rx_stats.bad_reply_count++;
		rep->rr_func = rpcrdma_reply_handler;
		if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
			rpcrdma_recv_buffer_put(rep);

		return;
	}

	/* get request object */
	req = rpcr_to_rdmar(rqst);

	dprintk("RPC:       %s: reply 0x%p completes request 0x%p\n"
		"                   RPC request 0x%p xid 0x%08x\n",
			__func__, rep, req, rqst, headerp->rm_xid);

	BUG_ON(!req || req->rl_reply);

	/* from here on, the reply is no longer an orphan */
	req->rl_reply = rep;

	/* check for expected message types */
	/* The order of some of these tests is important. */
	switch (headerp->rm_type) {
	case __constant_htonl(RDMA_MSG):
		/* never expect read chunks */
		/* never expect reply chunks (two ways to check) */
		/* never expect write chunks without having offered RDMA */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
		     req->rl_nchunks == 0))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in read reply */
			/* start at write chunk array count */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep,
						req->rl_nchunks, 1, &iptr);
			/* check for validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
		} else {
			/* else ordinary inline */
			iptr = (u32 *)((unsigned char *)headerp + 28);
			rep->rr_len -= 28; /*sizeof *headerp;*/
			status = rep->rr_len;
		}
		/* Fix up the rpc results for upper layer */
		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len);
		break;

	case __constant_htonl(RDMA_NOMSG):
		/* never expect read or write chunks, always reply chunks */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one ||
		    req->rl_nchunks == 0)
			goto badheader;
		iptr = (u32 *)((unsigned char *)headerp + 28);
		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk buffer already is the reply vector - no fixup. */
		status = rdmalen;
		break;

badheader:
	default:
		dprintk("%s: invalid rpcrdma reply header (type %d):"
				" chunks[012] == %d %d %d"
				" expected chunks <= %d\n",
				__func__, ntohl(headerp->rm_type),
				headerp->rm_body.rm_chunks[0],
				headerp->rm_body.rm_chunks[1],
				headerp->rm_body.rm_chunks[2],
				req->rl_nchunks);
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}

	/* If using mw bind, start the deregister process now. */
	/* (Note: if mr_free(), cannot perform it here, in tasklet context) */
	if (req->rl_nchunks) switch (r_xprt->rx_ia.ri_memreg_strategy) {
	case RPCRDMA_MEMWINDOWS:
		for (i = 0; req->rl_nchunks-- > 1;)
			i += rpcrdma_deregister_external(
				&req->rl_segments[i], r_xprt, NULL);
		/* Optionally wait (not here) for unbinds to complete */
		rep->rr_func = rpcrdma_unbind_func;
		(void) rpcrdma_deregister_external(&req->rl_segments[i],
						   r_xprt, rep);
		break;
	case RPCRDMA_MEMWINDOWS_ASYNC:
		for (i = 0; req->rl_nchunks--;)
			i += rpcrdma_deregister_external(&req->rl_segments[i],
							 r_xprt, NULL);
		break;
	default:
		break;
	}

	dprintk("RPC:       %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
			__func__, xprt, rqst, status);
	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock(&xprt->transport_lock);
}
869