/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

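/*
 * Receive path for the server-side RPC/RDMA transport. Each RDMA RECV
 * completion is unpacked into the svc_rqst argument buffer, and any
 * read-list chunks are pulled over from the client with RDMA READ work
 * requests before the request is handed up to the RPC layer.
 */
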
/*
 * Replace the pages in the rq_pages array with the pages from the SGE in
 * the RDMA_RECV completion. The SGL should contain full pages up until the
 * last one.
 */
static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
			       struct svc_rdma_op_ctxt *ctxt,
			       u32 byte_count)
{
	struct rpcrdma_msg *rmsgp;
	struct page *page;
	u32 bc;
	int sge_no;

	/* Swap the page in the SGE with the page in rq_pages */
	page = ctxt->pages[0];
	put_page(rqstp->rq_pages[0]);
	rqstp->rq_pages[0] = page;

	/* Set up the XDR head */
	rqstp->rq_arg.head[0].iov_base = page_address(page);
	rqstp->rq_arg.head[0].iov_len =
		min_t(size_t, byte_count, ctxt->sge[0].length);
	rqstp->rq_arg.len = byte_count;
	rqstp->rq_arg.buflen = byte_count;

	/* Compute bytes past head in the SGL */
	bc = byte_count - rqstp->rq_arg.head[0].iov_len;

	/* If data remains, store it in the pagelist */
	rqstp->rq_arg.page_len = bc;
	rqstp->rq_arg.page_base = 0;

	/* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data */
	rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base;
	if (rmsgp->rm_type == rdma_nomsg)
		rqstp->rq_arg.pages = &rqstp->rq_pages[0];
	else
		rqstp->rq_arg.pages = &rqstp->rq_pages[1];

	sge_no = 1;
	while (bc && sge_no < ctxt->count) {
		page = ctxt->pages[sge_no];
		put_page(rqstp->rq_pages[sge_no]);
		rqstp->rq_pages[sge_no] = page;
		bc -= min_t(u32, bc, ctxt->sge[sge_no].length);
		rqstp->rq_arg.buflen += ctxt->sge[sge_no].length;
		sge_no++;
	}
	rqstp->rq_respages = &rqstp->rq_pages[sge_no];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* If not all pages were used from the SGL, free the remaining ones */
	bc = sge_no;
	while (sge_no < ctxt->count) {
		page = ctxt->pages[sge_no++];
		put_page(page);
	}
	ctxt->count = bc;

	/* Set up tail */
	rqstp->rq_arg.tail[0].iov_base = NULL;
	rqstp->rq_arg.tail[0].iov_len = 0;
}

/* Issue an RDMA_READ using the local lkey to map the data sink */
int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
			struct svc_rqst *rqstp,
			struct svc_rdma_op_ctxt *head,
			int *page_no,
			u32 *page_offset,
			u32 rs_handle,
			u32 rs_length,
			u64 rs_offset,
			bool last)
{
	struct ib_send_wr read_wr;
	int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
	struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt);
	int ret, read, pno;
	u32 pg_off = *page_offset;
	u32 pg_no = *page_no;

	ctxt->direction = DMA_FROM_DEVICE;
	ctxt->read_hdr = head;
	pages_needed = min_t(int, pages_needed, xprt->sc_max_sge_rd);
	read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);

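	/* For each page of the data sink: take the page pointer from
	 * rq_arg.pages into the head context (head->count tracks how
	 * many whole pages head now owns) and DMA-map it for the device.
	 */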
	for (pno = 0; pno < pages_needed; pno++) {
		int len = min_t(int, rs_length, PAGE_SIZE - pg_off);

		head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no];
		head->arg.page_len += len;
		head->arg.len += len;
		if (!pg_off)
			head->count++;
		rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1];
		rqstp->rq_next_page = rqstp->rq_respages + 1;
		ctxt->sge[pno].addr =
			ib_dma_map_page(xprt->sc_cm_id->device,
					head->arg.pages[pg_no], pg_off,
					PAGE_SIZE - pg_off,
					DMA_FROM_DEVICE);
		ret = ib_dma_mapping_error(xprt->sc_cm_id->device,
					   ctxt->sge[pno].addr);
		if (ret)
			goto err;
		atomic_inc(&xprt->sc_dma_used);

		/* The lkey here is either a local dma lkey or a dma_mr lkey */
		ctxt->sge[pno].lkey = xprt->sc_dma_lkey;
		ctxt->sge[pno].length = len;
		ctxt->count++;

		/* adjust offset and wrap to next page if needed */
		pg_off += len;
		if (pg_off == PAGE_SIZE) {
			pg_off = 0;
			pg_no++;
		}
		rs_length -= len;
	}

	if (last && rs_length == 0)
		set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
	else
		clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);

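	/* Post a signaled RDMA READ for this part of the chunk; wr_id
	 * carries the ctxt so the completion handler can find it.
	 */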
	memset(&read_wr, 0, sizeof(read_wr));
	read_wr.wr_id = (unsigned long)ctxt;
	read_wr.opcode = IB_WR_RDMA_READ;
	ctxt->wr_op = read_wr.opcode;
	read_wr.send_flags = IB_SEND_SIGNALED;
	read_wr.wr.rdma.rkey = rs_handle;
	read_wr.wr.rdma.remote_addr = rs_offset;
	read_wr.sg_list = ctxt->sge;
	read_wr.num_sge = pages_needed;

	ret = svc_rdma_send(xprt, &read_wr);
	if (ret) {
		pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		goto err;
	}

	/* return current location in page array */
	*page_no = pg_no;
	*page_offset = pg_off;
	ret = read;
	atomic_inc(&rdma_stat_read);
	return ret;
 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 0);
	return ret;
}

/* Issue an RDMA_READ using an FRMR to map the data sink */
int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
			 struct svc_rqst *rqstp,
			 struct svc_rdma_op_ctxt *head,
			 int *page_no,
			 u32 *page_offset,
			 u32 rs_handle,
			 u32 rs_length,
			 u64 rs_offset,
			 bool last)
{
	struct ib_send_wr read_wr;
	struct ib_send_wr inv_wr;
	struct ib_send_wr fastreg_wr;
	u8 key;
	int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
	struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt);
	struct svc_rdma_fastreg_mr *frmr = svc_rdma_get_frmr(xprt);
	int ret, read, pno;
	u32 pg_off = *page_offset;
	u32 pg_no = *page_no;

	if (IS_ERR(frmr)) {
		/* Don't leak the context acquired above */
		svc_rdma_put_context(ctxt, 0);
		return -ENOMEM;
	}

	ctxt->direction = DMA_FROM_DEVICE;
	ctxt->frmr = frmr;
	pages_needed = min_t(int, pages_needed, xprt->sc_frmr_pg_list_len);
	read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);

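	/* Describe the sink buffer for fast registration: a virtually
	 * contiguous, locally writable range beginning at the kva of
	 * the first sink page.
	 */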
	frmr->kva = page_address(rqstp->rq_arg.pages[pg_no]);
	frmr->direction = DMA_FROM_DEVICE;
	frmr->access_flags = (IB_ACCESS_LOCAL_WRITE|IB_ACCESS_REMOTE_WRITE);
	frmr->map_len = pages_needed << PAGE_SHIFT;
	frmr->page_list_len = pages_needed;

	for (pno = 0; pno < pages_needed; pno++) {
		int len = min_t(int, rs_length, PAGE_SIZE - pg_off);

		head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no];
		head->arg.page_len += len;
		head->arg.len += len;
		if (!pg_off)
			head->count++;
		rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1];
		rqstp->rq_next_page = rqstp->rq_respages + 1;
		frmr->page_list->page_list[pno] =
			ib_dma_map_page(xprt->sc_cm_id->device,
					head->arg.pages[pg_no], 0,
					PAGE_SIZE, DMA_FROM_DEVICE);
		ret = ib_dma_mapping_error(xprt->sc_cm_id->device,
					   frmr->page_list->page_list[pno]);
		if (ret)
			goto err;
		atomic_inc(&xprt->sc_dma_used);

		/* adjust offset and wrap to next page if needed */
		pg_off += len;
		if (pg_off == PAGE_SIZE) {
			pg_off = 0;
			pg_no++;
		}
		rs_length -= len;
	}

	if (last && rs_length == 0)
		set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
	else
		clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);

	/* Bump the key so each registration of this MR uses a fresh rkey */
	key = (u8)(frmr->mr->lkey & 0x000000FF);
	ib_update_fast_reg_key(frmr->mr, ++key);

	ctxt->sge[0].addr = (unsigned long)frmr->kva + *page_offset;
	ctxt->sge[0].lkey = frmr->mr->lkey;
	ctxt->sge[0].length = read;
	ctxt->count = 1;
	ctxt->read_hdr = head;

	/* Prepare FASTREG WR */
	memset(&fastreg_wr, 0, sizeof(fastreg_wr));
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.send_flags = IB_SEND_SIGNALED;
	fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva;
	fastreg_wr.wr.fast_reg.page_list = frmr->page_list;
	fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len;
	fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fastreg_wr.wr.fast_reg.length = frmr->map_len;
	fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags;
	fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey;
	fastreg_wr.next = &read_wr;

	/* Prepare RDMA_READ */
	memset(&read_wr, 0, sizeof(read_wr));
	read_wr.send_flags = IB_SEND_SIGNALED;
	read_wr.wr.rdma.rkey = rs_handle;
	read_wr.wr.rdma.remote_addr = rs_offset;
	read_wr.sg_list = ctxt->sge;
	read_wr.num_sge = 1;
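	/* A device that supports read-with-invalidate can invalidate
	 * the FRMR as part of the READ itself; otherwise a fenced
	 * LOCAL_INV must be chained behind the READ.
	 */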
	if (xprt->sc_dev_caps & SVCRDMA_DEVCAP_READ_W_INV) {
		read_wr.opcode = IB_WR_RDMA_READ_WITH_INV;
		read_wr.wr_id = (unsigned long)ctxt;
		read_wr.ex.invalidate_rkey = ctxt->frmr->mr->lkey;
	} else {
		read_wr.opcode = IB_WR_RDMA_READ;
		read_wr.next = &inv_wr;
		/* Prepare invalidate */
		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.wr_id = (unsigned long)ctxt;
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_FENCE;
		inv_wr.ex.invalidate_rkey = frmr->mr->lkey;
	}
	ctxt->wr_op = read_wr.opcode;

	/* Post the chain */
	ret = svc_rdma_send(xprt, &fastreg_wr);
	if (ret) {
		pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		goto err;
	}

	/* return current location in page array */
	*page_no = pg_no;
	*page_offset = pg_off;
	ret = read;
	atomic_inc(&rdma_stat_read);
	return ret;
 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 0);
	svc_rdma_put_frmr(xprt, frmr);
	return ret;
}

/* Count the chunks in a read list; the list ends at the first entry
 * whose discriminator is xdr_zero.
 */
static unsigned int
rdma_rcl_chunk_count(struct rpcrdma_read_chunk *ch)
{
	unsigned int count;

	for (count = 0; ch->rc_discrim != xdr_zero; ch++)
		count++;
	return count;
}

/* If there was additional inline content, append it to the end of arg.pages.
 * Tail copy has to be done after the reader function has determined how many
 * pages are needed for RDMA READ.
 */
static int
rdma_copy_tail(struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head,
	       u32 position, u32 byte_count, u32 page_offset, int page_no)
{
	char *srcp, *destp;

	srcp = head->arg.head[0].iov_base + position;
	byte_count = head->arg.head[0].iov_len - position;
	if (byte_count > PAGE_SIZE) {
		dprintk("svcrdma: large tail unsupported\n");
		return 0;
	}

	/* Fit as much of the tail on the current page as possible */
	if (page_offset != PAGE_SIZE) {
		destp = page_address(rqstp->rq_arg.pages[page_no]);
		destp += page_offset;
		while (byte_count--) {
			*destp++ = *srcp++;
			page_offset++;
			if (page_offset == PAGE_SIZE && byte_count)
				goto more;
		}
		goto done;
	}

more:
	/* Fit the rest on the next page */
	page_no++;
	destp = page_address(rqstp->rq_arg.pages[page_no]);
	while (byte_count--)
		*destp++ = *srcp++;

	rqstp->rq_respages = &rqstp->rq_arg.pages[page_no+1];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

done:
	byte_count = head->arg.head[0].iov_len - position;
	head->arg.page_len += byte_count;
	head->arg.len += byte_count;
	head->arg.buflen += byte_count;
	return 1;
}

static int rdma_read_chunks(struct svcxprt_rdma *xprt,
			    struct rpcrdma_msg *rmsgp,
			    struct svc_rqst *rqstp,
			    struct svc_rdma_op_ctxt *head)
{
	int page_no, ret;
	struct rpcrdma_read_chunk *ch;
	u32 handle, page_offset, byte_count;
	u32 position;
	u64 rs_offset;
	bool last;

	/* If no read list is present, return 0 */
	ch = svc_rdma_get_read_chunk(rmsgp);
	if (!ch)
		return 0;

	if (rdma_rcl_chunk_count(ch) > RPCSVC_MAXPAGES)
		return -EINVAL;

	/* The request is completed when the RDMA_READs complete. The
	 * head context keeps all the pages that comprise the
	 * request.
	 */
	head->arg.head[0] = rqstp->rq_arg.head[0];
	head->arg.tail[0] = rqstp->rq_arg.tail[0];
	head->hdr_count = head->count;
	head->arg.page_base = 0;
	head->arg.page_len = 0;
	head->arg.len = rqstp->rq_arg.len;
	head->arg.buflen = rqstp->rq_arg.buflen;

	ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
	position = be32_to_cpu(ch->rc_position);

	/* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data */
	if (position == 0) {
		head->arg.pages = &head->pages[0];
		page_offset = head->byte_len;
	} else {
		head->arg.pages = &head->pages[head->count];
		page_offset = 0;
	}

	ret = 0;
	page_no = 0;
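	/* Walk the read list. A chunk may take several sc_reader calls,
	 * since each RDMA READ maps a bounded number of pages; the
	 * reader returns how many bytes it consumed.
	 */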
	for (; ch->rc_discrim != xdr_zero; ch++) {
		if (be32_to_cpu(ch->rc_position) != position)
			goto err;

		handle = be32_to_cpu(ch->rc_target.rs_handle);
		byte_count = be32_to_cpu(ch->rc_target.rs_length);
		xdr_decode_hyper((__be32 *)&ch->rc_target.rs_offset,
				 &rs_offset);

		while (byte_count > 0) {
			last = (ch + 1)->rc_discrim == xdr_zero;
			ret = xprt->sc_reader(xprt, rqstp, head,
					      &page_no, &page_offset,
					      handle, byte_count,
					      rs_offset, last);
			if (ret < 0)
				goto err;
			byte_count -= ret;
			rs_offset += ret;
			head->arg.buflen += ret;
		}
	}

	/* Read list may need XDR round-up (see RFC 5666, s. 3.7) */
	if (page_offset & 3) {
		u32 pad = 4 - (page_offset & 3);

		head->arg.page_len += pad;
		head->arg.len += pad;
		head->arg.buflen += pad;
		page_offset += pad;
	}

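	/* A chunk at a non-zero XDR position splits the inline message;
	 * the inline bytes that followed it must be copied in behind
	 * the READ data (see rdma_copy_tail above).
	 */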
	ret = 1;
	if (position && position < head->arg.head[0].iov_len)
		ret = rdma_copy_tail(rqstp, head, position,
				     byte_count, page_offset, page_no);
	head->arg.head[0].iov_len = position;
	head->position = position;

 err:
	/* Detach arg pages. svc_recv will replenish them */
	for (page_no = 0;
	     &rqstp->rq_pages[page_no] < rqstp->rq_respages; page_no++)
		rqstp->rq_pages[page_no] = NULL;

	return ret;
}

static int rdma_read_complete(struct svc_rqst *rqstp,
			      struct svc_rdma_op_ctxt *head)
{
	int page_no;
	int ret;

	/* Copy RPC pages */
	for (page_no = 0; page_no < head->count; page_no++) {
		put_page(rqstp->rq_pages[page_no]);
		rqstp->rq_pages[page_no] = head->pages[page_no];
	}

	/* Adjustments made for RDMA_NOMSG type requests */
	if (head->position == 0) {
		if (head->arg.len <= head->sge[0].length) {
			head->arg.head[0].iov_len = head->arg.len -
							head->byte_len;
			head->arg.page_len = 0;
		} else {
			head->arg.head[0].iov_len = head->sge[0].length -
								head->byte_len;
			head->arg.page_len = head->arg.len -
						head->sge[0].length;
		}
	}

	/* Point rq_arg.pages past header */
	rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count];
	rqstp->rq_arg.page_len = head->arg.page_len;
	rqstp->rq_arg.page_base = head->arg.page_base;

	/* rq_respages starts after the last arg page */
	rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* Rebuild rq_arg head and tail. */
	rqstp->rq_arg.head[0] = head->arg.head[0];
	rqstp->rq_arg.tail[0] = head->arg.tail[0];
	rqstp->rq_arg.len = head->arg.len;
	rqstp->rq_arg.buflen = head->arg.buflen;

	/* Free the context */
	svc_rdma_put_context(head, 0);

	/* XXX: What should this be? */
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, rqstp->rq_xprt);

	ret = rqstp->rq_arg.head[0].iov_len
		+ rqstp->rq_arg.page_len
		+ rqstp->rq_arg.tail[0].iov_len;
	dprintk("svcrdma: deferred read ret=%d, rq_arg.len=%u, "
		"rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zu\n",
		ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base,
		rqstp->rq_arg.head[0].iov_len);

	return ret;
}

/*
 * Set up the rqstp thread context to point to the RQ buffer. If
 * necessary, pull additional data from the client with an RDMA_READ
 * request.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct rpcrdma_msg *rmsgp;
	int ret = 0;
	int len;

	dprintk("svcrdma: rqstp=%p\n", rqstp);

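	/* Prefer requests whose RDMA READs have completed; otherwise
	 * dequeue a newly received message. If both queues are empty,
	 * clear XPT_DATA and let svc_recv() retry.
	 */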
	spin_lock_bh(&rdma_xprt->sc_rq_dto_lock);
	if (!list_empty(&rdma_xprt->sc_read_complete_q)) {
		ctxt = list_entry(rdma_xprt->sc_read_complete_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
		return rdma_read_complete(rqstp, ctxt);
	} else if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
		ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
	} else {
		atomic_inc(&rdma_stat_rq_starve);
		clear_bit(XPT_DATA, &xprt->xpt_flags);
		ctxt = NULL;
	}
	spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
	if (!ctxt) {
		/* This is the EAGAIN path. The svc_recv routine will
		 * return -EAGAIN, the nfsd thread will call svc_recv
		 * again, and we should not remain on the active
		 * transport list.
		 */
		if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
			goto close_out;

		goto out;
	}
	dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
		ctxt, rdma_xprt, rqstp, ctxt->wc_status);
	atomic_inc(&rdma_stat_recv);

	/* Build up the XDR from the receive buffers. */
	rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);

	/* Decode the RDMA header. */
	len = svc_rdma_xdr_decode_req(&rmsgp, rqstp);
	rqstp->rq_xprt_hlen = len;

	/* If the request is invalid, reply with an error */
	if (len < 0) {
		if (len == -ENOSYS)
			svc_rdma_send_error(rdma_xprt, rmsgp, ERR_VERS);
		goto close_out;
	}

	/* Read read-list data. */
	ret = rdma_read_chunks(rdma_xprt, rmsgp, rqstp, ctxt);
	if (ret > 0) {
		/* read-list posted, defer until data received from client. */
		goto defer;
	} else if (ret < 0) {
		/* Post of read-list failed, free context. */
		svc_rdma_put_context(ctxt, 1);
		return 0;
	}

	ret = rqstp->rq_arg.head[0].iov_len
		+ rqstp->rq_arg.page_len
		+ rqstp->rq_arg.tail[0].iov_len;
	svc_rdma_put_context(ctxt, 0);
 out:
	dprintk("svcrdma: ret=%d, rq_arg.len=%u, "
		"rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zd\n",
		ret, rqstp->rq_arg.len,
		rqstp->rq_arg.head[0].iov_base,
		rqstp->rq_arg.head[0].iov_len);
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	return ret;

 close_out:
	if (ctxt)
		svc_rdma_put_context(ctxt, 1);
	dprintk("svcrdma: transport %p is closing\n", xprt);
	/*
	 * Set the close bit and enqueue it. svc_recv will see the
	 * close bit and call svc_xprt_delete
	 */
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
defer:
	return 0;
}
663