/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

/* Encode an XDR buffer as an array of IB SGEs
 *
 * Assumptions:
 * - head[0] is physically contiguous.
 * - tail[0] is physically contiguous.
 * - pages[] is not physically or virtually contiguous and consists of
 *   PAGE_SIZE elements.
 *
 * Output:
 * SGE[0]              reserved for the RPCRDMA header
 * SGE[1]              data from xdr->head[]
 * SGE[2..sge_count-2] data from xdr->pages[]
 * SGE[sge_count-1]    data from xdr->tail[]
 *
 * The maximum number of SGEs we need is the length of the XDR divided
 * by the page size, plus one for the head, one for the tail, and one
 * for the RPCRDMA header. Since RPCSVC_MAXPAGES reserves a page for
 * both the request and the reply header, and this array is only
 * concerned with the reply, we are assured that we have one extra page
 * for the RPCRDMA header.
 */
static int fast_reg_xdr(struct svcxprt_rdma *xprt,
			struct xdr_buf *xdr,
			struct svc_rdma_req_map *vec)
{
	int sge_no;
	u32 sge_bytes;
	u32 page_bytes;
	u32 page_off;
	int page_no = 0;
	u8 *frva;
	struct svc_rdma_fastreg_mr *frmr;

	frmr = svc_rdma_get_frmr(xprt);
	if (IS_ERR(frmr))
		return -ENOMEM;
	vec->frmr = frmr;

	/* Skip the RPCRDMA header */
	sge_no = 1;

	/* Record the head in the sge vector */
	frva = (void *)((unsigned long)(xdr->head[0].iov_base) & PAGE_MASK);
	vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
	vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
	vec->count = 2;
	sge_no++;

	/* Map the page containing the XDR head */
	frmr->kva = frva;
	frmr->direction = DMA_TO_DEVICE;
	frmr->access_flags = 0;
	frmr->map_len = PAGE_SIZE;
	frmr->page_list_len = 1;
	page_off = (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
	frmr->page_list->page_list[page_no] =
		ib_dma_map_page(xprt->sc_cm_id->device,
				virt_to_page(xdr->head[0].iov_base),
				page_off,
				PAGE_SIZE - page_off,
				DMA_TO_DEVICE);
	if (ib_dma_mapping_error(xprt->sc_cm_id->device,
				 frmr->page_list->page_list[page_no]))
		goto fatal_err;
	atomic_inc(&xprt->sc_dma_used);
	/* Map the XDR page list */
	page_off = xdr->page_base;
	page_bytes = xdr->page_len + page_off;
	if (!page_bytes)
		goto encode_tail;

	/* Map the pages */
	vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;
	vec->sge[sge_no].iov_len = page_bytes;
	sge_no++;
	while (page_bytes) {
		struct page *page;

		page = xdr->pages[page_no++];
		sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
		page_bytes -= sge_bytes;

		frmr->page_list->page_list[page_no] =
			ib_dma_map_page(xprt->sc_cm_id->device,
					page, page_off,
					sge_bytes, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
					 frmr->page_list->page_list[page_no]))
			goto fatal_err;

		atomic_inc(&xprt->sc_dma_used);
		page_off = 0; /* reset for next time through loop */
		frmr->map_len += PAGE_SIZE;
		frmr->page_list_len++;
	}
	vec->count++;

 encode_tail:
	/* Map tail */
	if (0 == xdr->tail[0].iov_len)
		goto done;

	vec->count++;
	vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;

	if (((unsigned long)xdr->tail[0].iov_base & PAGE_MASK) ==
	    ((unsigned long)xdr->head[0].iov_base & PAGE_MASK)) {
		/*
		 * If head and tail use the same page, we don't need
		 * to map it again.
		 */
		vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
	} else {
		void *va;

		/* Map another page for the tail */
		page_off = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK;
		va = (void *)((unsigned long)xdr->tail[0].iov_base & PAGE_MASK);
		vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;

		frmr->page_list->page_list[page_no] =
		    ib_dma_map_page(xprt->sc_cm_id->device, virt_to_page(va),
				    page_off,
				    PAGE_SIZE,
				    DMA_TO_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
					 frmr->page_list->page_list[page_no]))
			goto fatal_err;
		atomic_inc(&xprt->sc_dma_used);
		frmr->map_len += PAGE_SIZE;
		frmr->page_list_len++;
	}

 done:
	if (svc_rdma_fastreg(xprt, frmr))
		goto fatal_err;

	return 0;

 fatal_err:
	printk(KERN_ERR
	       "svcrdma: Error fast registering memory for xprt %p\n", xprt);
	vec->frmr = NULL;
	svc_rdma_put_frmr(xprt, frmr);
	return -EIO;
}

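/* Build a request map describing the reply xdr_buf as an array of
 * iovecs. sge[0] is left for the RPCRDMA header; the head, page list,
 * and tail follow. If the transport supports fast registration
 * (sc_frmr_pg_list_len != 0), the work is handed off to fast_reg_xdr()
 * instead.
 */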
static int map_xdr(struct svcxprt_rdma *xprt,
		   struct xdr_buf *xdr,
		   struct svc_rdma_req_map *vec)
{
	int sge_no;
	u32 sge_bytes;
	u32 page_bytes;
	u32 page_off;
	int page_no;

	BUG_ON(xdr->len !=
	       (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len));

	if (xprt->sc_frmr_pg_list_len)
		return fast_reg_xdr(xprt, xdr, vec);

	/* Skip the first sge, this is for the RPCRDMA header */
	sge_no = 1;

	/* Head SGE */
	vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
	vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
	sge_no++;

	/* pages SGE */
	page_no = 0;
	page_bytes = xdr->page_len;
	page_off = xdr->page_base;
	while (page_bytes) {
		vec->sge[sge_no].iov_base =
			page_address(xdr->pages[page_no]) + page_off;
		sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
		page_bytes -= sge_bytes;
		vec->sge[sge_no].iov_len = sge_bytes;

		sge_no++;
		page_no++;
		page_off = 0; /* reset for next time through loop */
	}

	/* Tail SGE */
	if (xdr->tail[0].iov_len) {
		vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
		vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
		sge_no++;
	}

	dprintk("svcrdma: map_xdr: sge_no %d page_no %d "
		"page_base %u page_len %u head_len %zu tail_len %zu\n",
		sge_no, page_no, xdr->page_base, xdr->page_len,
		xdr->head[0].iov_len, xdr->tail[0].iov_len);

	vec->count = sge_no;
	return 0;
}

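/* Translate an offset into the reply xdr_buf to the page backing it
 * (head, page list, or tail) and DMA-map that page for the device.
 * At most one page worth of data is mapped per call.
 */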
static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
			      struct xdr_buf *xdr,
			      u32 xdr_off, size_t len, int dir)
{
	struct page *page;
	dma_addr_t dma_addr;

	if (xdr_off < xdr->head[0].iov_len) {
		/* This offset is in the head */
		xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
		page = virt_to_page(xdr->head[0].iov_base);
	} else {
		xdr_off -= xdr->head[0].iov_len;
		if (xdr_off < xdr->page_len) {
			/* This offset is in the page list */
			xdr_off += xdr->page_base;
			page = xdr->pages[xdr_off >> PAGE_SHIFT];
			xdr_off &= ~PAGE_MASK;
		} else {
			/* This offset is in the tail */
			xdr_off -= xdr->page_len;
			xdr_off += (unsigned long)
				xdr->tail[0].iov_base & ~PAGE_MASK;
			page = virt_to_page(xdr->tail[0].iov_base);
		}
	}
	dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
				   min_t(size_t, PAGE_SIZE, len), dir);
	return dma_addr;
}

/* Assumptions:
 * - We are using FRMR
 *     - or -
 * - The specified write_len fits within sc_max_sge * PAGE_SIZE bytes
 */
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
		      u32 rmr, u64 to,
		      u32 xdr_off, int write_len,
		      struct svc_rdma_req_map *vec)
{
	struct ib_send_wr write_wr;
	struct ib_sge *sge;
	int xdr_sge_no;
	int sge_no;
	int sge_bytes;
	int sge_off;
	int bc;
	struct svc_rdma_op_ctxt *ctxt;

	BUG_ON(vec->count > RPCSVC_MAXPAGES);
	dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
		"write_len=%d, vec->sge=%p, vec->count=%lu\n",
		rmr, (unsigned long long)to, xdr_off,
		write_len, vec->sge, vec->count);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_TO_DEVICE;
	sge = ctxt->sge;

	/* Find the SGE associated with xdr_off */
	for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
	     xdr_sge_no++) {
		if (vec->sge[xdr_sge_no].iov_len > bc)
			break;
		bc -= vec->sge[xdr_sge_no].iov_len;
	}

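	/* bc is now the byte offset of xdr_off within vec->sge[xdr_sge_no];
	 * remember it as sge_off, then reuse bc as the count of bytes
	 * remaining to be written.
	 */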
	sge_off = bc;
	bc = write_len;
	sge_no = 0;

	/* Copy the remaining SGE */
	while (bc != 0) {
		sge_bytes = min_t(size_t, bc,
				  vec->sge[xdr_sge_no].iov_len - sge_off);
		sge[sge_no].length = sge_bytes;
		if (!vec->frmr) {
			sge[sge_no].addr =
				dma_map_xdr(xprt, &rqstp->rq_res, xdr_off,
					    sge_bytes, DMA_TO_DEVICE);
			xdr_off += sge_bytes;
			if (ib_dma_mapping_error(xprt->sc_cm_id->device,
						 sge[sge_no].addr))
				goto err;
			atomic_inc(&xprt->sc_dma_used);
			sge[sge_no].lkey = xprt->sc_dma_lkey;
		} else {
			sge[sge_no].addr = (unsigned long)
				vec->sge[xdr_sge_no].iov_base + sge_off;
			sge[sge_no].lkey = vec->frmr->mr->lkey;
		}
		ctxt->count++;
		ctxt->frmr = vec->frmr;
		sge_off = 0;
		sge_no++;
		xdr_sge_no++;
		BUG_ON(xdr_sge_no > vec->count);
		bc -= sge_bytes;
	}

	/* Prepare WRITE WR */
	memset(&write_wr, 0, sizeof write_wr);
	ctxt->wr_op = IB_WR_RDMA_WRITE;
	write_wr.wr_id = (unsigned long)ctxt;
	write_wr.sg_list = &sge[0];
	write_wr.num_sge = sge_no;
	write_wr.opcode = IB_WR_RDMA_WRITE;
	write_wr.send_flags = IB_SEND_SIGNALED;
	write_wr.wr.rdma.rkey = rmr;
	write_wr.wr.rdma.remote_addr = to;

	/* Post It */
	atomic_inc(&rdma_stat_write);
	if (svc_rdma_send(xprt, &write_wr))
		goto err;
	return 0;
 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_frmr(xprt, vec->frmr);
	svc_rdma_put_context(ctxt, 0);
	/* Fatal error, close transport */
	return -EIO;
}

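/* Walk the write list provided by the client, pushing the pagelist and
 * tail of the reply into the client's chunks with RDMA_WRITE, and
 * encode the write list actually used into the reply header. On
 * success, returns the number of bytes that no longer need to be sent
 * inline (the reply's pagelist plus tail); on error, returns a
 * negative errno.
 */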
static int send_write_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_msg *rdma_argp,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
	int write_len;
	int max_write;
	u32 xdr_off;
	int chunk_off;
	int chunk_no;
	struct rpcrdma_write_array *arg_ary;
	struct rpcrdma_write_array *res_ary;
	int ret;

	arg_ary = svc_rdma_get_write_array(rdma_argp);
	if (!arg_ary)
		return 0;
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[1];

	if (vec->frmr)
		max_write = vec->frmr->map_len;
	else
		max_write = xprt->sc_max_sge * PAGE_SIZE;

	/* Write chunks start at the pagelist */
	for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
	     xfer_len && chunk_no < ntohl(arg_ary->wc_nchunks);
	     chunk_no++) {
		struct rpcrdma_segment *arg_ch;
		u64 rs_offset;

		arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, ntohl(arg_ch->rs_length));

		/* Prepare the response chunk given the length actually
		 * written */
		xdr_decode_hyper((__be32 *)&arg_ch->rs_offset, &rs_offset);
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						arg_ch->rs_handle,
						arg_ch->rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			int this_write;

			this_write = min(write_len, max_write);
			ret = send_write(xprt, rqstp,
					 ntohl(arg_ch->rs_handle),
					 rs_offset + chunk_off,
					 xdr_off,
					 this_write,
					 vec);
			if (ret) {
				dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
					ret);
				return -EIO;
			}
			chunk_off += this_write;
			xdr_off += this_write;
			xfer_len -= this_write;
			write_len -= this_write;
		}
	}
	/* Update the reply with the number of chunks actually used */
	svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);

	return rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
}

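/* Walk the reply array provided by the client, pushing the entire RPC
 * reply into the client's reply chunk with RDMA_WRITE, and encode the
 * reply array actually used into the reply header. On success, returns
 * rq_res.len, so the caller sends only the RPCRDMA header inline; on
 * error, returns a negative errno.
 */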
static int send_reply_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_msg *rdma_argp,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.len;
	int write_len;
	int max_write;
	u32 xdr_off;
	int chunk_no;
	int chunk_off;
	int nchunks;
	struct rpcrdma_segment *ch;
	struct rpcrdma_write_array *arg_ary;
	struct rpcrdma_write_array *res_ary;
	int ret;

	arg_ary = svc_rdma_get_reply_array(rdma_argp);
	if (!arg_ary)
		return 0;
	/* XXX: need to fix when reply lists occur with read-list and/or
	 * write-list */
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[2];

	if (vec->frmr)
		max_write = vec->frmr->map_len;
	else
		max_write = xprt->sc_max_sge * PAGE_SIZE;

	/* xdr offset starts at RPC message */
	nchunks = ntohl(arg_ary->wc_nchunks);
	for (xdr_off = 0, chunk_no = 0;
	     xfer_len && chunk_no < nchunks;
	     chunk_no++) {
		u64 rs_offset;
		ch = &arg_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, ntohl(ch->rs_length));

		/* Prepare the reply chunk given the length actually
		 * written */
		xdr_decode_hyper((__be32 *)&ch->rs_offset, &rs_offset);
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						ch->rs_handle, ch->rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			int this_write;

			this_write = min(write_len, max_write);
			ret = send_write(xprt, rqstp,
					 ntohl(ch->rs_handle),
					 rs_offset + chunk_off,
					 xdr_off,
					 this_write,
					 vec);
			if (ret) {
				dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
					ret);
				return -EIO;
			}
			chunk_off += this_write;
			xdr_off += this_write;
			xfer_len -= this_write;
			write_len -= this_write;
		}
	}
	/* Update the reply with the number of chunks actually used */
	svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);

	return rqstp->rq_res.len;
}

/* This function prepares the portion of the RPCRDMA message to be
 * sent in the RDMA_SEND. It is called after any data to be sent via
 * RDMA_WRITE has already been transmitted. There are three cases:
 * - The RPCRDMA header, RPC header, and payload are all sent in a
 *   single RDMA_SEND. This is the "inline" case.
 * - The RPCRDMA header and some portion of the RPC header and data
 *   are sent via this RDMA_SEND and another portion of the data is
 *   sent via RDMA_WRITE.
 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
 *   header and data are all transmitted via RDMA_WRITE.
 * In all three cases, this function prepares the RPCRDMA header in
 * sge[0]; the reply type (RDMA_MSG or RDMA_NOMSG) has already been
 * encoded in rdma_resp by the caller, and the 'byte_count' parameter
 * indicates how much of the XDR to include in this RDMA_SEND. NB: The
 * offset of the payload to send is zero in the XDR.
 */
static int send_reply(struct svcxprt_rdma *rdma,
		      struct svc_rqst *rqstp,
		      struct page *page,
		      struct rpcrdma_msg *rdma_resp,
		      struct svc_rdma_op_ctxt *ctxt,
		      struct svc_rdma_req_map *vec,
		      int byte_count)
{
	struct ib_send_wr send_wr;
	struct ib_send_wr inv_wr;
	int sge_no;
	int sge_bytes;
	int page_no;
	int pages;
	int ret;

	/* Post a recv buffer to handle another request. */
	ret = svc_rdma_post_recv(rdma);
	if (ret) {
		printk(KERN_INFO
		       "svcrdma: could not post a receive buffer, err=%d. "
		       "Closing transport %p.\n", ret, rdma);
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_rdma_put_frmr(rdma, vec->frmr);
		svc_rdma_put_context(ctxt, 0);
		return -ENOTCONN;
	}

	/* Prepare the context */
	ctxt->pages[0] = page;
	ctxt->count = 1;
	ctxt->frmr = vec->frmr;
	if (vec->frmr)
		set_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
	else
		clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);

	/* Prepare the SGE for the RPCRDMA Header */
	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
	ctxt->sge[0].addr =
	    ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
			    ctxt->sge[0].length, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
		goto err;
	atomic_inc(&rdma->sc_dma_used);

	ctxt->direction = DMA_TO_DEVICE;

	/* Map the payload indicated by 'byte_count' */
	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
		int xdr_off = 0;
		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
		byte_count -= sge_bytes;
		if (!vec->frmr) {
			ctxt->sge[sge_no].addr =
				dma_map_xdr(rdma, &rqstp->rq_res, xdr_off,
					    sge_bytes, DMA_TO_DEVICE);
			xdr_off += sge_bytes;
			if (ib_dma_mapping_error(rdma->sc_cm_id->device,
						 ctxt->sge[sge_no].addr))
				goto err;
			atomic_inc(&rdma->sc_dma_used);
			ctxt->sge[sge_no].lkey = rdma->sc_dma_lkey;
		} else {
			ctxt->sge[sge_no].addr = (unsigned long)
				vec->sge[sge_no].iov_base;
			ctxt->sge[sge_no].lkey = vec->frmr->mr->lkey;
		}
		ctxt->sge[sge_no].length = sge_bytes;
	}
	BUG_ON(byte_count != 0);

	/* Save all respages in the ctxt and remove them from the
	 * respages array. They are our pages until the I/O
	 * completes.
	 */
	pages = rqstp->rq_next_page - rqstp->rq_respages;
	for (page_no = 0; page_no < pages; page_no++) {
		ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
		ctxt->count++;
		rqstp->rq_respages[page_no] = NULL;
		/*
		 * If there are more pages than SGE, terminate SGE
		 * list so that svc_rdma_unmap_dma doesn't attempt to
		 * unmap garbage.
		 */
		if (page_no+1 >= sge_no)
			ctxt->sge[page_no+1].length = 0;
	}
	rqstp->rq_next_page = rqstp->rq_respages + 1;
	BUG_ON(sge_no > rdma->sc_max_sge);
	memset(&send_wr, 0, sizeof send_wr);
	ctxt->wr_op = IB_WR_SEND;
	send_wr.wr_id = (unsigned long)ctxt;
	send_wr.sg_list = ctxt->sge;
	send_wr.num_sge = sge_no;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;
	if (vec->frmr) {
		/* Prepare INVALIDATE WR */
		memset(&inv_wr, 0, sizeof inv_wr);
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.send_flags = IB_SEND_SIGNALED;
		inv_wr.ex.invalidate_rkey = vec->frmr->mr->lkey;
		send_wr.next = &inv_wr;
	}

	ret = svc_rdma_send(rdma, &send_wr);
	if (ret)
		goto err;

	return 0;

 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_frmr(rdma, vec->frmr);
	svc_rdma_put_context(ctxt, 1);
	return -EIO;
}

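/* svcrdma builds its transport header at send time in svc_rdma_sendto(),
 * so this svc_xprt callback has nothing to prepare and is a no-op.
 */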
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}

/*
 * Return the start of an xdr buffer.
 */
static void *xdr_start(struct xdr_buf *xdr)
{
	return xdr->head[0].iov_base -
		(xdr->len -
		 xdr->page_len -
		 xdr->tail[0].iov_len -
		 xdr->head[0].iov_len);
}

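/* Send the reply for one RPC: map the reply xdr_buf, build the RPCRDMA
 * reply header, push any write-chunk and reply-chunk data to the client
 * with RDMA_WRITE, then post an RDMA_SEND carrying the header and
 * whatever remains inline.
 */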
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct rpcrdma_msg *rdma_argp;
	struct rpcrdma_msg *rdma_resp;
	struct rpcrdma_write_array *reply_ary;
	enum rpcrdma_proc reply_type;
	int ret;
	int inline_bytes;
	struct page *res_page;
	struct svc_rdma_op_ctxt *ctxt;
	struct svc_rdma_req_map *vec;

	dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);

	/* Get the RDMA request header. */
	rdma_argp = xdr_start(&rqstp->rq_arg);

	/* Build a req vec for the XDR */
	ctxt = svc_rdma_get_context(rdma);
	ctxt->direction = DMA_TO_DEVICE;
	vec = svc_rdma_get_req_map();
	ret = map_xdr(rdma, &rqstp->rq_res, vec);
	if (ret)
		goto err0;
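	/* Everything in the reply starts out as a candidate for inline
	 * transfer; the write-chunk and reply-chunk passes below subtract
	 * whatever they push to the client via RDMA_WRITE.
	 */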
	inline_bytes = rqstp->rq_res.len;

	/* Create the RDMA response header */
	res_page = svc_rdma_get_page();
	rdma_resp = page_address(res_page);
	reply_ary = svc_rdma_get_reply_array(rdma_argp);
	if (reply_ary)
		reply_type = RDMA_NOMSG;
	else
		reply_type = RDMA_MSG;
	svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
					 rdma_resp, reply_type);

	/* Send any write-chunk data and build resp write-list */
	ret = send_write_chunks(rdma, rdma_argp, rdma_resp,
				rqstp, vec);
	if (ret < 0) {
		printk(KERN_ERR "svcrdma: failed to send write chunks, rc=%d\n",
		       ret);
		goto err1;
	}
	inline_bytes -= ret;

	/* Send any reply-list data and update resp reply-list */
	ret = send_reply_chunks(rdma, rdma_argp, rdma_resp,
				rqstp, vec);
	if (ret < 0) {
		printk(KERN_ERR "svcrdma: failed to send reply chunks, rc=%d\n",
		       ret);
		goto err1;
	}
	inline_bytes -= ret;

	ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
			 inline_bytes);
	svc_rdma_put_req_map(vec);
	dprintk("svcrdma: send_reply returns %d\n", ret);
	return ret;

 err1:
	put_page(res_page);
 err0:
	svc_rdma_put_req_map(vec);
	svc_rdma_put_context(ctxt, 0);
	return ret;
}