/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

/* Map an xdr_buf into an array of IB SGEs
 *
 * Assumptions:
 * - head[0] is physically contiguous.
 * - tail[0] is physically contiguous.
 * - pages[] is not physically or virtually contiguous and consists of
 *   PAGE_SIZE-sized elements.
 *
 * Output:
 * SGE[0]              reserved for the RPCRDMA header
 * SGE[1]              data from xdr->head[]
 * SGE[2..sge_count-2] data from xdr->pages[]
 * SGE[sge_count-1]    data from xdr->tail[]
 *
 */
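/* Illustrative example (assuming page_base is zero): an xdr_buf with a
 * 200-byte head, 6000 bytes of page data, and a 4-byte tail maps to
 * SGE[1] = 200 bytes, SGE[2] = 4096 bytes, SGE[3] = 1904 bytes, and
 * SGE[4] = 4 bytes, with SGE[0] left free for the RPCRDMA header.
 */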
static struct ib_sge *xdr_to_sge(struct svcxprt_rdma *xprt,
				 struct xdr_buf *xdr,
				 struct ib_sge *sge,
				 int *sge_count)
{
	/* Max we need is the length of the XDR / pagesize + one for
	 * head + one for tail + one for RPCRDMA header
	 */
	int sge_max = (xdr->len+PAGE_SIZE-1) / PAGE_SIZE + 3;
	int sge_no;
	u32 byte_count = xdr->len;
	u32 sge_bytes;
	u32 page_bytes;
	int page_off;
	int page_no;

	/* Skip the first sge; it is reserved for the RPCRDMA header */
	sge_no = 1;

	/* Head SGE */
	sge[sge_no].addr = ib_dma_map_single(xprt->sc_cm_id->device,
					     xdr->head[0].iov_base,
					     xdr->head[0].iov_len,
					     DMA_TO_DEVICE);
	sge_bytes = min_t(u32, byte_count, xdr->head[0].iov_len);
	byte_count -= sge_bytes;
	sge[sge_no].length = sge_bytes;
	sge[sge_no].lkey = xprt->sc_phys_mr->lkey;
	sge_no++;

	/* pages SGE */
	page_no = 0;
	page_bytes = xdr->page_len;
	page_off = xdr->page_base;
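	/* Only the first page may start at a non-zero offset (page_base);
	 * every subsequent page in xdr->pages[] is mapped from offset 0. */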
	while (byte_count && page_bytes) {
		sge_bytes = min_t(u32, byte_count, (PAGE_SIZE-page_off));
		sge[sge_no].addr =
			ib_dma_map_page(xprt->sc_cm_id->device,
					xdr->pages[page_no], page_off,
					sge_bytes, DMA_TO_DEVICE);
		sge_bytes = min(sge_bytes, page_bytes);
		byte_count -= sge_bytes;
		page_bytes -= sge_bytes;
		sge[sge_no].length = sge_bytes;
		sge[sge_no].lkey = xprt->sc_phys_mr->lkey;

		sge_no++;
		page_no++;
		page_off = 0; /* reset for next time through loop */
	}

	/* Tail SGE */
	if (byte_count && xdr->tail[0].iov_len) {
		sge[sge_no].addr =
			ib_dma_map_single(xprt->sc_cm_id->device,
					  xdr->tail[0].iov_base,
					  xdr->tail[0].iov_len,
					  DMA_TO_DEVICE);
		sge_bytes = min_t(u32, byte_count, xdr->tail[0].iov_len);
		byte_count -= sge_bytes;
		sge[sge_no].length = sge_bytes;
		sge[sge_no].lkey = xprt->sc_phys_mr->lkey;
		sge_no++;
	}

	BUG_ON(sge_no > sge_max);
	BUG_ON(byte_count != 0);

	*sge_count = sge_no;
	return sge;
}

/* Assumptions:
 * - The specified write_len fits within sc_max_sge * PAGE_SIZE bytes.
 */
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
		      u32 rmr, u64 to,
		      u32 xdr_off, int write_len,
		      struct ib_sge *xdr_sge, int sge_count)
{
	struct svc_rdma_op_ctxt *tmp_sge_ctxt;
	struct ib_send_wr write_wr;
	struct ib_sge *sge;
	int xdr_sge_no;
	int sge_no;
	int sge_bytes;
	int sge_off;
	int bc;
	struct svc_rdma_op_ctxt *ctxt;
	int ret = 0;

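	/* Guard against overrunning the scratch SGE array in the
	 * context allocated below. */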
	BUG_ON(sge_count >= 32);
	dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
		"write_len=%d, xdr_sge=%p, sge_count=%d\n",
		rmr, (unsigned long long)to, xdr_off,
		write_len, xdr_sge, sge_count);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->count = 0;
	tmp_sge_ctxt = svc_rdma_get_context(xprt);
	sge = tmp_sge_ctxt->sge;

	/* Find the SGE associated with xdr_off */
	for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < sge_count;
	     xdr_sge_no++) {
		if (xdr_sge[xdr_sge_no].length > bc)
			break;
		bc -= xdr_sge[xdr_sge_no].length;
	}

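	/* On exit from the loop above, bc is the byte offset into
	 * xdr_sge[xdr_sge_no] at which this RDMA write begins. */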
	sge_off = bc;
	bc = write_len;
	sge_no = 0;

	/* Copy the remaining SGE */
	while (bc != 0 && xdr_sge_no < sge_count) {
		sge[sge_no].addr = xdr_sge[xdr_sge_no].addr + sge_off;
		sge[sge_no].lkey = xdr_sge[xdr_sge_no].lkey;
		sge_bytes = min((size_t)bc,
				(size_t)(xdr_sge[xdr_sge_no].length-sge_off));
		sge[sge_no].length = sge_bytes;

		sge_off = 0;
		sge_no++;
		xdr_sge_no++;
		bc -= sge_bytes;
	}

	BUG_ON(bc != 0);
	BUG_ON(xdr_sge_no > sge_count);

	/* Prepare WRITE WR */
	memset(&write_wr, 0, sizeof write_wr);
	ctxt->wr_op = IB_WR_RDMA_WRITE;
	write_wr.wr_id = (unsigned long)ctxt;
	write_wr.sg_list = &sge[0];
	write_wr.num_sge = sge_no;
	write_wr.opcode = IB_WR_RDMA_WRITE;
	write_wr.send_flags = IB_SEND_SIGNALED;
	write_wr.wr.rdma.rkey = rmr;
	write_wr.wr.rdma.remote_addr = to;

	/* Post It */
	atomic_inc(&rdma_stat_write);
	if (svc_rdma_send(xprt, &write_wr)) {
		svc_rdma_put_context(ctxt, 1);
		/* Fatal error, close transport */
		ret = -EIO;
	}
	svc_rdma_put_context(tmp_sge_ctxt, 0);
	return ret;
}

static int send_write_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_msg *rdma_argp,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct ib_sge *sge,
			     int sge_count)
{
	u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
	int write_len;
	int max_write;
	u32 xdr_off;
	int chunk_off;
	int chunk_no;
	struct rpcrdma_write_array *arg_ary;
	struct rpcrdma_write_array *res_ary;
	int ret;

	arg_ary = svc_rdma_get_write_array(rdma_argp);
	if (!arg_ary)
		return 0;
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[1];

	max_write = xprt->sc_max_sge * PAGE_SIZE;

	/* Write chunks start at the pagelist */
	for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
	     xfer_len && chunk_no < arg_ary->wc_nchunks;
	     chunk_no++) {
		struct rpcrdma_segment *arg_ch;
		u64 rs_offset;

		arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, arg_ch->rs_length);

		/* Prepare the response chunk given the length actually
		 * written */
		rs_offset = get_unaligned(&(arg_ch->rs_offset));
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
					    arg_ch->rs_handle,
					    rs_offset,
					    write_len);
		chunk_off = 0;
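		/* Split the chunk into RDMA_WRITEs no larger than the
		 * device's SGE limit allows (sc_max_sge pages). */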
		while (write_len) {
			int this_write;
			this_write = min(write_len, max_write);
			ret = send_write(xprt, rqstp,
					 arg_ch->rs_handle,
					 rs_offset + chunk_off,
					 xdr_off,
					 this_write,
					 sge,
					 sge_count);
			if (ret) {
				dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
					ret);
				return -EIO;
			}
			chunk_off += this_write;
			xdr_off += this_write;
			xfer_len -= this_write;
			write_len -= this_write;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);

	return rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
}

static int send_reply_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_msg *rdma_argp,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct ib_sge *sge,
			     int sge_count)
{
	u32 xfer_len = rqstp->rq_res.len;
	int write_len;
	int max_write;
	u32 xdr_off;
	int chunk_no;
	int chunk_off;
	struct rpcrdma_segment *ch;
	struct rpcrdma_write_array *arg_ary;
	struct rpcrdma_write_array *res_ary;
	int ret;

	arg_ary = svc_rdma_get_reply_array(rdma_argp);
	if (!arg_ary)
		return 0;
	/* XXX: this needs fixing when a reply list occurs together with a
	 * read-list and/or a write-list */
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[2];

	max_write = xprt->sc_max_sge * PAGE_SIZE;

	/* xdr offset starts at RPC message */
	for (xdr_off = 0, chunk_no = 0;
	     xfer_len && chunk_no < arg_ary->wc_nchunks;
	     chunk_no++) {
		u64 rs_offset;
		ch = &arg_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, ch->rs_length);

		/* Prepare the reply chunk given the length actually
		 * written */
		rs_offset = get_unaligned(&(ch->rs_offset));
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
					    ch->rs_handle, rs_offset,
					    write_len);
		chunk_off = 0;
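		/* As above, split the reply chunk into RDMA_WRITEs that
		 * each fit within sc_max_sge pages. */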
		while (write_len) {
			int this_write;

			this_write = min(write_len, max_write);
			ret = send_write(xprt, rqstp,
					 ch->rs_handle,
					 rs_offset + chunk_off,
					 xdr_off,
					 this_write,
					 sge,
					 sge_count);
			if (ret) {
				dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
					ret);
				return -EIO;
			}
			chunk_off += this_write;
			xdr_off += this_write;
			xfer_len -= this_write;
			write_len -= this_write;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);

	return rqstp->rq_res.len;
}

/* This function prepares the portion of the RPCRDMA message to be
 * sent in the RDMA_SEND. This function is called after data sent via
 * RDMA has already been transmitted. There are three cases:
 * - The RPCRDMA header, RPC header, and payload are all sent in a
 *   single RDMA_SEND. This is the "inline" case.
 * - The RPCRDMA header and some portion of the RPC header and data
 *   are sent via this RDMA_SEND and another portion of the data is
 *   sent via RDMA.
 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
 *   header and data are all transmitted via RDMA.
 * In all three cases, this function maps the already-encoded RPCRDMA
 * header ('rdma_resp') into sge[0], and the 'byte_count' parameter
 * indicates how much of the XDR to include in this RDMA_SEND.
 */
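/* For example, a small GETATTR reply typically fits entirely inline
 * (first case), while a large NFS READ reply usually returns its data
 * via RDMA_WRITEs described by the client's write list and sends only
 * the headers here (second case). */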
static int send_reply(struct svcxprt_rdma *rdma,
		      struct svc_rqst *rqstp,
		      struct page *page,
		      struct rpcrdma_msg *rdma_resp,
		      struct svc_rdma_op_ctxt *ctxt,
		      int sge_count,
		      int byte_count)
{
	struct ib_send_wr send_wr;
	int sge_no;
	int sge_bytes;
	int page_no;
	int ret;

	/* Prepare the context */
	ctxt->pages[0] = page;
	ctxt->count = 1;

	/* Prepare the SGE for the RPCRDMA Header */
	ctxt->sge[0].addr =
		ib_dma_map_page(rdma->sc_cm_id->device,
				page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	ctxt->direction = DMA_TO_DEVICE;
	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
	ctxt->sge[0].lkey = rdma->sc_phys_mr->lkey;

	/* Determine how many of our SGE are to be transmitted */
	for (sge_no = 1; byte_count && sge_no < sge_count; sge_no++) {
		sge_bytes = min((size_t)ctxt->sge[sge_no].length,
				(size_t)byte_count);
		byte_count -= sge_bytes;
	}
	BUG_ON(byte_count != 0);
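	/* sge_no now counts the header SGE plus every data SGE needed to
	 * carry the inline portion of the reply; it becomes num_sge in
	 * the send WR below. */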

	/* Save all respages in the ctxt and remove them from the
	 * respages array. They are our pages until the I/O
	 * completes.
	 */
	for (page_no = 0; page_no < rqstp->rq_resused; page_no++) {
		ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
		ctxt->count++;
		rqstp->rq_respages[page_no] = NULL;
	}

	BUG_ON(sge_no > rdma->sc_max_sge);
	memset(&send_wr, 0, sizeof send_wr);
	ctxt->wr_op = IB_WR_SEND;
	send_wr.wr_id = (unsigned long)ctxt;
	send_wr.sg_list = ctxt->sge;
	send_wr.num_sge = sge_no;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = svc_rdma_send(rdma, &send_wr);
	if (ret)
		svc_rdma_put_context(ctxt, 1);

	return ret;
}

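/* The RPC-over-RDMA reply header is constructed later, in
 * svc_rdma_sendto(), so there is nothing to reserve here. */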
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}

/*
 * Return the start of an xdr buffer: back up from head[0].iov_base over
 * the bytes already consumed from the front of the buffer (the
 * RPC-over-RDMA transport header parsed during receive).
 */
static void *xdr_start(struct xdr_buf *xdr)
{
	return xdr->head[0].iov_base -
		(xdr->len -
		 xdr->page_len -
		 xdr->tail[0].iov_len -
		 xdr->head[0].iov_len);
}

int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct rpcrdma_msg *rdma_argp;
	struct rpcrdma_msg *rdma_resp;
	struct rpcrdma_write_array *reply_ary;
	enum rpcrdma_proc reply_type;
	int ret;
	int inline_bytes;
	struct ib_sge *sge;
	int sge_count = 0;
	struct page *res_page;
	struct svc_rdma_op_ctxt *ctxt;

	dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);

	/* Get the RDMA request header. */
	rdma_argp = xdr_start(&rqstp->rq_arg);

	/* Build an SGE for the XDR */
	ctxt = svc_rdma_get_context(rdma);
	ctxt->direction = DMA_TO_DEVICE;
	sge = xdr_to_sge(rdma, &rqstp->rq_res, ctxt->sge, &sge_count);

	inline_bytes = rqstp->rq_res.len;

	/* Create the RDMA response header */
	res_page = svc_rdma_get_page();
	rdma_resp = page_address(res_page);
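	/* If the client provided a reply chunk, the RPC reply itself is
	 * returned via RDMA writes and only the RPCRDMA header is sent
	 * here (RDMA_NOMSG); otherwise the reply is sent inline
	 * (RDMA_MSG). */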
	reply_ary = svc_rdma_get_reply_array(rdma_argp);
	if (reply_ary)
		reply_type = RDMA_NOMSG;
	else
		reply_type = RDMA_MSG;
	svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
					 rdma_resp, reply_type);

	/* Send any write-chunk data and build resp write-list */
	ret = send_write_chunks(rdma, rdma_argp, rdma_resp,
				rqstp, sge, sge_count);
	if (ret < 0) {
		printk(KERN_ERR "svcrdma: failed to send write chunks, rc=%d\n",
		       ret);
		goto error;
	}
	inline_bytes -= ret;

	/* Send any reply-list data and update resp reply-list */
	ret = send_reply_chunks(rdma, rdma_argp, rdma_resp,
				rqstp, sge, sge_count);
	if (ret < 0) {
		printk(KERN_ERR "svcrdma: failed to send reply chunks, rc=%d\n",
		       ret);
		goto error;
	}
	inline_bytes -= ret;

	ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, sge_count,
			 inline_bytes);
	dprintk("svcrdma: send_reply returns %d\n", ret);
	return ret;
 error:
	svc_rdma_put_context(ctxt, 0);
	put_page(res_page);
	return ret;
}