// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a client.
 *
 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 * transport header, post all Write WRs needed for this Reply, then post
 * a Send WR conveying the transport header and the RPC message itself to
 * the client.
 *
 * svc_rdma_sendto must fully transmit the Reply before returning, as
 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 * resources referred to by the svc_rqst are also recycled at that time.
 * Therefore any resources that must remain longer must be detached
 * from the svc_rqst and released later.
 *
 * Page Management
 *
 * The I/O that performs Reply transmission is asynchronous, and may
 * complete well after sendto returns. Thus pages under I/O must be
 * removed from the svc_rqst before sendto returns.
 *
 * The logic here depends on Send Queue and completion ordering. Since
 * the Send WR is always posted last, it will always complete last. Thus
 * when it completes, it is guaranteed that all previous Write WRs have
 * also completed.
 *
 * Write WRs are constructed and posted. Each Write segment gets its own
 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 * DMA-unmap the pages under I/O for that Write segment. The Write
 * completion handler does not release any pages.
 *
 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 * The ownership of all of the Reply's pages is transferred into that
 * ctxt, the Send WR is posted, and sendto returns.
 *
 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
 * Send completion handler finally releases the Reply's pages.
 *
 * This mechanism also assumes that completions on the transport's Send
 * Completion Queue do not run in parallel. Otherwise a Write completion
 * and Send completion running at the same time could release pages that
 * are still DMA-mapped.
 *
 * Error Handling
 *
 * - If the Send WR is posted successfully, it will either complete
 *   successfully, or get flushed. Either way, the Send completion
 *   handler releases the Reply's pages.
 * - If the Send WR cannot be posted, the forward path releases
 *   the Reply's pages.
 *
 * This mechanism handles the case where two different Write segments
 * send portions of the same page, without resorting to page reference
 * counting.
 */

#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);

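/* Each Send WR carries a completion ID so that tracepoints can
 * correlate the posted WR with events on the transport's Send
 * Completion Queue.
 */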
static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
				   struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = rdma->sc_sq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

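/* Allocate a send_ctxt, including a trailing SGE array sized for
 * this device's sc_max_send_sges, plus a persistently DMA-mapped
 * buffer that carries the transport header and any pulled-up
 * RPC message payload.
 */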
static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;
	size_t size;
	int i;

	size = sizeof(*ctxt);
	size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
	ctxt = kmalloc(size, GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);

	ctxt->sc_send_wr.next = NULL;
	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
	init_completion(&ctxt->sc_done);
	ctxt->sc_cqe.done = svc_rdma_wc_send;
	ctxt->sc_xprt_buf = buffer;
	xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
		     rdma->sc_max_req_size);
	ctxt->sc_sges[0].addr = addr;

	for (i = 0; i < rdma->sc_max_send_sges; i++)
		ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	struct llist_node *node;

	while ((node = llist_del_first(&rdma->sc_send_ctxts)) != NULL) {
		ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
		ib_dma_unmap_single(rdma->sc_pd->device,
				    ctxt->sc_sges[0].addr,
				    rdma->sc_max_req_size,
				    DMA_TO_DEVICE);
		kfree(ctxt->sc_xprt_buf);
		kfree(ctxt);
	}
}

/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	struct llist_node *node;

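	/* llist_del_first() requires that consumers be serialized with
	 * each other; sc_send_lock provides that. The producer side
	 * (llist_add() in svc_rdma_send_ctxt_put()) needs no lock.
	 */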
	spin_lock(&rdma->sc_send_lock);
	node = llist_del_first(&rdma->sc_send_ctxts);
	if (!node)
		goto out_empty;
	ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
	spin_unlock(&rdma->sc_send_lock);

out:
	rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
	xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf,
			ctxt->sc_xprt_buf, NULL);

	ctxt->sc_send_wr.num_sge = 0;
	ctxt->sc_cur_sge_no = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_send_lock);
	ctxt = svc_rdma_send_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_device *device = rdma->sc_cm_id->device;
	unsigned int i;

	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
		ib_dma_unmap_page(device,
				  ctxt->sc_sges[i].addr,
				  ctxt->sc_sges[i].length,
				  DMA_TO_DEVICE);
		trace_svcrdma_dma_unmap_page(rdma,
					     ctxt->sc_sges[i].addr,
					     ctxt->sc_sges[i].length);
	}

	llist_add(&ctxt->sc_node, &rdma->sc_send_ctxts);
}

/**
 * svc_rdma_wake_send_waiters - manage Send Queue accounting
 * @rdma: controlling transport
 * @avail: Number of additional SQEs that are now available
 */
void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail)
{
	atomic_add(avail, &rdma->sc_sq_avail);
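	/* Order the sc_sq_avail update before the waitqueue_active()
	 * check, pairing with the barrier implied by prepare_to_wait()
	 * on the sleeping side.
	 */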
	smp_mb__after_atomic();
	if (unlikely(waitqueue_active(&rdma->sc_send_wait)))
		wake_up(&rdma->sc_send_wait);
}

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_send_ctxt *ctxt =
		container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);

	trace_svcrdma_wc_send(wc, &ctxt->sc_cid);

	svc_rdma_wake_send_waiters(rdma, 1);
	complete(&ctxt->sc_done);

	if (unlikely(wc->status != IB_WC_SUCCESS))
		svc_xprt_deferred_close(&rdma->sc_xprt);
}

/**
 * svc_rdma_send - Post a single Send WR
 * @rdma: transport on which to post the WR
 * @ctxt: send ctxt with a Send WR ready to post
 *
 * Returns zero if the Send WR was posted successfully. Otherwise, a
 * negative errno is returned.
 */
int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_send_wr *wr = &ctxt->sc_send_wr;
	int ret;

	reinit_completion(&ctxt->sc_done);

	/* Sync the transport header buffer */
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      wr->sg_list[0].addr,
				      wr->sg_list[0].length,
				      DMA_TO_DEVICE);

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
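		/* An SQ credit is taken optimistically; if the SQ is
		 * actually full, return the credit and sleep until
		 * Send completions release more credits.
		 */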
		if (atomic_dec_return(&rdma->sc_sq_avail) < 0) {
			percpu_counter_inc(&svcrdma_stat_sq_starve);
			trace_svcrdma_sq_full(rdma);
			atomic_inc(&rdma->sc_sq_avail);
			wait_event(rdma->sc_send_wait,
				   atomic_read(&rdma->sc_sq_avail) > 1);
			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
				return -ENOTCONN;
			trace_svcrdma_sq_retry(rdma);
			continue;
		}

		trace_svcrdma_post_send(ctxt);
		ret = ib_post_send(rdma->sc_qp, wr, NULL);
		if (ret)
			break;
		return 0;
	}

	trace_svcrdma_sq_post_err(rdma, ret);
	svc_xprt_deferred_close(&rdma->sc_xprt);
	wake_up(&rdma->sc_send_wait);
	return ret;
}

/**
 * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Read list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt)
{
	/* RPC-over-RDMA version 1 replies never have a Read list. */
	return xdr_stream_encode_item_absent(&sctxt->sc_stream);
}

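/* On the wire, each Write segment is an RDMA segment as defined in
 * RFC 8166: a 32-bit handle (rkey), a 32-bit length, and a 64-bit
 * offset, for rpcrdma_segment_maxsz (4) XDR words in total.
 */
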
/**
 * svc_rdma_encode_write_segment - Encode one Write segment
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 * @remaining: remaining bytes of the payload left in the Write chunk
 * @segno: which segment in the chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write segment, and updates @remaining
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_segment(struct svc_rdma_send_ctxt *sctxt,
					     const struct svc_rdma_chunk *chunk,
					     u32 *remaining, unsigned int segno)
{
	const struct svc_rdma_segment *segment = &chunk->ch_segments[segno];
	const size_t len = rpcrdma_segment_maxsz * sizeof(__be32);
	u32 length;
	__be32 *p;

	p = xdr_reserve_space(&sctxt->sc_stream, len);
	if (!p)
		return -EMSGSIZE;

	length = min_t(u32, *remaining, segment->rs_length);
	*remaining -= length;
	xdr_encode_rdma_segment(p, segment->rs_handle, length,
				segment->rs_offset);
	trace_svcrdma_encode_wseg(sctxt, segno, segment->rs_handle, length,
				  segment->rs_offset);
	return len;
}

/**
 * svc_rdma_encode_write_chunk - Encode one Write chunk
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 *
 * Copy a Write chunk from the Call transport header to the
 * Reply transport header. Update each segment's length field
 * to reflect the number of bytes written in that segment.
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_chunk(struct svc_rdma_send_ctxt *sctxt,
					   const struct svc_rdma_chunk *chunk)
{
	u32 remaining = chunk->ch_payload_length;
	unsigned int segno;
	ssize_t len, ret;

	len = 0;
	ret = xdr_stream_encode_item_present(&sctxt->sc_stream);
	if (ret < 0)
		return ret;
	len += ret;

	ret = xdr_stream_encode_u32(&sctxt->sc_stream, chunk->ch_segcount);
	if (ret < 0)
		return ret;
	len += ret;

	for (segno = 0; segno < chunk->ch_segcount; segno++) {
		ret = svc_rdma_encode_write_segment(sctxt, chunk, &remaining, segno);
		if (ret < 0)
			return ret;
		len += ret;
	}

	return len;
}

/**
 * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Write list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_list(struct svc_rdma_recv_ctxt *rctxt,
					  struct svc_rdma_send_ctxt *sctxt)
{
	struct svc_rdma_chunk *chunk;
	ssize_t len, ret;

	len = 0;
	pcl_for_each_chunk(chunk, &rctxt->rc_write_pcl) {
		ret = svc_rdma_encode_write_chunk(sctxt, chunk);
		if (ret < 0)
			return ret;
		len += ret;
	}

	/* Terminate the Write list */
	ret = xdr_stream_encode_item_absent(&sctxt->sc_stream);
	if (ret < 0)
		return ret;

	return len + ret;
}

/**
 * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the Reply chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Reply chunk
 *   %-EMSGSIZE on XDR buffer overflow
 *   %-E2BIG if the RPC message is larger than the Reply chunk
 */
static ssize_t
svc_rdma_encode_reply_chunk(struct svc_rdma_recv_ctxt *rctxt,
			    struct svc_rdma_send_ctxt *sctxt,
			    unsigned int length)
{
	struct svc_rdma_chunk *chunk;

	if (pcl_is_empty(&rctxt->rc_reply_pcl))
		return xdr_stream_encode_item_absent(&sctxt->sc_stream);

	chunk = pcl_first_chunk(&rctxt->rc_reply_pcl);
	if (length > chunk->ch_length)
		return -E2BIG;

	chunk->ch_payload_length = length;
	return svc_rdma_encode_write_chunk(sctxt, chunk);
}

struct svc_rdma_map_data {
	struct svcxprt_rdma		*md_rdma;
	struct svc_rdma_send_ctxt	*md_ctxt;
};

/**
 * svc_rdma_page_dma_map - DMA map one page
 * @data: pointer to arguments
 * @page: struct page to DMA map
 * @offset: offset into the page
 * @len: number of bytes to map
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if the page cannot be DMA mapped
 */
static int svc_rdma_page_dma_map(void *data, struct page *page,
				 unsigned long offset, unsigned int len)
{
	struct svc_rdma_map_data *args = data;
	struct svcxprt_rdma *rdma = args->md_rdma;
	struct svc_rdma_send_ctxt *ctxt = args->md_ctxt;
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	++ctxt->sc_cur_sge_no;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

	trace_svcrdma_dma_map_page(rdma, dma_addr, len);
	ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
	ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
	ctxt->sc_send_wr.num_sge++;
	return 0;

out_maperr:
	trace_svcrdma_dma_map_err(rdma, dma_addr, len);
	return -EIO;
}

/**
 * svc_rdma_iov_dma_map - DMA map an iovec
 * @data: pointer to arguments
 * @iov: kvec to DMA map
 *
 * ib_dma_map_page() is used here because svc_rdma_send_ctxt_put()
 * handles DMA-unmap and it uses ib_dma_unmap_page() exclusively.
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if the iovec cannot be DMA mapped
 */
static int svc_rdma_iov_dma_map(void *data, const struct kvec *iov)
{
	if (!iov->iov_len)
		return 0;
	return svc_rdma_page_dma_map(data, virt_to_page(iov->iov_base),
				     offset_in_page(iov->iov_base),
				     iov->iov_len);
}

/**
 * svc_rdma_xb_dma_map - DMA map all segments of an xdr_buf
 * @xdr: xdr_buf containing portion of an RPC message to transmit
 * @data: pointer to arguments
 *
 * Returns:
 *   The number of bytes in @xdr that were DMA mapped, on success
 *   %-EIO if DMA mapping failed
 *
 * On failure, any DMA mappings that have been already done must be
 * unmapped by the caller.
 */
static int svc_rdma_xb_dma_map(const struct xdr_buf *xdr, void *data)
{
	unsigned int len, remaining;
	unsigned long pageoff;
	struct page **ppages;
	int ret;

	ret = svc_rdma_iov_dma_map(data, &xdr->head[0]);
	if (ret < 0)
		return ret;

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	pageoff = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - pageoff, remaining);

		ret = svc_rdma_page_dma_map(data, *ppages++, pageoff, len);
		if (ret < 0)
			return ret;

		remaining -= len;
		pageoff = 0;
	}

	ret = svc_rdma_iov_dma_map(data, &xdr->tail[0]);
	if (ret < 0)
		return ret;

	return xdr->len;
}

struct svc_rdma_pullup_data {
	u8		*pd_dest;
	unsigned int	pd_length;
	unsigned int	pd_num_sges;
};

/**
 * svc_rdma_xb_count_sges - Count how many SGEs will be needed
 * @xdr: xdr_buf containing portion of an RPC message to transmit
 * @data: pointer to arguments
 *
 * Returns:
 *   Always zero. The number of SGEs needed to Send the contents
 *   of @xdr inline is accumulated in @data.
 */
static int svc_rdma_xb_count_sges(const struct xdr_buf *xdr,
				  void *data)
{
	struct svc_rdma_pullup_data *args = data;
	unsigned int remaining;
	unsigned long offset;

	if (xdr->head[0].iov_len)
		++args->pd_num_sges;

	offset = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		++args->pd_num_sges;
		remaining -= min_t(u32, PAGE_SIZE - offset, remaining);
		offset = 0;
	}

	if (xdr->tail[0].iov_len)
		++args->pd_num_sges;

	args->pd_length += xdr->len;
	return 0;
}

/**
 * svc_rdma_pull_up_needed - Determine whether to use pull-up
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: xdr_buf containing RPC message to transmit
 *
 * Returns:
 *   %true if pull-up must be used
 *   %false otherwise
 */
static bool svc_rdma_pull_up_needed(const struct svcxprt_rdma *rdma,
				    const struct svc_rdma_send_ctxt *sctxt,
				    const struct svc_rdma_recv_ctxt *rctxt,
				    const struct xdr_buf *xdr)
{
	/* Resources needed for the transport header */
	struct svc_rdma_pullup_data args = {
		.pd_length	= sctxt->sc_hdrbuf.len,
		.pd_num_sges	= 1,
	};
	int ret;

	ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				      svc_rdma_xb_count_sges, &args);
	if (ret < 0)
		return false;

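	/* Pull up the Reply if it is small, or if it would otherwise
	 * need more scatter/gather elements than the device can handle
	 * in a single Send WR.
	 */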
	if (args.pd_length < RPCRDMA_PULLUP_THRESH)
		return true;
	return args.pd_num_sges >= rdma->sc_max_send_sges;
}

/**
 * svc_rdma_xb_linearize - Copy region of xdr_buf to flat buffer
 * @xdr: xdr_buf containing portion of an RPC message to copy
 * @data: pointer to arguments
 *
 * Returns:
 *   Always zero.
 */
static int svc_rdma_xb_linearize(const struct xdr_buf *xdr,
				 void *data)
{
	struct svc_rdma_pullup_data *args = data;
	unsigned int len, remaining;
	unsigned long pageoff;
	struct page **ppages;

	if (xdr->head[0].iov_len) {
		memcpy(args->pd_dest, xdr->head[0].iov_base, xdr->head[0].iov_len);
		args->pd_dest += xdr->head[0].iov_len;
	}

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	pageoff = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - pageoff, remaining);
		memcpy(args->pd_dest, page_address(*ppages) + pageoff, len);
		remaining -= len;
		args->pd_dest += len;
		pageoff = 0;
		ppages++;
	}

	if (xdr->tail[0].iov_len) {
		memcpy(args->pd_dest, xdr->tail[0].iov_base, xdr->tail[0].iov_len);
		args->pd_dest += xdr->tail[0].iov_len;
	}

	args->pd_length += xdr->len;
	return 0;
}

/**
 * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * The device is not capable of sending the reply directly.
 * Assemble the elements of @xdr into the transport header buffer.
 *
 * Assumptions:
 *  pull_up_needed has determined that @xdr will fit in the buffer.
 *
 * Returns:
 *   %0 if pull-up was successful
 *   %-EMSGSIZE if a buffer manipulation problem occurred
 */
static int svc_rdma_pull_up_reply_msg(const struct svcxprt_rdma *rdma,
				      struct svc_rdma_send_ctxt *sctxt,
				      const struct svc_rdma_recv_ctxt *rctxt,
				      const struct xdr_buf *xdr)
{
	struct svc_rdma_pullup_data args = {
		.pd_dest	= sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len,
	};
	int ret;

	ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				      svc_rdma_xb_linearize, &args);
	if (ret < 0)
		return ret;

	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len + args.pd_length;
	trace_svcrdma_send_pullup(sctxt, args.pd_length);
	return 0;
}

/**
 * svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * Returns:
 *   %0 if DMA mapping was successful.
 *   %-EMSGSIZE if a buffer manipulation problem occurred
 *   %-EIO if DMA mapping failed
 *
 * The Send WR's num_sge field is set in all cases.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
			   struct svc_rdma_send_ctxt *sctxt,
			   const struct svc_rdma_recv_ctxt *rctxt,
			   const struct xdr_buf *xdr)
{
	struct svc_rdma_map_data args = {
		.md_rdma	= rdma,
		.md_ctxt	= sctxt,
	};

	/* Set up the (persistently-mapped) transport header SGE. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;

	/* If there is a Reply chunk, nothing follows the transport
	 * header, and we're done here.
	 */
	if (!pcl_is_empty(&rctxt->rc_reply_pcl))
		return 0;

	/* For pull-up, svc_rdma_send() will sync the transport header.
	 * No additional DMA mapping is necessary.
	 */
	if (svc_rdma_pull_up_needed(rdma, sctxt, rctxt, xdr))
		return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr);

	return pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				       svc_rdma_xb_dma_map, &args);
}

/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * RDMA Send is the last step of transmitting an RPC reply. Pages
 * involved in the earlier RDMA Writes are here transferred out
 * of the rqstp and into the sctxt's page array. These pages are
 * DMA unmapped by each Write completion, but the subsequent Send
 * completion finally releases these pages.
 *
 * Assumptions:
 * - The Reply's transport header will never be larger than a page.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *sctxt,
				   const struct svc_rdma_recv_ctxt *rctxt,
				   struct svc_rqst *rqstp)
{
	int ret;

	ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqstp->rq_res);
	if (ret < 0)
		return ret;

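	/* If the client requested Remote Invalidation, use Send With
	 * Invalidate to knock down the rkey it selected, saving the
	 * client a separate Local Invalidate operation.
	 */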
	if (rctxt->rc_inv_rkey) {
		sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
		sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
	} else {
		sctxt->sc_send_wr.opcode = IB_WR_SEND;
	}

	ret = svc_rdma_send(rdma, sctxt);
	if (ret < 0)
		return ret;

	ret = wait_for_completion_killable(&sctxt->sc_done);
	svc_rdma_send_ctxt_put(rdma, sctxt);
	return ret;
}

/**
 * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response
 * @rdma: controlling transport context
 * @sctxt: Send context for the response
 * @rctxt: Receive context for incoming bad message
 * @status: negative errno indicating error that occurred
 *
 * Given the client-provided Read, Write, and Reply chunks, the
 * server was not able to parse the Call or form a complete Reply.
 * Return an RDMA_ERROR message so the client can retire the RPC
 * transaction.
 *
 * The caller does not have to release @sctxt. It is released by
 * this function after the Send completes or fails to post.
 */
void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
			     struct svc_rdma_send_ctxt *sctxt,
			     struct svc_rdma_recv_ctxt *rctxt,
			     int status)
{
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	__be32 *p;

	rpcrdma_set_xdrlen(&sctxt->sc_hdrbuf, 0);
	xdr_init_encode(&sctxt->sc_stream, &sctxt->sc_hdrbuf,
			sctxt->sc_xprt_buf, NULL);

	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto put_ctxt;

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = rdma_error;

	switch (status) {
	case -EPROTONOSUPPORT:
		p = xdr_reserve_space(&sctxt->sc_stream, 3 * sizeof(*p));
		if (!p)
			goto put_ctxt;

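		/* ERR_VERS carries the range of protocol versions the
		 * responder supports; this implementation speaks only
		 * RPC-over-RDMA version one.
		 */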
		*p++ = err_vers;
		*p++ = rpcrdma_version;
		*p = rpcrdma_version;
		trace_svcrdma_err_vers(*rdma_argp);
		break;
	default:
		p = xdr_reserve_space(&sctxt->sc_stream, sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p = err_chunk;
		trace_svcrdma_err_chunk(*rdma_argp);
	}

	/* Remote Invalidation is skipped for simplicity. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_send_wr.opcode = IB_WR_SEND;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
	if (svc_rdma_send(rdma, sctxt))
		goto put_ctxt;

	wait_for_completion_killable(&sctxt->sc_done);

put_ctxt:
	svc_rdma_send_ctxt_put(rdma, sctxt);
}

/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *	%0 if an RPC reply has been successfully posted,
 *	%-ENOMEM if a resource shortage occurred (connection is lost),
 *	%-ENOTCONN if posting failed (connection is lost).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	struct svc_rdma_send_ctxt *sctxt;
	unsigned int rc_size;
	__be32 *p;
	int ret;

	ret = -ENOTCONN;
	if (svc_xprt_is_dead(xprt))
		goto drop_connection;

	ret = -ENOMEM;
	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		goto drop_connection;

	ret = -EMSGSIZE;
	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto put_ctxt;

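	/* RDMA Writes of the payload into the client's Reply chunk,
	 * if there is one, must happen before the transport header is
	 * encoded, because the header's segments report the number of
	 * bytes actually written.
	 */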
	ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res);
	if (ret < 0)
		goto reply_chunk;
	rc_size = ret;

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = pcl_is_empty(&rctxt->rc_reply_pcl) ? rdma_msg : rdma_nomsg;

	ret = svc_rdma_encode_read_list(sctxt);
	if (ret < 0)
		goto put_ctxt;
	ret = svc_rdma_encode_write_list(rctxt, sctxt);
	if (ret < 0)
		goto put_ctxt;
	ret = svc_rdma_encode_reply_chunk(rctxt, sctxt, rc_size);
	if (ret < 0)
		goto put_ctxt;

	ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
	if (ret < 0)
		goto put_ctxt;

	/* Prevent svc_xprt_release() from releasing the page backing
	 * rq_res.head[0].iov_base. It's no longer being accessed by
	 * the I/O device.
	 */
	rqstp->rq_respages++;
	return 0;

reply_chunk:
	if (ret != -E2BIG && ret != -EINVAL)
		goto put_ctxt;

	svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret);
	return 0;

put_ctxt:
	svc_rdma_send_ctxt_put(rdma, sctxt);
drop_connection:
	trace_svcrdma_send_err(rqstp, ret);
	svc_xprt_deferred_close(&rdma->sc_xprt);
	return -ENOTCONN;
}

/**
 * svc_rdma_result_payload - special processing for a result payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Return values:
 *   %0 if successful or nothing needed to be done
 *   %-EMSGSIZE on XDR buffer overflow
 *   %-E2BIG if the payload was larger than the Write chunk
 *   %-EINVAL if client provided too many segments
 *   %-ENOMEM if rdma_rw context pool was exhausted
 *   %-ENOTCONN if posting failed (connection is lost)
 *   %-EIO if rdma_rw initialization failed (DMA mapping, etc)
 */
int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset,
			    unsigned int length)
{
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	struct svc_rdma_chunk *chunk;
	struct svcxprt_rdma *rdma;
	struct xdr_buf subbuf;
	int ret;

	chunk = rctxt->rc_cur_result_payload;
	if (!length || !chunk)
		return 0;
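	/* Advance to the next Write chunk so that a subsequent result
	 * payload in this Reply lands in its own chunk.
	 */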
	rctxt->rc_cur_result_payload =
		pcl_next_chunk(&rctxt->rc_write_pcl, chunk);
	if (length > chunk->ch_length)
		return -E2BIG;

	chunk->ch_position = offset;
	chunk->ch_payload_length = length;

	if (xdr_buf_subsegment(&rqstp->rq_res, &subbuf, offset, length))
		return -EMSGSIZE;

	rdma = container_of(rqstp->rq_xprt, struct svcxprt_rdma, sc_xprt);
	ret = svc_rdma_send_write_chunk(rdma, chunk, &subbuf);
	if (ret < 0)
		return ret;
	return 0;
}