xref: /openbmc/linux/net/sunrpc/xprtrdma/svc_rdma_sendto.c (revision 2612e3bbc0386368a850140a6c9b990cd496a5ec)
1  // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2  /*
3   * Copyright (c) 2016-2018 Oracle. All rights reserved.
4   * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
5   * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
6   *
7   * This software is available to you under a choice of one of two
8   * licenses.  You may choose to be licensed under the terms of the GNU
9   * General Public License (GPL) Version 2, available from the file
10   * COPYING in the main directory of this source tree, or the BSD-type
11   * license below:
12   *
13   * Redistribution and use in source and binary forms, with or without
14   * modification, are permitted provided that the following conditions
15   * are met:
16   *
17   *      Redistributions of source code must retain the above copyright
18   *      notice, this list of conditions and the following disclaimer.
19   *
20   *      Redistributions in binary form must reproduce the above
21   *      copyright notice, this list of conditions and the following
22   *      disclaimer in the documentation and/or other materials provided
23   *      with the distribution.
24   *
25   *      Neither the name of the Network Appliance, Inc. nor the names of
26   *      its contributors may be used to endorse or promote products
27   *      derived from this software without specific prior written
28   *      permission.
29   *
30   * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31   * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32   * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
33   * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
34   * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
35   * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
36   * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
37   * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
38   * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
39   * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
40   * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41   *
42   * Author: Tom Tucker <tom@opengridcomputing.com>
43   */
44  
45  /* Operation
46   *
47   * The main entry point is svc_rdma_sendto. This is called by the
48   * RPC server when an RPC Reply is ready to be transmitted to a client.
49   *
50   * The passed-in svc_rqst contains a struct xdr_buf which holds an
51   * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
52   * transport header, post all Write WRs needed for this Reply, then post
53   * a Send WR conveying the transport header and the RPC message itself to
54   * the client.
55   *
56   * svc_rdma_sendto must fully transmit the Reply before returning, as
57   * the svc_rqst will be recycled as soon as sendto returns. Remaining
58   * resources referred to by the svc_rqst are also recycled at that time.
59   * Therefore any resources that must remain longer must be detached
60   * from the svc_rqst and released later.
61   *
62   * Page Management
63   *
64   * The I/O that performs Reply transmission is asynchronous, and may
65   * complete well after sendto returns. Thus pages under I/O must be
66   * removed from the svc_rqst before sendto returns.
67   *
68   * The logic here depends on Send Queue and completion ordering. Since
69   * the Send WR is always posted last, it will always complete last. Thus
70   * when it completes, it is guaranteed that all previous Write WRs have
71   * also completed.
72   *
73   * Write WRs are constructed and posted. Each Write segment gets its own
74   * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
75   * DMA-unmap the pages under I/O for that Write segment. The Write
76   * completion handler does not release any pages.
77   *
78   * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
79   * The ownership of all of the Reply's pages is transferred into that
80   * ctxt, the Send WR is posted, and sendto returns.
81   *
82   * The svc_rdma_send_ctxt is presented when the Send WR completes. The
83   * Send completion handler finally releases the Reply's pages.
84   *
85   * This mechanism also assumes that completions on the transport's Send
86   * Completion Queue do not run in parallel. Otherwise a Write completion
87   * and Send completion running at the same time could release pages that
88   * are still DMA-mapped.
89   *
90   * Error Handling
91   *
92   * - If the Send WR is posted successfully, it will either complete
93   *   successfully, or get flushed. Either way, the Send completion
94   *   handler releases the Reply's pages.
95   * - If the Send WR cannot be posted, the forward path releases
96   *   the Reply's pages.
97   *
98   * This handles the case, without the use of page reference counting,
99   * where two different Write segments send portions of the same page.
100   */
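/* In outline, the mechanism described above (all function names refer
 * to code in this file):
 *
 *	svc_rdma_sendto()
 *	   posts the Write WRs for chunk payloads, then posts the Send
 *	   WR carrying the transport header and inline message, then
 *	   returns; ownership of the Reply's pages has moved to the
 *	   send ctxt.
 *
 *	Write completion:
 *	   DMA-unmaps the pages of its Write segment; releases no pages.
 *
 *	svc_rdma_wc_send() (Send completion):
 *	   runs after all Write completions; releases the Reply's pages.
 */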
101  
102  #include <linux/spinlock.h>
103  #include <asm/unaligned.h>
104  
105  #include <rdma/ib_verbs.h>
106  #include <rdma/rdma_cm.h>
107  
108  #include <linux/sunrpc/debug.h>
109  #include <linux/sunrpc/svc_rdma.h>
110  
111  #include "xprt_rdma.h"
112  #include <trace/events/rpcrdma.h>
113  
114  static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);
115  
116  static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
117  				   struct rpc_rdma_cid *cid)
118  {
119  	cid->ci_queue_id = rdma->sc_sq_cq->res.id;
120  	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
121  }
122  
123  static struct svc_rdma_send_ctxt *
124  svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
125  {
126  	int node = ibdev_to_node(rdma->sc_cm_id->device);
127  	struct svc_rdma_send_ctxt *ctxt;
128  	dma_addr_t addr;
129  	void *buffer;
130  	int i;
131  
132  	ctxt = kmalloc_node(struct_size(ctxt, sc_sges, rdma->sc_max_send_sges),
133  			    GFP_KERNEL, node);
134  	if (!ctxt)
135  		goto fail0;
136  	buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node);
137  	if (!buffer)
138  		goto fail1;
139  	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
140  				 rdma->sc_max_req_size, DMA_TO_DEVICE);
141  	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
142  		goto fail2;
143  
144  	svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);
145  
146  	ctxt->sc_send_wr.next = NULL;
147  	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
148  	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
149  	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
150  	ctxt->sc_cqe.done = svc_rdma_wc_send;
151  	ctxt->sc_xprt_buf = buffer;
152  	xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
153  		     rdma->sc_max_req_size);
154  	ctxt->sc_sges[0].addr = addr;
155  
156  	for (i = 0; i < rdma->sc_max_send_sges; i++)
157  		ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
158  	return ctxt;
159  
160  fail2:
161  	kfree(buffer);
162  fail1:
163  	kfree(ctxt);
164  fail0:
165  	return NULL;
166  }
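/* Note that the transport header buffer is DMA-mapped just once,
 * above, and stays mapped for the lifetime of the ctxt (see
 * svc_rdma_send_ctxts_destroy() below). Per send, only
 * sc_sges[0].length changes; svc_rdma_send() syncs the buffer
 * for the device before posting.
 */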
167  
168  /**
169   * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
170   * @rdma: svcxprt_rdma being torn down
171   *
172   */
173  void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
174  {
175  	struct svc_rdma_send_ctxt *ctxt;
176  	struct llist_node *node;
177  
178  	while ((node = llist_del_first(&rdma->sc_send_ctxts)) != NULL) {
179  		ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
180  		ib_dma_unmap_single(rdma->sc_pd->device,
181  				    ctxt->sc_sges[0].addr,
182  				    rdma->sc_max_req_size,
183  				    DMA_TO_DEVICE);
184  		kfree(ctxt->sc_xprt_buf);
185  		kfree(ctxt);
186  	}
187  }
188  
189  /**
190   * svc_rdma_send_ctxt_get - Get a free send_ctxt
191   * @rdma: controlling svcxprt_rdma
192   *
193   * Returns a ready-to-use send_ctxt, or NULL if none are
194   * available and a fresh one cannot be allocated.
195   */
196  struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
197  {
198  	struct svc_rdma_send_ctxt *ctxt;
199  	struct llist_node *node;
200  
201  	spin_lock(&rdma->sc_send_lock);
202  	node = llist_del_first(&rdma->sc_send_ctxts);
203  	if (!node)
204  		goto out_empty;
205  	ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
206  	spin_unlock(&rdma->sc_send_lock);
207  
208  out:
209  	rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
210  	xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf,
211  			ctxt->sc_xprt_buf, NULL);
212  
213  	ctxt->sc_send_wr.num_sge = 0;
214  	ctxt->sc_cur_sge_no = 0;
215  	ctxt->sc_page_count = 0;
216  	return ctxt;
217  
218  out_empty:
219  	spin_unlock(&rdma->sc_send_lock);
220  	ctxt = svc_rdma_send_ctxt_alloc(rdma);
221  	if (!ctxt)
222  		return NULL;
223  	goto out;
224  }
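/* For illustration, the typical calling pattern (cf. svc_rdma_sendto()
 * below): a ctxt is either consumed by a successful svc_rdma_send(),
 * in which case Send completion puts it, or returned explicitly:
 *
 *	sctxt = svc_rdma_send_ctxt_get(rdma);
 *	if (!sctxt)
 *		return -ENOMEM;
 *	... encode transport header via sctxt->sc_stream ...
 *	ret = svc_rdma_send(rdma, sctxt);
 *	if (ret < 0)
 *		svc_rdma_send_ctxt_put(rdma, sctxt);
 */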
225  
226  /**
227   * svc_rdma_send_ctxt_put - Return send_ctxt to free list
228   * @rdma: controlling svcxprt_rdma
229   * @ctxt: object to return to the free list
230   *
231   * Pages left in sc_pages are DMA unmapped and released.
232   */
233  void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
234  			    struct svc_rdma_send_ctxt *ctxt)
235  {
236  	struct ib_device *device = rdma->sc_cm_id->device;
237  	unsigned int i;
238  
239  	if (ctxt->sc_page_count)
240  		release_pages(ctxt->sc_pages, ctxt->sc_page_count);
241  
242  	/* The first SGE contains the transport header, which
243  	 * remains mapped until @ctxt is destroyed.
244  	 */
245  	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
246  		ib_dma_unmap_page(device,
247  				  ctxt->sc_sges[i].addr,
248  				  ctxt->sc_sges[i].length,
249  				  DMA_TO_DEVICE);
250  		trace_svcrdma_dma_unmap_page(rdma,
251  					     ctxt->sc_sges[i].addr,
252  					     ctxt->sc_sges[i].length);
253  	}
254  
255  	llist_add(&ctxt->sc_node, &rdma->sc_send_ctxts);
256  }
257  
258  /**
259   * svc_rdma_wake_send_waiters - manage Send Queue accounting
260   * @rdma: controlling transport
261   * @avail: Number of additional SQEs that are now available
262   *
263   */
264  void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail)
265  {
266  	atomic_add(avail, &rdma->sc_sq_avail);
267  	smp_mb__after_atomic();
268  	if (unlikely(waitqueue_active(&rdma->sc_send_wait)))
269  		wake_up(&rdma->sc_send_wait);
270  }
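/* Note: this accounting pairs with the atomic_dec_return() in
 * svc_rdma_send() below. Completion handlers (such as
 * svc_rdma_wc_send()) return their SQEs here, waking any sender
 * sleeping on sc_send_wait.
 */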
271  
272  /**
273   * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
274   * @cq: Completion Queue context
275   * @wc: Work Completion object
276   *
277   * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
278   * the Send completion handler could be running.
279   */
280  static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
281  {
282  	struct svcxprt_rdma *rdma = cq->cq_context;
283  	struct ib_cqe *cqe = wc->wr_cqe;
284  	struct svc_rdma_send_ctxt *ctxt =
285  		container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
286  
287  	svc_rdma_wake_send_waiters(rdma, 1);
288  
289  	if (unlikely(wc->status != IB_WC_SUCCESS))
290  		goto flushed;
291  
292  	trace_svcrdma_wc_send(wc, &ctxt->sc_cid);
293  	svc_rdma_send_ctxt_put(rdma, ctxt);
294  	return;
295  
296  flushed:
297  	if (wc->status != IB_WC_WR_FLUSH_ERR)
298  		trace_svcrdma_wc_send_err(wc, &ctxt->sc_cid);
299  	else
300  		trace_svcrdma_wc_send_flush(wc, &ctxt->sc_cid);
301  	svc_rdma_send_ctxt_put(rdma, ctxt);
302  	svc_xprt_deferred_close(&rdma->sc_xprt);
303  }
304  
305  /**
306   * svc_rdma_send - Post a single Send WR
307   * @rdma: transport on which to post the WR
308   * @ctxt: send ctxt with a Send WR ready to post
309   *
310   * Returns zero if the Send WR was posted successfully. Otherwise, a
311   * negative errno is returned.
312   */
313  int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
314  {
315  	struct ib_send_wr *wr = &ctxt->sc_send_wr;
316  	int ret;
317  
318  	might_sleep();
319  
320  	/* Sync the transport header buffer */
321  	ib_dma_sync_single_for_device(rdma->sc_pd->device,
322  				      wr->sg_list[0].addr,
323  				      wr->sg_list[0].length,
324  				      DMA_TO_DEVICE);
325  
326  	/* If the SQ is full, wait until an SQ entry is available */
327  	while (1) {
328  		if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
329  			percpu_counter_inc(&svcrdma_stat_sq_starve);
330  			trace_svcrdma_sq_full(rdma);
331  			atomic_inc(&rdma->sc_sq_avail);
332  			wait_event(rdma->sc_send_wait,
333  				   atomic_read(&rdma->sc_sq_avail) > 1);
334  			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
335  				return -ENOTCONN;
336  			trace_svcrdma_sq_retry(rdma);
337  			continue;
338  		}
339  
340  		trace_svcrdma_post_send(ctxt);
341  		ret = ib_post_send(rdma->sc_qp, wr, NULL);
342  		if (ret)
343  			break;
344  		return 0;
345  	}
346  
347  	trace_svcrdma_sq_post_err(rdma, ret);
348  	svc_xprt_deferred_close(&rdma->sc_xprt);
349  	wake_up(&rdma->sc_send_wait);
350  	return ret;
351  }
352  
353  /**
354   * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list
355   * @sctxt: Send context for the RPC Reply
356   *
357   * Return values:
358   *   On success, returns length in bytes of the Reply XDR buffer
359   *   that was consumed by the Reply Read list
360   *   %-EMSGSIZE on XDR buffer overflow
361   */
362  static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt)
363  {
364  	/* RPC-over-RDMA version 1 replies never have a Read list. */
365  	return xdr_stream_encode_item_absent(&sctxt->sc_stream);
366  }
367  
368  /**
369   * svc_rdma_encode_write_segment - Encode one Write segment
370   * @sctxt: Send context for the RPC Reply
371   * @chunk: Write chunk to push
372   * @remaining: remaining bytes of the payload left in the Write chunk
373   * @segno: which segment in the chunk
374   *
375   * Return values:
376   *   On success, returns length in bytes of the Reply XDR buffer
377   *   that was consumed by the Write segment, and updates @remaining
378   *   %-EMSGSIZE on XDR buffer overflow
379   */
380  static ssize_t svc_rdma_encode_write_segment(struct svc_rdma_send_ctxt *sctxt,
381  					     const struct svc_rdma_chunk *chunk,
382  					     u32 *remaining, unsigned int segno)
383  {
384  	const struct svc_rdma_segment *segment = &chunk->ch_segments[segno];
385  	const size_t len = rpcrdma_segment_maxsz * sizeof(__be32);
386  	u32 length;
387  	__be32 *p;
388  
389  	p = xdr_reserve_space(&sctxt->sc_stream, len);
390  	if (!p)
391  		return -EMSGSIZE;
392  
393  	length = min_t(u32, *remaining, segment->rs_length);
394  	*remaining -= length;
395  	xdr_encode_rdma_segment(p, segment->rs_handle, length,
396  				segment->rs_offset);
397  	trace_svcrdma_encode_wseg(sctxt, segno, segment->rs_handle, length,
398  				  segment->rs_offset);
399  	return len;
400  }
401  
402  /**
403   * svc_rdma_encode_write_chunk - Encode one Write chunk
404   * @sctxt: Send context for the RPC Reply
405   * @chunk: Write chunk to push
406   *
407   * Copy a Write chunk from the Call transport header to the
408   * Reply transport header. Update each segment's length field
409   * to reflect the number of bytes written in that segment.
410   *
411   * Return values:
412   *   On success, returns length in bytes of the Reply XDR buffer
413   *   that was consumed by the Write chunk
414   *   %-EMSGSIZE on XDR buffer overflow
415   */
416  static ssize_t svc_rdma_encode_write_chunk(struct svc_rdma_send_ctxt *sctxt,
417  					   const struct svc_rdma_chunk *chunk)
418  {
419  	u32 remaining = chunk->ch_payload_length;
420  	unsigned int segno;
421  	ssize_t len, ret;
422  
423  	len = 0;
424  	ret = xdr_stream_encode_item_present(&sctxt->sc_stream);
425  	if (ret < 0)
426  		return ret;
427  	len += ret;
428  
429  	ret = xdr_stream_encode_u32(&sctxt->sc_stream, chunk->ch_segcount);
430  	if (ret < 0)
431  		return ret;
432  	len += ret;
433  
434  	for (segno = 0; segno < chunk->ch_segcount; segno++) {
435  		ret = svc_rdma_encode_write_segment(sctxt, chunk, &remaining, segno);
436  		if (ret < 0)
437  			return ret;
438  		len += ret;
439  	}
440  
441  	return len;
442  }
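/* The XDR that the two helpers above produce for one Write chunk:
 *
 *	1			- chunk discriminator ("present")
 *	N			- segment count (ch_segcount)
 *	N x { handle,		- RDMA segment: 32-bit R_key,
 *	      length,		-   32-bit byte count (bytes written),
 *	      offset }		-   64-bit remote offset
 */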
443  
444  /**
445   * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list
446   * @rctxt: Reply context with information about the RPC Call
447   * @sctxt: Send context for the RPC Reply
448   *
449   * Return values:
450   *   On success, returns length in bytes of the Reply XDR buffer
451   *   that was consumed by the Reply's Write list
452   *   %-EMSGSIZE on XDR buffer overflow
453   */
454  static ssize_t svc_rdma_encode_write_list(struct svc_rdma_recv_ctxt *rctxt,
455  					  struct svc_rdma_send_ctxt *sctxt)
456  {
457  	struct svc_rdma_chunk *chunk;
458  	ssize_t len, ret;
459  
460  	len = 0;
461  	pcl_for_each_chunk(chunk, &rctxt->rc_write_pcl) {
462  		ret = svc_rdma_encode_write_chunk(sctxt, chunk);
463  		if (ret < 0)
464  			return ret;
465  		len += ret;
466  	}
467  
468  	/* Terminate the Write list */
469  	ret = xdr_stream_encode_item_absent(&sctxt->sc_stream);
470  	if (ret < 0)
471  		return ret;
472  
473  	return len + ret;
474  }
475  
476  /**
477   * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk
478   * @rctxt: Reply context with information about the RPC Call
479   * @sctxt: Send context for the RPC Reply
480   * @length: size in bytes of the payload in the Reply chunk
481   *
482   * Return values:
483   *   On success, returns length in bytes of the Reply XDR buffer
484   *   that was consumed by the Reply's Reply chunk
485   *   %-EMSGSIZE on XDR buffer overflow
486   *   %-E2BIG if the RPC message is larger than the Reply chunk
487   */
488  static ssize_t
489  svc_rdma_encode_reply_chunk(struct svc_rdma_recv_ctxt *rctxt,
490  			    struct svc_rdma_send_ctxt *sctxt,
491  			    unsigned int length)
492  {
493  	struct svc_rdma_chunk *chunk;
494  
495  	if (pcl_is_empty(&rctxt->rc_reply_pcl))
496  		return xdr_stream_encode_item_absent(&sctxt->sc_stream);
497  
498  	chunk = pcl_first_chunk(&rctxt->rc_reply_pcl);
499  	if (length > chunk->ch_length)
500  		return -E2BIG;
501  
502  	chunk->ch_payload_length = length;
503  	return svc_rdma_encode_write_chunk(sctxt, chunk);
504  }
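/* A Reply chunk is encoded exactly like a Write chunk, after checking
 * that the RPC message fits in the chunk the client provisioned
 * (ch_length); otherwise -E2BIG is returned and the Reply cannot be
 * conveyed in that chunk.
 */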
505  
506  struct svc_rdma_map_data {
507  	struct svcxprt_rdma		*md_rdma;
508  	struct svc_rdma_send_ctxt	*md_ctxt;
509  };
510  
511  /**
512   * svc_rdma_page_dma_map - DMA map one page
513   * @data: pointer to arguments
514   * @page: struct page to DMA map
515   * @offset: offset into the page
516   * @len: number of bytes to map
517   *
518   * Returns:
519   *   %0 if DMA mapping was successful
520   *   %-EIO if the page cannot be DMA mapped
521   */
522  static int svc_rdma_page_dma_map(void *data, struct page *page,
523  				 unsigned long offset, unsigned int len)
524  {
525  	struct svc_rdma_map_data *args = data;
526  	struct svcxprt_rdma *rdma = args->md_rdma;
527  	struct svc_rdma_send_ctxt *ctxt = args->md_ctxt;
528  	struct ib_device *dev = rdma->sc_cm_id->device;
529  	dma_addr_t dma_addr;
530  
531  	++ctxt->sc_cur_sge_no;
532  
533  	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
534  	if (ib_dma_mapping_error(dev, dma_addr))
535  		goto out_maperr;
536  
537  	trace_svcrdma_dma_map_page(rdma, dma_addr, len);
538  	ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
539  	ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
540  	ctxt->sc_send_wr.num_sge++;
541  	return 0;
542  
543  out_maperr:
544  	trace_svcrdma_dma_map_err(rdma, dma_addr, len);
545  	return -EIO;
546  }
547  
548  /**
549   * svc_rdma_iov_dma_map - DMA map an iovec
550   * @data: pointer to arguments
551   * @iov: kvec to DMA map
552   *
553   * ib_dma_map_page() is used here because svc_rdma_send_ctxt_put()
554   * handles DMA-unmap and it uses ib_dma_unmap_page() exclusively.
555   *
556   * Returns:
557   *   %0 if DMA mapping was successful
558   *   %-EIO if the iovec cannot be DMA mapped
559   */
560  static int svc_rdma_iov_dma_map(void *data, const struct kvec *iov)
561  {
562  	if (!iov->iov_len)
563  		return 0;
564  	return svc_rdma_page_dma_map(data, virt_to_page(iov->iov_base),
565  				     offset_in_page(iov->iov_base),
566  				     iov->iov_len);
567  }
568  
569  /**
570   * svc_rdma_xb_dma_map - DMA map all segments of an xdr_buf
571   * @xdr: xdr_buf containing portion of an RPC message to transmit
572   * @data: pointer to arguments
573   *
574   * Returns:
575   *   %0 if DMA mapping was successful
576   *   %-EIO if DMA mapping failed
577   *
578   * On failure, any DMA mappings that have been already done must be
579   * unmapped by the caller.
580   */
581  static int svc_rdma_xb_dma_map(const struct xdr_buf *xdr, void *data)
582  {
583  	unsigned int len, remaining;
584  	unsigned long pageoff;
585  	struct page **ppages;
586  	int ret;
587  
588  	ret = svc_rdma_iov_dma_map(data, &xdr->head[0]);
589  	if (ret < 0)
590  		return ret;
591  
592  	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
593  	pageoff = offset_in_page(xdr->page_base);
594  	remaining = xdr->page_len;
595  	while (remaining) {
596  		len = min_t(u32, PAGE_SIZE - pageoff, remaining);
597  
598  		ret = svc_rdma_page_dma_map(data, *ppages++, pageoff, len);
599  		if (ret < 0)
600  			return ret;
601  
602  		remaining -= len;
603  		pageoff = 0;
604  	}
605  
606  	ret = svc_rdma_iov_dma_map(data, &xdr->tail[0]);
607  	if (ret < 0)
608  		return ret;
609  
610  	return xdr->len;
611  }
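/* The walk above follows the standard tri-partite xdr_buf layout:
 * a head kvec, then xdr->page_len bytes starting at xdr->page_base
 * within xdr->pages[], then a tail kvec. Each part lands in its own
 * SGE(s); a page region spanning page boundaries gets one SGE per page.
 */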
612  
613  struct svc_rdma_pullup_data {
614  	u8		*pd_dest;
615  	unsigned int	pd_length;
616  	unsigned int	pd_num_sges;
617  };
618  
619  /**
620   * svc_rdma_xb_count_sges - Count how many SGEs will be needed
621   * @xdr: xdr_buf containing portion of an RPC message to transmit
622   * @data: pointer to arguments
623   *
624   * Returns:
625   *   Number of SGEs needed to Send the contents of @xdr inline
626   */
627  static int svc_rdma_xb_count_sges(const struct xdr_buf *xdr,
628  				  void *data)
629  {
630  	struct svc_rdma_pullup_data *args = data;
631  	unsigned int remaining;
632  	unsigned long offset;
633  
634  	if (xdr->head[0].iov_len)
635  		++args->pd_num_sges;
636  
637  	offset = offset_in_page(xdr->page_base);
638  	remaining = xdr->page_len;
639  	while (remaining) {
640  		++args->pd_num_sges;
641  		remaining -= min_t(u32, PAGE_SIZE - offset, remaining);
642  		offset = 0;
643  	}
644  
645  	if (xdr->tail[0].iov_len)
646  		++args->pd_num_sges;
647  
648  	args->pd_length += xdr->len;
649  	return 0;
650  }
651  
652  /**
653   * svc_rdma_pull_up_needed - Determine whether to use pull-up
654   * @rdma: controlling transport
655   * @sctxt: send_ctxt for the Send WR
656   * @rctxt: Write and Reply chunks provided by client
657   * @xdr: xdr_buf containing RPC message to transmit
658   *
659   * Returns:
660   *   %true if pull-up must be used
661   *   %false otherwise
662   */
663  static bool svc_rdma_pull_up_needed(const struct svcxprt_rdma *rdma,
664  				    const struct svc_rdma_send_ctxt *sctxt,
665  				    const struct svc_rdma_recv_ctxt *rctxt,
666  				    const struct xdr_buf *xdr)
667  {
668  	/* Resources needed for the transport header */
669  	struct svc_rdma_pullup_data args = {
670  		.pd_length	= sctxt->sc_hdrbuf.len,
671  		.pd_num_sges	= 1,
672  	};
673  	int ret;
674  
675  	ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
676  				      svc_rdma_xb_count_sges, &args);
677  	if (ret < 0)
678  		return false;
679  
680  	if (args.pd_length < RPCRDMA_PULLUP_THRESH)
681  		return true;
682  	return args.pd_num_sges >= rdma->sc_max_send_sges;
683  }
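/* Pull-up is chosen when either criterion above holds: the message is
 * small enough that copying it is cheaper than DMA-mapping its parts
 * (below RPCRDMA_PULLUP_THRESH), or the message would need more SGEs
 * than the device accepts in a single Send WR (sc_max_send_sges).
 */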
684  
685  /**
686   * svc_rdma_xb_linearize - Copy region of xdr_buf to flat buffer
687   * @xdr: xdr_buf containing portion of an RPC message to copy
688   * @data: pointer to arguments
689   *
690   * Returns:
691   *   Always zero.
692   */
693  static int svc_rdma_xb_linearize(const struct xdr_buf *xdr,
694  				 void *data)
695  {
696  	struct svc_rdma_pullup_data *args = data;
697  	unsigned int len, remaining;
698  	unsigned long pageoff;
699  	struct page **ppages;
700  
701  	if (xdr->head[0].iov_len) {
702  		memcpy(args->pd_dest, xdr->head[0].iov_base, xdr->head[0].iov_len);
703  		args->pd_dest += xdr->head[0].iov_len;
704  	}
705  
706  	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
707  	pageoff = offset_in_page(xdr->page_base);
708  	remaining = xdr->page_len;
709  	while (remaining) {
710  		len = min_t(u32, PAGE_SIZE - pageoff, remaining);
711  		memcpy(args->pd_dest, page_address(*ppages) + pageoff, len);
712  		remaining -= len;
713  		args->pd_dest += len;
714  		pageoff = 0;
715  		ppages++;
716  	}
717  
718  	if (xdr->tail[0].iov_len) {
719  		memcpy(args->pd_dest, xdr->tail[0].iov_base, xdr->tail[0].iov_len);
720  		args->pd_dest += xdr->tail[0].iov_len;
721  	}
722  
723  	args->pd_length += xdr->len;
724  	return 0;
725  }
726  
727  /**
728   * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
729   * @rdma: controlling transport
730   * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
731   * @rctxt: Write and Reply chunks provided by client
732   * @xdr: prepared xdr_buf containing RPC message
733   *
734   * The device is not capable of sending the reply directly.
735   * Assemble the elements of @xdr into the transport header buffer.
736   *
737   * Assumptions:
738   *  pull_up_needed has determined that @xdr will fit in the buffer.
739   *
740   * Returns:
741   *   %0 if pull-up was successful
742   *   %-EMSGSIZE if a buffer manipulation problem occurred
743   */
744  static int svc_rdma_pull_up_reply_msg(const struct svcxprt_rdma *rdma,
745  				      struct svc_rdma_send_ctxt *sctxt,
746  				      const struct svc_rdma_recv_ctxt *rctxt,
747  				      const struct xdr_buf *xdr)
748  {
749  	struct svc_rdma_pullup_data args = {
750  		.pd_dest	= sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len,
751  	};
752  	int ret;
753  
754  	ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
755  				      svc_rdma_xb_linearize, &args);
756  	if (ret < 0)
757  		return ret;
758  
759  	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len + args.pd_length;
760  	trace_svcrdma_send_pullup(sctxt, args.pd_length);
761  	return 0;
762  }
763  
764  /* svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
765   * @rdma: controlling transport
766   * @sctxt: send_ctxt for the Send WR
767   * @rctxt: Write and Reply chunks provided by client
768   * @xdr: prepared xdr_buf containing RPC message
769   *
770   * Returns:
771   *   %0 if DMA mapping was successful.
772   *   %-EMSGSIZE if a buffer manipulation problem occurred
773   *   %-EIO if DMA mapping failed
774   *
775   * The Send WR's num_sge field is set in all cases.
776   */
777  int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
778  			   struct svc_rdma_send_ctxt *sctxt,
779  			   const struct svc_rdma_recv_ctxt *rctxt,
780  			   const struct xdr_buf *xdr)
781  {
782  	struct svc_rdma_map_data args = {
783  		.md_rdma	= rdma,
784  		.md_ctxt	= sctxt,
785  	};
786  
787  	/* Set up the (persistently-mapped) transport header SGE. */
788  	sctxt->sc_send_wr.num_sge = 1;
789  	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
790  
791  	/* If there is a Reply chunk, nothing follows the transport
792  	 * header, and we're done here.
793  	 */
794  	if (!pcl_is_empty(&rctxt->rc_reply_pcl))
795  		return 0;
796  
797  	/* For pull-up, svc_rdma_send() will sync the transport header.
798  	 * No additional DMA mapping is necessary.
799  	 */
800  	if (svc_rdma_pull_up_needed(rdma, sctxt, rctxt, xdr))
801  		return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr);
802  
803  	return pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
804  				       svc_rdma_xb_dma_map, &args);
805  }
806  
807  /* The svc_rqst and all resources it owns are released as soon as
808   * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
809   * so they are released by the Send completion handler.
810   */
811  static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
812  				   struct svc_rdma_send_ctxt *ctxt)
813  {
814  	int i, pages = rqstp->rq_next_page - rqstp->rq_respages;
815  
816  	ctxt->sc_page_count += pages;
817  	for (i = 0; i < pages; i++) {
818  		ctxt->sc_pages[i] = rqstp->rq_respages[i];
819  		rqstp->rq_respages[i] = NULL;
820  	}
821  
822  	/* Prevent svc_xprt_release from releasing pages in rq_pages */
823  	rqstp->rq_next_page = rqstp->rq_respages;
824  }
825  
826  /* Prepare the portion of the RPC Reply that will be transmitted
827   * via RDMA Send. The RPC-over-RDMA transport header is prepared
828   * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
829   *
830   * Depending on whether a Write list or Reply chunk is present,
831   * the server may send all, a portion of, or none of the xdr_buf.
832   * In the latter case, only the transport header (sc_sges[0]) is
833   * transmitted.
834   *
835   * RDMA Send is the last step of transmitting an RPC reply. Pages
836   * involved in the earlier RDMA Writes are here transferred out
837   * of the rqstp and into the sctxt's page array. These pages are
838   * DMA unmapped by each Write completion, but the subsequent Send
839   * completion finally releases these pages.
840   *
841   * Assumptions:
842   * - The Reply's transport header will never be larger than a page.
843   */
844  static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
845  				   struct svc_rdma_send_ctxt *sctxt,
846  				   const struct svc_rdma_recv_ctxt *rctxt,
847  				   struct svc_rqst *rqstp)
848  {
849  	int ret;
850  
851  	ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqstp->rq_res);
852  	if (ret < 0)
853  		return ret;
854  
855  	svc_rdma_save_io_pages(rqstp, sctxt);
856  
857  	if (rctxt->rc_inv_rkey) {
858  		sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
859  		sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
860  	} else {
861  		sctxt->sc_send_wr.opcode = IB_WR_SEND;
862  	}
863  
864  	return svc_rdma_send(rdma, sctxt);
865  }
866  
867  /**
868   * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response
869   * @rdma: controlling transport context
870   * @sctxt: Send context for the response
871   * @rctxt: Receive context for incoming bad message
872   * @status: negative errno indicating error that occurred
873   *
874   * Given the client-provided Read, Write, and Reply chunks, the
875   * server was not able to parse the Call or form a complete Reply.
876   * Return an RDMA_ERROR message so the client can retire the RPC
877   * transaction.
878   *
879   * The caller does not have to release @sctxt. It is released by
880   * Send completion, or by this function on error.
881   */
882  void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
883  			     struct svc_rdma_send_ctxt *sctxt,
884  			     struct svc_rdma_recv_ctxt *rctxt,
885  			     int status)
886  {
887  	__be32 *rdma_argp = rctxt->rc_recv_buf;
888  	__be32 *p;
889  
890  	rpcrdma_set_xdrlen(&sctxt->sc_hdrbuf, 0);
891  	xdr_init_encode(&sctxt->sc_stream, &sctxt->sc_hdrbuf,
892  			sctxt->sc_xprt_buf, NULL);
893  
894  	p = xdr_reserve_space(&sctxt->sc_stream,
895  			      rpcrdma_fixed_maxsz * sizeof(*p));
896  	if (!p)
897  		goto put_ctxt;
898  
899  	*p++ = *rdma_argp;
900  	*p++ = *(rdma_argp + 1);
901  	*p++ = rdma->sc_fc_credits;
902  	*p = rdma_error;
903  
904  	switch (status) {
905  	case -EPROTONOSUPPORT:
906  		p = xdr_reserve_space(&sctxt->sc_stream, 3 * sizeof(*p));
907  		if (!p)
908  			goto put_ctxt;
909  
910  		*p++ = err_vers;
911  		*p++ = rpcrdma_version;
912  		*p = rpcrdma_version;
913  		trace_svcrdma_err_vers(*rdma_argp);
914  		break;
915  	default:
916  		p = xdr_reserve_space(&sctxt->sc_stream, sizeof(*p));
917  		if (!p)
918  			goto put_ctxt;
919  
920  		*p = err_chunk;
921  		trace_svcrdma_err_chunk(*rdma_argp);
922  	}
923  
924  	/* Remote Invalidation is skipped for simplicity. */
925  	sctxt->sc_send_wr.num_sge = 1;
926  	sctxt->sc_send_wr.opcode = IB_WR_SEND;
927  	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
928  	if (svc_rdma_send(rdma, sctxt))
929  		goto put_ctxt;
930  	return;
931  
932  put_ctxt:
933  	svc_rdma_send_ctxt_put(rdma, sctxt);
934  }
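/* The two RDMA_ERROR formats encoded above, on the wire:
 *
 *	ERR_VERS:  xid, vers, credits, RDMA_ERROR, ERR_VERS,
 *		   lowest and highest supported version (both
 *		   rpcrdma_version here)
 *	ERR_CHUNK: xid, vers, credits, RDMA_ERROR, ERR_CHUNK
 */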
935  
936  /**
937   * svc_rdma_sendto - Transmit an RPC reply
938   * @rqstp: processed RPC request, reply XDR already in ::rq_res
939   *
940   * Any resources still associated with @rqstp are released upon return.
941   * If no reply message was possible, the connection is closed.
942   *
943   * Returns:
944   *	%0 if an RPC reply has been successfully posted,
945   *	%-ENOMEM if a resource shortage occurred (connection is lost),
946   *	%-ENOTCONN if posting failed (connection is lost).
947   */
948  int svc_rdma_sendto(struct svc_rqst *rqstp)
949  {
950  	struct svc_xprt *xprt = rqstp->rq_xprt;
951  	struct svcxprt_rdma *rdma =
952  		container_of(xprt, struct svcxprt_rdma, sc_xprt);
953  	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
954  	__be32 *rdma_argp = rctxt->rc_recv_buf;
955  	struct svc_rdma_send_ctxt *sctxt;
956  	unsigned int rc_size;
957  	__be32 *p;
958  	int ret;
959  
960  	ret = -ENOTCONN;
961  	if (svc_xprt_is_dead(xprt))
962  		goto drop_connection;
963  
964  	ret = -ENOMEM;
965  	sctxt = svc_rdma_send_ctxt_get(rdma);
966  	if (!sctxt)
967  		goto drop_connection;
968  
969  	ret = -EMSGSIZE;
970  	p = xdr_reserve_space(&sctxt->sc_stream,
971  			      rpcrdma_fixed_maxsz * sizeof(*p));
972  	if (!p)
973  		goto put_ctxt;
974  
975  	ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res);
976  	if (ret < 0)
977  		goto reply_chunk;
978  	rc_size = ret;
979  
980  	*p++ = *rdma_argp;
981  	*p++ = *(rdma_argp + 1);
982  	*p++ = rdma->sc_fc_credits;
983  	*p = pcl_is_empty(&rctxt->rc_reply_pcl) ? rdma_msg : rdma_nomsg;
984  
985  	ret = svc_rdma_encode_read_list(sctxt);
986  	if (ret < 0)
987  		goto put_ctxt;
988  	ret = svc_rdma_encode_write_list(rctxt, sctxt);
989  	if (ret < 0)
990  		goto put_ctxt;
991  	ret = svc_rdma_encode_reply_chunk(rctxt, sctxt, rc_size);
992  	if (ret < 0)
993  		goto put_ctxt;
994  
995  	ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
996  	if (ret < 0)
997  		goto put_ctxt;
998  	return 0;
999  
1000  reply_chunk:
1001  	if (ret != -E2BIG && ret != -EINVAL)
1002  		goto put_ctxt;
1003  
1004  	/* Send completion releases payload pages that were part
1005  	 * of previously posted RDMA Writes.
1006  	 */
1007  	svc_rdma_save_io_pages(rqstp, sctxt);
1008  	svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret);
1009  	return 0;
1010  
1011  put_ctxt:
1012  	svc_rdma_send_ctxt_put(rdma, sctxt);
1013  drop_connection:
1014  	trace_svcrdma_send_err(rqstp, ret);
1015  	svc_xprt_deferred_close(&rdma->sc_xprt);
1016  	return -ENOTCONN;
1017  }
1018  
1019  /**
1020   * svc_rdma_result_payload - special processing for a result payload
1021   * @rqstp: svc_rqst to operate on
1022   * @offset: payload's byte offset in @xdr
1023   * @length: size of payload, in bytes
1024   *
1025   * Return values:
1026   *   %0 if successful or nothing needed to be done
1027   *   %-EMSGSIZE on XDR buffer overflow
1028   *   %-E2BIG if the payload was larger than the Write chunk
1029   *   %-EINVAL if client provided too many segments
1030   *   %-ENOMEM if rdma_rw context pool was exhausted
1031   *   %-ENOTCONN if posting failed (connection is lost)
1032   *   %-EIO if rdma_rw initialization failed (DMA mapping, etc)
1033   */
1034  int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset,
1035  			    unsigned int length)
1036  {
1037  	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
1038  	struct svc_rdma_chunk *chunk;
1039  	struct svcxprt_rdma *rdma;
1040  	struct xdr_buf subbuf;
1041  	int ret;
1042  
1043  	chunk = rctxt->rc_cur_result_payload;
1044  	if (!length || !chunk)
1045  		return 0;
1046  	rctxt->rc_cur_result_payload =
1047  		pcl_next_chunk(&rctxt->rc_write_pcl, chunk);
1048  	if (length > chunk->ch_length)
1049  		return -E2BIG;
1050  
1051  	chunk->ch_position = offset;
1052  	chunk->ch_payload_length = length;
1053  
1054  	if (xdr_buf_subsegment(&rqstp->rq_res, &subbuf, offset, length))
1055  		return -EMSGSIZE;
1056  
1057  	rdma = container_of(rqstp->rq_xprt, struct svcxprt_rdma, sc_xprt);
1058  	ret = svc_rdma_send_write_chunk(rdma, chunk, &subbuf);
1059  	if (ret < 0)
1060  		return ret;
1061  	return 0;
1062  }
1063