Lines Matching +full:grant +full:- +full:dma

1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (c) 2014-2020, Oracle and/or its affiliates.
4 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
9 * COPYING in the main directory of this source tree, or the BSD-type
57 /* Returns size of largest RPC-over-RDMA header in a Call message
59 * The largest Call header contains a full-size Read list and a
80 /* Returns size of largest RPC-over-RDMA header in a Reply message
101 * rpcrdma_set_max_header_sizes - Initialize inline payload sizes
110 unsigned int maxsegs = ep->re_max_rdma_segs; in rpcrdma_set_max_header_sizes()
112 ep->re_max_inline_send = in rpcrdma_set_max_header_sizes()
113 ep->re_inline_send - rpcrdma_max_call_header_size(maxsegs); in rpcrdma_set_max_header_sizes()
114 ep->re_max_inline_recv = in rpcrdma_set_max_header_sizes()
115 ep->re_inline_recv - rpcrdma_max_reply_header_size(maxsegs); in rpcrdma_set_max_header_sizes()
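The two assignments above turn the negotiated inline thresholds into usable payload budgets by reserving room for the worst-case transport header. A minimal worked example with assumed numbers; the real maximums come from rpcrdma_max_call_header_size() and rpcrdma_max_reply_header_size(), which also budget for full chunk lists:

#include <stdio.h>

int main(void)
{
	/* Assumed values, for illustration only. */
	unsigned int inline_send  = 4096;	/* negotiated inline threshold */
	unsigned int hdr_min      = 28;		/* xid, vers, credits, proc + 3 empty chunk lists */
	unsigned int max_call_hdr = 172;	/* hypothetical worst-case Call header for this maxsegs */

	printf("best-case inline payload: %u bytes\n", inline_send - hdr_min);
	printf("budgeted inline payload:  %u bytes\n", inline_send - max_call_hdr);
	return 0;
}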
129 struct xdr_buf *xdr = &rqst->rq_snd_buf; in rpcrdma_args_inline()
130 struct rpcrdma_ep *ep = r_xprt->rx_ep; in rpcrdma_args_inline()
133 if (xdr->len > ep->re_max_inline_send) in rpcrdma_args_inline()
136 if (xdr->page_len) { in rpcrdma_args_inline()
137 remaining = xdr->page_len; in rpcrdma_args_inline()
138 offset = offset_in_page(xdr->page_base); in rpcrdma_args_inline()
141 remaining -= min_t(unsigned int, in rpcrdma_args_inline()
142 PAGE_SIZE - offset, remaining); in rpcrdma_args_inline()
144 if (++count > ep->re_attr.cap.max_send_sge) in rpcrdma_args_inline()
161 return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep->re_max_inline_recv; in rpcrdma_results_inline()
165 * size of the non-payload part of the RPC Reply is larger than
172 const struct xdr_buf *buf = &rqst->rq_rcv_buf; in rpcrdma_nonpayload_inline()
174 return (buf->head[0].iov_len + buf->tail[0].iov_len) < in rpcrdma_nonpayload_inline()
175 r_xprt->rx_ep->re_max_inline_recv; in rpcrdma_nonpayload_inline()
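Together, rpcrdma_args_inline(), rpcrdma_results_inline() and rpcrdma_nonpayload_inline() decide whether a Call or Reply can travel inside the inline buffers at all. A standalone sketch of the Call-side check, with assumed limits standing in for re_max_inline_send and the device's max_send_sge:

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE 4096u

/* Assumed, illustrative limits; the kernel reads both from the connection. */
#define MAX_INLINE_SEND 4096u
#define MAX_SEND_SGE    16u

/* Inline is allowed only if the marshaled arguments fit under the inline
 * threshold AND the payload pages do not need more scatter/gather entries
 * than the device offers.
 */
static bool args_inline(unsigned int total_len, unsigned int page_len,
			unsigned int page_base)
{
	unsigned int sges = 3;	/* assumed fixed SGEs: header, head, tail */

	if (total_len > MAX_INLINE_SEND)
		return false;
	while (page_len) {
		unsigned int chunk = PAGE_SIZE - page_base;

		if (chunk > page_len)
			chunk = page_len;
		page_len -= chunk;
		page_base = 0;
		if (++sges > MAX_SEND_SGE)
			return false;
	}
	return true;
}

int main(void)
{
	printf("512 B args inline?  %d\n", args_inline(512, 0, 0));	  /* 1 */
	printf("8 KiB args inline?  %d\n", args_inline(8192, 7000, 128)); /* 0 */
	return 0;
}

With these assumed limits a 512-byte request stays inline while an 8 KiB request falls back to a Read chunk, mirroring the rtype selection seen later in rpcrdma_marshal_req().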
189 len = buf->page_len; in rpcrdma_alloc_sparse_pages()
190 ppages = buf->pages + (buf->page_base >> PAGE_SHIFT); in rpcrdma_alloc_sparse_pages()
195 return -ENOBUFS; in rpcrdma_alloc_sparse_pages()
197 len -= PAGE_SIZE; in rpcrdma_alloc_sparse_pages()
212 seg->mr_page = virt_to_page(vec->iov_base); in rpcrdma_convert_kvec()
213 seg->mr_offset = offset_in_page(vec->iov_base); in rpcrdma_convert_kvec()
214 seg->mr_len = vec->iov_len; in rpcrdma_convert_kvec()
238 seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n); in rpcrdma_convert_iovs()
240 len = xdrbuf->page_len; in rpcrdma_convert_iovs()
241 ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT); in rpcrdma_convert_iovs()
242 page_base = offset_in_page(xdrbuf->page_base); in rpcrdma_convert_iovs()
244 seg->mr_page = *ppages; in rpcrdma_convert_iovs()
245 seg->mr_offset = page_base; in rpcrdma_convert_iovs()
246 seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len); in rpcrdma_convert_iovs()
247 len -= seg->mr_len; in rpcrdma_convert_iovs()
257 if (xdrbuf->tail[0].iov_len) in rpcrdma_convert_iovs()
258 rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n); in rpcrdma_convert_iovs()
262 return -EIO; in rpcrdma_convert_iovs()
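rpcrdma_convert_iovs() walks the head kvec, the page list, and optionally the tail kvec of an xdr_buf and turns them into MR segments for registration. A simplified, self-contained version of just the page-list walk, using a stand-in struct seg instead of the kernel's rpcrdma_mr_seg:

#include <stddef.h>

#define PAGE_SIZE 4096u

/* Stand-in for struct rpcrdma_mr_seg: one physically contiguous piece. */
struct seg {
	void        *page;	/* backing page */
	unsigned int offset;	/* offset into that page */
	unsigned int len;	/* bytes in this segment */
};

/* Split @page_len bytes, starting @page_base into the first page, into
 * per-page segments.  Returns the number of segments produced.
 */
static unsigned int convert_pages(void **pages, unsigned int page_base,
				  unsigned int page_len, struct seg *seg,
				  unsigned int max_segs)
{
	unsigned int n = 0;

	while (page_len && n < max_segs) {
		unsigned int len = PAGE_SIZE - page_base;

		if (len > page_len)
			len = page_len;
		seg[n].page = *pages++;
		seg[n].offset = page_base;
		seg[n].len = len;
		page_len -= len;
		page_base = 0;	/* only the first page can start mid-page */
		++n;
	}
	return n;
}

int main(void)
{
	char a[4096], b[4096];
	void *pages[] = { a, b };
	struct seg segs[4];

	/* 100 bytes at the end of page a, then 1500 bytes at the start of b. */
	unsigned int n = convert_pages(pages, 4096 - 100, 1600, segs, 4);

	return n == 2 ? 0 : 1;
}

The first segment may begin mid-page (page_base), but every following page contributes from offset zero, which is why the offset is reset inside the loop both here and in the kernel code above.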
273 return -EMSGSIZE; in encode_rdma_segment()
275 xdr_encode_rdma_segment(p, mr->mr_handle, mr->mr_length, mr->mr_offset); in encode_rdma_segment()
287 return -EMSGSIZE; in encode_read_segment()
290 xdr_encode_read_segment(p, position, mr->mr_handle, mr->mr_length, in encode_read_segment()
291 mr->mr_offset); in encode_read_segment()
301 *mr = rpcrdma_mr_pop(&req->rl_free_mrs); in rpcrdma_mr_prepare()
306 (*mr)->mr_req = req; in rpcrdma_mr_prepare()
309 rpcrdma_mr_push(*mr, &req->rl_registered); in rpcrdma_mr_prepare()
310 return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr); in rpcrdma_mr_prepare()
314 xprt_wait_for_buffer_space(&r_xprt->rx_xprt); in rpcrdma_mr_prepare()
316 return ERR_PTR(-EAGAIN); in rpcrdma_mr_prepare()
322 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
326 * 1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
338 struct xdr_stream *xdr = &req->rl_stream; in rpcrdma_encode_read_list()
347 pos = rqst->rq_snd_buf.head[0].iov_len; in rpcrdma_encode_read_list()
350 seg = req->rl_segments; in rpcrdma_encode_read_list()
351 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos, in rpcrdma_encode_read_list()
362 return -EMSGSIZE; in rpcrdma_encode_read_list()
364 trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr, nsegs); in rpcrdma_encode_read_list()
365 r_xprt->rx_stats.read_chunk_count++; in rpcrdma_encode_read_list()
366 nsegs -= mr->mr_nents; in rpcrdma_encode_read_list()
371 return -EMSGSIZE; in rpcrdma_encode_read_list()
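The "1 - PHLOO - ... - 0" key above is the wire form of the Read list: each read segment is preceded by a one-word "item present" discriminator and carries a Position plus the Handle/Length/Offset triple, and a zero word closes the list (RFC 8166). A sketch that lays a Read list out as 32-bit XDR words; byte-order conversion is omitted, and the segments of a single Read chunk would all share the same position:

#include <stdint.h>
#include <stddef.h>

/* One RPC-over-RDMA read segment: Position, Handle, Length, Offset. */
struct read_seg {
	uint32_t position;	/* byte offset of the chunk in the XDR stream */
	uint32_t handle;	/* STag/rkey the server uses for RDMA Read */
	uint32_t length;	/* length of this segment */
	uint64_t offset;	/* offset into the registered memory */
};

/* Emit a Read list: per segment a discriminator word (1) and five payload
 * words, then a terminating 0 word.  Returns the number of words written.
 * Host byte order only; a real encoder would use htonl()/XDR helpers.
 */
static size_t encode_read_list(uint32_t *out, const struct read_seg *segs,
			       size_t nsegs)
{
	size_t w = 0;

	for (size_t i = 0; i < nsegs; i++) {
		out[w++] = 1;				/* another item follows */
		out[w++] = segs[i].position;
		out[w++] = segs[i].handle;
		out[w++] = segs[i].length;
		out[w++] = (uint32_t)(segs[i].offset >> 32);
		out[w++] = (uint32_t)segs[i].offset;
	}
	out[w++] = 0;					/* end of Read list */
	return w;
}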
379 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
383 * 1 - N - HLOO - HLOO - ... - HLOO - 0
395 struct xdr_stream *xdr = &req->rl_stream; in rpcrdma_encode_write_list()
396 struct rpcrdma_ep *ep = r_xprt->rx_ep; in rpcrdma_encode_write_list()
405 seg = req->rl_segments; in rpcrdma_encode_write_list()
406 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, in rpcrdma_encode_write_list()
407 rqst->rq_rcv_buf.head[0].iov_len, in rpcrdma_encode_write_list()
413 return -EMSGSIZE; in rpcrdma_encode_write_list()
416 return -EMSGSIZE; in rpcrdma_encode_write_list()
426 return -EMSGSIZE; in rpcrdma_encode_write_list()
428 trace_xprtrdma_chunk_write(rqst->rq_task, mr, nsegs); in rpcrdma_encode_write_list()
429 r_xprt->rx_stats.write_chunk_count++; in rpcrdma_encode_write_list()
430 r_xprt->rx_stats.total_rdma_request += mr->mr_length; in rpcrdma_encode_write_list()
432 nsegs -= mr->mr_nents; in rpcrdma_encode_write_list()
435 if (xdr_pad_size(rqst->rq_rcv_buf.page_len)) { in rpcrdma_encode_write_list()
436 if (encode_rdma_segment(xdr, ep->re_write_pad_mr) < 0) in rpcrdma_encode_write_list()
437 return -EMSGSIZE; in rpcrdma_encode_write_list()
439 trace_xprtrdma_chunk_wp(rqst->rq_task, ep->re_write_pad_mr, in rpcrdma_encode_write_list()
441 r_xprt->rx_stats.write_chunk_count++; in rpcrdma_encode_write_list()
442 r_xprt->rx_stats.total_rdma_request += mr->mr_length; in rpcrdma_encode_write_list()
444 nsegs -= mr->mr_nents; in rpcrdma_encode_write_list()
452 return -EMSGSIZE; in rpcrdma_encode_write_list()
459 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
463 * 1 - N - HLOO - HLOO - ... - HLOO
473 struct xdr_stream *xdr = &req->rl_stream; in rpcrdma_encode_reply_chunk()
481 return -EMSGSIZE; in rpcrdma_encode_reply_chunk()
485 seg = req->rl_segments; in rpcrdma_encode_reply_chunk()
486 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg); in rpcrdma_encode_reply_chunk()
491 return -EMSGSIZE; in rpcrdma_encode_reply_chunk()
494 return -EMSGSIZE; in rpcrdma_encode_reply_chunk()
504 return -EMSGSIZE; in rpcrdma_encode_reply_chunk()
506 trace_xprtrdma_chunk_reply(rqst->rq_task, mr, nsegs); in rpcrdma_encode_reply_chunk()
507 r_xprt->rx_stats.reply_chunk_count++; in rpcrdma_encode_reply_chunk()
508 r_xprt->rx_stats.total_rdma_request += mr->mr_length; in rpcrdma_encode_reply_chunk()
510 nsegs -= mr->mr_nents; in rpcrdma_encode_reply_chunk()
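Write chunks and the Reply chunk use a counted array of HLOO segments instead of per-segment discriminators, which is why their encoding keys above differ from the Read list, and from each other only in the trailing zero: the Write list is itself a list of chunks and needs a terminator, while the Reply chunk is a single optional chunk. A sketch of that framing, again in host byte order:

#include <stdint.h>
#include <stddef.h>

struct hloo {			/* Handle32, Length32, Offset64 */
	uint32_t handle;
	uint32_t length;
	uint64_t offset;
};

static size_t put_segment(uint32_t *out, const struct hloo *s)
{
	out[0] = s->handle;
	out[1] = s->length;
	out[2] = (uint32_t)(s->offset >> 32);
	out[3] = (uint32_t)s->offset;
	return 4;
}

/* One Write chunk: discriminator 1, segment count N, then N HLOO triples.
 * The Write *list* is then closed with a 0 word; the Reply chunk is a bare
 * optional chunk, so it carries no extra terminator.
 */
static size_t encode_write_chunk(uint32_t *out, const struct hloo *segs,
				 uint32_t nsegs, int close_list)
{
	size_t w = 0;

	out[w++] = 1;			/* a chunk is present */
	out[w++] = nsegs;		/* counted array length */
	for (uint32_t i = 0; i < nsegs; i++)
		w += put_segment(out + w, &segs[i]);
	if (close_list)
		out[w++] = 0;		/* no further chunks in the Write list */
	return w;
}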
523 struct rpcrdma_rep *rep = req->rl_reply; in rpcrdma_sendctx_done()
526 rep->rr_rxprt->rx_stats.reply_waits_for_send++; in rpcrdma_sendctx_done()
530 * rpcrdma_sendctx_unmap - DMA-unmap Send buffer
536 struct rpcrdma_regbuf *rb = sc->sc_req->rl_sendbuf; in rpcrdma_sendctx_unmap()
539 if (!sc->sc_unmap_count) in rpcrdma_sendctx_unmap()
544 * they can be cheaply re-used. in rpcrdma_sendctx_unmap()
546 for (sge = &sc->sc_sges[2]; sc->sc_unmap_count; in rpcrdma_sendctx_unmap()
547 ++sge, --sc->sc_unmap_count) in rpcrdma_sendctx_unmap()
548 ib_dma_unmap_page(rdmab_device(rb), sge->addr, sge->length, in rpcrdma_sendctx_unmap()
551 kref_put(&sc->sc_req->rl_kref, rpcrdma_sendctx_done); in rpcrdma_sendctx_unmap()
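The unmap loop above starts at sc_sges[2] because the first two SGEs always point at the persistently mapped transport-header and send-buffer regbufs; only the page-list and tail SGEs added for this particular Send were DMA-mapped and need ib_dma_unmap_page(). A sketch of that layout with stand-in types (the array size and the callback are assumptions for illustration):

#include <stddef.h>

/* Stand-in for struct ib_sge. */
struct sge { unsigned long long addr; unsigned int length, lkey; };

/* sges[0]:  transport header regbuf   (mapped once, at setup)
 * sges[1]:  inline send buffer regbuf (mapped once, at setup)
 * sges[2..]: payload pages / tail     (mapped per Send; unmap_count of them)
 */
struct sendctx {
	struct sge   sges[16];
	unsigned int unmap_count;	/* trailing SGEs that were dma-mapped */
};

static void unmap_dynamic_sges(struct sendctx *sc,
			       void (*unmap)(unsigned long long addr,
					     unsigned int length))
{
	for (struct sge *sge = &sc->sges[2]; sc->unmap_count;
	     ++sge, --sc->unmap_count)
		unmap(sge->addr, sge->length);	/* ib_dma_unmap_page() in the kernel */
}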
554 /* Prepare an SGE for the RPC-over-RDMA transport header.
559 struct rpcrdma_sendctx *sc = req->rl_sendctx; in rpcrdma_prepare_hdr_sge()
560 struct rpcrdma_regbuf *rb = req->rl_rdmabuf; in rpcrdma_prepare_hdr_sge()
561 struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++]; in rpcrdma_prepare_hdr_sge()
563 sge->addr = rdmab_addr(rb); in rpcrdma_prepare_hdr_sge()
564 sge->length = len; in rpcrdma_prepare_hdr_sge()
565 sge->lkey = rdmab_lkey(rb); in rpcrdma_prepare_hdr_sge()
567 ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length, in rpcrdma_prepare_hdr_sge()
572 * DMA-mapped. Sync the content that has changed.
577 struct rpcrdma_sendctx *sc = req->rl_sendctx; in rpcrdma_prepare_head_iov()
578 struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++]; in rpcrdma_prepare_head_iov()
579 struct rpcrdma_regbuf *rb = req->rl_sendbuf; in rpcrdma_prepare_head_iov()
584 sge->addr = rdmab_addr(rb); in rpcrdma_prepare_head_iov()
585 sge->length = len; in rpcrdma_prepare_head_iov()
586 sge->lkey = rdmab_lkey(rb); in rpcrdma_prepare_head_iov()
588 ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length, in rpcrdma_prepare_head_iov()
593 /* If there is a page list present, DMA map and prepare an
599 struct rpcrdma_sendctx *sc = req->rl_sendctx; in rpcrdma_prepare_pagelist()
600 struct rpcrdma_regbuf *rb = req->rl_sendbuf; in rpcrdma_prepare_pagelist()
605 ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT); in rpcrdma_prepare_pagelist()
606 page_base = offset_in_page(xdr->page_base); in rpcrdma_prepare_pagelist()
607 remaining = xdr->page_len; in rpcrdma_prepare_pagelist()
609 sge = &sc->sc_sges[req->rl_wr.num_sge++]; in rpcrdma_prepare_pagelist()
610 len = min_t(unsigned int, PAGE_SIZE - page_base, remaining); in rpcrdma_prepare_pagelist()
611 sge->addr = ib_dma_map_page(rdmab_device(rb), *ppages, in rpcrdma_prepare_pagelist()
613 if (ib_dma_mapping_error(rdmab_device(rb), sge->addr)) in rpcrdma_prepare_pagelist()
616 sge->length = len; in rpcrdma_prepare_pagelist()
617 sge->lkey = rdmab_lkey(rb); in rpcrdma_prepare_pagelist()
619 sc->sc_unmap_count++; in rpcrdma_prepare_pagelist()
621 remaining -= len; in rpcrdma_prepare_pagelist()
628 trace_xprtrdma_dma_maperr(sge->addr); in rpcrdma_prepare_pagelist()
640 struct rpcrdma_sendctx *sc = req->rl_sendctx; in rpcrdma_prepare_tail_iov()
641 struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++]; in rpcrdma_prepare_tail_iov()
642 struct rpcrdma_regbuf *rb = req->rl_sendbuf; in rpcrdma_prepare_tail_iov()
643 struct page *page = virt_to_page(xdr->tail[0].iov_base); in rpcrdma_prepare_tail_iov()
645 sge->addr = ib_dma_map_page(rdmab_device(rb), page, page_base, len, in rpcrdma_prepare_tail_iov()
647 if (ib_dma_mapping_error(rdmab_device(rb), sge->addr)) in rpcrdma_prepare_tail_iov()
650 sge->length = len; in rpcrdma_prepare_tail_iov()
651 sge->lkey = rdmab_lkey(rb); in rpcrdma_prepare_tail_iov()
652 ++sc->sc_unmap_count; in rpcrdma_prepare_tail_iov()
656 trace_xprtrdma_dma_maperr(sge->addr); in rpcrdma_prepare_tail_iov()
668 dst = (unsigned char *)xdr->head[0].iov_base; in rpcrdma_pullup_tail_iov()
669 dst += xdr->head[0].iov_len + xdr->page_len; in rpcrdma_pullup_tail_iov()
670 memmove(dst, xdr->tail[0].iov_base, xdr->tail[0].iov_len); in rpcrdma_pullup_tail_iov()
671 r_xprt->rx_stats.pullup_copy_count += xdr->tail[0].iov_len; in rpcrdma_pullup_tail_iov()
684 dst = (unsigned char *)xdr->head[0].iov_base; in rpcrdma_pullup_pagelist()
685 dst += xdr->head[0].iov_len; in rpcrdma_pullup_pagelist()
686 ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT); in rpcrdma_pullup_pagelist()
687 page_base = offset_in_page(xdr->page_base); in rpcrdma_pullup_pagelist()
688 remaining = xdr->page_len; in rpcrdma_pullup_pagelist()
692 len = min_t(unsigned int, PAGE_SIZE - page_base, remaining); in rpcrdma_pullup_pagelist()
694 r_xprt->rx_stats.pullup_copy_count += len; in rpcrdma_pullup_pagelist()
698 remaining -= len; in rpcrdma_pullup_pagelist()
703 /* Copy the contents of @xdr into @rl_sendbuf and DMA sync it.
704 * When the head, pagelist, and tail are small, a pull-up copy
705 * is considerably less costly than DMA mapping the components
709 * - the caller has already verified that the total length
716 if (unlikely(xdr->tail[0].iov_len)) in rpcrdma_prepare_noch_pullup()
719 if (unlikely(xdr->page_len)) in rpcrdma_prepare_noch_pullup()
723 return rpcrdma_prepare_head_iov(r_xprt, req, xdr->len); in rpcrdma_prepare_noch_pullup()
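As the pull-up comment explains, a small Send is cheaper to copy into the already-mapped send buffer than to DMA-map piece by piece; rpcrdma_marshal_req() below selects rpcrdma_noch_pullup whenever the whole rq_snd_buf fits in rl_sendbuf. A rough cost comparison under assumed sizes:

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	/* Assumed message shape: payload in the page list, XDR pad in the tail. */
	unsigned int page_base = 256, page_len = 1800, tail = 4;
	unsigned int pages = (page_base + page_len + PAGE_SIZE - 1) / PAGE_SIZE;

	/* Pull-up: copy pagelist + tail behind the head in the persistently
	 * mapped send buffer; the Send then needs only the header SGE plus
	 * one SGE for the whole send buffer, and no per-send DMA mapping.
	 */
	printf("pullup: copy %u bytes, 2 SGEs, 0 map/unmap pairs\n",
	       page_len + tail);

	/* Mapped: header + head SGEs, one SGE per payload page, one for the
	 * tail; each page and the tail need ib_dma_map_page()/unmap.
	 */
	printf("mapped: %u SGEs, %u map/unmap pairs\n",
	       2 + pages + 1, pages + 1);
	return 0;
}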
730 struct kvec *tail = &xdr->tail[0]; in rpcrdma_prepare_noch_mapped()
732 if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len)) in rpcrdma_prepare_noch_mapped()
734 if (xdr->page_len) in rpcrdma_prepare_noch_mapped()
737 if (tail->iov_len) in rpcrdma_prepare_noch_mapped()
739 offset_in_page(tail->iov_base), in rpcrdma_prepare_noch_mapped()
740 tail->iov_len)) in rpcrdma_prepare_noch_mapped()
743 if (req->rl_sendctx->sc_unmap_count) in rpcrdma_prepare_noch_mapped()
744 kref_get(&req->rl_kref); in rpcrdma_prepare_noch_mapped()
752 if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len)) in rpcrdma_prepare_readch()
760 if (xdr->tail[0].iov_len > 3) { in rpcrdma_prepare_readch()
765 * the tail iovec. Force the tail's non-pad content to in rpcrdma_prepare_readch()
768 page_base = offset_in_page(xdr->tail[0].iov_base); in rpcrdma_prepare_readch()
769 len = xdr->tail[0].iov_len; in rpcrdma_prepare_readch()
771 len -= len & 3; in rpcrdma_prepare_readch()
774 kref_get(&req->rl_kref); in rpcrdma_prepare_readch()
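The len -= len & 3 above, together with a matching offset adjustment in the kernel, drops the leading XDR pad that xdr_write_pages() places at the start of the tail when the page-list payload has an odd length; since everything after the pad is 4-byte aligned, len & 3 recovers the pad size. A tiny illustration of that arithmetic:

#include <stdio.h>

/* XDR rounds every opaque item up to a 4-byte boundary; this is the pad
 * needed after a payload of @len bytes.
 */
static unsigned int xdr_pad(unsigned int len)
{
	return (4 - (len & 3)) & 3;	/* 0..3 bytes to the next XDR boundary */
}

int main(void)
{
	unsigned int page_len = 1027;	/* unaligned payload (assumed) */
	unsigned int trailer = 8;	/* aligned post-payload words (assumed) */
	unsigned int tail_len = xdr_pad(page_len) + trailer;

	/* tail_len & 3 == pad size, because the trailer is already aligned */
	printf("pad=%u, tail=%u, send only %u bytes of tail\n",
	       xdr_pad(page_len), tail_len, tail_len - (tail_len & 3));
	return 0;
}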
781 * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
797 ret = -EAGAIN; in rpcrdma_prepare_send_sges()
798 req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt); in rpcrdma_prepare_send_sges()
799 if (!req->rl_sendctx) in rpcrdma_prepare_send_sges()
801 req->rl_sendctx->sc_unmap_count = 0; in rpcrdma_prepare_send_sges()
802 req->rl_sendctx->sc_req = req; in rpcrdma_prepare_send_sges()
803 kref_init(&req->rl_kref); in rpcrdma_prepare_send_sges()
804 req->rl_wr.wr_cqe = &req->rl_sendctx->sc_cqe; in rpcrdma_prepare_send_sges()
805 req->rl_wr.sg_list = req->rl_sendctx->sc_sges; in rpcrdma_prepare_send_sges()
806 req->rl_wr.num_sge = 0; in rpcrdma_prepare_send_sges()
807 req->rl_wr.opcode = IB_WR_SEND; in rpcrdma_prepare_send_sges()
811 ret = -EIO; in rpcrdma_prepare_send_sges()
834 rpcrdma_sendctx_unmap(req->rl_sendctx); in rpcrdma_prepare_send_sges()
836 trace_xprtrdma_prepsend_failed(&req->rl_slot, ret); in rpcrdma_prepare_send_sges()
841 * rpcrdma_marshal_req - Marshal and send one RPC request
846 * - Chooses the transfer mode (eg., RDMA_MSG or RDMA_NOMSG)
847 * - Registers Read, Write, and Reply chunks
848 * - Constructs the transport header
849 * - Posts a Send WR to send the transport header and request
853 * %-ENOTCONN if the connection was lost,
854 * %-EAGAIN if the caller should call again with the same arguments,
855 * %-ENOBUFS if the caller should call again after a delay,
856 * %-EMSGSIZE if the transport header is too small,
857 * %-EIO if a permanent problem occurred while marshaling.
863 struct xdr_stream *xdr = &req->rl_stream; in rpcrdma_marshal_req()
865 struct xdr_buf *buf = &rqst->rq_snd_buf; in rpcrdma_marshal_req()
870 if (unlikely(rqst->rq_rcv_buf.flags & XDRBUF_SPARSE_PAGES)) { in rpcrdma_marshal_req()
871 ret = rpcrdma_alloc_sparse_pages(&rqst->rq_rcv_buf); in rpcrdma_marshal_req()
876 rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0); in rpcrdma_marshal_req()
877 xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf), in rpcrdma_marshal_req()
881 ret = -EMSGSIZE; in rpcrdma_marshal_req()
885 *p++ = rqst->rq_xid; in rpcrdma_marshal_req()
887 *p++ = r_xprt->rx_buf.rb_max_requests; in rpcrdma_marshal_req()
894 &rqst->rq_cred->cr_auth->au_flags); in rpcrdma_marshal_req()
903 * o Large non-read ops return as a single reply chunk. in rpcrdma_marshal_req()
907 else if ((ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ) && in rpcrdma_marshal_req()
920 * o Large non-write ops are sent with the entire message as a in rpcrdma_marshal_req()
921 * single read chunk (protocol 0-position special case). in rpcrdma_marshal_req()
924 * that both has a data payload, and whose non-data arguments in rpcrdma_marshal_req()
929 rtype = buf->len < rdmab_length(req->rl_sendbuf) ? in rpcrdma_marshal_req()
931 } else if (ddp_allowed && buf->flags & XDRBUF_WRITE) { in rpcrdma_marshal_req()
935 r_xprt->rx_stats.nomsg_call_count++; in rpcrdma_marshal_req()
941 * of chunk lists in one RPC-over-RDMA Call message: in rpcrdma_marshal_req()
943 * - Read list in rpcrdma_marshal_req()
944 * - Write list in rpcrdma_marshal_req()
945 * - Reply chunk in rpcrdma_marshal_req()
946 * - Read list + Reply chunk in rpcrdma_marshal_req()
950 * - Read list + Write list in rpcrdma_marshal_req()
954 * - Write list + Reply chunk in rpcrdma_marshal_req()
955 * - Read list + Write list + Reply chunk in rpcrdma_marshal_req()
972 ret = rpcrdma_prepare_send_sges(r_xprt, req, req->rl_hdrbuf.len, in rpcrdma_marshal_req()
982 r_xprt->rx_stats.failed_marshal_count++; in rpcrdma_marshal_req()
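A detail worth noting in the combination list above: the Read list is selected by rtype, while the Write list and the Reply chunk are both selected by the single wtype value, so a Write list and a Reply chunk can never appear in the same Call from this client. A small sketch that maps simplified, assumed chunk-type values onto which lists end up non-empty:

#include <stdio.h>

/* Assumed, simplified mirror of the kernel's chunk-type choices. */
enum chunktype { NOCH, READCH, AREADCH, WRITECH, REPLYCH };

struct lists { int read, write, reply; };	/* 1 = list is non-empty */

static struct lists lists_for(enum chunktype rtype, enum chunktype wtype)
{
	struct lists l = {
		.read  = (rtype == READCH || rtype == AREADCH),
		.write = (wtype == WRITECH),
		.reply = (wtype == REPLYCH),
	};
	return l;
}

int main(void)
{
	/* e.g. a large write-style Call: Read chunk for the payload,
	 * small reply, so no Write list or Reply chunk.
	 */
	struct lists l = lists_for(READCH, NOCH);

	printf("read=%d write=%d reply=%d\n", l.read, l.write, l.reply);
	return 0;
}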
989 u32 grant) in __rpcrdma_update_cwnd_locked() argument
991 buf->rb_credits = grant; in __rpcrdma_update_cwnd_locked()
992 xprt->cwnd = grant << RPC_CWNDSHIFT; in __rpcrdma_update_cwnd_locked()
995 static void rpcrdma_update_cwnd(struct rpcrdma_xprt *r_xprt, u32 grant) in rpcrdma_update_cwnd() argument
997 struct rpc_xprt *xprt = &r_xprt->rx_xprt; in rpcrdma_update_cwnd()
999 spin_lock(&xprt->transport_lock); in rpcrdma_update_cwnd()
1000 __rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, grant); in rpcrdma_update_cwnd()
1001 spin_unlock(&xprt->transport_lock); in rpcrdma_update_cwnd()
1005 * rpcrdma_reset_cwnd - Reset the xprt's congestion window
1009 * its credit grant to one (see RFC 8166, Section 3.3.3).
1013 struct rpc_xprt *xprt = &r_xprt->rx_xprt; in rpcrdma_reset_cwnd()
1015 spin_lock(&xprt->transport_lock); in rpcrdma_reset_cwnd()
1016 xprt->cong = 0; in rpcrdma_reset_cwnd()
1017 __rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, 1); in rpcrdma_reset_cwnd()
1018 spin_unlock(&xprt->transport_lock); in rpcrdma_reset_cwnd()
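Credit grants arriving in replies are converted directly into the generic RPC layer's congestion window, and a reconnect resets the grant to one per RFC 8166, Section 3.3.3. A worked example; RPC_CWNDSHIFT is assumed to be 8 to match the sunrpc headers, and each in-flight request consumes RPC_CWNDSCALE units of window:

#include <stdio.h>

#define RPC_CWNDSHIFT 8U			/* assumed value */
#define RPC_CWNDSCALE (1U << RPC_CWNDSHIFT)

int main(void)
{
	/* A grant of N credits lets roughly N requests be outstanding,
	 * since each one charges RPC_CWNDSCALE against the window.
	 */
	unsigned int grants[] = { 1, 32, 128 };

	for (unsigned int i = 0; i < 3; i++) {
		unsigned long cwnd = (unsigned long)grants[i] << RPC_CWNDSHIFT;

		printf("grant=%3u -> cwnd=%5lu (%u slots)\n",
		       grants[i], cwnd, (unsigned int)(cwnd / RPC_CWNDSCALE));
	}
	return 0;
}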
1022 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
1051 rqst->rq_rcv_buf.head[0].iov_base = srcp; in rpcrdma_inline_fixup()
1052 rqst->rq_private_buf.head[0].iov_base = srcp; in rpcrdma_inline_fixup()
1057 curlen = rqst->rq_rcv_buf.head[0].iov_len; in rpcrdma_inline_fixup()
1061 copy_len -= curlen; in rpcrdma_inline_fixup()
1063 ppages = rqst->rq_rcv_buf.pages + in rpcrdma_inline_fixup()
1064 (rqst->rq_rcv_buf.page_base >> PAGE_SHIFT); in rpcrdma_inline_fixup()
1065 page_base = offset_in_page(rqst->rq_rcv_buf.page_base); in rpcrdma_inline_fixup()
1067 if (copy_len && rqst->rq_rcv_buf.page_len) { in rpcrdma_inline_fixup()
1070 pagelist_len = rqst->rq_rcv_buf.page_len; in rpcrdma_inline_fixup()
1075 curlen = PAGE_SIZE - page_base; in rpcrdma_inline_fixup()
1084 copy_len -= curlen; in rpcrdma_inline_fixup()
1086 pagelist_len -= curlen; in rpcrdma_inline_fixup()
1099 srcp -= pad; in rpcrdma_inline_fixup()
1106 rqst->rq_rcv_buf.tail[0].iov_base = srcp; in rpcrdma_inline_fixup()
1107 rqst->rq_private_buf.tail[0].iov_base = srcp; in rpcrdma_inline_fixup()
1124 struct rpc_xprt *xprt = &r_xprt->rx_xprt; in rpcrdma_is_bcall()
1125 struct xdr_stream *xdr = &rep->rr_stream; in rpcrdma_is_bcall()
1128 if (rep->rr_proc != rdma_msg) in rpcrdma_is_bcall()
1143 if (*p++ != rep->rr_xid) in rpcrdma_is_bcall()
1149 if (xprt->bc_serv == NULL) in rpcrdma_is_bcall()
1176 return -EIO; in decode_rdma_segment()
1190 return -EIO; in decode_write_chunk()
1194 while (segcount--) { in decode_write_chunk()
1196 return -EIO; in decode_write_chunk()
1203 /* In RPC-over-RDMA Version One replies, a Read list is never
1213 return -EIO; in decode_read_list()
1215 return -EIO; in decode_read_list()
1232 return -EIO; in decode_write_list()
1236 return -EIO; in decode_write_list()
1239 return -EIO; in decode_write_list()
1252 return -EIO; in decode_reply_chunk()
1257 return -EIO; in decode_reply_chunk()
1265 struct xdr_stream *xdr = &rep->rr_stream; in rpcrdma_decode_msg()
1271 return -EIO; in rpcrdma_decode_msg()
1273 return -EIO; in rpcrdma_decode_msg()
1275 return -EIO; in rpcrdma_decode_msg()
1279 return -EIO; in rpcrdma_decode_msg()
1281 /* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */ in rpcrdma_decode_msg()
1284 r_xprt->rx_stats.fixup_copy_count += in rpcrdma_decode_msg()
1287 r_xprt->rx_stats.total_rdma_reply += writelist; in rpcrdma_decode_msg()
1294 struct xdr_stream *xdr = &rep->rr_stream; in rpcrdma_decode_nomsg()
1299 return -EIO; in rpcrdma_decode_nomsg()
1301 return -EIO; in rpcrdma_decode_nomsg()
1303 return -EIO; in rpcrdma_decode_nomsg()
1307 return -EIO; in rpcrdma_decode_nomsg()
1309 return -EIO; in rpcrdma_decode_nomsg()
1312 r_xprt->rx_stats.total_rdma_reply += replychunk; in rpcrdma_decode_nomsg()
1320 struct xdr_stream *xdr = &rep->rr_stream; in rpcrdma_decode_error()
1325 return -EIO; in rpcrdma_decode_error()
1341 return -EIO; in rpcrdma_decode_error()
1345 * rpcrdma_unpin_rqst - Release rqst without completing it
1354 struct rpc_xprt *xprt = &rep->rr_rxprt->rx_xprt; in rpcrdma_unpin_rqst()
1355 struct rpc_rqst *rqst = rep->rr_rqst; in rpcrdma_unpin_rqst()
1358 req->rl_reply = NULL; in rpcrdma_unpin_rqst()
1359 rep->rr_rqst = NULL; in rpcrdma_unpin_rqst()
1361 spin_lock(&xprt->queue_lock); in rpcrdma_unpin_rqst()
1363 spin_unlock(&xprt->queue_lock); in rpcrdma_unpin_rqst()
1367 * rpcrdma_complete_rqst - Pass completed rqst back to RPC
1376 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt; in rpcrdma_complete_rqst()
1377 struct rpc_xprt *xprt = &r_xprt->rx_xprt; in rpcrdma_complete_rqst()
1378 struct rpc_rqst *rqst = rep->rr_rqst; in rpcrdma_complete_rqst()
1381 switch (rep->rr_proc) { in rpcrdma_complete_rqst()
1392 status = -EIO; in rpcrdma_complete_rqst()
1398 spin_lock(&xprt->queue_lock); in rpcrdma_complete_rqst()
1399 xprt_complete_rqst(rqst->rq_task, status); in rpcrdma_complete_rqst()
1401 spin_unlock(&xprt->queue_lock); in rpcrdma_complete_rqst()
1406 r_xprt->rx_stats.bad_reply_count++; in rpcrdma_complete_rqst()
1407 rqst->rq_task->tk_status = status; in rpcrdma_complete_rqst()
1417 rpcrdma_complete_rqst(req->rl_reply); in rpcrdma_reply_done()
1421 * rpcrdma_reply_handler - Process received RPC/RDMA messages
1429 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt; in rpcrdma_reply_handler()
1430 struct rpc_xprt *xprt = &r_xprt->rx_xprt; in rpcrdma_reply_handler()
1431 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_reply_handler()
1440 if (xprt->reestablish_timeout) in rpcrdma_reply_handler()
1441 xprt->reestablish_timeout = 0; in rpcrdma_reply_handler()
1444 xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf, in rpcrdma_reply_handler()
1445 rep->rr_hdrbuf.head[0].iov_base, NULL); in rpcrdma_reply_handler()
1446 p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p)); in rpcrdma_reply_handler()
1449 rep->rr_xid = *p++; in rpcrdma_reply_handler()
1450 rep->rr_vers = *p++; in rpcrdma_reply_handler()
1452 rep->rr_proc = *p++; in rpcrdma_reply_handler()
1454 if (rep->rr_vers != rpcrdma_version) in rpcrdma_reply_handler()
1463 spin_lock(&xprt->queue_lock); in rpcrdma_reply_handler()
1464 rqst = xprt_lookup_rqst(xprt, rep->rr_xid); in rpcrdma_reply_handler()
1468 spin_unlock(&xprt->queue_lock); in rpcrdma_reply_handler()
1472 else if (credits > r_xprt->rx_ep->re_max_requests) in rpcrdma_reply_handler()
1473 credits = r_xprt->rx_ep->re_max_requests; in rpcrdma_reply_handler()
1474 rpcrdma_post_recvs(r_xprt, credits + (buf->rb_bc_srv_max_requests << 1), in rpcrdma_reply_handler()
1476 if (buf->rb_credits != credits) in rpcrdma_reply_handler()
1480 if (unlikely(req->rl_reply)) in rpcrdma_reply_handler()
1481 rpcrdma_rep_put(buf, req->rl_reply); in rpcrdma_reply_handler()
1482 req->rl_reply = rep; in rpcrdma_reply_handler()
1483 rep->rr_rqst = rqst; in rpcrdma_reply_handler()
1485 trace_xprtrdma_reply(rqst->rq_task, rep, credits); in rpcrdma_reply_handler()
1487 if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE) in rpcrdma_reply_handler()
1488 frwr_reminv(rep, &req->rl_registered); in rpcrdma_reply_handler()
1489 if (!list_empty(&req->rl_registered)) in rpcrdma_reply_handler()
1493 kref_put(&req->rl_kref, rpcrdma_reply_done); in rpcrdma_reply_handler()
1501 spin_unlock(&xprt->queue_lock); in rpcrdma_reply_handler()
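Before the credit grant from a reply is applied, the handler clamps it: the else-if above caps it at re_max_requests so the Receive queue cannot be overrun, and the branch just before it (not shown in this listing) treats a zero grant as one so the transport is never starved. A small standalone sketch of that clamping with an assumed maximum:

#include <stdio.h>

/* Assumed connection limit; the kernel uses ep->re_max_requests. */
#define MAX_REQUESTS 128u

static unsigned int clamp_credits(unsigned int credits)
{
	if (credits == 0)
		credits = 1;		/* never strangle the transport */
	else if (credits > MAX_REQUESTS)
		credits = MAX_REQUESTS;	/* never overrun the Receive queue */
	return credits;
}

int main(void)
{
	printf("%u %u %u\n", clamp_credits(0), clamp_credits(64),
	       clamp_credits(1000));	/* 1 64 128 */
	return 0;
}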