1 /* 2 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the BSD-type 8 * license below: 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 14 * Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 17 * Redistributions in binary form must reproduce the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer in the documentation and/or other materials provided 20 * with the distribution. 21 * 22 * Neither the name of the Network Appliance, Inc. nor the names of 23 * its contributors may be used to endorse or promote products 24 * derived from this software without specific prior written 25 * permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 38 */ 39 40 /* 41 * rpc_rdma.c 42 * 43 * This file contains the guts of the RPC RDMA protocol, and 44 * does marshaling/unmarshaling, etc. It is also where interfacing 45 * to the Linux RPC framework lives. 46 */ 47 48 #include "xprt_rdma.h" 49 50 #include <linux/highmem.h> 51 52 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 53 # define RPCDBG_FACILITY RPCDBG_TRANS 54 #endif 55 56 enum rpcrdma_chunktype { 57 rpcrdma_noch = 0, 58 rpcrdma_readch, 59 rpcrdma_areadch, 60 rpcrdma_writech, 61 rpcrdma_replych 62 }; 63 64 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 65 static const char transfertypes[][12] = { 66 "pure inline", /* no chunks */ 67 " read chunk", /* some argument via rdma read */ 68 "*read chunk", /* entire request via rdma read */ 69 "write chunk", /* some result via rdma write */ 70 "reply chunk" /* entire reply via rdma write */ 71 }; 72 #endif 73 74 /* 75 * Chunk assembly from upper layer xdr_buf. 76 * 77 * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk 78 * elements. Segments are then coalesced when registered, if possible 79 * within the selected memreg mode. 80 * 81 * Returns positive number of segments converted, or a negative errno. 82 */ 83 84 static int 85 rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos, 86 enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs) 87 { 88 int len, n = 0, p; 89 int page_base; 90 struct page **ppages; 91 92 if (pos == 0 && xdrbuf->head[0].iov_len) { 93 seg[n].mr_page = NULL; 94 seg[n].mr_offset = xdrbuf->head[0].iov_base; 95 seg[n].mr_len = xdrbuf->head[0].iov_len; 96 ++n; 97 } 98 99 len = xdrbuf->page_len; 100 ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT); 101 page_base = xdrbuf->page_base & ~PAGE_MASK; 102 p = 0; 103 while (len && n < nsegs) { 104 if (!ppages[p]) { 105 /* alloc the pagelist for receiving buffer */ 106 ppages[p] = alloc_page(GFP_ATOMIC); 107 if (!ppages[p]) 108 return -ENOMEM; 109 } 110 seg[n].mr_page = ppages[p]; 111 seg[n].mr_offset = (void *)(unsigned long) page_base; 112 seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len); 113 if (seg[n].mr_len > PAGE_SIZE) 114 return -EIO; 115 len -= seg[n].mr_len; 116 ++n; 117 ++p; 118 page_base = 0; /* page offset only applies to first page */ 119 } 120 121 /* Message overflows the seg array */ 122 if (len && n == nsegs) 123 return -EIO; 124 125 if (xdrbuf->tail[0].iov_len) { 126 /* the rpcrdma protocol allows us to omit any trailing 127 * xdr pad bytes, saving the server an RDMA operation. */ 128 if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize) 129 return n; 130 if (n == nsegs) 131 /* Tail remains, but we're out of segments */ 132 return -EIO; 133 seg[n].mr_page = NULL; 134 seg[n].mr_offset = xdrbuf->tail[0].iov_base; 135 seg[n].mr_len = xdrbuf->tail[0].iov_len; 136 ++n; 137 } 138 139 return n; 140 } 141 142 /* 143 * Create read/write chunk lists, and reply chunks, for RDMA 144 * 145 * Assume check against THRESHOLD has been done, and chunks are required. 146 * Assume only encoding one list entry for read|write chunks. The NFSv3 147 * protocol is simple enough to allow this as it only has a single "bulk 148 * result" in each procedure - complicated NFSv4 COMPOUNDs are not. (The 149 * RDMA/Sessions NFSv4 proposal addresses this for future v4 revs.) 150 * 151 * When used for a single reply chunk (which is a special write 152 * chunk used for the entire reply, rather than just the data), it 153 * is used primarily for READDIR and READLINK which would otherwise 154 * be severely size-limited by a small rdma inline read max. The server 155 * response will come back as an RDMA Write, followed by a message 156 * of type RDMA_NOMSG carrying the xid and length. As a result, reply 157 * chunks do not provide data alignment, however they do not require 158 * "fixup" (moving the response to the upper layer buffer) either. 159 * 160 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64): 161 * 162 * Read chunklist (a linked list): 163 * N elements, position P (same P for all chunks of same arg!): 164 * 1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0 165 * 166 * Write chunklist (a list of (one) counted array): 167 * N elements: 168 * 1 - N - HLOO - HLOO - ... - HLOO - 0 169 * 170 * Reply chunk (a counted array): 171 * N elements: 172 * 1 - N - HLOO - HLOO - ... - HLOO 173 * 174 * Returns positive RPC/RDMA header size, or negative errno. 175 */ 176 177 static ssize_t 178 rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target, 179 struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type) 180 { 181 struct rpcrdma_req *req = rpcr_to_rdmar(rqst); 182 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt); 183 int n, nsegs, nchunks = 0; 184 unsigned int pos; 185 struct rpcrdma_mr_seg *seg = req->rl_segments; 186 struct rpcrdma_read_chunk *cur_rchunk = NULL; 187 struct rpcrdma_write_array *warray = NULL; 188 struct rpcrdma_write_chunk *cur_wchunk = NULL; 189 __be32 *iptr = headerp->rm_body.rm_chunks; 190 int (*map)(struct rpcrdma_xprt *, struct rpcrdma_mr_seg *, int, bool); 191 192 if (type == rpcrdma_readch || type == rpcrdma_areadch) { 193 /* a read chunk - server will RDMA Read our memory */ 194 cur_rchunk = (struct rpcrdma_read_chunk *) iptr; 195 } else { 196 /* a write or reply chunk - server will RDMA Write our memory */ 197 *iptr++ = xdr_zero; /* encode a NULL read chunk list */ 198 if (type == rpcrdma_replych) 199 *iptr++ = xdr_zero; /* a NULL write chunk list */ 200 warray = (struct rpcrdma_write_array *) iptr; 201 cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1); 202 } 203 204 if (type == rpcrdma_replych || type == rpcrdma_areadch) 205 pos = 0; 206 else 207 pos = target->head[0].iov_len; 208 209 nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS); 210 if (nsegs < 0) 211 return nsegs; 212 213 map = r_xprt->rx_ia.ri_ops->ro_map; 214 do { 215 n = map(r_xprt, seg, nsegs, cur_wchunk != NULL); 216 if (n <= 0) 217 goto out; 218 if (cur_rchunk) { /* read */ 219 cur_rchunk->rc_discrim = xdr_one; 220 /* all read chunks have the same "position" */ 221 cur_rchunk->rc_position = cpu_to_be32(pos); 222 cur_rchunk->rc_target.rs_handle = 223 cpu_to_be32(seg->mr_rkey); 224 cur_rchunk->rc_target.rs_length = 225 cpu_to_be32(seg->mr_len); 226 xdr_encode_hyper( 227 (__be32 *)&cur_rchunk->rc_target.rs_offset, 228 seg->mr_base); 229 dprintk("RPC: %s: read chunk " 230 "elem %d@0x%llx:0x%x pos %u (%s)\n", __func__, 231 seg->mr_len, (unsigned long long)seg->mr_base, 232 seg->mr_rkey, pos, n < nsegs ? "more" : "last"); 233 cur_rchunk++; 234 r_xprt->rx_stats.read_chunk_count++; 235 } else { /* write/reply */ 236 cur_wchunk->wc_target.rs_handle = 237 cpu_to_be32(seg->mr_rkey); 238 cur_wchunk->wc_target.rs_length = 239 cpu_to_be32(seg->mr_len); 240 xdr_encode_hyper( 241 (__be32 *)&cur_wchunk->wc_target.rs_offset, 242 seg->mr_base); 243 dprintk("RPC: %s: %s chunk " 244 "elem %d@0x%llx:0x%x (%s)\n", __func__, 245 (type == rpcrdma_replych) ? "reply" : "write", 246 seg->mr_len, (unsigned long long)seg->mr_base, 247 seg->mr_rkey, n < nsegs ? "more" : "last"); 248 cur_wchunk++; 249 if (type == rpcrdma_replych) 250 r_xprt->rx_stats.reply_chunk_count++; 251 else 252 r_xprt->rx_stats.write_chunk_count++; 253 r_xprt->rx_stats.total_rdma_request += seg->mr_len; 254 } 255 nchunks++; 256 seg += n; 257 nsegs -= n; 258 } while (nsegs); 259 260 /* success. all failures return above */ 261 req->rl_nchunks = nchunks; 262 263 /* 264 * finish off header. If write, marshal discrim and nchunks. 265 */ 266 if (cur_rchunk) { 267 iptr = (__be32 *) cur_rchunk; 268 *iptr++ = xdr_zero; /* finish the read chunk list */ 269 *iptr++ = xdr_zero; /* encode a NULL write chunk list */ 270 *iptr++ = xdr_zero; /* encode a NULL reply chunk */ 271 } else { 272 warray->wc_discrim = xdr_one; 273 warray->wc_nchunks = cpu_to_be32(nchunks); 274 iptr = (__be32 *) cur_wchunk; 275 if (type == rpcrdma_writech) { 276 *iptr++ = xdr_zero; /* finish the write chunk list */ 277 *iptr++ = xdr_zero; /* encode a NULL reply chunk */ 278 } 279 } 280 281 /* 282 * Return header size. 283 */ 284 return (unsigned char *)iptr - (unsigned char *)headerp; 285 286 out: 287 if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR) 288 return n; 289 290 for (pos = 0; nchunks--;) 291 pos += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt, 292 &req->rl_segments[pos]); 293 return n; 294 } 295 296 /* 297 * Copy write data inline. 298 * This function is used for "small" requests. Data which is passed 299 * to RPC via iovecs (or page list) is copied directly into the 300 * pre-registered memory buffer for this request. For small amounts 301 * of data, this is efficient. The cutoff value is tunable. 302 */ 303 static int 304 rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad) 305 { 306 int i, npages, curlen; 307 int copy_len; 308 unsigned char *srcp, *destp; 309 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt); 310 int page_base; 311 struct page **ppages; 312 313 destp = rqst->rq_svec[0].iov_base; 314 curlen = rqst->rq_svec[0].iov_len; 315 destp += curlen; 316 /* 317 * Do optional padding where it makes sense. Alignment of write 318 * payload can help the server, if our setting is accurate. 319 */ 320 pad -= (curlen + 36/*sizeof(struct rpcrdma_msg_padded)*/); 321 if (pad < 0 || rqst->rq_slen - curlen < RPCRDMA_INLINE_PAD_THRESH) 322 pad = 0; /* don't pad this request */ 323 324 dprintk("RPC: %s: pad %d destp 0x%p len %d hdrlen %d\n", 325 __func__, pad, destp, rqst->rq_slen, curlen); 326 327 copy_len = rqst->rq_snd_buf.page_len; 328 329 if (rqst->rq_snd_buf.tail[0].iov_len) { 330 curlen = rqst->rq_snd_buf.tail[0].iov_len; 331 if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) { 332 memmove(destp + copy_len, 333 rqst->rq_snd_buf.tail[0].iov_base, curlen); 334 r_xprt->rx_stats.pullup_copy_count += curlen; 335 } 336 dprintk("RPC: %s: tail destp 0x%p len %d\n", 337 __func__, destp + copy_len, curlen); 338 rqst->rq_svec[0].iov_len += curlen; 339 } 340 r_xprt->rx_stats.pullup_copy_count += copy_len; 341 342 page_base = rqst->rq_snd_buf.page_base; 343 ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT); 344 page_base &= ~PAGE_MASK; 345 npages = PAGE_ALIGN(page_base+copy_len) >> PAGE_SHIFT; 346 for (i = 0; copy_len && i < npages; i++) { 347 curlen = PAGE_SIZE - page_base; 348 if (curlen > copy_len) 349 curlen = copy_len; 350 dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n", 351 __func__, i, destp, copy_len, curlen); 352 srcp = kmap_atomic(ppages[i]); 353 memcpy(destp, srcp+page_base, curlen); 354 kunmap_atomic(srcp); 355 rqst->rq_svec[0].iov_len += curlen; 356 destp += curlen; 357 copy_len -= curlen; 358 page_base = 0; 359 } 360 /* header now contains entire send message */ 361 return pad; 362 } 363 364 /* 365 * Marshal a request: the primary job of this routine is to choose 366 * the transfer modes. See comments below. 367 * 368 * Uses multiple RDMA IOVs for a request: 369 * [0] -- RPC RDMA header, which uses memory from the *start* of the 370 * preregistered buffer that already holds the RPC data in 371 * its middle. 372 * [1] -- the RPC header/data, marshaled by RPC and the NFS protocol. 373 * [2] -- optional padding. 374 * [3] -- if padded, header only in [1] and data here. 375 * 376 * Returns zero on success, otherwise a negative errno. 377 */ 378 379 int 380 rpcrdma_marshal_req(struct rpc_rqst *rqst) 381 { 382 struct rpc_xprt *xprt = rqst->rq_xprt; 383 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 384 struct rpcrdma_req *req = rpcr_to_rdmar(rqst); 385 char *base; 386 size_t rpclen, padlen; 387 ssize_t hdrlen; 388 enum rpcrdma_chunktype rtype, wtype; 389 struct rpcrdma_msg *headerp; 390 391 /* 392 * rpclen gets amount of data in first buffer, which is the 393 * pre-registered buffer. 394 */ 395 base = rqst->rq_svec[0].iov_base; 396 rpclen = rqst->rq_svec[0].iov_len; 397 398 headerp = rdmab_to_msg(req->rl_rdmabuf); 399 /* don't byte-swap XID, it's already done in request */ 400 headerp->rm_xid = rqst->rq_xid; 401 headerp->rm_vers = rpcrdma_version; 402 headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests); 403 headerp->rm_type = rdma_msg; 404 405 /* 406 * Chunks needed for results? 407 * 408 * o If the expected result is under the inline threshold, all ops 409 * return as inline (but see later). 410 * o Large non-read ops return as a single reply chunk. 411 * o Large read ops return data as write chunk(s), header as inline. 412 * 413 * Note: the NFS code sending down multiple result segments implies 414 * the op is one of read, readdir[plus], readlink or NFSv4 getacl. 415 */ 416 417 /* 418 * This code can handle read chunks, write chunks OR reply 419 * chunks -- only one type. If the request is too big to fit 420 * inline, then we will choose read chunks. If the request is 421 * a READ, then use write chunks to separate the file data 422 * into pages; otherwise use reply chunks. 423 */ 424 if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst)) 425 wtype = rpcrdma_noch; 426 else if (rqst->rq_rcv_buf.page_len == 0) 427 wtype = rpcrdma_replych; 428 else if (rqst->rq_rcv_buf.flags & XDRBUF_READ) 429 wtype = rpcrdma_writech; 430 else 431 wtype = rpcrdma_replych; 432 433 /* 434 * Chunks needed for arguments? 435 * 436 * o If the total request is under the inline threshold, all ops 437 * are sent as inline. 438 * o Large non-write ops are sent with the entire message as a 439 * single read chunk (protocol 0-position special case). 440 * o Large write ops transmit data as read chunk(s), header as 441 * inline. 442 * 443 * Note: the NFS code sending down multiple argument segments 444 * implies the op is a write. 445 * TBD check NFSv4 setacl 446 */ 447 if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst)) 448 rtype = rpcrdma_noch; 449 else if (rqst->rq_snd_buf.page_len == 0) 450 rtype = rpcrdma_areadch; 451 else 452 rtype = rpcrdma_readch; 453 454 /* The following simplification is not true forever */ 455 if (rtype != rpcrdma_noch && wtype == rpcrdma_replych) 456 wtype = rpcrdma_noch; 457 if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) { 458 dprintk("RPC: %s: cannot marshal multiple chunk lists\n", 459 __func__); 460 return -EIO; 461 } 462 463 hdrlen = RPCRDMA_HDRLEN_MIN; 464 padlen = 0; 465 466 /* 467 * Pull up any extra send data into the preregistered buffer. 468 * When padding is in use and applies to the transfer, insert 469 * it and change the message type. 470 */ 471 if (rtype == rpcrdma_noch) { 472 473 padlen = rpcrdma_inline_pullup(rqst, 474 RPCRDMA_INLINE_PAD_VALUE(rqst)); 475 476 if (padlen) { 477 headerp->rm_type = rdma_msgp; 478 headerp->rm_body.rm_padded.rm_align = 479 cpu_to_be32(RPCRDMA_INLINE_PAD_VALUE(rqst)); 480 headerp->rm_body.rm_padded.rm_thresh = 481 cpu_to_be32(RPCRDMA_INLINE_PAD_THRESH); 482 headerp->rm_body.rm_padded.rm_pempty[0] = xdr_zero; 483 headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero; 484 headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero; 485 hdrlen += 2 * sizeof(u32); /* extra words in padhdr */ 486 if (wtype != rpcrdma_noch) { 487 dprintk("RPC: %s: invalid chunk list\n", 488 __func__); 489 return -EIO; 490 } 491 } else { 492 headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero; 493 headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero; 494 headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero; 495 /* new length after pullup */ 496 rpclen = rqst->rq_svec[0].iov_len; 497 /* 498 * Currently we try to not actually use read inline. 499 * Reply chunks have the desirable property that 500 * they land, packed, directly in the target buffers 501 * without headers, so they require no fixup. The 502 * additional RDMA Write op sends the same amount 503 * of data, streams on-the-wire and adds no overhead 504 * on receive. Therefore, we request a reply chunk 505 * for non-writes wherever feasible and efficient. 506 */ 507 if (wtype == rpcrdma_noch) 508 wtype = rpcrdma_replych; 509 } 510 } 511 512 if (rtype != rpcrdma_noch) { 513 hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf, 514 headerp, rtype); 515 wtype = rtype; /* simplify dprintk */ 516 517 } else if (wtype != rpcrdma_noch) { 518 hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf, 519 headerp, wtype); 520 } 521 if (hdrlen < 0) 522 return hdrlen; 523 524 dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd padlen %zd" 525 " headerp 0x%p base 0x%p lkey 0x%x\n", 526 __func__, transfertypes[wtype], hdrlen, rpclen, padlen, 527 headerp, base, rdmab_lkey(req->rl_rdmabuf)); 528 529 /* 530 * initialize send_iov's - normally only two: rdma chunk header and 531 * single preregistered RPC header buffer, but if padding is present, 532 * then use a preregistered (and zeroed) pad buffer between the RPC 533 * header and any write data. In all non-rdma cases, any following 534 * data has been copied into the RPC header buffer. 535 */ 536 req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf); 537 req->rl_send_iov[0].length = hdrlen; 538 req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf); 539 540 req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf); 541 req->rl_send_iov[1].length = rpclen; 542 req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf); 543 544 req->rl_niovs = 2; 545 546 if (padlen) { 547 struct rpcrdma_ep *ep = &r_xprt->rx_ep; 548 549 req->rl_send_iov[2].addr = rdmab_addr(ep->rep_padbuf); 550 req->rl_send_iov[2].length = padlen; 551 req->rl_send_iov[2].lkey = rdmab_lkey(ep->rep_padbuf); 552 553 req->rl_send_iov[3].addr = req->rl_send_iov[1].addr + rpclen; 554 req->rl_send_iov[3].length = rqst->rq_slen - rpclen; 555 req->rl_send_iov[3].lkey = rdmab_lkey(req->rl_sendbuf); 556 557 req->rl_niovs = 4; 558 } 559 560 return 0; 561 } 562 563 /* 564 * Chase down a received write or reply chunklist to get length 565 * RDMA'd by server. See map at rpcrdma_create_chunks()! :-) 566 */ 567 static int 568 rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp) 569 { 570 unsigned int i, total_len; 571 struct rpcrdma_write_chunk *cur_wchunk; 572 char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf); 573 574 i = be32_to_cpu(**iptrp); 575 if (i > max) 576 return -1; 577 cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1); 578 total_len = 0; 579 while (i--) { 580 struct rpcrdma_segment *seg = &cur_wchunk->wc_target; 581 ifdebug(FACILITY) { 582 u64 off; 583 xdr_decode_hyper((__be32 *)&seg->rs_offset, &off); 584 dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n", 585 __func__, 586 be32_to_cpu(seg->rs_length), 587 (unsigned long long)off, 588 be32_to_cpu(seg->rs_handle)); 589 } 590 total_len += be32_to_cpu(seg->rs_length); 591 ++cur_wchunk; 592 } 593 /* check and adjust for properly terminated write chunk */ 594 if (wrchunk) { 595 __be32 *w = (__be32 *) cur_wchunk; 596 if (*w++ != xdr_zero) 597 return -1; 598 cur_wchunk = (struct rpcrdma_write_chunk *) w; 599 } 600 if ((char *)cur_wchunk > base + rep->rr_len) 601 return -1; 602 603 *iptrp = (__be32 *) cur_wchunk; 604 return total_len; 605 } 606 607 /* 608 * Scatter inline received data back into provided iov's. 609 */ 610 static void 611 rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad) 612 { 613 int i, npages, curlen, olen; 614 char *destp; 615 struct page **ppages; 616 int page_base; 617 618 curlen = rqst->rq_rcv_buf.head[0].iov_len; 619 if (curlen > copy_len) { /* write chunk header fixup */ 620 curlen = copy_len; 621 rqst->rq_rcv_buf.head[0].iov_len = curlen; 622 } 623 624 dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n", 625 __func__, srcp, copy_len, curlen); 626 627 /* Shift pointer for first receive segment only */ 628 rqst->rq_rcv_buf.head[0].iov_base = srcp; 629 srcp += curlen; 630 copy_len -= curlen; 631 632 olen = copy_len; 633 i = 0; 634 rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen; 635 page_base = rqst->rq_rcv_buf.page_base; 636 ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT); 637 page_base &= ~PAGE_MASK; 638 639 if (copy_len && rqst->rq_rcv_buf.page_len) { 640 npages = PAGE_ALIGN(page_base + 641 rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT; 642 for (; i < npages; i++) { 643 curlen = PAGE_SIZE - page_base; 644 if (curlen > copy_len) 645 curlen = copy_len; 646 dprintk("RPC: %s: page %d" 647 " srcp 0x%p len %d curlen %d\n", 648 __func__, i, srcp, copy_len, curlen); 649 destp = kmap_atomic(ppages[i]); 650 memcpy(destp + page_base, srcp, curlen); 651 flush_dcache_page(ppages[i]); 652 kunmap_atomic(destp); 653 srcp += curlen; 654 copy_len -= curlen; 655 if (copy_len == 0) 656 break; 657 page_base = 0; 658 } 659 } 660 661 if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) { 662 curlen = copy_len; 663 if (curlen > rqst->rq_rcv_buf.tail[0].iov_len) 664 curlen = rqst->rq_rcv_buf.tail[0].iov_len; 665 if (rqst->rq_rcv_buf.tail[0].iov_base != srcp) 666 memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen); 667 dprintk("RPC: %s: tail srcp 0x%p len %d curlen %d\n", 668 __func__, srcp, copy_len, curlen); 669 rqst->rq_rcv_buf.tail[0].iov_len = curlen; 670 copy_len -= curlen; ++i; 671 } else 672 rqst->rq_rcv_buf.tail[0].iov_len = 0; 673 674 if (pad) { 675 /* implicit padding on terminal chunk */ 676 unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base; 677 while (pad--) 678 p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0; 679 } 680 681 if (copy_len) 682 dprintk("RPC: %s: %d bytes in" 683 " %d extra segments (%d lost)\n", 684 __func__, olen, i, copy_len); 685 686 /* TBD avoid a warning from call_decode() */ 687 rqst->rq_private_buf = rqst->rq_rcv_buf; 688 } 689 690 void 691 rpcrdma_connect_worker(struct work_struct *work) 692 { 693 struct rpcrdma_ep *ep = 694 container_of(work, struct rpcrdma_ep, rep_connect_worker.work); 695 struct rpcrdma_xprt *r_xprt = 696 container_of(ep, struct rpcrdma_xprt, rx_ep); 697 struct rpc_xprt *xprt = &r_xprt->rx_xprt; 698 699 spin_lock_bh(&xprt->transport_lock); 700 if (++xprt->connect_cookie == 0) /* maintain a reserved value */ 701 ++xprt->connect_cookie; 702 if (ep->rep_connected > 0) { 703 if (!xprt_test_and_set_connected(xprt)) 704 xprt_wake_pending_tasks(xprt, 0); 705 } else { 706 if (xprt_test_and_clear_connected(xprt)) 707 xprt_wake_pending_tasks(xprt, -ENOTCONN); 708 } 709 spin_unlock_bh(&xprt->transport_lock); 710 } 711 712 /* 713 * This function is called when an async event is posted to 714 * the connection which changes the connection state. All it 715 * does at this point is mark the connection up/down, the rpc 716 * timers do the rest. 717 */ 718 void 719 rpcrdma_conn_func(struct rpcrdma_ep *ep) 720 { 721 schedule_delayed_work(&ep->rep_connect_worker, 0); 722 } 723 724 /* 725 * Called as a tasklet to do req/reply match and complete a request 726 * Errors must result in the RPC task either being awakened, or 727 * allowed to timeout, to discover the errors at that time. 728 */ 729 void 730 rpcrdma_reply_handler(struct rpcrdma_rep *rep) 731 { 732 struct rpcrdma_msg *headerp; 733 struct rpcrdma_req *req; 734 struct rpc_rqst *rqst; 735 struct rpc_xprt *xprt = rep->rr_xprt; 736 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 737 __be32 *iptr; 738 int rdmalen, status; 739 unsigned long cwnd; 740 u32 credits; 741 742 /* Check status. If bad, signal disconnect and return rep to pool */ 743 if (rep->rr_len == ~0U) { 744 rpcrdma_recv_buffer_put(rep); 745 if (r_xprt->rx_ep.rep_connected == 1) { 746 r_xprt->rx_ep.rep_connected = -EIO; 747 rpcrdma_conn_func(&r_xprt->rx_ep); 748 } 749 return; 750 } 751 if (rep->rr_len < RPCRDMA_HDRLEN_MIN) { 752 dprintk("RPC: %s: short/invalid reply\n", __func__); 753 goto repost; 754 } 755 headerp = rdmab_to_msg(rep->rr_rdmabuf); 756 if (headerp->rm_vers != rpcrdma_version) { 757 dprintk("RPC: %s: invalid version %d\n", 758 __func__, be32_to_cpu(headerp->rm_vers)); 759 goto repost; 760 } 761 762 /* Get XID and try for a match. */ 763 spin_lock(&xprt->transport_lock); 764 rqst = xprt_lookup_rqst(xprt, headerp->rm_xid); 765 if (rqst == NULL) { 766 spin_unlock(&xprt->transport_lock); 767 dprintk("RPC: %s: reply 0x%p failed " 768 "to match any request xid 0x%08x len %d\n", 769 __func__, rep, be32_to_cpu(headerp->rm_xid), 770 rep->rr_len); 771 repost: 772 r_xprt->rx_stats.bad_reply_count++; 773 rep->rr_func = rpcrdma_reply_handler; 774 if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep)) 775 rpcrdma_recv_buffer_put(rep); 776 777 return; 778 } 779 780 /* get request object */ 781 req = rpcr_to_rdmar(rqst); 782 if (req->rl_reply) { 783 spin_unlock(&xprt->transport_lock); 784 dprintk("RPC: %s: duplicate reply 0x%p to RPC " 785 "request 0x%p: xid 0x%08x\n", __func__, rep, req, 786 be32_to_cpu(headerp->rm_xid)); 787 goto repost; 788 } 789 790 dprintk("RPC: %s: reply 0x%p completes request 0x%p\n" 791 " RPC request 0x%p xid 0x%08x\n", 792 __func__, rep, req, rqst, 793 be32_to_cpu(headerp->rm_xid)); 794 795 /* from here on, the reply is no longer an orphan */ 796 req->rl_reply = rep; 797 xprt->reestablish_timeout = 0; 798 799 /* check for expected message types */ 800 /* The order of some of these tests is important. */ 801 switch (headerp->rm_type) { 802 case rdma_msg: 803 /* never expect read chunks */ 804 /* never expect reply chunks (two ways to check) */ 805 /* never expect write chunks without having offered RDMA */ 806 if (headerp->rm_body.rm_chunks[0] != xdr_zero || 807 (headerp->rm_body.rm_chunks[1] == xdr_zero && 808 headerp->rm_body.rm_chunks[2] != xdr_zero) || 809 (headerp->rm_body.rm_chunks[1] != xdr_zero && 810 req->rl_nchunks == 0)) 811 goto badheader; 812 if (headerp->rm_body.rm_chunks[1] != xdr_zero) { 813 /* count any expected write chunks in read reply */ 814 /* start at write chunk array count */ 815 iptr = &headerp->rm_body.rm_chunks[2]; 816 rdmalen = rpcrdma_count_chunks(rep, 817 req->rl_nchunks, 1, &iptr); 818 /* check for validity, and no reply chunk after */ 819 if (rdmalen < 0 || *iptr++ != xdr_zero) 820 goto badheader; 821 rep->rr_len -= 822 ((unsigned char *)iptr - (unsigned char *)headerp); 823 status = rep->rr_len + rdmalen; 824 r_xprt->rx_stats.total_rdma_reply += rdmalen; 825 /* special case - last chunk may omit padding */ 826 if (rdmalen &= 3) { 827 rdmalen = 4 - rdmalen; 828 status += rdmalen; 829 } 830 } else { 831 /* else ordinary inline */ 832 rdmalen = 0; 833 iptr = (__be32 *)((unsigned char *)headerp + 834 RPCRDMA_HDRLEN_MIN); 835 rep->rr_len -= RPCRDMA_HDRLEN_MIN; 836 status = rep->rr_len; 837 } 838 /* Fix up the rpc results for upper layer */ 839 rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen); 840 break; 841 842 case rdma_nomsg: 843 /* never expect read or write chunks, always reply chunks */ 844 if (headerp->rm_body.rm_chunks[0] != xdr_zero || 845 headerp->rm_body.rm_chunks[1] != xdr_zero || 846 headerp->rm_body.rm_chunks[2] != xdr_one || 847 req->rl_nchunks == 0) 848 goto badheader; 849 iptr = (__be32 *)((unsigned char *)headerp + 850 RPCRDMA_HDRLEN_MIN); 851 rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr); 852 if (rdmalen < 0) 853 goto badheader; 854 r_xprt->rx_stats.total_rdma_reply += rdmalen; 855 /* Reply chunk buffer already is the reply vector - no fixup. */ 856 status = rdmalen; 857 break; 858 859 badheader: 860 default: 861 dprintk("%s: invalid rpcrdma reply header (type %d):" 862 " chunks[012] == %d %d %d" 863 " expected chunks <= %d\n", 864 __func__, be32_to_cpu(headerp->rm_type), 865 headerp->rm_body.rm_chunks[0], 866 headerp->rm_body.rm_chunks[1], 867 headerp->rm_body.rm_chunks[2], 868 req->rl_nchunks); 869 status = -EIO; 870 r_xprt->rx_stats.bad_reply_count++; 871 break; 872 } 873 874 credits = be32_to_cpu(headerp->rm_credit); 875 if (credits == 0) 876 credits = 1; /* don't deadlock */ 877 else if (credits > r_xprt->rx_buf.rb_max_requests) 878 credits = r_xprt->rx_buf.rb_max_requests; 879 880 cwnd = xprt->cwnd; 881 xprt->cwnd = credits << RPC_CWNDSHIFT; 882 if (xprt->cwnd > cwnd) 883 xprt_release_rqst_cong(rqst->rq_task); 884 885 dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n", 886 __func__, xprt, rqst, status); 887 xprt_complete_rqst(rqst->rq_task, status); 888 spin_unlock(&xprt->transport_lock); 889 } 890