/*
 * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include "xprt_rdma.h"

#include <linux/highmem.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

enum rpcrdma_chunktype {
	rpcrdma_noch = 0,
	rpcrdma_readch,
	rpcrdma_areadch,
	rpcrdma_writech,
	rpcrdma_replych
};

#ifdef RPC_DEBUG
static const char transfertypes[][12] = {
	"pure inline",	/* no chunks */
	" read chunk",	/* some argument via rdma read */
	"*read chunk",	/* entire request via rdma read */
	"write chunk",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};
#endif

/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk
 * elements. Segments are then coalesced when registered, if possible
 * within the selected memreg mode.
 *
 * Note, this routine is never called if the connection's memory
 * registration strategy is 0 (bounce buffers).
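 *
 * Returns the number of segments filled in, or 0 if the provided
 * segment array was too small to hold the xdr_buf; callers treat a
 * return of 0 as a marshaling failure.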
 */

static int
rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
{
	int len, n = 0, p;

	if (pos == 0 && xdrbuf->head[0].iov_len) {
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->head[0].iov_base;
		seg[n].mr_len = xdrbuf->head[0].iov_len;
		++n;
	}

	if (xdrbuf->page_len && (xdrbuf->pages[0] != NULL)) {
		if (n == nsegs)
			return 0;
		seg[n].mr_page = xdrbuf->pages[0];
		seg[n].mr_offset = (void *)(unsigned long) xdrbuf->page_base;
		seg[n].mr_len = min_t(u32,
			PAGE_SIZE - xdrbuf->page_base, xdrbuf->page_len);
		len = xdrbuf->page_len - seg[n].mr_len;
		++n;
		p = 1;
		while (len > 0) {
			if (n == nsegs)
				return 0;
			seg[n].mr_page = xdrbuf->pages[p];
			seg[n].mr_offset = NULL;
			seg[n].mr_len = min_t(u32, PAGE_SIZE, len);
			len -= seg[n].mr_len;
			++n;
			++p;
		}
	}

	if (xdrbuf->tail[0].iov_len) {
		/* the rpcrdma protocol allows us to omit any trailing
		 * xdr pad bytes, saving the server an RDMA operation. */
		if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
			return n;
		if (n == nsegs)
			return 0;
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->tail[0].iov_base;
		seg[n].mr_len = xdrbuf->tail[0].iov_len;
		++n;
	}

	return n;
}

/*
 * Create read/write chunk lists, and reply chunks, for RDMA
 *
 *   Assume check against THRESHOLD has been done, and chunks are required.
 *   Assume only encoding one list entry for read|write chunks. The NFSv3
 *   protocol is simple enough to allow this as it only has a single "bulk
 *   result" in each procedure - complicated NFSv4 COMPOUNDs are not. (The
 *   RDMA/Sessions NFSv4 proposal addresses this for future v4 revs.)
 *
 * When used for a single reply chunk (which is a special write
 * chunk used for the entire reply, rather than just the data), it
 * is used primarily for READDIR and READLINK which would otherwise
 * be severely size-limited by a small rdma inline read max. The server
 * response will come back as an RDMA Write, followed by a message
 * of type RDMA_NOMSG carrying the xid and length. As a result, reply
 * chunks do not provide data alignment, however they do not require
 * "fixup" (moving the response to the upper layer buffer) either.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
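 *
 * For example, a read chunk list covering two segments at XDR
 * position P would be encoded on the wire as:
 *    1 - P H1 L1 O1 - 1 - P H2 L2 O2 - 0
 * i.e. each list element carries its own discriminator and position,
 * and a zero word terminates the list.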
 */

static unsigned int
rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
		struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_task->tk_xprt);
	int nsegs, nchunks = 0;
	unsigned int pos;
	struct rpcrdma_mr_seg *seg = req->rl_segments;
	struct rpcrdma_read_chunk *cur_rchunk = NULL;
	struct rpcrdma_write_array *warray = NULL;
	struct rpcrdma_write_chunk *cur_wchunk = NULL;
	__be32 *iptr = headerp->rm_body.rm_chunks;

	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
		/* a read chunk - server will RDMA Read our memory */
		cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
	} else {
		/* a write or reply chunk - server will RDMA Write our memory */
		*iptr++ = xdr_zero;	/* encode a NULL read chunk list */
		if (type == rpcrdma_replych)
			*iptr++ = xdr_zero;	/* a NULL write chunk list */
		warray = (struct rpcrdma_write_array *) iptr;
		cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
	}

	if (type == rpcrdma_replych || type == rpcrdma_areadch)
		pos = 0;
	else
		pos = target->head[0].iov_len;

	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
	if (nsegs == 0)
		return 0;

	do {
		/* bind/register the memory, then build chunk from result. */
		int n = rpcrdma_register_external(seg, nsegs,
						cur_wchunk != NULL, r_xprt);
		if (n <= 0)
			goto out;
		if (cur_rchunk) {	/* read */
			cur_rchunk->rc_discrim = xdr_one;
			/* all read chunks have the same "position" */
			cur_rchunk->rc_position = htonl(pos);
			cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey);
			cur_rchunk->rc_target.rs_length = htonl(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_rchunk->rc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC:       %s: read chunk "
				"elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
			cur_rchunk++;
			r_xprt->rx_stats.read_chunk_count++;
		} else {		/* write/reply */
			cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey);
			cur_wchunk->wc_target.rs_length = htonl(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_wchunk->wc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC:       %s: %s chunk "
				"elem %d@0x%llx:0x%x (%s)\n", __func__,
				(type == rpcrdma_replych) ? "reply" : "write",
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, n < nsegs ? "more" : "last");
			cur_wchunk++;
			if (type == rpcrdma_replych)
				r_xprt->rx_stats.reply_chunk_count++;
			else
				r_xprt->rx_stats.write_chunk_count++;
			r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		}
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* success. all failures return above */
	req->rl_nchunks = nchunks;

	BUG_ON(nchunks == 0);

	/*
	 * finish off header. If write, marshal discrim and nchunks.
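	 * (A read chunk list only needs its list terminator plus the two
	 * NULL lists that follow it; for write/reply chunks the array
	 * discriminator and element count are filled in here, once the
	 * number of chunks is known.)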
	 */
	if (cur_rchunk) {
		iptr = (__be32 *) cur_rchunk;
		*iptr++ = xdr_zero;	/* finish the read chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
	} else {
		warray->wc_discrim = xdr_one;
		warray->wc_nchunks = htonl(nchunks);
		iptr = (__be32 *) cur_wchunk;
		if (type == rpcrdma_writech) {
			*iptr++ = xdr_zero; /* finish the write chunk list */
			*iptr++ = xdr_zero; /* encode a NULL reply chunk */
		}
	}

	/*
	 * Return header size.
	 */
	return (unsigned char *)iptr - (unsigned char *)headerp;

out:
	for (pos = 0; nchunks--;)
		pos += rpcrdma_deregister_external(
				&req->rl_segments[pos], r_xprt, NULL);
	return 0;
}

/*
 * Copy write data inline.
 * This function is used for "small" requests. Data which is passed
 * to RPC via iovecs (or page list) is copied directly into the
 * pre-registered memory buffer for this request. For small amounts
 * of data, this is efficient. The cutoff value is tunable.
 */
static int
rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
{
	int i, npages, curlen;
	int copy_len;
	unsigned char *srcp, *destp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);

	destp = rqst->rq_svec[0].iov_base;
	curlen = rqst->rq_svec[0].iov_len;
	destp += curlen;
	/*
	 * Do optional padding where it makes sense. Alignment of write
	 * payload can help the server, if our setting is accurate.
	 */
	pad -= (curlen + 36/*sizeof(struct rpcrdma_msg_padded)*/);
	if (pad < 0 || rqst->rq_slen - curlen < RPCRDMA_INLINE_PAD_THRESH)
		pad = 0;	/* don't pad this request */

	dprintk("RPC:       %s: pad %d destp 0x%p len %d hdrlen %d\n",
		__func__, pad, destp, rqst->rq_slen, curlen);

	copy_len = rqst->rq_snd_buf.page_len;
	r_xprt->rx_stats.pullup_copy_count += copy_len;
	npages = PAGE_ALIGN(rqst->rq_snd_buf.page_base+copy_len) >> PAGE_SHIFT;
	for (i = 0; copy_len && i < npages; i++) {
		if (i == 0)
			curlen = PAGE_SIZE - rqst->rq_snd_buf.page_base;
		else
			curlen = PAGE_SIZE;
		if (curlen > copy_len)
			curlen = copy_len;
		dprintk("RPC:       %s: page %d destp 0x%p len %d curlen %d\n",
			__func__, i, destp, copy_len, curlen);
		srcp = kmap_atomic(rqst->rq_snd_buf.pages[i],
					KM_SKB_SUNRPC_DATA);
		if (i == 0)
			memcpy(destp, srcp+rqst->rq_snd_buf.page_base, curlen);
		else
			memcpy(destp, srcp, curlen);
		kunmap_atomic(srcp, KM_SKB_SUNRPC_DATA);
		rqst->rq_svec[0].iov_len += curlen;
		destp += curlen;
		copy_len -= curlen;
	}
	if (rqst->rq_snd_buf.tail[0].iov_len) {
		curlen = rqst->rq_snd_buf.tail[0].iov_len;
		if (destp != rqst->rq_snd_buf.tail[0].iov_base) {
			memcpy(destp,
				rqst->rq_snd_buf.tail[0].iov_base, curlen);
			r_xprt->rx_stats.pullup_copy_count += curlen;
		}
		dprintk("RPC:       %s: tail destp 0x%p len %d curlen %d\n",
			__func__, destp, copy_len, curlen);
		rqst->rq_svec[0].iov_len += curlen;
	}
	/* header now contains entire send message */
	return pad;
}

/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Uses multiple RDMA IOVs for a request:
 *  [0] -- RPC RDMA header, which uses memory from the *start* of the
 *         preregistered buffer that already holds the RPC data in
 *         its middle.
 *  [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
 *  [2] -- optional padding.
 *  [3] -- if padded, header only in [1] and data here.
 */

int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_task->tk_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	char *base;
	size_t hdrlen, rpclen, padlen;
	enum rpcrdma_chunktype rtype, wtype;
	struct rpcrdma_msg *headerp;

	/*
	 * rpclen gets amount of data in first buffer, which is the
	 * pre-registered buffer.
	 */
	base = rqst->rq_svec[0].iov_base;
	rpclen = rqst->rq_svec[0].iov_len;

	/* build RDMA header in private area at front */
	headerp = (struct rpcrdma_msg *) req->rl_base;
	/* don't htonl XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = xdr_one;
	headerp->rm_credit = htonl(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = htonl(RDMA_MSG);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline (but see later).
	 * o Large non-read ops return as a single reply chunk.
	 * o Large read ops return data as write chunk(s), header as inline.
	 *
	 * Note: the NFS code sending down multiple result segments implies
	 * the op is one of read, readdir[plus], readlink or NFSv4 getacl.
	 */

	/*
	 * This code can handle read chunks, write chunks OR reply
	 * chunks -- only one type. If the request is too big to fit
	 * inline, then we will choose read chunks. If the request is
	 * a READ, then use write chunks to separate the file data
	 * into pages; otherwise use reply chunks.
	 */
	if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
		wtype = rpcrdma_noch;
	else if (rqst->rq_rcv_buf.page_len == 0)
		wtype = rpcrdma_replych;
	else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 *
	 * Note: the NFS code sending down multiple argument segments
	 * implies the op is a write.
	 * TBD check NFSv4 setacl
	 */
	if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
		rtype = rpcrdma_noch;
	else if (rqst->rq_snd_buf.page_len == 0)
		rtype = rpcrdma_areadch;
	else
		rtype = rpcrdma_readch;

	/* The following simplification is not true forever */
	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
		wtype = rpcrdma_noch;
	BUG_ON(rtype != rpcrdma_noch && wtype != rpcrdma_noch);

	if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_BOUNCEBUFFERS &&
	    (rtype != rpcrdma_noch || wtype != rpcrdma_noch)) {
		/* forced to "pure inline"? */
		dprintk("RPC:       %s: too much data (%d/%d) for inline\n",
			__func__, rqst->rq_rcv_buf.len, rqst->rq_snd_buf.len);
		return -1;
	}

	hdrlen = 28; /*sizeof *headerp;*/
	padlen = 0;

	/*
	 * Pull up any extra send data into the preregistered buffer.
	 * When padding is in use and applies to the transfer, insert
	 * it and change the message type.
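	 * (If padding is inserted, the message type becomes RDMA_MSGP and
	 * the alignment value is carried in the header so the peer can
	 * account for it; otherwise the type stays RDMA_MSG and the three
	 * empty chunk lists are encoded below.)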
	 */
	if (rtype == rpcrdma_noch) {

		padlen = rpcrdma_inline_pullup(rqst,
						RPCRDMA_INLINE_PAD_VALUE(rqst));

		if (padlen) {
			headerp->rm_type = htonl(RDMA_MSGP);
			headerp->rm_body.rm_padded.rm_align =
				htonl(RPCRDMA_INLINE_PAD_VALUE(rqst));
			headerp->rm_body.rm_padded.rm_thresh =
				htonl(RPCRDMA_INLINE_PAD_THRESH);
			headerp->rm_body.rm_padded.rm_pempty[0] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
			hdrlen += 2 * sizeof(u32);	/* extra words in padhdr */
			BUG_ON(wtype != rpcrdma_noch);

		} else {
			headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
			/* new length after pullup */
			rpclen = rqst->rq_svec[0].iov_len;
			/*
			 * Currently we try to not actually use read inline.
			 * Reply chunks have the desirable property that
			 * they land, packed, directly in the target buffers
			 * without headers, so they require no fixup. The
			 * additional RDMA Write op sends the same amount
			 * of data, streams on-the-wire and adds no overhead
			 * on receive. Therefore, we request a reply chunk
			 * for non-writes wherever feasible and efficient.
			 */
			if (wtype == rpcrdma_noch &&
			    r_xprt->rx_ia.ri_memreg_strategy > RPCRDMA_REGISTER)
				wtype = rpcrdma_replych;
		}
	}

	/*
	 * Marshal chunks. This routine will return the header length
	 * consumed by marshaling.
	 */
	if (rtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst,
					&rqst->rq_snd_buf, headerp, rtype);
		wtype = rtype;	/* simplify dprintk */

	} else if (wtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst,
					&rqst->rq_rcv_buf, headerp, wtype);
	}

	if (hdrlen == 0)
		return -1;

	dprintk("RPC:       %s: %s: hdrlen %zd rpclen %zd padlen %zd"
		" headerp 0x%p base 0x%p lkey 0x%x\n",
		__func__, transfertypes[wtype], hdrlen, rpclen, padlen,
		headerp, base, req->rl_iov.lkey);

	/*
	 * initialize send_iov's - normally only two: rdma chunk header and
	 * single preregistered RPC header buffer, but if padding is present,
	 * then use a preregistered (and zeroed) pad buffer between the RPC
	 * header and any write data. In all non-rdma cases, any following
	 * data has been copied into the RPC header buffer.
	 */
	req->rl_send_iov[0].addr = req->rl_iov.addr;
	req->rl_send_iov[0].length = hdrlen;
	req->rl_send_iov[0].lkey = req->rl_iov.lkey;

	req->rl_send_iov[1].addr = req->rl_iov.addr + (base - req->rl_base);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = req->rl_iov.lkey;

	req->rl_niovs = 2;

	if (padlen) {
		struct rpcrdma_ep *ep = &r_xprt->rx_ep;

		req->rl_send_iov[2].addr = ep->rep_pad.addr;
		req->rl_send_iov[2].length = padlen;
		req->rl_send_iov[2].lkey = ep->rep_pad.lkey;

		req->rl_send_iov[3].addr = req->rl_send_iov[1].addr + rpclen;
		req->rl_send_iov[3].length = rqst->rq_slen - rpclen;
		req->rl_send_iov[3].lkey = req->rl_iov.lkey;

		req->rl_niovs = 4;
	}

	return 0;
}

/*
 * Chase down a received write or reply chunklist to get length
 * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
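 *
 * Returns the total number of bytes the server claims to have
 * written, or -1 if the chunk list in the reply is malformed or
 * overruns the receive buffer.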
 */
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp)
{
	unsigned int i, total_len;
	struct rpcrdma_write_chunk *cur_wchunk;

	i = ntohl(**iptrp);	/* get array count */
	if (i > max)
		return -1;
	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	total_len = 0;
	while (i--) {
		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
			dprintk("RPC:       %s: chunk %d@0x%llx:0x%x\n",
				__func__,
				ntohl(seg->rs_length),
				(unsigned long long)off,
				ntohl(seg->rs_handle));
		}
		total_len += ntohl(seg->rs_length);
		++cur_wchunk;
	}
	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		__be32 *w = (__be32 *) cur_wchunk;
		if (*w++ != xdr_zero)
			return -1;
		cur_wchunk = (struct rpcrdma_write_chunk *) w;
	}
	if ((char *) cur_wchunk > rep->rr_base + rep->rr_len)
		return -1;

	*iptrp = (__be32 *) cur_wchunk;
	return total_len;
}

/*
 * Scatter inline received data back into provided iov's.
 */
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	int i, npages, curlen, olen;
	char *destp;

	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len) {	/* write chunk header fixup */
		curlen = copy_len;
		rqst->rq_rcv_buf.head[0].iov_len = curlen;
	}

	dprintk("RPC:       %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);

	/* Shift pointer for first receive segment only */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	srcp += curlen;
	copy_len -= curlen;

	olen = copy_len;
	i = 0;
	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		npages = PAGE_ALIGN(rqst->rq_rcv_buf.page_base +
			rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
		for (; i < npages; i++) {
			if (i == 0)
				curlen = PAGE_SIZE - rqst->rq_rcv_buf.page_base;
			else
				curlen = PAGE_SIZE;
			if (curlen > copy_len)
				curlen = copy_len;
			dprintk("RPC:       %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(rqst->rq_rcv_buf.pages[i],
						KM_SKB_SUNRPC_DATA);
			if (i == 0)
				memcpy(destp + rqst->rq_rcv_buf.page_base,
						srcp, curlen);
			else
				memcpy(destp, srcp, curlen);
			flush_dcache_page(rqst->rq_rcv_buf.pages[i]);
			kunmap_atomic(destp, KM_SKB_SUNRPC_DATA);
			srcp += curlen;
			copy_len -= curlen;
			if (copy_len == 0)
				break;
		}
		rqst->rq_rcv_buf.page_len = olen - copy_len;
	} else
		rqst->rq_rcv_buf.page_len = 0;

	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
		curlen = copy_len;
		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
			memcpy(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
		dprintk("RPC:       %s: tail srcp 0x%p len %d curlen %d\n",
			__func__, srcp, copy_len, curlen);
		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
		copy_len -= curlen; ++i;
	} else
		rqst->rq_rcv_buf.tail[0].iov_len = 0;

	if (pad) {
		/* implicit padding on terminal chunk */
		unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
		while (pad--)
			p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
	}

	if (copy_len)
		dprintk("RPC:       %s: %d bytes in"
			" %d extra segments (%d lost)\n",
			__func__, olen, i, copy_len);

	/* TBD avoid a warning from call_decode() */
	rqst->rq_private_buf = rqst->rq_rcv_buf;
}

/*
 * This function is called when an async event is posted to
 * the connection which changes the connection state. All it
 * does at this point is mark the connection up/down, the rpc
 * timers do the rest.
 */
void
rpcrdma_conn_func(struct rpcrdma_ep *ep)
{
	struct rpc_xprt *xprt = ep->rep_xprt;

	spin_lock_bh(&xprt->transport_lock);
	if (++xprt->connect_cookie == 0)	/* maintain a reserved value */
		++xprt->connect_cookie;
	if (ep->rep_connected > 0) {
		if (!xprt_test_and_set_connected(xprt))
			xprt_wake_pending_tasks(xprt, 0);
	} else {
		if (xprt_test_and_clear_connected(xprt))
			xprt_wake_pending_tasks(xprt, -ENOTCONN);
	}
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * This function is called when the memory window unbind we are waiting
 * for completes. Just use rr_func (zeroed by upcall) to signal completion.
 */
static void
rpcrdma_unbind_func(struct rpcrdma_rep *rep)
{
	wake_up(&rep->rr_unbind);
}

/*
 * Called as a tasklet to do req/reply match and complete a request.
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpc_xprt *xprt = rep->rr_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	__be32 *iptr;
	int i, rdmalen, status;

	/* Check status. If bad, signal disconnect and return rep to pool */
	if (rep->rr_len == ~0U) {
		rpcrdma_recv_buffer_put(rep);
		if (r_xprt->rx_ep.rep_connected == 1) {
			r_xprt->rx_ep.rep_connected = -EIO;
			rpcrdma_conn_func(&r_xprt->rx_ep);
		}
		return;
	}
	if (rep->rr_len < 28) {
		dprintk("RPC:       %s: short/invalid reply\n", __func__);
		goto repost;
	}
	headerp = (struct rpcrdma_msg *) rep->rr_base;
	if (headerp->rm_vers != xdr_one) {
		dprintk("RPC:       %s: invalid version %d\n",
			__func__, ntohl(headerp->rm_vers));
		goto repost;
	}

	/* Get XID and try for a match. */
	spin_lock(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (rqst == NULL) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC:       %s: reply 0x%p failed "
			"to match any request xid 0x%08x len %d\n",
			__func__, rep, headerp->rm_xid, rep->rr_len);
repost:
		r_xprt->rx_stats.bad_reply_count++;
		rep->rr_func = rpcrdma_reply_handler;
		if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
			rpcrdma_recv_buffer_put(rep);

		return;
	}

	/* get request object */
	req = rpcr_to_rdmar(rqst);

	dprintk("RPC:       %s: reply 0x%p completes request 0x%p\n"
		"                   RPC request 0x%p xid 0x%08x\n",
		__func__, rep, req, rqst, headerp->rm_xid);

	BUG_ON(!req || req->rl_reply);

	/* from here on, the reply is no longer an orphan */
	req->rl_reply = rep;

	/* check for expected message types */
	/* The order of some of these tests is important. */
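	/*
	 * RDMA_MSG carries the RPC reply inline in the receive buffer,
	 * optionally preceded by write chunks the server has filled in.
	 * RDMA_NOMSG means the entire reply was delivered via the reply
	 * chunk, so only the chunk list needs to be decoded here.
	 */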
	switch (headerp->rm_type) {
	case htonl(RDMA_MSG):
		/* never expect read chunks */
		/* never expect reply chunks (two ways to check) */
		/* never expect write chunks without having offered RDMA */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
		     req->rl_nchunks == 0))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in read reply */
			/* start at write chunk array count */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep,
						req->rl_nchunks, 1, &iptr);
			/* check for validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - last chunk may omit padding */
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* else ordinary inline */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp + 28);
			rep->rr_len -= 28; /*sizeof *headerp;*/
			status = rep->rr_len;
		}
		/* Fix up the rpc results for upper layer */
		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
		break;

	case htonl(RDMA_NOMSG):
		/* never expect read or write chunks, always reply chunks */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one ||
		    req->rl_nchunks == 0)
			goto badheader;
		iptr = (__be32 *)((unsigned char *)headerp + 28);
		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk buffer already is the reply vector - no fixup. */
		status = rdmalen;
		break;

badheader:
	default:
		dprintk("%s: invalid rpcrdma reply header (type %d):"
			" chunks[012] == %d %d %d"
			" expected chunks <= %d\n",
			__func__, ntohl(headerp->rm_type),
			headerp->rm_body.rm_chunks[0],
			headerp->rm_body.rm_chunks[1],
			headerp->rm_body.rm_chunks[2],
			req->rl_nchunks);
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}

	/* If using mw bind, start the deregister process now. */
	/* (Note: if mr_free(), cannot perform it here, in tasklet context) */
	if (req->rl_nchunks) switch (r_xprt->rx_ia.ri_memreg_strategy) {
	case RPCRDMA_MEMWINDOWS:
		for (i = 0; req->rl_nchunks-- > 1;)
			i += rpcrdma_deregister_external(
				&req->rl_segments[i], r_xprt, NULL);
		/* Optionally wait (not here) for unbinds to complete */
		rep->rr_func = rpcrdma_unbind_func;
		(void) rpcrdma_deregister_external(&req->rl_segments[i],
						   r_xprt, rep);
		break;
	case RPCRDMA_MEMWINDOWS_ASYNC:
		for (i = 0; req->rl_nchunks--;)
			i += rpcrdma_deregister_external(&req->rl_segments[i],
							 r_xprt, NULL);
		break;
	default:
		break;
	}

	dprintk("RPC:       %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
		__func__, xprt, rqst, status);
	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock(&xprt->transport_lock);
}