1 /* 2 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the BSD-type 8 * license below: 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 14 * Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 17 * Redistributions in binary form must reproduce the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer in the documentation and/or other materials provided 20 * with the distribution. 21 * 22 * Neither the name of the Network Appliance, Inc. nor the names of 23 * its contributors may be used to endorse or promote products 24 * derived from this software without specific prior written 25 * permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 38 * 39 * Author: Tom Tucker <tom@opengridcomputing.com> 40 */ 41 42 #include <linux/sunrpc/debug.h> 43 #include <linux/sunrpc/rpc_rdma.h> 44 #include <linux/spinlock.h> 45 #include <asm/unaligned.h> 46 #include <rdma/ib_verbs.h> 47 #include <rdma/rdma_cm.h> 48 #include <linux/sunrpc/svc_rdma.h> 49 50 #define RPCDBG_FACILITY RPCDBG_SVCXPRT 51 52 /* 53 * Replace the pages in the rq_argpages array with the pages from the SGE in 54 * the RDMA_RECV completion. The SGL should contain full pages up until the 55 * last one. 56 */ 57 static void rdma_build_arg_xdr(struct svc_rqst *rqstp, 58 struct svc_rdma_op_ctxt *ctxt, 59 u32 byte_count) 60 { 61 struct page *page; 62 u32 bc; 63 int sge_no; 64 65 /* Swap the page in the SGE with the page in argpages */ 66 page = ctxt->pages[0]; 67 put_page(rqstp->rq_pages[0]); 68 rqstp->rq_pages[0] = page; 69 70 /* Set up the XDR head */ 71 rqstp->rq_arg.head[0].iov_base = page_address(page); 72 rqstp->rq_arg.head[0].iov_len = min(byte_count, ctxt->sge[0].length); 73 rqstp->rq_arg.len = byte_count; 74 rqstp->rq_arg.buflen = byte_count; 75 76 /* Compute bytes past head in the SGL */ 77 bc = byte_count - rqstp->rq_arg.head[0].iov_len; 78 79 /* If data remains, store it in the pagelist */ 80 rqstp->rq_arg.page_len = bc; 81 rqstp->rq_arg.page_base = 0; 82 rqstp->rq_arg.pages = &rqstp->rq_pages[1]; 83 sge_no = 1; 84 while (bc && sge_no < ctxt->count) { 85 page = ctxt->pages[sge_no]; 86 put_page(rqstp->rq_pages[sge_no]); 87 rqstp->rq_pages[sge_no] = page; 88 bc -= min(bc, ctxt->sge[sge_no].length); 89 rqstp->rq_arg.buflen += ctxt->sge[sge_no].length; 90 sge_no++; 91 } 92 rqstp->rq_respages = &rqstp->rq_pages[sge_no]; 93 94 /* We should never run out of SGE because the limit is defined to 95 * support the max allowed RPC data length 96 */ 97 BUG_ON(bc && (sge_no == ctxt->count)); 98 BUG_ON((rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len) 99 != byte_count); 100 BUG_ON(rqstp->rq_arg.len != byte_count); 101 102 /* If not all pages were used from the SGL, free the remaining ones */ 103 bc = sge_no; 104 while (sge_no < ctxt->count) { 105 page = ctxt->pages[sge_no++]; 106 put_page(page); 107 } 108 ctxt->count = bc; 109 110 /* Set up tail */ 111 rqstp->rq_arg.tail[0].iov_base = NULL; 112 rqstp->rq_arg.tail[0].iov_len = 0; 113 } 114 115 /* Encode a read-chunk-list as an array of IB SGE 116 * 117 * Assumptions: 118 * - chunk[0]->position points to pages[0] at an offset of 0 119 * - pages[] is not physically or virtually contiguous and consists of 120 * PAGE_SIZE elements. 121 * 122 * Output: 123 * - sge array pointing into pages[] array. 124 * - chunk_sge array specifying sge index and count for each 125 * chunk in the read list 126 * 127 */ 128 static int map_read_chunks(struct svcxprt_rdma *xprt, 129 struct svc_rqst *rqstp, 130 struct svc_rdma_op_ctxt *head, 131 struct rpcrdma_msg *rmsgp, 132 struct svc_rdma_req_map *rpl_map, 133 struct svc_rdma_req_map *chl_map, 134 int ch_count, 135 int byte_count) 136 { 137 int sge_no; 138 int sge_bytes; 139 int page_off; 140 int page_no; 141 int ch_bytes; 142 int ch_no; 143 struct rpcrdma_read_chunk *ch; 144 145 sge_no = 0; 146 page_no = 0; 147 page_off = 0; 148 ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0]; 149 ch_no = 0; 150 ch_bytes = ch->rc_target.rs_length; 151 head->arg.head[0] = rqstp->rq_arg.head[0]; 152 head->arg.tail[0] = rqstp->rq_arg.tail[0]; 153 head->arg.pages = &head->pages[head->count]; 154 head->hdr_count = head->count; /* save count of hdr pages */ 155 head->arg.page_base = 0; 156 head->arg.page_len = ch_bytes; 157 head->arg.len = rqstp->rq_arg.len + ch_bytes; 158 head->arg.buflen = rqstp->rq_arg.buflen + ch_bytes; 159 head->count++; 160 chl_map->ch[0].start = 0; 161 while (byte_count) { 162 rpl_map->sge[sge_no].iov_base = 163 page_address(rqstp->rq_arg.pages[page_no]) + page_off; 164 sge_bytes = min_t(int, PAGE_SIZE-page_off, ch_bytes); 165 rpl_map->sge[sge_no].iov_len = sge_bytes; 166 /* 167 * Don't bump head->count here because the same page 168 * may be used by multiple SGE. 169 */ 170 head->arg.pages[page_no] = rqstp->rq_arg.pages[page_no]; 171 rqstp->rq_respages = &rqstp->rq_arg.pages[page_no+1]; 172 173 byte_count -= sge_bytes; 174 ch_bytes -= sge_bytes; 175 sge_no++; 176 /* 177 * If all bytes for this chunk have been mapped to an 178 * SGE, move to the next SGE 179 */ 180 if (ch_bytes == 0) { 181 chl_map->ch[ch_no].count = 182 sge_no - chl_map->ch[ch_no].start; 183 ch_no++; 184 ch++; 185 chl_map->ch[ch_no].start = sge_no; 186 ch_bytes = ch->rc_target.rs_length; 187 /* If bytes remaining account for next chunk */ 188 if (byte_count) { 189 head->arg.page_len += ch_bytes; 190 head->arg.len += ch_bytes; 191 head->arg.buflen += ch_bytes; 192 } 193 } 194 /* 195 * If this SGE consumed all of the page, move to the 196 * next page 197 */ 198 if ((sge_bytes + page_off) == PAGE_SIZE) { 199 page_no++; 200 page_off = 0; 201 /* 202 * If there are still bytes left to map, bump 203 * the page count 204 */ 205 if (byte_count) 206 head->count++; 207 } else 208 page_off += sge_bytes; 209 } 210 BUG_ON(byte_count != 0); 211 return sge_no; 212 } 213 214 /* Map a read-chunk-list to an XDR and fast register the page-list. 215 * 216 * Assumptions: 217 * - chunk[0] position points to pages[0] at an offset of 0 218 * - pages[] will be made physically contiguous by creating a one-off memory 219 * region using the fastreg verb. 220 * - byte_count is # of bytes in read-chunk-list 221 * - ch_count is # of chunks in read-chunk-list 222 * 223 * Output: 224 * - sge array pointing into pages[] array. 225 * - chunk_sge array specifying sge index and count for each 226 * chunk in the read list 227 */ 228 static int fast_reg_read_chunks(struct svcxprt_rdma *xprt, 229 struct svc_rqst *rqstp, 230 struct svc_rdma_op_ctxt *head, 231 struct rpcrdma_msg *rmsgp, 232 struct svc_rdma_req_map *rpl_map, 233 struct svc_rdma_req_map *chl_map, 234 int ch_count, 235 int byte_count) 236 { 237 int page_no; 238 int ch_no; 239 u32 offset; 240 struct rpcrdma_read_chunk *ch; 241 struct svc_rdma_fastreg_mr *frmr; 242 int ret = 0; 243 244 frmr = svc_rdma_get_frmr(xprt); 245 if (IS_ERR(frmr)) 246 return -ENOMEM; 247 248 head->frmr = frmr; 249 head->arg.head[0] = rqstp->rq_arg.head[0]; 250 head->arg.tail[0] = rqstp->rq_arg.tail[0]; 251 head->arg.pages = &head->pages[head->count]; 252 head->hdr_count = head->count; /* save count of hdr pages */ 253 head->arg.page_base = 0; 254 head->arg.page_len = byte_count; 255 head->arg.len = rqstp->rq_arg.len + byte_count; 256 head->arg.buflen = rqstp->rq_arg.buflen + byte_count; 257 258 /* Fast register the page list */ 259 frmr->kva = page_address(rqstp->rq_arg.pages[0]); 260 frmr->direction = DMA_FROM_DEVICE; 261 frmr->access_flags = (IB_ACCESS_LOCAL_WRITE|IB_ACCESS_REMOTE_WRITE); 262 frmr->map_len = byte_count; 263 frmr->page_list_len = PAGE_ALIGN(byte_count) >> PAGE_SHIFT; 264 for (page_no = 0; page_no < frmr->page_list_len; page_no++) { 265 frmr->page_list->page_list[page_no] = 266 ib_dma_map_single(xprt->sc_cm_id->device, 267 page_address(rqstp->rq_arg.pages[page_no]), 268 PAGE_SIZE, DMA_FROM_DEVICE); 269 if (ib_dma_mapping_error(xprt->sc_cm_id->device, 270 frmr->page_list->page_list[page_no])) 271 goto fatal_err; 272 atomic_inc(&xprt->sc_dma_used); 273 head->arg.pages[page_no] = rqstp->rq_arg.pages[page_no]; 274 } 275 head->count += page_no; 276 277 /* rq_respages points one past arg pages */ 278 rqstp->rq_respages = &rqstp->rq_arg.pages[page_no]; 279 280 /* Create the reply and chunk maps */ 281 offset = 0; 282 ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0]; 283 for (ch_no = 0; ch_no < ch_count; ch_no++) { 284 rpl_map->sge[ch_no].iov_base = frmr->kva + offset; 285 rpl_map->sge[ch_no].iov_len = ch->rc_target.rs_length; 286 chl_map->ch[ch_no].count = 1; 287 chl_map->ch[ch_no].start = ch_no; 288 offset += ch->rc_target.rs_length; 289 ch++; 290 } 291 292 ret = svc_rdma_fastreg(xprt, frmr); 293 if (ret) 294 goto fatal_err; 295 296 return ch_no; 297 298 fatal_err: 299 printk("svcrdma: error fast registering xdr for xprt %p", xprt); 300 svc_rdma_put_frmr(xprt, frmr); 301 return -EIO; 302 } 303 304 static int rdma_set_ctxt_sge(struct svcxprt_rdma *xprt, 305 struct svc_rdma_op_ctxt *ctxt, 306 struct svc_rdma_fastreg_mr *frmr, 307 struct kvec *vec, 308 u64 *sgl_offset, 309 int count) 310 { 311 int i; 312 313 ctxt->count = count; 314 ctxt->direction = DMA_FROM_DEVICE; 315 for (i = 0; i < count; i++) { 316 ctxt->sge[i].length = 0; /* in case map fails */ 317 if (!frmr) { 318 ctxt->sge[i].addr = 319 ib_dma_map_single(xprt->sc_cm_id->device, 320 vec[i].iov_base, 321 vec[i].iov_len, 322 DMA_FROM_DEVICE); 323 if (ib_dma_mapping_error(xprt->sc_cm_id->device, 324 ctxt->sge[i].addr)) 325 return -EINVAL; 326 ctxt->sge[i].lkey = xprt->sc_dma_lkey; 327 atomic_inc(&xprt->sc_dma_used); 328 } else { 329 ctxt->sge[i].addr = (unsigned long)vec[i].iov_base; 330 ctxt->sge[i].lkey = frmr->mr->lkey; 331 } 332 ctxt->sge[i].length = vec[i].iov_len; 333 *sgl_offset = *sgl_offset + vec[i].iov_len; 334 } 335 return 0; 336 } 337 338 static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count) 339 { 340 if ((rdma_node_get_transport(xprt->sc_cm_id->device->node_type) == 341 RDMA_TRANSPORT_IWARP) && 342 sge_count > 1) 343 return 1; 344 else 345 return min_t(int, sge_count, xprt->sc_max_sge); 346 } 347 348 /* 349 * Use RDMA_READ to read data from the advertised client buffer into the 350 * XDR stream starting at rq_arg.head[0].iov_base. 351 * Each chunk in the array 352 * contains the following fields: 353 * discrim - '1', This isn't used for data placement 354 * position - The xdr stream offset (the same for every chunk) 355 * handle - RMR for client memory region 356 * length - data transfer length 357 * offset - 64 bit tagged offset in remote memory region 358 * 359 * On our side, we need to read into a pagelist. The first page immediately 360 * follows the RPC header. 361 * 362 * This function returns: 363 * 0 - No error and no read-list found. 364 * 365 * 1 - Successful read-list processing. The data is not yet in 366 * the pagelist and therefore the RPC request must be deferred. The 367 * I/O completion will enqueue the transport again and 368 * svc_rdma_recvfrom will complete the request. 369 * 370 * <0 - Error processing/posting read-list. 371 * 372 * NOTE: The ctxt must not be touched after the last WR has been posted 373 * because the I/O completion processing may occur on another 374 * processor and free / modify the context. Ne touche pas! 375 */ 376 static int rdma_read_xdr(struct svcxprt_rdma *xprt, 377 struct rpcrdma_msg *rmsgp, 378 struct svc_rqst *rqstp, 379 struct svc_rdma_op_ctxt *hdr_ctxt) 380 { 381 struct ib_send_wr read_wr; 382 struct ib_send_wr inv_wr; 383 int err = 0; 384 int ch_no; 385 int ch_count; 386 int byte_count; 387 int sge_count; 388 u64 sgl_offset; 389 struct rpcrdma_read_chunk *ch; 390 struct svc_rdma_op_ctxt *ctxt = NULL; 391 struct svc_rdma_req_map *rpl_map; 392 struct svc_rdma_req_map *chl_map; 393 394 /* If no read list is present, return 0 */ 395 ch = svc_rdma_get_read_chunk(rmsgp); 396 if (!ch) 397 return 0; 398 399 svc_rdma_rcl_chunk_counts(ch, &ch_count, &byte_count); 400 if (ch_count > RPCSVC_MAXPAGES) 401 return -EINVAL; 402 403 /* Allocate temporary reply and chunk maps */ 404 rpl_map = svc_rdma_get_req_map(); 405 chl_map = svc_rdma_get_req_map(); 406 407 if (!xprt->sc_frmr_pg_list_len) 408 sge_count = map_read_chunks(xprt, rqstp, hdr_ctxt, rmsgp, 409 rpl_map, chl_map, ch_count, 410 byte_count); 411 else 412 sge_count = fast_reg_read_chunks(xprt, rqstp, hdr_ctxt, rmsgp, 413 rpl_map, chl_map, ch_count, 414 byte_count); 415 if (sge_count < 0) { 416 err = -EIO; 417 goto out; 418 } 419 420 sgl_offset = 0; 421 ch_no = 0; 422 423 for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0]; 424 ch->rc_discrim != 0; ch++, ch_no++) { 425 next_sge: 426 ctxt = svc_rdma_get_context(xprt); 427 ctxt->direction = DMA_FROM_DEVICE; 428 ctxt->frmr = hdr_ctxt->frmr; 429 ctxt->read_hdr = NULL; 430 clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags); 431 clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags); 432 433 /* Prepare READ WR */ 434 memset(&read_wr, 0, sizeof read_wr); 435 read_wr.wr_id = (unsigned long)ctxt; 436 read_wr.opcode = IB_WR_RDMA_READ; 437 ctxt->wr_op = read_wr.opcode; 438 read_wr.send_flags = IB_SEND_SIGNALED; 439 read_wr.wr.rdma.rkey = ch->rc_target.rs_handle; 440 read_wr.wr.rdma.remote_addr = 441 get_unaligned(&(ch->rc_target.rs_offset)) + 442 sgl_offset; 443 read_wr.sg_list = ctxt->sge; 444 read_wr.num_sge = 445 rdma_read_max_sge(xprt, chl_map->ch[ch_no].count); 446 err = rdma_set_ctxt_sge(xprt, ctxt, hdr_ctxt->frmr, 447 &rpl_map->sge[chl_map->ch[ch_no].start], 448 &sgl_offset, 449 read_wr.num_sge); 450 if (err) { 451 svc_rdma_unmap_dma(ctxt); 452 svc_rdma_put_context(ctxt, 0); 453 goto out; 454 } 455 if (((ch+1)->rc_discrim == 0) && 456 (read_wr.num_sge == chl_map->ch[ch_no].count)) { 457 /* 458 * Mark the last RDMA_READ with a bit to 459 * indicate all RPC data has been fetched from 460 * the client and the RPC needs to be enqueued. 461 */ 462 set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags); 463 if (hdr_ctxt->frmr) { 464 set_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags); 465 /* 466 * Invalidate the local MR used to map the data 467 * sink. 468 */ 469 if (xprt->sc_dev_caps & 470 SVCRDMA_DEVCAP_READ_W_INV) { 471 read_wr.opcode = 472 IB_WR_RDMA_READ_WITH_INV; 473 ctxt->wr_op = read_wr.opcode; 474 read_wr.ex.invalidate_rkey = 475 ctxt->frmr->mr->lkey; 476 } else { 477 /* Prepare INVALIDATE WR */ 478 memset(&inv_wr, 0, sizeof inv_wr); 479 inv_wr.opcode = IB_WR_LOCAL_INV; 480 inv_wr.send_flags = IB_SEND_SIGNALED; 481 inv_wr.ex.invalidate_rkey = 482 hdr_ctxt->frmr->mr->lkey; 483 read_wr.next = &inv_wr; 484 } 485 } 486 ctxt->read_hdr = hdr_ctxt; 487 } 488 /* Post the read */ 489 err = svc_rdma_send(xprt, &read_wr); 490 if (err) { 491 printk(KERN_ERR "svcrdma: Error %d posting RDMA_READ\n", 492 err); 493 set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); 494 svc_rdma_put_context(ctxt, 0); 495 goto out; 496 } 497 atomic_inc(&rdma_stat_read); 498 499 if (read_wr.num_sge < chl_map->ch[ch_no].count) { 500 chl_map->ch[ch_no].count -= read_wr.num_sge; 501 chl_map->ch[ch_no].start += read_wr.num_sge; 502 goto next_sge; 503 } 504 sgl_offset = 0; 505 err = 1; 506 } 507 508 out: 509 svc_rdma_put_req_map(rpl_map); 510 svc_rdma_put_req_map(chl_map); 511 512 /* Detach arg pages. svc_recv will replenish them */ 513 for (ch_no = 0; &rqstp->rq_pages[ch_no] < rqstp->rq_respages; ch_no++) 514 rqstp->rq_pages[ch_no] = NULL; 515 516 /* 517 * Detach res pages. svc_release must see a resused count of 518 * zero or it will attempt to put them. 519 */ 520 while (rqstp->rq_resused) 521 rqstp->rq_respages[--rqstp->rq_resused] = NULL; 522 523 return err; 524 } 525 526 static int rdma_read_complete(struct svc_rqst *rqstp, 527 struct svc_rdma_op_ctxt *head) 528 { 529 int page_no; 530 int ret; 531 532 BUG_ON(!head); 533 534 /* Copy RPC pages */ 535 for (page_no = 0; page_no < head->count; page_no++) { 536 put_page(rqstp->rq_pages[page_no]); 537 rqstp->rq_pages[page_no] = head->pages[page_no]; 538 } 539 /* Point rq_arg.pages past header */ 540 rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count]; 541 rqstp->rq_arg.page_len = head->arg.page_len; 542 rqstp->rq_arg.page_base = head->arg.page_base; 543 544 /* rq_respages starts after the last arg page */ 545 rqstp->rq_respages = &rqstp->rq_arg.pages[page_no]; 546 rqstp->rq_resused = 0; 547 548 /* Rebuild rq_arg head and tail. */ 549 rqstp->rq_arg.head[0] = head->arg.head[0]; 550 rqstp->rq_arg.tail[0] = head->arg.tail[0]; 551 rqstp->rq_arg.len = head->arg.len; 552 rqstp->rq_arg.buflen = head->arg.buflen; 553 554 /* Free the context */ 555 svc_rdma_put_context(head, 0); 556 557 /* XXX: What should this be? */ 558 rqstp->rq_prot = IPPROTO_MAX; 559 svc_xprt_copy_addrs(rqstp, rqstp->rq_xprt); 560 561 ret = rqstp->rq_arg.head[0].iov_len 562 + rqstp->rq_arg.page_len 563 + rqstp->rq_arg.tail[0].iov_len; 564 dprintk("svcrdma: deferred read ret=%d, rq_arg.len =%d, " 565 "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len = %zd\n", 566 ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base, 567 rqstp->rq_arg.head[0].iov_len); 568 569 svc_xprt_received(rqstp->rq_xprt); 570 return ret; 571 } 572 573 /* 574 * Set up the rqstp thread context to point to the RQ buffer. If 575 * necessary, pull additional data from the client with an RDMA_READ 576 * request. 577 */ 578 int svc_rdma_recvfrom(struct svc_rqst *rqstp) 579 { 580 struct svc_xprt *xprt = rqstp->rq_xprt; 581 struct svcxprt_rdma *rdma_xprt = 582 container_of(xprt, struct svcxprt_rdma, sc_xprt); 583 struct svc_rdma_op_ctxt *ctxt = NULL; 584 struct rpcrdma_msg *rmsgp; 585 int ret = 0; 586 int len; 587 588 dprintk("svcrdma: rqstp=%p\n", rqstp); 589 590 spin_lock_bh(&rdma_xprt->sc_rq_dto_lock); 591 if (!list_empty(&rdma_xprt->sc_read_complete_q)) { 592 ctxt = list_entry(rdma_xprt->sc_read_complete_q.next, 593 struct svc_rdma_op_ctxt, 594 dto_q); 595 list_del_init(&ctxt->dto_q); 596 } 597 if (ctxt) { 598 spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock); 599 return rdma_read_complete(rqstp, ctxt); 600 } 601 602 if (!list_empty(&rdma_xprt->sc_rq_dto_q)) { 603 ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next, 604 struct svc_rdma_op_ctxt, 605 dto_q); 606 list_del_init(&ctxt->dto_q); 607 } else { 608 atomic_inc(&rdma_stat_rq_starve); 609 clear_bit(XPT_DATA, &xprt->xpt_flags); 610 ctxt = NULL; 611 } 612 spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock); 613 if (!ctxt) { 614 /* This is the EAGAIN path. The svc_recv routine will 615 * return -EAGAIN, the nfsd thread will go to call into 616 * svc_recv again and we shouldn't be on the active 617 * transport list 618 */ 619 if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) 620 goto close_out; 621 622 BUG_ON(ret); 623 goto out; 624 } 625 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n", 626 ctxt, rdma_xprt, rqstp, ctxt->wc_status); 627 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS); 628 atomic_inc(&rdma_stat_recv); 629 630 /* Build up the XDR from the receive buffers. */ 631 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len); 632 633 /* Decode the RDMA header. */ 634 len = svc_rdma_xdr_decode_req(&rmsgp, rqstp); 635 rqstp->rq_xprt_hlen = len; 636 637 /* If the request is invalid, reply with an error */ 638 if (len < 0) { 639 if (len == -ENOSYS) 640 svc_rdma_send_error(rdma_xprt, rmsgp, ERR_VERS); 641 goto close_out; 642 } 643 644 /* Read read-list data. */ 645 ret = rdma_read_xdr(rdma_xprt, rmsgp, rqstp, ctxt); 646 if (ret > 0) { 647 /* read-list posted, defer until data received from client. */ 648 goto defer; 649 } 650 if (ret < 0) { 651 /* Post of read-list failed, free context. */ 652 svc_rdma_put_context(ctxt, 1); 653 return 0; 654 } 655 656 ret = rqstp->rq_arg.head[0].iov_len 657 + rqstp->rq_arg.page_len 658 + rqstp->rq_arg.tail[0].iov_len; 659 svc_rdma_put_context(ctxt, 0); 660 out: 661 dprintk("svcrdma: ret = %d, rq_arg.len =%d, " 662 "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len = %zd\n", 663 ret, rqstp->rq_arg.len, 664 rqstp->rq_arg.head[0].iov_base, 665 rqstp->rq_arg.head[0].iov_len); 666 rqstp->rq_prot = IPPROTO_MAX; 667 svc_xprt_copy_addrs(rqstp, xprt); 668 svc_xprt_received(xprt); 669 return ret; 670 671 close_out: 672 if (ctxt) 673 svc_rdma_put_context(ctxt, 1); 674 dprintk("svcrdma: transport %p is closing\n", xprt); 675 /* 676 * Set the close bit and enqueue it. svc_recv will see the 677 * close bit and call svc_xprt_delete 678 */ 679 set_bit(XPT_CLOSE, &xprt->xpt_flags); 680 defer: 681 svc_xprt_received(xprt); 682 return 0; 683 } 684