/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA (server-side).
 */

#include <linux/module.h>
#include <linux/sunrpc/svc_rdma.h>
#include "xprt_rdma.h"

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

#undef SVCRDMA_BACKCHANNEL_DEBUG

/* Handle an RPC reply arriving on the backchannel: match it to the
 * waiting rpc_rqst by XID, copy the reply data into that request's
 * receive buffer, and refresh the congestion window from the credits
 * advertised in the RPC/RDMA header.
 */
int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, struct rpcrdma_msg *rmsgp,
			     struct xdr_buf *rcvbuf)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct kvec *dst, *src = &rcvbuf->head[0];
	struct rpc_rqst *req;
	unsigned long cwnd;
	u32 credits;
	size_t len;
	__be32 xid;
	__be32 *p;
	int ret;

	p = (__be32 *)src->iov_base;
	len = src->iov_len;
	xid = rmsgp->rm_xid;

#ifdef SVCRDMA_BACKCHANNEL_DEBUG
	pr_info("%s: xid=%08x, length=%zu\n",
		__func__, be32_to_cpu(xid), len);
	pr_info("%s: RPC/RDMA: %*ph\n",
		__func__, (int)RPCRDMA_HDRLEN_MIN, rmsgp);
	pr_info("%s: RPC: %*ph\n",
		__func__, (int)len, p);
#endif

	/* The smallest accepted RPC reply (xid, message type, reply
	 * stat, empty verifier, accept stat) is 24 octets; anything
	 * shorter cannot be a valid reply.
	 */
	ret = -EAGAIN;
	if (src->iov_len < 24)
		goto out_shortreply;

	spin_lock_bh(&xprt->transport_lock);
	req = xprt_lookup_rqst(xprt, xid);
	if (!req)
		goto out_notfound;

	dst = &req->rq_private_buf.head[0];
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
	if (dst->iov_len < len)
		goto out_unlock;
	memcpy(dst->iov_base, p, len);

	credits = be32_to_cpu(rmsgp->rm_credit);
	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > r_xprt->rx_buf.rb_bc_max_requests)
		credits = r_xprt->rx_buf.rb_bc_max_requests;

	cwnd = xprt->cwnd;
	xprt->cwnd = credits << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(req->rq_task);

	ret = 0;
	xprt_complete_rqst(req->rq_task, rcvbuf->len);
	rcvbuf->len = 0;

out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
out:
	return ret;

out_shortreply:
	dprintk("svcrdma: short bc reply: xprt=%p, len=%zu\n",
		xprt, src->iov_len);
	goto out;

out_notfound:
	dprintk("svcrdma: unrecognized bc reply: xprt=%p, xid=%08x\n",
		xprt, be32_to_cpu(xid));
	goto out_unlock;
}
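/* Illustrative only: how the credit update above becomes a congestion
 * window. If the peer advertises rm_credit = 8 and rb_bc_max_requests
 * is at least 8, then
 *
 *	xprt->cwnd = 8 << RPC_CWNDSHIFT;
 *
 * permits up to eight concurrent backchannel calls. A credit value of
 * zero is bumped to one so the backchannel can always make progress.
 */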
/* Send a backwards direction RPC call.
 *
 * Caller holds the connection's mutex and has already marshaled
 * the RPC/RDMA request.
 *
 * This is similar to svc_rdma_reply, but takes an rpc_rqst
 * instead, does not support chunks, and avoids blocking memory
 * allocation.
 *
 * XXX: There is still an opportunity to block in svc_rdma_send()
 * if there are no SQ entries to post the Send. This may occur if
 * the adapter has a small maximum SQ depth.
 */
static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
			      struct rpc_rqst *rqst)
{
	struct xdr_buf *sndbuf = &rqst->rq_snd_buf;
	struct svc_rdma_op_ctxt *ctxt;
	struct svc_rdma_req_map *vec;
	struct ib_send_wr send_wr;
	int ret;

	vec = svc_rdma_get_req_map(rdma);
	ret = svc_rdma_map_xdr(rdma, sndbuf, vec, false);
	if (ret)
		goto out_err;

	/* Post a receive buffer for the reply to this request.
	 * GFP_NOIO avoids recursing into the I/O path while we are
	 * on it.
	 */
	ret = svc_rdma_repost_recv(rdma, GFP_NOIO);
	if (ret)
		goto out_err;

	ctxt = svc_rdma_get_context(rdma);
	ctxt->pages[0] = virt_to_page(rqst->rq_buffer);
	ctxt->count = 1;

	ctxt->direction = DMA_TO_DEVICE;
	ctxt->sge[0].lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->sge[0].length = sndbuf->len;
	ctxt->sge[0].addr =
	    ib_dma_map_page(rdma->sc_cm_id->device, ctxt->pages[0], 0,
			    sndbuf->len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr)) {
		ret = -EIO;
		goto out_unmap;
	}
	svc_rdma_count_mappings(rdma, ctxt);

	memset(&send_wr, 0, sizeof(send_wr));
	ctxt->cqe.done = svc_rdma_wc_send;
	send_wr.wr_cqe = &ctxt->cqe;
	send_wr.sg_list = ctxt->sge;
	send_wr.num_sge = 1;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = svc_rdma_send(rdma, &send_wr);
	if (ret) {
		ret = -EIO;
		goto out_unmap;
	}

out_err:
	svc_rdma_put_req_map(rdma, vec);
	dprintk("svcrdma: %s returns %d\n", __func__, ret);
	return ret;

out_unmap:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	goto out_err;
}
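/* Note on the DMA mapping lifetime in the function above: the Send is
 * posted signaled, so on successful completion svc_rdma_wc_send() runs,
 * unmaps the sge, and releases the op_ctxt. On a mapping or post
 * failure, the out_unmap path performs the same cleanup synchronously
 * instead.
 */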
/* Server-side transport endpoint wants a whole page for its send
 * buffer. The client RPC code constructs the RPC header in this
 * buffer before it invokes ->send_request.
 */
static int
xprt_rdma_bc_allocate(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	size_t size = rqst->rq_callsize;
	struct page *page;

	if (size > PAGE_SIZE) {
		WARN_ONCE(1, "svcrdma: large bc buffer request (size %zu)\n",
			  size);
		return -EINVAL;
	}

	/* svc_rdma_sendto releases this page */
	page = alloc_page(RPCRDMA_DEF_GFP);
	if (!page)
		return -ENOMEM;
	rqst->rq_buffer = page_address(page);

	rqst->rq_rbuffer = kmalloc(rqst->rq_rcvsize, RPCRDMA_DEF_GFP);
	if (!rqst->rq_rbuffer) {
		put_page(page);
		return -ENOMEM;
	}
	return 0;
}

static void
xprt_rdma_bc_free(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;

	kfree(rqst->rq_rbuffer);
}
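/* For illustration, the fixed-size RPC/RDMA header that
 * rpcrdma_bc_send_request() below builds at the start of rq_buffer
 * looks like this on the wire (all fields big-endian, all three
 * chunk lists empty):
 *
 *	rm_xid       - copied from rq_xid
 *	rm_vers      - rpcrdma_version
 *	rm_credit    - rb_bc_max_requests
 *	rm_type      - rdma_msg
 *	rm_chunks[3] - xdr_zero, xdr_zero, xdr_zero
 */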
static int
rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_msg *headerp = (struct rpcrdma_msg *)rqst->rq_buffer;
	int rc;

	/* Space in the send buffer for an RPC/RDMA header is reserved
	 * via xprt->tsh_size.
	 */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_bc_max_requests);
	headerp->rm_type = rdma_msg;
	headerp->rm_body.rm_chunks[0] = xdr_zero;
	headerp->rm_body.rm_chunks[1] = xdr_zero;
	headerp->rm_body.rm_chunks[2] = xdr_zero;

#ifdef SVCRDMA_BACKCHANNEL_DEBUG
	pr_info("%s: %*ph\n", __func__, 64, rqst->rq_buffer);
#endif

	rc = svc_rdma_bc_sendto(rdma, rqst);
	if (rc)
		goto drop_connection;
	return rc;

drop_connection:
	dprintk("svcrdma: failed to send bc call\n");
	xprt_disconnect_done(xprt);
	return -ENOTCONN;
}

/* Send an RPC call on the passive end of a transport
 * connection.
 */
static int
xprt_rdma_bc_send_request(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct svc_xprt *sxprt = rqst->rq_xprt->bc_xprt;
	struct svcxprt_rdma *rdma;
	int ret;

	dprintk("svcrdma: sending bc call with xid: %08x\n",
		be32_to_cpu(rqst->rq_xid));

	if (!mutex_trylock(&sxprt->xpt_mutex)) {
		rpc_sleep_on(&sxprt->xpt_bc_pending, task, NULL);
		if (!mutex_trylock(&sxprt->xpt_mutex))
			return -EAGAIN;
		rpc_wake_up_queued_task(&sxprt->xpt_bc_pending, task);
	}

	ret = -ENOTCONN;
	rdma = container_of(sxprt, struct svcxprt_rdma, sc_xprt);
	if (!test_bit(XPT_DEAD, &sxprt->xpt_flags))
		ret = rpcrdma_bc_send_request(rdma, rqst);

	mutex_unlock(&sxprt->xpt_mutex);

	if (ret < 0)
		return ret;
	return 0;
}

static void
xprt_rdma_bc_close(struct rpc_xprt *xprt)
{
	dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
}

static void
xprt_rdma_bc_put(struct rpc_xprt *xprt)
{
	dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);

	xprt_free(xprt);
	module_put(THIS_MODULE);
}

static struct rpc_xprt_ops xprt_rdma_bc_procs = {
	.reserve_xprt		= xprt_reserve_xprt_cong,
	.release_xprt		= xprt_release_xprt_cong,
	.alloc_slot		= xprt_alloc_slot,
	.release_request	= xprt_release_rqst_cong,
	.buf_alloc		= xprt_rdma_bc_allocate,
	.buf_free		= xprt_rdma_bc_free,
	.send_request		= xprt_rdma_bc_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
	.close			= xprt_rdma_bc_close,
	.destroy		= xprt_rdma_bc_put,
	.print_stats		= xprt_rdma_print_stats
};

static const struct rpc_timeout xprt_rdma_bc_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
};

/* It shouldn't matter if the number of backchannel session slots
 * doesn't match the number of RPC/RDMA credits. That just means
 * one or the other will have extra slots that aren't used.
 */
static struct rpc_xprt *
xprt_setup_rdma_bc(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct rpcrdma_xprt *new_xprt;

	if (args->addrlen > sizeof(xprt->addr)) {
		dprintk("RPC: %s: address too large\n", __func__);
		return ERR_PTR(-EBADF);
	}

	xprt = xprt_alloc(args->net, sizeof(*new_xprt),
			  RPCRDMA_MAX_BC_REQUESTS,
			  RPCRDMA_MAX_BC_REQUESTS);
	if (!xprt) {
		dprintk("RPC: %s: couldn't allocate rpc_xprt\n",
			__func__);
		return ERR_PTR(-ENOMEM);
	}

	xprt->timeout = &xprt_rdma_bc_timeout;
	xprt_set_bound(xprt);
	xprt_set_connected(xprt);
	xprt->bind_timeout = RPCRDMA_BIND_TO;
	xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
	xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;

	xprt->prot = XPRT_TRANSPORT_BC_RDMA;
	xprt->tsh_size = RPCRDMA_HDRLEN_MIN / sizeof(__be32);
	xprt->ops = &xprt_rdma_bc_procs;

	memcpy(&xprt->addr, args->dstaddr, args->addrlen);
	xprt->addrlen = args->addrlen;
	xprt_rdma_format_addresses(xprt, (struct sockaddr *)&xprt->addr);
	xprt->resvport = 0;

	xprt->max_payload = xprt_rdma_max_inline_read;

	new_xprt = rpcx_to_rdmax(xprt);
	new_xprt->rx_buf.rb_bc_max_requests = xprt->max_reqs;

	xprt_get(xprt);
	args->bc_xprt->xpt_bc_xprt = xprt;
	xprt->bc_xprt = args->bc_xprt;

	if (!try_module_get(THIS_MODULE))
		goto out_fail;

	/* Final put for backchannel xprt is in __svc_rdma_free */
	xprt_get(xprt);
	return xprt;

out_fail:
	xprt_rdma_free_addresses(xprt);
	args->bc_xprt->xpt_bc_xprt = NULL;
	args->bc_xprt->xpt_bc_xps = NULL;
	xprt_put(xprt);
	xprt_free(xprt);
	return ERR_PTR(-EINVAL);
}

struct xprt_class xprt_rdma_bc = {
	.list = LIST_HEAD_INIT(xprt_rdma_bc.list),
	.name = "rdma backchannel",
	.owner = THIS_MODULE,
	.ident = XPRT_TRANSPORT_BC_RDMA,
	.setup = xprt_setup_rdma_bc,
};
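/* This transport class is registered with the RPC client when the
 * rpcrdma module loads. A minimal sketch of that step (which lives in
 * the module init path, not in this file) would be:
 *
 *	rc = xprt_register_transport(&xprt_rdma_bc);
 *
 * after which a backchannel transport can be created by passing
 * XPRT_TRANSPORT_BC_RDMA in struct xprt_create.
 */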