// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2015-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/export.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv,
						 struct net *net);
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static void svc_rdma_secure_port(struct svc_rqst *);
static void svc_rdma_kill_temp_xprt(struct svc_xprt *);

static const struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
	.xpo_secure_port = svc_rdma_secure_port,
	.xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt,
};

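/* The generic svc transport layer selects this class when an "rdma"
 * listener is requested. It is expected to be registered via
 * svc_reg_xprt_class() in svc_rdma_init() at module load time.
 */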
struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
	.xcl_ident = XPRT_TRANSPORT_RDMA,
};

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	trace_svcrdma_qp_error(event, (struct sockaddr *)&xprt->xpt_remote);
	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		break;

	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
		break;
	}
}

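/* Allocate and minimally initialize an svcxprt_rdma. The returned
 * transport has its queues, locks, and wait queue set up, but is not
 * yet associated with a CM ID or any RDMA hardware resources.
 */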
static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv,
						 struct net *net)
{
	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

	if (!cma_xprt) {
		dprintk("svcrdma: failed to create new transport\n");
		return NULL;
	}
	svc_xprt_init(net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
	INIT_LIST_HEAD(&cma_xprt->sc_send_ctxts);
	init_llist_head(&cma_xprt->sc_recv_ctxts);
	INIT_LIST_HEAD(&cma_xprt->sc_rw_ctxts);
	init_waitqueue_head(&cma_xprt->sc_send_wait);

	spin_lock_init(&cma_xprt->sc_lock);
	spin_lock_init(&cma_xprt->sc_rq_dto_lock);
	spin_lock_init(&cma_xprt->sc_send_lock);
	spin_lock_init(&cma_xprt->sc_rw_ctxt_lock);

	/*
	 * Note that this implies that the underlying transport supports
	 * some form of congestion control (see RFC 7530 section 3.1
	 * paragraph 2). For now, we assume that all supported RDMA
	 * transports are suitable here.
	 */
	set_bit(XPT_CONG_CTRL, &cma_xprt->sc_xprt.xpt_flags);

	return cma_xprt;
}

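/* Parse the RPC-over-RDMA connection private message, if the client
 * provided one, to learn whether the client can accept Send With
 * Invalidate (RPCRDMA_CMP_F_SND_W_INV_OK).
 */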
static void
svc_rdma_parse_connect_private(struct svcxprt_rdma *newxprt,
			       struct rdma_conn_param *param)
{
	const struct rpcrdma_connect_private *pmsg = param->private_data;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		newxprt->sc_snd_w_inv = pmsg->cp_flags &
					RPCRDMA_CMP_F_SND_W_INV_OK;

		dprintk("svcrdma: client send_size %u, recv_size %u "
			"remote inv %ssupported\n",
			rpcrdma_decode_buffer_size(pmsg->cp_send_size),
			rpcrdma_decode_buffer_size(pmsg->cp_recv_size),
			newxprt->sc_snd_w_inv ? "" : "un");
	}
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked,
 * it will call the recvfrom method on the listen xprt which will accept the
 * new connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id,
			       struct rdma_conn_param *param)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;

	/* Create a new transport */
	newxprt = svc_rdma_create_xprt(listen_xprt->sc_xprt.xpt_server,
				       listen_xprt->sc_xprt.xpt_net);
	if (!newxprt)
		return;
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;
	svc_rdma_parse_connect_private(newxprt, param);

	/* Save client advertised inbound read limit for use later in accept. */
	newxprt->sc_ord = param->initiator_depth;

	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
	/* The remote port is arbitrary and not under the control of the
	 * client ULP. Set it to a fixed value so that the DRC continues
	 * to be effective after a reconnect.
	 */
	rpc_set_port((struct sockaddr *)&newxprt->sc_xprt.xpt_remote, 0);

	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport
	 */
	spin_lock(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock(&listen_xprt->sc_lock);

	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events are
 * either incoming connect requests or device removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
			       struct rdma_cm_event *event)
{
	struct sockaddr *sap = (struct sockaddr *)&cma_id->route.addr.src_addr;

	trace_svcrdma_cm_event(event, sap);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
			"event = %s (%d)\n", cma_id, cma_id->context,
			rdma_event_msg(event->event), event->event);
		handle_connect_req(cma_id, &event->param.conn);
		break;
	default:
		/* NB: No device removal upcall for INADDR_ANY listeners */
		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
			"event = %s (%d)\n", cma_id,
			rdma_event_msg(event->event), event->event);
		break;
	}

	return 0;
}

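/* Handles events generated on a connected (data transfer) endpoint:
 * connection establishment, disconnection, and device removal.
 */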
static int rdma_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct sockaddr *sap = (struct sockaddr *)&cma_id->route.addr.dst_addr;
	struct svcxprt_rdma *rdma = cma_id->context;
	struct svc_xprt *xprt = &rdma->sc_xprt;

	trace_svcrdma_cm_event(event, sap);

	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		svc_xprt_get(xprt);
		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
		svc_xprt_put(xprt);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
			"event = %s (%d)\n", cma_id, xprt,
			rdma_event_msg(event->event), event->event);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
		svc_xprt_put(xprt);
		break;
	default:
		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
			"event = %s (%d)\n", cma_id,
			rdma_event_msg(event->event), event->event);
		break;
	}
	return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
	int ret;

	dprintk("svcrdma: Creating RDMA listener\n");
	if ((sa->sa_family != AF_INET) && (sa->sa_family != AF_INET6)) {
		dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
		return ERR_PTR(-EAFNOSUPPORT);
	}
	cma_xprt = svc_rdma_create_xprt(serv, net);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);
	set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);
	strcpy(cma_xprt->sc_xprt.xpt_remotebuf, "listener");

	listen_id = rdma_create_id(net, rdma_listen_handler, cma_xprt,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
		goto err0;
	}

	/* Allow both IPv4 and IPv6 sockets to bind a single port
	 * at the same time.
	 */
#if IS_ENABLED(CONFIG_IPV6)
	ret = rdma_set_afonly(listen_id, 1);
	if (ret) {
		dprintk("svcrdma: rdma_set_afonly failed = %d\n", ret);
		goto err1;
	}
#endif
	ret = rdma_bind_addr(listen_id, sa);
	if (ret) {
		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
		goto err1;
	}
	cma_xprt->sc_cm_id = listen_id;

	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret) {
		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
		goto err1;
	}

	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
	 */
	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return &cma_xprt->sc_xprt;

 err1:
	rdma_destroy_id(listen_id);
 err0:
	kfree(cma_xprt);
	return ERR_PTR(ret);
}

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structures. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *listen_rdma;
	struct svcxprt_rdma *newxprt = NULL;
	struct rdma_conn_param conn_param;
	struct rpcrdma_connect_private pmsg;
	struct ib_qp_init_attr qp_attr;
	unsigned int ctxts, rq_depth;
	struct ib_device *dev;
	int ret = 0;
	RPC_IFDEBUG(struct sockaddr *sap);

	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
	clear_bit(XPT_CONN, &xprt->xpt_flags);
	/* Get the next entry off the accept list */
	spin_lock(&listen_rdma->sc_lock);
	if (!list_empty(&listen_rdma->sc_accept_q)) {
		newxprt = list_entry(listen_rdma->sc_accept_q.next,
				     struct svcxprt_rdma, sc_accept_q);
		list_del_init(&newxprt->sc_accept_q);
	}
	if (!list_empty(&listen_rdma->sc_accept_q))
		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
	spin_unlock(&listen_rdma->sc_lock);
	if (!newxprt)
		return NULL;

	dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
		newxprt, newxprt->sc_cm_id);

	dev = newxprt->sc_cm_id->device;
	newxprt->sc_port_num = newxprt->sc_cm_id->port_num;

	/* Qualify the transport resource defaults with the
	 * capabilities of this particular device.
	 */
	/* Transport header, head iovec, tail iovec */
	newxprt->sc_max_send_sges = 3;
	/* Add one SGE per page list entry */
	newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1;
	if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge)
		newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
	newxprt->sc_max_req_size = svcrdma_max_req_size;
	newxprt->sc_max_requests = svcrdma_max_requests;
	newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
	rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests;
	if (rq_depth > dev->attrs.max_qp_wr) {
		pr_warn("svcrdma: reducing receive depth to %d\n",
			dev->attrs.max_qp_wr);
		rq_depth = dev->attrs.max_qp_wr;
		newxprt->sc_max_requests = rq_depth - 2;
		newxprt->sc_max_bc_requests = 2;
	}
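	/* Advertise the flow-control credit limit: the number of RPC
	 * requests the client may have in flight at once.
	 */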
	newxprt->sc_fc_credits = cpu_to_be32(newxprt->sc_max_requests);
	ctxts = rdma_rw_mr_factor(dev, newxprt->sc_port_num, RPCSVC_MAXPAGES);
	ctxts *= newxprt->sc_max_requests;
	newxprt->sc_sq_depth = rq_depth + ctxts;
	if (newxprt->sc_sq_depth > dev->attrs.max_qp_wr) {
		pr_warn("svcrdma: reducing send depth to %d\n",
			dev->attrs.max_qp_wr);
		newxprt->sc_sq_depth = dev->attrs.max_qp_wr;
	}
	atomic_set(&newxprt->sc_sq_avail, newxprt->sc_sq_depth);

	newxprt->sc_pd = ib_alloc_pd(dev, 0);
	if (IS_ERR(newxprt->sc_pd)) {
		dprintk("svcrdma: error creating PD for connect request\n");
		goto errout;
	}
	newxprt->sc_sq_cq = ib_alloc_cq_any(dev, newxprt, newxprt->sc_sq_depth,
					    IB_POLL_WORKQUEUE);
	if (IS_ERR(newxprt->sc_sq_cq)) {
		dprintk("svcrdma: error creating SQ CQ for connect request\n");
		goto errout;
	}
	newxprt->sc_rq_cq =
		ib_alloc_cq_any(dev, newxprt, rq_depth, IB_POLL_WORKQUEUE);
	if (IS_ERR(newxprt->sc_rq_cq)) {
		dprintk("svcrdma: error creating RQ CQ for connect request\n");
		goto errout;
	}

	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = &newxprt->sc_xprt;
	qp_attr.port_num = newxprt->sc_port_num;
	qp_attr.cap.max_rdma_ctxs = ctxts;
	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth - ctxts;
	qp_attr.cap.max_recv_wr = rq_depth;
	qp_attr.cap.max_send_sge = newxprt->sc_max_send_sges;
	qp_attr.cap.max_recv_sge = 1;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = newxprt->sc_sq_cq;
	qp_attr.recv_cq = newxprt->sc_rq_cq;
	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n",
		newxprt->sc_cm_id, newxprt->sc_pd);
	dprintk("    cap.max_send_wr = %d, cap.max_recv_wr = %d\n",
		qp_attr.cap.max_send_wr, qp_attr.cap.max_recv_wr);
	dprintk("    cap.max_send_sge = %d, cap.max_recv_sge = %d\n",
		qp_attr.cap.max_send_sge, qp_attr.cap.max_recv_sge);

	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
	if (ret) {
		dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
		goto errout;
	}
	newxprt->sc_qp = newxprt->sc_cm_id->qp;

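	/* Send With Invalidate depends on the device's memory management
	 * extensions; if they are absent, fall back to plain Send.
	 */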
	if (!(dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		newxprt->sc_snd_w_inv = false;
	if (!rdma_protocol_iwarp(dev, newxprt->sc_port_num) &&
	    !rdma_ib_or_roce(dev, newxprt->sc_port_num))
		goto errout;

	if (!svc_rdma_post_recvs(newxprt))
		goto errout;

	/* Swap out the handler */
	newxprt->sc_cm_id->event_handler = rdma_cma_handler;

	/* Construct RDMA-CM private message */
	pmsg.cp_magic = rpcrdma_cmp_magic;
	pmsg.cp_version = RPCRDMA_CMP_VERSION;
	pmsg.cp_flags = 0;
	pmsg.cp_send_size = pmsg.cp_recv_size =
		rpcrdma_encode_buffer_size(newxprt->sc_max_req_size);

	/* Accept Connection */
	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 0;
	conn_param.initiator_depth = min_t(int, newxprt->sc_ord,
					   dev->attrs.max_qp_init_rd_atom);
	if (!conn_param.initiator_depth) {
		dprintk("svcrdma: invalid ORD setting\n");
		ret = -EINVAL;
		goto errout;
	}
	conn_param.private_data = &pmsg;
	conn_param.private_data_len = sizeof(pmsg);
	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
	if (ret)
		goto errout;

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	dprintk("svcrdma: new connection %p accepted:\n", newxprt);
	sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	dprintk("    local address   : %pIS:%u\n", sap, rpc_get_port(sap));
	sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	dprintk("    remote address  : %pIS:%u\n", sap, rpc_get_port(sap));
	dprintk("    max_sge         : %d\n", newxprt->sc_max_send_sges);
	dprintk("    sq_depth        : %d\n", newxprt->sc_sq_depth);
	dprintk("    rdma_rw_ctxs    : %d\n", ctxts);
	dprintk("    max_requests    : %d\n", newxprt->sc_max_requests);
	dprintk("    ord             : %d\n", conn_param.initiator_depth);
#endif

	trace_svcrdma_xprt_accept(&newxprt->sc_xprt);
	return &newxprt->sc_xprt;

 errout:
	dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
	trace_svcrdma_xprt_fail(&newxprt->sc_xprt);
	/* Take a reference in case the DTO handler runs */
	svc_xprt_get(&newxprt->sc_xprt);
	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
		ib_destroy_qp(newxprt->sc_qp);
	rdma_destroy_id(newxprt->sc_cm_id);
	/* This call to put will destroy the transport */
	svc_xprt_put(&newxprt->sc_xprt);
	return NULL;
}

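/* The Receive completion and recvfrom paths release Receive resources
 * themselves, so this xpo_release_rqst method is intentionally a no-op.
 */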
static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/* Disconnect and flush posted WQEs */
	rdma_disconnect(rdma->sc_cm_id);
}

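/* Deferred transport tear-down, run from the work item scheduled by
 * svc_rdma_free(). The QP is drained first so that no completions can
 * race with the release of contexts and hardware resources below.
 */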
static void __svc_rdma_free(struct work_struct *work)
{
	struct svcxprt_rdma *rdma =
		container_of(work, struct svcxprt_rdma, sc_work);
	struct svc_xprt *xprt = &rdma->sc_xprt;

	trace_svcrdma_xprt_free(xprt);

	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_drain_qp(rdma->sc_qp);

	svc_rdma_flush_recv_queues(rdma);

	/* Final put of backchannel client transport */
	if (xprt->xpt_bc_xprt) {
		xprt_put(xprt->xpt_bc_xprt);
		xprt->xpt_bc_xprt = NULL;
	}

	svc_rdma_destroy_rw_ctxts(rdma);
	svc_rdma_send_ctxts_destroy(rdma);
	svc_rdma_recv_ctxts_destroy(rdma);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_free_cq(rdma->sc_sq_cq);

	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_free_cq(rdma->sc_rq_cq);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	kfree(rdma);
}

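/* Tearing down the transport involves verbs calls that can sleep
 * (ib_drain_qp, for example), so hand the actual destruction off to
 * a workqueue.
 */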
static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
	schedule_work(&rdma->sc_work);
}

static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/*
	 * If there are already waiters on the SQ,
	 * return false.
	 */
	if (waitqueue_active(&rdma->sc_send_wait))
		return 0;

	/* Otherwise return true. */
	return 1;
}

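/* RPC-over-RDMA clients do not connect from privileged ports. Mark
 * each request as arriving on a secure port so that exports using the
 * "secure" option do not reject RDMA-based mounts.
 */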
static void svc_rdma_secure_port(struct svc_rqst *rqstp)
{
	set_bit(RQ_SECURE, &rqstp->rq_flags);
}

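/* Forcibly closing "temporary" (client-initiated) connections is not
 * implemented for RPC-over-RDMA, so this method is a no-op.
 */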
static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
{
}