/*
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void dto_tasklet_func(unsigned long data);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static void rq_cq_reap(struct svcxprt_rdma *xprt);
static void sq_cq_reap(struct svcxprt_rdma *xprt);

DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
static DEFINE_SPINLOCK(dto_lock);
static LIST_HEAD(dto_xprt_q);

static struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
};

struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};

/* WR context cache. Created in svc_rdma.c */
extern struct kmem_cache *svc_rdma_ctxt_cachep;

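/*
 * Allocate a WR context for this transport. The allocation is retried
 * every 500ms until it succeeds, so this function cannot fail but may
 * sleep. The caller returns the context with svc_rdma_put_context().
 */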
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt;

	while (1) {
		ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, GFP_KERNEL);
		if (ctxt)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	ctxt->xprt = xprt;
	INIT_LIST_HEAD(&ctxt->dto_q);
	ctxt->count = 0;
	atomic_inc(&xprt->sc_ctxt_used);
	return ctxt;
}

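/*
 * Unmap the DMA mappings recorded in the context's SGE array and drop
 * the transport's DMA-use count for each mapped element.
 */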
static void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;

	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
		atomic_dec(&xprt->sc_dma_used);
		ib_dma_unmap_single(xprt->sc_cm_id->device,
				    ctxt->sge[i].addr,
				    ctxt->sge[i].length,
				    ctxt->direction);
	}
}

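/*
 * Release a WR context. If @free_pages is set, the pages attached to
 * the context are released as well. The transport's context-use count
 * is dropped to match svc_rdma_get_context().
 */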
void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
	struct svcxprt_rdma *xprt;
	int i;

	BUG_ON(!ctxt);
	xprt = ctxt->xprt;
	if (free_pages)
		for (i = 0; i < ctxt->count; i++)
			put_page(ctxt->pages[i]);

	kmem_cache_free(svc_rdma_ctxt_cachep, ctxt);
	atomic_dec(&xprt->sc_ctxt_used);
}

/* Temporary NFS request map cache. Created in svc_rdma.c */
extern struct kmem_cache *svc_rdma_map_cachep;

/*
 * Temporary NFS req mappings are shared across all transport
 * instances. They are short-lived and should be bounded by the number
 * of concurrent server threads times the depth of the SQ.
 */
struct svc_rdma_req_map *svc_rdma_get_req_map(void)
{
	struct svc_rdma_req_map *map;

	while (1) {
		map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL);
		if (map)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	map->count = 0;
	return map;
}

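/* Return a request map to the cache. */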
void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
{
	kmem_cache_free(svc_rdma_map_cachep, map);
}

/* ib_cq event handler */
static void cq_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	dprintk("svcrdma: received CQ event id=%d, context=%p\n",
		event->event, context);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
}

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		dprintk("svcrdma: QP event %d received for QP=%p\n",
			event->event, event->element.qp);
		break;
	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		dprintk("svcrdma: QP ERROR event %d received for QP=%p, "
			"closing transport\n",
			event->event, event->element.qp);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		break;
	}
}

/*
 * Data Transfer Operation Tasklet
 *
 * Walks the list of transports with I/O pending, removing each entry
 * from the list and reaping its RQ and SQ completion queues. Two bits
 * in sc_flags indicate whether the SQ, RQ, or both have I/O pending.
 * The dto_lock is an irqsave spinlock that serializes access to the
 * transport list with the RQ and SQ interrupt handlers.
 */
static void dto_tasklet_func(unsigned long data)
{
	struct svcxprt_rdma *xprt;
	unsigned long flags;

	spin_lock_irqsave(&dto_lock, flags);
	while (!list_empty(&dto_xprt_q)) {
		xprt = list_entry(dto_xprt_q.next,
				  struct svcxprt_rdma, sc_dto_q);
		list_del_init(&xprt->sc_dto_q);
		spin_unlock_irqrestore(&dto_lock, flags);

		rq_cq_reap(xprt);
		sq_cq_reap(xprt);

		svc_xprt_put(&xprt->sc_xprt);
		spin_lock_irqsave(&dto_lock, flags);
	}
	spin_unlock_irqrestore(&dto_lock, flags);
}

/*
 * Receive Queue Completion Handler
 *
 * Since an RQ completion handler is called in interrupt context, we
 * need to defer the handling of the I/O to a tasklet.
 */
static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an SQ
	 * completion.
	 */
	set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it.
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}

/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completing WCs off the CQ and enqueue the associated DTO
 * context on the dto_q for the transport.
 *
 * Note that caller must hold a transport reference.
 */
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
	int ret;
	struct ib_wc wc;
	struct svc_rdma_op_ctxt *ctxt = NULL;

	if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_rq_poll);

	while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		ctxt->wc_status = wc.status;
		ctxt->byte_len = wc.byte_len;
		svc_rdma_unmap_dma(ctxt);
		if (wc.status != IB_WC_SUCCESS) {
			/* Close the transport */
			dprintk("svcrdma: transport closing, putting ctxt %p\n",
				ctxt);
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			svc_rdma_put_context(ctxt, 1);
			svc_xprt_put(&xprt->sc_xprt);
			continue;
		}
		spin_lock_bh(&xprt->sc_rq_dto_lock);
		list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
		spin_unlock_bh(&xprt->sc_rq_dto_lock);
		svc_xprt_put(&xprt->sc_xprt);
	}

	if (ctxt)
		atomic_inc(&rdma_stat_rq_prod);

	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
	/*
	 * If data arrived before the established event, don't enqueue.
	 * This defers RPC I/O until the RDMA connection is complete.
	 */
	if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
		svc_xprt_enqueue(&xprt->sc_xprt);
}

/*
 * Send Queue Completion Handler - potentially called in interrupt context.
 *
 * Note that caller must hold a transport reference.
 */
static void sq_cq_reap(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct ib_wc wc;
	struct ib_cq *cq = xprt->sc_sq_cq;
	int ret;

	if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_sq_poll);
	while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		xprt = ctxt->xprt;

		svc_rdma_unmap_dma(ctxt);
		if (wc.status != IB_WC_SUCCESS)
			/* Close the transport */
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);

		/* Decrement used SQ WR count */
		atomic_dec(&xprt->sc_sq_count);
		wake_up(&xprt->sc_send_wait);

		switch (ctxt->wr_op) {
		case IB_WR_SEND:
			svc_rdma_put_context(ctxt, 1);
			break;

		case IB_WR_RDMA_WRITE:
			svc_rdma_put_context(ctxt, 0);
			break;

		case IB_WR_RDMA_READ:
			if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
				struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;

				BUG_ON(!read_hdr);
				set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
				spin_lock_bh(&xprt->sc_read_complete_lock);
				list_add_tail(&read_hdr->dto_q,
					      &xprt->sc_read_complete_q);
				spin_unlock_bh(&xprt->sc_read_complete_lock);
				svc_xprt_enqueue(&xprt->sc_xprt);
			}
			svc_rdma_put_context(ctxt, 0);
			break;

		default:
			printk(KERN_ERR "svcrdma: unexpected completion type, "
			       "opcode=%d, status=%d\n",
			       wc.opcode, wc.status);
			break;
		}
		svc_xprt_put(&xprt->sc_xprt);
	}

	if (ctxt)
		atomic_inc(&rdma_stat_sq_prod);
}

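/*
 * Send Queue Completion Handler
 *
 * Like the RQ handler, this runs in interrupt context, so it only marks
 * the transport as having SQ work pending and schedules the DTO tasklet.
 */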
static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an RQ
	 * completion.
	 */
	set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it.
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}

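/*
 * Allocate and initialize an svcxprt_rdma structure: list heads, locks,
 * wait queue, and the transport resource limits taken from the svcrdma
 * module parameters. Returns NULL if the allocation fails.
 */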
static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
					     int listener)
{
	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

	if (!cma_xprt)
		return NULL;
	svc_xprt_init(&svc_rdma_class, &cma_xprt->sc_xprt, serv);
	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
	INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
	init_waitqueue_head(&cma_xprt->sc_send_wait);

	spin_lock_init(&cma_xprt->sc_lock);
	spin_lock_init(&cma_xprt->sc_read_complete_lock);
	spin_lock_init(&cma_xprt->sc_rq_dto_lock);

	cma_xprt->sc_ord = svcrdma_ord;

	cma_xprt->sc_max_req_size = svcrdma_max_req_size;
	cma_xprt->sc_max_requests = svcrdma_max_requests;
	cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
	atomic_set(&cma_xprt->sc_sq_count, 0);
	atomic_set(&cma_xprt->sc_ctxt_used, 0);

	if (listener)
		set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

	return cma_xprt;
}

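/*
 * Allocate a page for a receive or send buffer, retrying once a second
 * until the allocation succeeds. Like svc_rdma_get_context(), this
 * cannot fail but may sleep.
 */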
struct page *svc_rdma_get_page(void)
{
	struct page *page;

	while ((page = alloc_page(GFP_KERNEL)) == NULL) {
		/* If we can't get memory, wait a bit and try again */
		printk(KERN_INFO "svcrdma: out of memory...retrying in 1 "
		       "second.\n");
		schedule_timeout_uninterruptible(msecs_to_jiffies(1000));
	}
	return page;
}

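/*
 * Build and post a receive WR large enough to hold the maximum request
 * size: one page per SGE until sc_max_req_size is covered. A transport
 * reference is taken for the posted WR and dropped on error.
 */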
int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct svc_rdma_op_ctxt *ctxt;
	struct page *page;
	unsigned long pa;
	int sge_no;
	int buflen;
	int ret;

	ctxt = svc_rdma_get_context(xprt);
	buflen = 0;
	ctxt->direction = DMA_FROM_DEVICE;
	for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
		BUG_ON(sge_no >= xprt->sc_max_sge);
		page = svc_rdma_get_page();
		ctxt->pages[sge_no] = page;
		atomic_inc(&xprt->sc_dma_used);
		pa = ib_dma_map_page(xprt->sc_cm_id->device,
				     page, 0, PAGE_SIZE,
				     DMA_FROM_DEVICE);
		ctxt->sge[sge_no].addr = pa;
		ctxt->sge[sge_no].length = PAGE_SIZE;
		ctxt->sge[sge_no].lkey = xprt->sc_phys_mr->lkey;
		buflen += PAGE_SIZE;
	}
	ctxt->count = sge_no;
	recv_wr.next = NULL;
	recv_wr.sg_list = &ctxt->sge[0];
	recv_wr.num_sge = ctxt->count;
	recv_wr.wr_id = (u64)(unsigned long)ctxt;

	svc_xprt_get(&xprt->sc_xprt);
	ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
	if (ret) {
		svc_xprt_put(&xprt->sc_xprt);
		svc_rdma_put_context(ctxt, 1);
	}
	return ret;
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listen xprt. When the listen thread is kicked, it
 * will call the recvfrom method on the listen xprt which will accept the new
 * connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;

	/* Create a new transport */
	newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
	if (!newxprt) {
		dprintk("svcrdma: failed to create new transport\n");
		return;
	}
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;
	dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
		newxprt, newxprt->sc_cm_id, listen_xprt);

	/* Save client advertised inbound read limit for use later in accept. */
	newxprt->sc_ord = client_ird;

	/* Set the local and remote addresses in the transport */
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport
	 */
	spin_lock_bh(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock_bh(&listen_xprt->sc_lock);

	/*
	 * Can't use svc_xprt_received here because we are not on a
	 * rqstp thread.
	 */
	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events are
 * either incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
			       struct rdma_cm_event *event)
{
	struct svcxprt_rdma *xprt = cma_id->context;
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
			"event=%d\n", cma_id, cma_id->context, event->event);
		handle_connect_req(cma_id,
				   event->param.conn.responder_resources);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		break;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt)
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		break;

	default:
		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
			"event=%d\n", cma_id, event->event);
		break;
	}

	return ret;
}

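/*
 * Handles CM events on connected (non-listening) endpoints: connection
 * establishment, disconnect, and device removal. Disconnect and removal
 * mark the transport closed and enqueue it so close processing can run.
 */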
static int rdma_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct svc_xprt *xprt = cma_id->context;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		svc_xprt_get(xprt);
		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
			"event=%d\n", cma_id, xprt, event->event);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
		}
		break;
	default:
		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
			"event=%d\n", cma_id, event->event);
		break;
	}
	return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
	struct svc_xprt *xprt;
	int ret;

	dprintk("svcrdma: Creating RDMA socket\n");

	cma_xprt = rdma_create_xprt(serv, 1);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);
	xprt = &cma_xprt->sc_xprt;

	listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
		goto err0;
	}

	ret = rdma_bind_addr(listen_id, sa);
	if (ret) {
		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
		goto err1;
	}
	cma_xprt->sc_cm_id = listen_id;

	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret) {
		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
		goto err1;
	}

	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
	 */
	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return &cma_xprt->sc_xprt;

 err1:
	rdma_destroy_id(listen_id);
 err0:
	kfree(cma_xprt);
	return ERR_PTR(ret);
}

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *listen_rdma;
	struct svcxprt_rdma *newxprt = NULL;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct ib_device_attr devattr;
	int ret;
	int i;

	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
	clear_bit(XPT_CONN, &xprt->xpt_flags);
	/* Get the next entry off the accept list */
	spin_lock_bh(&listen_rdma->sc_lock);
	if (!list_empty(&listen_rdma->sc_accept_q)) {
		newxprt = list_entry(listen_rdma->sc_accept_q.next,
				     struct svcxprt_rdma, sc_accept_q);
		list_del_init(&newxprt->sc_accept_q);
	}
	if (!list_empty(&listen_rdma->sc_accept_q))
		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
	spin_unlock_bh(&listen_rdma->sc_lock);
	if (!newxprt)
		return NULL;

	dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
		newxprt, newxprt->sc_cm_id);

	ret = ib_query_device(newxprt->sc_cm_id->device, &devattr);
	if (ret) {
		dprintk("svcrdma: could not query device attributes on "
			"device %p, rc=%d\n", newxprt->sc_cm_id->device, ret);
		goto errout;
	}

	/*
	 * Qualify the transport resource defaults with the
	 * capabilities of this particular device.
	 */
	newxprt->sc_max_sge = min((size_t)devattr.max_sge,
				  (size_t)RPCSVC_MAXPAGES);
	newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
				       (size_t)svcrdma_max_requests);
	newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;

	/*
	 * Limit ORD based on client limit, local device limit, and
	 * configured svcrdma limit.
	 */
	newxprt->sc_ord = min_t(size_t, devattr.max_qp_rd_atom, newxprt->sc_ord);
	newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);

	newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device);
	if (IS_ERR(newxprt->sc_pd)) {
		dprintk("svcrdma: error creating PD for connect request\n");
		goto errout;
	}
	newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
					 sq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 newxprt->sc_sq_depth,
					 0);
	if (IS_ERR(newxprt->sc_sq_cq)) {
		dprintk("svcrdma: error creating SQ CQ for connect request\n");
		goto errout;
	}
	newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
					 rq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 newxprt->sc_max_requests,
					 0);
	if (IS_ERR(newxprt->sc_rq_cq)) {
		dprintk("svcrdma: error creating RQ CQ for connect request\n");
		goto errout;
	}

	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = &newxprt->sc_xprt;
	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
	qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
	qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
	qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = newxprt->sc_sq_cq;
	qp_attr.recv_cq = newxprt->sc_rq_cq;
	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
		"    cm_id->device=%p, sc_pd->device=%p\n"
		"    cap.max_send_wr = %d\n"
		"    cap.max_recv_wr = %d\n"
		"    cap.max_send_sge = %d\n"
		"    cap.max_recv_sge = %d\n",
		newxprt->sc_cm_id, newxprt->sc_pd,
		newxprt->sc_cm_id->device, newxprt->sc_pd->device,
		qp_attr.cap.max_send_wr,
		qp_attr.cap.max_recv_wr,
		qp_attr.cap.max_send_sge,
		qp_attr.cap.max_recv_sge);

	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
	if (ret) {
		/*
		 * XXX: This is a hack. We need a xx_request_qp interface
		 * that will adjust the qp_attr's with a best-effort
		 * number
		 */
		qp_attr.cap.max_send_sge -= 2;
		qp_attr.cap.max_recv_sge -= 2;
		ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd,
				     &qp_attr);
		if (ret) {
			dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
			goto errout;
		}
		newxprt->sc_max_sge = min(qp_attr.cap.max_send_sge,
					  qp_attr.cap.max_recv_sge);
		newxprt->sc_sq_depth = qp_attr.cap.max_send_wr;
		newxprt->sc_max_requests = qp_attr.cap.max_recv_wr;
	}
	newxprt->sc_qp = newxprt->sc_cm_id->qp;

	/* Register all of physical memory */
	newxprt->sc_phys_mr = ib_get_dma_mr(newxprt->sc_pd,
					    IB_ACCESS_LOCAL_WRITE |
					    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(newxprt->sc_phys_mr)) {
		dprintk("svcrdma: Failed to create DMA MR ret=%d\n", ret);
		goto errout;
	}

	/* Post receive buffers */
	for (i = 0; i < newxprt->sc_max_requests; i++) {
		ret = svc_rdma_post_recv(newxprt);
		if (ret) {
			dprintk("svcrdma: failure posting receive buffers\n");
			goto errout;
		}
	}

	/* Swap out the handler */
	newxprt->sc_cm_id->event_handler = rdma_cma_handler;

	/*
	 * Arm the CQs for the SQ and RQ before accepting so we can't
	 * miss the first message.
	 */
	ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);

	/* Accept Connection */
	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 0;
	conn_param.initiator_depth = newxprt->sc_ord;
	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
	if (ret) {
		dprintk("svcrdma: failed to accept new connection, ret=%d\n",
			ret);
		goto errout;
	}

	dprintk("svcrdma: new connection %p accepted with the following "
		"attributes:\n"
		"    local_ip        : %d.%d.%d.%d\n"
		"    local_port      : %d\n"
		"    remote_ip       : %d.%d.%d.%d\n"
		"    remote_port     : %d\n"
		"    max_sge         : %d\n"
		"    sq_depth        : %d\n"
		"    max_requests    : %d\n"
		"    ord             : %d\n",
		newxprt,
		NIPQUAD(((struct sockaddr_in *)&newxprt->sc_cm_id->
			 route.addr.src_addr)->sin_addr.s_addr),
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.src_addr)->sin_port),
		NIPQUAD(((struct sockaddr_in *)&newxprt->sc_cm_id->
			 route.addr.dst_addr)->sin_addr.s_addr),
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.dst_addr)->sin_port),
		newxprt->sc_max_sge,
		newxprt->sc_sq_depth,
		newxprt->sc_max_requests,
		newxprt->sc_ord);

	return &newxprt->sc_xprt;

 errout:
	dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
	/* Take a reference in case the DTO handler runs */
	svc_xprt_get(&newxprt->sc_xprt);
	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
		ib_destroy_qp(newxprt->sc_qp);
	rdma_destroy_id(newxprt->sc_cm_id);
	/* This call to put will destroy the transport */
	svc_xprt_put(&newxprt->sc_xprt);
	return NULL;
}

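/*
 * The RDMA transport manages its own receive and send buffers, so there
 * is nothing to release per-request; this xpo_release_rqst method is
 * intentionally empty.
 */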
static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum, one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	dprintk("svc: svc_rdma_detach(%p)\n", xprt);

	/* Disconnect and flush posted WQE */
	rdma_disconnect(rdma->sc_cm_id);
}

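/*
 * Work-queue handler that actually destroys the transport: drain the
 * deferred read and receive completion queues, then tear down the QP,
 * CQs, MR, PD, and CM ID before freeing the structure. This runs from
 * process context via the sc_work item scheduled in svc_rdma_free().
 */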
static void __svc_rdma_free(struct work_struct *work)
{
	struct svcxprt_rdma *rdma =
		container_of(work, struct svcxprt_rdma, sc_work);

	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);

	/* We should only be called from kref_put */
	BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);

	/*
	 * Destroy queued, but not processed read completions. Note
	 * that this cleanup has to be done before destroying the
	 * cm_id because the device ptr is needed to unmap the dma in
	 * svc_rdma_put_context.
	 */
	while (!list_empty(&rdma->sc_read_complete_q)) {
		struct svc_rdma_op_ctxt *ctxt;

		ctxt = list_entry(rdma->sc_read_complete_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Destroy queued, but not processed recv completions */
	while (!list_empty(&rdma->sc_rq_dto_q)) {
		struct svc_rdma_op_ctxt *ctxt;

		ctxt = list_entry(rdma->sc_rq_dto_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Warn if we leaked a resource or under-referenced */
	WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
	WARN_ON(atomic_read(&rdma->sc_dma_used) != 0);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_destroy_cq(rdma->sc_sq_cq);

	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_destroy_cq(rdma->sc_rq_cq);

	if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
		ib_dereg_mr(rdma->sc_phys_mr);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	kfree(rdma);
}

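/*
 * xpo_free method. Defer the real teardown to a workqueue; see
 * __svc_rdma_free() above.
 */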
static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
	schedule_work(&rdma->sc_work);
}

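/*
 * xpo_has_wspace method: report whether the transport can currently
 * accept another reply without blocking on the send queue.
 */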
static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/*
	 * If there are fewer SQ WR available than required to send a
	 * simple response, return false.
	 */
	if (rdma->sc_sq_depth - atomic_read(&rdma->sc_sq_count) < 3)
		return 0;

	/*
	 * ...or there are already waiters on the SQ,
	 * return false.
	 */
	if (waitqueue_active(&rdma->sc_send_wait))
		return 0;

	/* Otherwise return true. */
	return 1;
}

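/*
 * Post a send WR on the transport's QP, accounting for SQ depth. If the
 * SQ is full, the caller sleeps on sc_send_wait until sq_cq_reap()
 * frees an entry. A rough sketch of the expected calling pattern (based
 * on svc_rdma_send_error() below; building the SGE list and context is
 * up to the caller):
 *
 *	ctxt = svc_rdma_get_context(xprt);
 *	ctxt->wr_op = IB_WR_SEND;		// must match wr->opcode
 *	wr.wr_id = (unsigned long)ctxt;
 *	wr.opcode = IB_WR_SEND;
 *	wr.send_flags = IB_SEND_SIGNALED;	// required by the BUG_ON below
 *	ret = svc_rdma_send(xprt, &wr);
 *
 * Returns 0 on success, -ENOTCONN if the transport is closing, or the
 * error returned by ib_post_send().
 */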
int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr;
	int ret;

	if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
		return -ENOTCONN;

	BUG_ON(wr->send_flags != IB_SEND_SIGNALED);
	BUG_ON(((struct svc_rdma_op_ctxt *)(unsigned long)wr->wr_id)->wr_op !=
		wr->opcode);
	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		spin_lock_bh(&xprt->sc_lock);
		if (xprt->sc_sq_depth == atomic_read(&xprt->sc_sq_count)) {
			spin_unlock_bh(&xprt->sc_lock);
			atomic_inc(&rdma_stat_sq_starve);

			/* See if we can opportunistically reap SQ WRs to make room */
			sq_cq_reap(xprt);

			/* Wait until SQ WR available if SQ still full */
			wait_event(xprt->sc_send_wait,
				   atomic_read(&xprt->sc_sq_count) <
				   xprt->sc_sq_depth);
			if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
				return 0;
			continue;
		}
		/* Bump the used SQ WR count and post */
		svc_xprt_get(&xprt->sc_xprt);
		ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
		if (!ret)
			atomic_inc(&xprt->sc_sq_count);
		else {
			svc_xprt_put(&xprt->sc_xprt);
			dprintk("svcrdma: failed to post SQ WR rc=%d, "
				"sc_sq_count=%d, sc_sq_depth=%d\n",
				ret, atomic_read(&xprt->sc_sq_count),
				xprt->sc_sq_depth);
		}
		spin_unlock_bh(&xprt->sc_lock);
		break;
	}
	return ret;
}

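/*
 * Build and send an RPC/RDMA error reply (for example, a version
 * mismatch) directly on the transport. The error is XDR-encoded into a
 * freshly allocated page, DMA-mapped, and posted as a single-SGE SEND.
 */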
void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
			 enum rpcrdma_errcode err)
{
	struct ib_send_wr err_wr;
	struct ib_sge sge;
	struct page *p;
	struct svc_rdma_op_ctxt *ctxt;
	u32 *va;
	int length;
	int ret;

	p = svc_rdma_get_page();
	va = page_address(p);

	/* XDR encode error */
	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

	/* Prepare SGE for local address; the page is read by the device */
	atomic_inc(&xprt->sc_dma_used);
	sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
				   p, 0, PAGE_SIZE, DMA_TO_DEVICE);
	sge.lkey = xprt->sc_phys_mr->lkey;
	sge.length = length;

	ctxt = svc_rdma_get_context(xprt);
	ctxt->count = 1;
	ctxt->pages[0] = p;

	/* Prepare SEND WR */
	memset(&err_wr, 0, sizeof err_wr);
	ctxt->wr_op = IB_WR_SEND;
	err_wr.wr_id = (unsigned long)ctxt;
	err_wr.sg_list = &sge;
	err_wr.num_sge = 1;
	err_wr.opcode = IB_WR_SEND;
	err_wr.send_flags = IB_SEND_SIGNALED;

	/* Post It */
	ret = svc_rdma_send(xprt, &err_wr);
	if (ret) {
		dprintk("svcrdma: Error %d posting send for protocol error\n",
			ret);
		svc_rdma_put_context(ctxt, 1);
	}
}