/*
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void dto_tasklet_func(unsigned long data);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static void rq_cq_reap(struct svcxprt_rdma *xprt);
static void sq_cq_reap(struct svcxprt_rdma *xprt);

static DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
static DEFINE_SPINLOCK(dto_lock);
static LIST_HEAD(dto_xprt_q);

static struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
};

struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};
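
/*
 * The transport class is registered with the generic svc transport
 * switch by the module initialization code (see svc_rdma.c), e.g.:
 *
 *	svc_reg_xprt_class(&svc_rdma_class);
 *
 * and unregistered with svc_unreg_xprt_class() at module unload.
 */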
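/*
 * Grow the per-transport context cache toward sc_ctxt_max by
 * sc_ctxt_bump entries. The sc_ctxt_lock is dropped around each
 * kmalloc because GFP_KERNEL allocations may sleep; sc_ctxt_cnt is
 * bumped first so concurrent callers do not overshoot the target.
 * Returns non-zero if at least one context was added.
 */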
static int rdma_bump_context_cache(struct svcxprt_rdma *xprt)
{
	int target;
	int at_least_one = 0;
	struct svc_rdma_op_ctxt *ctxt;

	target = min(xprt->sc_ctxt_cnt + xprt->sc_ctxt_bump,
		     xprt->sc_ctxt_max);

	spin_lock_bh(&xprt->sc_ctxt_lock);
	while (xprt->sc_ctxt_cnt < target) {
		xprt->sc_ctxt_cnt++;
		spin_unlock_bh(&xprt->sc_ctxt_lock);

		ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);

		spin_lock_bh(&xprt->sc_ctxt_lock);
		if (ctxt) {
			at_least_one = 1;
			INIT_LIST_HEAD(&ctxt->free_list);
			list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
		} else {
			/* kmalloc failed...give up for now */
			xprt->sc_ctxt_cnt--;
			break;
		}
	}
	spin_unlock_bh(&xprt->sc_ctxt_lock);
	dprintk("svcrdma: sc_ctxt_max=%d, sc_ctxt_cnt=%d\n",
		xprt->sc_ctxt_max, xprt->sc_ctxt_cnt);
	return at_least_one;
}
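/*
 * Allocate a context from the transport's free list. This function
 * never fails; if the cache is empty and cannot be grown, it logs a
 * message and sleeps until memory becomes available, so it must not
 * be called from interrupt context.
 */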
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt;

	while (1) {
		spin_lock_bh(&xprt->sc_ctxt_lock);
		if (unlikely(list_empty(&xprt->sc_ctxt_free))) {
			/* Try to bump my cache. */
			spin_unlock_bh(&xprt->sc_ctxt_lock);

			if (rdma_bump_context_cache(xprt))
				continue;

			printk(KERN_INFO "svcrdma: sleeping waiting for "
			       "context memory on xprt=%p\n",
			       xprt);
			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			continue;
		}
		ctxt = list_entry(xprt->sc_ctxt_free.next,
				  struct svc_rdma_op_ctxt,
				  free_list);
		list_del_init(&ctxt->free_list);
		spin_unlock_bh(&xprt->sc_ctxt_lock);
		ctxt->xprt = xprt;
		INIT_LIST_HEAD(&ctxt->dto_q);
		ctxt->count = 0;
		atomic_inc(&xprt->sc_ctxt_used);
		break;
	}
	return ctxt;
}
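/*
 * Return a context to the free list. DMA mappings recorded in the
 * context's SGE array are always unmapped; the pages themselves are
 * released only when the caller passes free_pages != 0.
 */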
void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
	struct svcxprt_rdma *xprt;
	int i;

	BUG_ON(!ctxt);
	xprt = ctxt->xprt;
	if (free_pages)
		for (i = 0; i < ctxt->count; i++)
			put_page(ctxt->pages[i]);

	for (i = 0; i < ctxt->count; i++)
		ib_dma_unmap_single(xprt->sc_cm_id->device,
				    ctxt->sge[i].addr,
				    ctxt->sge[i].length,
				    ctxt->direction);

	spin_lock_bh(&xprt->sc_ctxt_lock);
	list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
	spin_unlock_bh(&xprt->sc_ctxt_lock);
	atomic_dec(&xprt->sc_ctxt_used);
}

/* ib_cq event handler */
static void cq_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	dprintk("svcrdma: received CQ event id=%d, context=%p\n",
		event->event, context);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
}

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		dprintk("svcrdma: QP event %d received for QP=%p\n",
			event->event, event->element.qp);
		break;
	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		dprintk("svcrdma: QP ERROR event %d received for QP=%p, "
			"closing transport\n",
			event->event, event->element.qp);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		break;
	}
}

/*
 * Data Transfer Operation Tasklet
 *
 * Walks the list of transports with I/O pending, removing each entry
 * from the list before reaping its completion queues. Two bits in
 * sc_flags indicate whether the SQ, RQ, or both have I/O pending. The
 * dto_lock is an irqsave spinlock that serializes access to the
 * transport list with the RQ and SQ interrupt handlers.
 */
static void dto_tasklet_func(unsigned long data)
{
	struct svcxprt_rdma *xprt;
	unsigned long flags;

	spin_lock_irqsave(&dto_lock, flags);
	while (!list_empty(&dto_xprt_q)) {
		xprt = list_entry(dto_xprt_q.next,
				  struct svcxprt_rdma, sc_dto_q);
		list_del_init(&xprt->sc_dto_q);
		spin_unlock_irqrestore(&dto_lock, flags);

		rq_cq_reap(xprt);
		sq_cq_reap(xprt);

		svc_xprt_put(&xprt->sc_xprt);
		spin_lock_irqsave(&dto_lock, flags);
	}
	spin_unlock_irqrestore(&dto_lock, flags);
}

/*
 * Receive Queue Completion Handler
 *
 * Since an RQ completion handler is called in interrupt context, we
 * need to defer the handling of the I/O to a tasklet.
 */
static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an SQ
	 * completion.
	 */
	set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}

/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completing WC off the CQE and enqueue the associated DTO
 * context on the dto_q for the transport.
 *
 * Note that caller must hold a transport reference.
 */
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
	int ret;
	struct ib_wc wc;
	struct svc_rdma_op_ctxt *ctxt = NULL;

	if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_rq_poll);

	while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		ctxt->wc_status = wc.status;
		ctxt->byte_len = wc.byte_len;
		if (wc.status != IB_WC_SUCCESS) {
			/* Close the transport */
			dprintk("svcrdma: transport closing, putting ctxt %p\n",
				ctxt);
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			svc_rdma_put_context(ctxt, 1);
			svc_xprt_put(&xprt->sc_xprt);
			continue;
		}
		spin_lock_bh(&xprt->sc_rq_dto_lock);
		list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
		spin_unlock_bh(&xprt->sc_rq_dto_lock);
		svc_xprt_put(&xprt->sc_xprt);
	}

	if (ctxt)
		atomic_inc(&rdma_stat_rq_prod);

	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
	/*
	 * If data arrived before the established event,
	 * don't enqueue. This defers RPC I/O until the
	 * RDMA connection is complete.
	 */
	if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
		svc_xprt_enqueue(&xprt->sc_xprt);
}

/*
 * sq_cq_reap - Process the SQ CQ.
 *
 * Called from the DTO tasklet (softirq context) and from
 * svc_rdma_send (process context).
 *
 * Note that caller must hold a transport reference.
 */
static void sq_cq_reap(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct ib_wc wc;
	struct ib_cq *cq = xprt->sc_sq_cq;
	int ret;

	if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_sq_poll);
	while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		xprt = ctxt->xprt;

		if (wc.status != IB_WC_SUCCESS)
			/* Close the transport */
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);

		/* Decrement used SQ WR count */
		atomic_dec(&xprt->sc_sq_count);
		wake_up(&xprt->sc_send_wait);

		switch (ctxt->wr_op) {
		case IB_WR_SEND:
		case IB_WR_RDMA_WRITE:
			svc_rdma_put_context(ctxt, 1);
			break;

		case IB_WR_RDMA_READ:
			if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
				struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;

				BUG_ON(!read_hdr);
				set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
				spin_lock_bh(&xprt->sc_read_complete_lock);
				list_add_tail(&read_hdr->dto_q,
					      &xprt->sc_read_complete_q);
				spin_unlock_bh(&xprt->sc_read_complete_lock);
				svc_xprt_enqueue(&xprt->sc_xprt);
			}
			svc_rdma_put_context(ctxt, 0);
			break;

		default:
			printk(KERN_ERR "svcrdma: unexpected completion type, "
			       "opcode=%d, status=%d\n",
			       wc.opcode, wc.status);
			break;
		}
		svc_xprt_put(&xprt->sc_xprt);
	}

	if (ctxt)
		atomic_inc(&rdma_stat_sq_prod);
}
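/*
 * Send Queue Completion Handler
 *
 * Like rq_comp_handler, this runs in interrupt context, so it only
 * marks the transport as having SQ work pending, queues the transport
 * on the global DTO list, and defers the actual reaping to the
 * tasklet.
 */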
static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an RQ
	 * completion.
	 */
	set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}
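/*
 * Pre-allocate the initial pool of contexts for a connected
 * transport. Allocation failures here are tolerated; the caller
 * checks that at least one context was created, and the cache can be
 * grown later by rdma_bump_context_cache.
 */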
static void create_context_cache(struct svcxprt_rdma *xprt,
				 int ctxt_count, int ctxt_bump, int ctxt_max)
{
	struct svc_rdma_op_ctxt *ctxt;
	int i;

	xprt->sc_ctxt_max = ctxt_max;
	xprt->sc_ctxt_bump = ctxt_bump;
	xprt->sc_ctxt_cnt = 0;
	atomic_set(&xprt->sc_ctxt_used, 0);

	INIT_LIST_HEAD(&xprt->sc_ctxt_free);
	for (i = 0; i < ctxt_count; i++) {
		ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
		if (ctxt) {
			INIT_LIST_HEAD(&ctxt->free_list);
			list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
			xprt->sc_ctxt_cnt++;
		}
	}
}

static void destroy_context_cache(struct svcxprt_rdma *xprt)
{
	while (!list_empty(&xprt->sc_ctxt_free)) {
		struct svc_rdma_op_ctxt *ctxt;

		ctxt = list_entry(xprt->sc_ctxt_free.next,
				  struct svc_rdma_op_ctxt,
				  free_list);
		list_del_init(&ctxt->free_list);
		kfree(ctxt);
	}
}

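/*
 * Allocate and initialize the transport-private structure that wraps
 * the generic svc_xprt. Listening endpoints never carry I/O, so the
 * context cache is created only for connected (non-listener)
 * transports.
 */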
static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
					     int listener)
{
	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof(*cma_xprt), GFP_KERNEL);

	if (!cma_xprt)
		return NULL;
	svc_xprt_init(&svc_rdma_class, &cma_xprt->sc_xprt, serv);
	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
	INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
	init_waitqueue_head(&cma_xprt->sc_send_wait);

	spin_lock_init(&cma_xprt->sc_lock);
	spin_lock_init(&cma_xprt->sc_read_complete_lock);
	spin_lock_init(&cma_xprt->sc_ctxt_lock);
	spin_lock_init(&cma_xprt->sc_rq_dto_lock);

	cma_xprt->sc_ord = svcrdma_ord;

	cma_xprt->sc_max_req_size = svcrdma_max_req_size;
	cma_xprt->sc_max_requests = svcrdma_max_requests;
	cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
	atomic_set(&cma_xprt->sc_sq_count, 0);

	if (!listener) {
		int reqs = cma_xprt->sc_max_requests;

		create_context_cache(cma_xprt,
				     reqs << 1, /* starting size */
				     reqs,	/* bump amount */
				     reqs +
				     cma_xprt->sc_sq_depth +
				     RPCRDMA_MAX_THREADS + 1); /* max */
		if (list_empty(&cma_xprt->sc_ctxt_free)) {
			kfree(cma_xprt);
			return NULL;
		}
		clear_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);
	} else {
		set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);
	}

	return cma_xprt;
}
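/*
 * Allocate a page for a receive or send buffer, retrying forever.
 * Like svc_rdma_get_context, this can sleep and therefore must be
 * called from process context.
 */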
struct page *svc_rdma_get_page(void)
{
	struct page *page;

	while ((page = alloc_page(GFP_KERNEL)) == NULL) {
		/* If we can't get memory, wait a bit and try again */
		printk(KERN_INFO "svcrdma: out of memory...retrying in 1000 ms\n");
		schedule_timeout_uninterruptible(msecs_to_jiffies(1000));
	}
	return page;
}
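/*
 * Post a receive WR covering sc_max_req_size bytes of page-sized
 * SGEs. The context is stashed in wr_id so the completion handler can
 * recover it, and a transport reference is held for the life of the
 * posted WR.
 */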
int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct svc_rdma_op_ctxt *ctxt;
	struct page *page;
	unsigned long pa;
	int sge_no;
	int buflen;
	int ret;

	ctxt = svc_rdma_get_context(xprt);
	buflen = 0;
	ctxt->direction = DMA_FROM_DEVICE;
	for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
		BUG_ON(sge_no >= xprt->sc_max_sge);
		page = svc_rdma_get_page();
		ctxt->pages[sge_no] = page;
		pa = ib_dma_map_page(xprt->sc_cm_id->device,
				     page, 0, PAGE_SIZE,
				     DMA_FROM_DEVICE);
		ctxt->sge[sge_no].addr = pa;
		ctxt->sge[sge_no].length = PAGE_SIZE;
		ctxt->sge[sge_no].lkey = xprt->sc_phys_mr->lkey;
		buflen += PAGE_SIZE;
	}
	ctxt->count = sge_no;
	recv_wr.next = NULL;
	recv_wr.sg_list = &ctxt->sge[0];
	recv_wr.num_sge = ctxt->count;
	recv_wr.wr_id = (u64)(unsigned long)ctxt;

	svc_xprt_get(&xprt->sc_xprt);
	ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
	if (ret) {
		svc_xprt_put(&xprt->sc_xprt);
		svc_rdma_put_context(ctxt, 1);
	}
	return ret;
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked,
 * it will call the recvfrom method on the listen xprt which will accept the
 * new connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;

	/* Create a new transport */
	newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
	if (!newxprt) {
		dprintk("svcrdma: failed to create new transport\n");
		return;
	}
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;
	dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
		newxprt, newxprt->sc_cm_id, listen_xprt);

	/* Set the local and remote addresses in the transport */
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport
	 */
	spin_lock_bh(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock_bh(&listen_xprt->sc_lock);

	/*
	 * Can't use svc_xprt_received here because we are not on a
	 * rqstp thread
	 */
	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events
 * are either incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
			       struct rdma_cm_event *event)
{
	struct svcxprt_rdma *xprt = cma_id->context;
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
			"event=%d\n", cma_id, cma_id->context, event->event);
		handle_connect_req(cma_id);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		break;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt)
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		break;

	default:
		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
			"event=%d\n", cma_id, event->event);
		break;
	}

	return ret;
}

static int rdma_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct svc_xprt *xprt = cma_id->context;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		svc_xprt_get(xprt);
		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
			"event=%d\n", cma_id, xprt, event->event);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
		}
		break;
	default:
		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
			"event=%d\n", cma_id, event->event);
		break;
	}
	return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
	struct svc_xprt *xprt;
	int ret;

	dprintk("svcrdma: Creating RDMA socket\n");

	cma_xprt = rdma_create_xprt(serv, 1);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);
	xprt = &cma_xprt->sc_xprt;

	listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
		goto err0;
	}

	ret = rdma_bind_addr(listen_id, sa);
	if (ret) {
		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
		goto err1;
	}
	cma_xprt->sc_cm_id = listen_id;

	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret) {
		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
		goto err1;
	}

	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
	 */
	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return xprt;

 err1:
	rdma_destroy_id(listen_id);
 err0:
	kfree(cma_xprt);
	return ERR_PTR(ret);
}

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *listen_rdma;
	struct svcxprt_rdma *newxprt = NULL;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct ib_device_attr devattr;
	int ret;
	int i;

	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
	clear_bit(XPT_CONN, &xprt->xpt_flags);
	/* Get the next entry off the accept list */
	spin_lock_bh(&listen_rdma->sc_lock);
	if (!list_empty(&listen_rdma->sc_accept_q)) {
		newxprt = list_entry(listen_rdma->sc_accept_q.next,
				     struct svcxprt_rdma, sc_accept_q);
		list_del_init(&newxprt->sc_accept_q);
	}
	if (!list_empty(&listen_rdma->sc_accept_q))
		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
	spin_unlock_bh(&listen_rdma->sc_lock);
	if (!newxprt)
		return NULL;

	dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
		newxprt, newxprt->sc_cm_id);

	ret = ib_query_device(newxprt->sc_cm_id->device, &devattr);
	if (ret) {
		dprintk("svcrdma: could not query device attributes on "
			"device %p, rc=%d\n", newxprt->sc_cm_id->device, ret);
		goto errout;
	}

	/* Qualify the transport resource defaults with the
	 * capabilities of this particular device */
	newxprt->sc_max_sge = min((size_t)devattr.max_sge,
				  (size_t)RPCSVC_MAXPAGES);
	newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
				       (size_t)svcrdma_max_requests);
	newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;

	newxprt->sc_ord = min((size_t)devattr.max_qp_rd_atom,
			      (size_t)svcrdma_ord);

	newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device);
	if (IS_ERR(newxprt->sc_pd)) {
		ret = PTR_ERR(newxprt->sc_pd);
		dprintk("svcrdma: error creating PD for connect request\n");
		goto errout;
	}
	newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
					 sq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 newxprt->sc_sq_depth,
					 0);
	if (IS_ERR(newxprt->sc_sq_cq)) {
		ret = PTR_ERR(newxprt->sc_sq_cq);
		dprintk("svcrdma: error creating SQ CQ for connect request\n");
		goto errout;
	}
	newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
					 rq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 newxprt->sc_max_requests,
					 0);
	if (IS_ERR(newxprt->sc_rq_cq)) {
		ret = PTR_ERR(newxprt->sc_rq_cq);
		dprintk("svcrdma: error creating RQ CQ for connect request\n");
		goto errout;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = &newxprt->sc_xprt;
	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
	qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
	qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
	qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = newxprt->sc_sq_cq;
	qp_attr.recv_cq = newxprt->sc_rq_cq;
	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
		"    cm_id->device=%p, sc_pd->device=%p\n"
		"    cap.max_send_wr = %d\n"
		"    cap.max_recv_wr = %d\n"
		"    cap.max_send_sge = %d\n"
		"    cap.max_recv_sge = %d\n",
		newxprt->sc_cm_id, newxprt->sc_pd,
		newxprt->sc_cm_id->device, newxprt->sc_pd->device,
		qp_attr.cap.max_send_wr,
		qp_attr.cap.max_recv_wr,
		qp_attr.cap.max_send_sge,
		qp_attr.cap.max_recv_sge);

	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
	if (ret) {
		/*
		 * XXX: This is a hack. We need a xx_request_qp interface
		 * that will adjust the qp_attr's with a best-effort
		 * number
		 */
		qp_attr.cap.max_send_sge -= 2;
		qp_attr.cap.max_recv_sge -= 2;
		ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd,
				     &qp_attr);
		if (ret) {
			dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
			goto errout;
		}
		/* Use the smaller of the adjusted send and recv SGE
		 * limits for both directions. */
		newxprt->sc_max_sge = min(qp_attr.cap.max_send_sge,
					  qp_attr.cap.max_recv_sge);
		newxprt->sc_sq_depth = qp_attr.cap.max_send_wr;
		newxprt->sc_max_requests = qp_attr.cap.max_recv_wr;
	}
	newxprt->sc_qp = newxprt->sc_cm_id->qp;

	/* Register all of physical memory */
	newxprt->sc_phys_mr = ib_get_dma_mr(newxprt->sc_pd,
					    IB_ACCESS_LOCAL_WRITE |
					    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(newxprt->sc_phys_mr)) {
		ret = PTR_ERR(newxprt->sc_phys_mr);
		dprintk("svcrdma: Failed to create DMA MR ret=%d\n", ret);
		goto errout;
	}

	/* Post receive buffers */
	for (i = 0; i < newxprt->sc_max_requests; i++) {
		ret = svc_rdma_post_recv(newxprt);
		if (ret) {
			dprintk("svcrdma: failure posting receive buffers\n");
			goto errout;
		}
	}

	/* Swap out the handler */
	newxprt->sc_cm_id->event_handler = rdma_cma_handler;

	/*
	 * Arm the CQs for the SQ and RQ before accepting so we can't
	 * miss the first message
	 */
	ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);

	/* Accept Connection */
	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.responder_resources = 0;
	conn_param.initiator_depth = newxprt->sc_ord;
	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
	if (ret) {
		dprintk("svcrdma: failed to accept new connection, ret=%d\n",
			ret);
		goto errout;
	}

	dprintk("svcrdma: new connection %p accepted with the following "
		"attributes:\n"
		"    local_ip        : %d.%d.%d.%d\n"
		"    local_port      : %d\n"
		"    remote_ip       : %d.%d.%d.%d\n"
		"    remote_port     : %d\n"
		"    max_sge         : %d\n"
		"    sq_depth        : %d\n"
		"    max_requests    : %d\n"
		"    ord             : %d\n",
		newxprt,
		NIPQUAD(((struct sockaddr_in *)&newxprt->sc_cm_id->
			 route.addr.src_addr)->sin_addr.s_addr),
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.src_addr)->sin_port),
		NIPQUAD(((struct sockaddr_in *)&newxprt->sc_cm_id->
			 route.addr.dst_addr)->sin_addr.s_addr),
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.dst_addr)->sin_port),
		newxprt->sc_max_sge,
		newxprt->sc_sq_depth,
		newxprt->sc_max_requests,
		newxprt->sc_ord);

	return &newxprt->sc_xprt;

 errout:
	dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
	/* Take a reference in case the DTO handler runs */
	svc_xprt_get(&newxprt->sc_xprt);
	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
		ib_destroy_qp(newxprt->sc_qp);
	rdma_destroy_id(newxprt->sc_cm_id);
	/* This call to put will destroy the transport */
	svc_xprt_put(&newxprt->sc_xprt);
	return NULL;
}

static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum, one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	dprintk("svc: svc_rdma_detach(%p)\n", xprt);

	/* Disconnect and flush posted WQE */
	rdma_disconnect(rdma->sc_cm_id);
}

static void __svc_rdma_free(struct work_struct *work)
{
	struct svcxprt_rdma *rdma =
		container_of(work, struct svcxprt_rdma, sc_work);

	dprintk("svcrdma: __svc_rdma_free(%p)\n", rdma);

	/* We should only be called from kref_put */
	BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);

	/*
	 * Destroy queued, but not processed read completions. Note
	 * that this cleanup has to be done before destroying the
	 * cm_id because the device ptr is needed to unmap the dma in
	 * svc_rdma_put_context.
	 */
	spin_lock_bh(&rdma->sc_read_complete_lock);
	while (!list_empty(&rdma->sc_read_complete_q)) {
		struct svc_rdma_op_ctxt *ctxt;

		ctxt = list_entry(rdma->sc_read_complete_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}
	spin_unlock_bh(&rdma->sc_read_complete_lock);

	/* Destroy queued, but not processed recv completions */
	spin_lock_bh(&rdma->sc_rq_dto_lock);
	while (!list_empty(&rdma->sc_rq_dto_q)) {
		struct svc_rdma_op_ctxt *ctxt;

		ctxt = list_entry(rdma->sc_rq_dto_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}
	spin_unlock_bh(&rdma->sc_rq_dto_lock);

	/* Warn if we leaked a resource or under-referenced */
	WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_destroy_cq(rdma->sc_sq_cq);

	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_destroy_cq(rdma->sc_rq_cq);

	if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
		ib_dereg_mr(rdma->sc_phys_mr);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	destroy_context_cache(rdma);
	kfree(rdma);
}
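/*
 * The final kref_put can happen in contexts that cannot sleep (e.g.
 * the DTO tasklet), while destroying the QP, the CQs, and the cm_id
 * can. Defer the actual teardown to a workqueue.
 */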
static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
	schedule_work(&rdma->sc_work);
}

static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/*
	 * If there are fewer SQ WR available than required to send a
	 * simple response, return false.
	 */
	if (rdma->sc_sq_depth - atomic_read(&rdma->sc_sq_count) < 3)
		return 0;

	/*
	 * ...or there are already waiters on the SQ,
	 * return false.
	 */
	if (waitqueue_active(&rdma->sc_send_wait))
		return 0;

	/* Otherwise return true. */
	return 1;
}

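/*
 * Post a send WR, blocking while the SQ is full. SQ slots are
 * accounted in sc_sq_count; when the queue is full the caller first
 * tries to reap completed WRs and then sleeps on sc_send_wait until
 * sq_cq_reap frees a slot. Must be called from process context.
 */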
int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr;
	int ret;

	if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
		return -ENOTCONN;

	BUG_ON(wr->send_flags != IB_SEND_SIGNALED);
	BUG_ON(((struct svc_rdma_op_ctxt *)(unsigned long)wr->wr_id)->wr_op !=
		wr->opcode);
	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		spin_lock_bh(&xprt->sc_lock);
		if (xprt->sc_sq_depth == atomic_read(&xprt->sc_sq_count)) {
			spin_unlock_bh(&xprt->sc_lock);
			atomic_inc(&rdma_stat_sq_starve);

			/* See if we can opportunistically reap SQ WR to make room */
			sq_cq_reap(xprt);

			/* Wait until SQ WR available if SQ still full */
			wait_event(xprt->sc_send_wait,
				   atomic_read(&xprt->sc_sq_count) <
				   xprt->sc_sq_depth);
			/* Transport closed while we waited; tell the
			 * caller so it can clean up. */
			if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
				return -ENOTCONN;
			continue;
		}
		/* Bump used SQ WR count and post */
		svc_xprt_get(&xprt->sc_xprt);
		ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
		if (!ret) {
			atomic_inc(&xprt->sc_sq_count);
		} else {
			svc_xprt_put(&xprt->sc_xprt);
			dprintk("svcrdma: failed to post SQ WR rc=%d, "
				"sc_sq_count=%d, sc_sq_depth=%d\n",
				ret, atomic_read(&xprt->sc_sq_count),
				xprt->sc_sq_depth);
		}
		spin_unlock_bh(&xprt->sc_lock);
		break;
	}
	return ret;
}
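/*
 * Send an RPC/RDMA protocol error reply (e.g. ERR_VERS) directly,
 * bypassing the normal sendto path. The error is XDR encoded into a
 * freshly allocated page, which is mapped for the SEND and released
 * by svc_rdma_put_context when the WR completes.
 */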
void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
			 enum rpcrdma_errcode err)
{
	struct ib_send_wr err_wr;
	struct ib_sge sge;
	struct page *p;
	struct svc_rdma_op_ctxt *ctxt;
	u32 *va;
	int length;
	int ret;

	p = svc_rdma_get_page();
	va = page_address(p);

	/* XDR encode error */
	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

	/* Prepare SGE for local address. The page is being sent to
	 * the peer, so map it for device reads. */
	sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
				   p, 0, length, DMA_TO_DEVICE);
	sge.lkey = xprt->sc_phys_mr->lkey;
	sge.length = length;

	ctxt = svc_rdma_get_context(xprt);
	ctxt->count = 1;
	ctxt->direction = DMA_TO_DEVICE;
	ctxt->pages[0] = p;
	/* Record the SGE so svc_rdma_put_context can unmap it when
	 * the send completes. */
	ctxt->sge[0] = sge;

	/* Prepare SEND WR */
	memset(&err_wr, 0, sizeof(err_wr));
	ctxt->wr_op = IB_WR_SEND;
	err_wr.wr_id = (u64)(unsigned long)ctxt;
	err_wr.sg_list = &sge;
	err_wr.num_sge = 1;
	err_wr.opcode = IB_WR_SEND;
	err_wr.send_flags = IB_SEND_SIGNALED;

	/* Post It */
	ret = svc_rdma_send(xprt, &err_wr);
	if (ret) {
		dprintk("svcrdma: Error %d posting send for protocol error\n",
			ret);
		svc_rdma_put_context(ctxt, 1);
	}
}