/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ratelimit.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"

/*
 * Set the selected protocol version
 */
static void rds_ib_set_protocol(struct rds_connection *conn, unsigned int version)
{
	conn->c_version = version;
}
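
/*
 * A note on the encoding used by rds_ib_set_protocol() above: rds.h
 * packs the negotiated version into a single u32 with the major number
 * in the high byte and the minor number in the low byte, e.g.
 *
 *	RDS_PROTOCOL(3, 1) == 0x0301
 *	RDS_PROTOCOL_MAJOR(0x0301) == 3, RDS_PROTOCOL_MINOR(0x0301) == 1
 *
 * which is why versions can be compared directly with '<' further down.
 */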

/*
 * Set up flow control
 */
static void rds_ib_set_flow_control(struct rds_connection *conn, u32 credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (rds_ib_sysctl_flow_control && credits != 0) {
		/* We're doing flow control */
		ic->i_flowctl = 1;
		rds_ib_send_add_credits(conn, credits);
	} else {
		ic->i_flowctl = 0;
	}
}

/*
 * Tune RNR behavior. Without flow control, we use a rather
 * low timeout, but not the absolute minimum - this should
 * be tunable.
 *
 * We already set the RNR retry count to 7 (which is the
 * smallest infinite number :-) above.
 * If flow control is off, we want to change this back to 0
 * so that we learn quickly when our credit accounting is
 * buggy.
 *
 * The caller passes in a qp_attr pointer - don't waste stack space
 * by allocating this twice.
 */
static void
rds_ib_tune_rnr(struct rds_ib_connection *ic, struct ib_qp_attr *attr)
{
	int ret;

	attr->min_rnr_timer = IB_RNR_TIMER_000_32;
	ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER);
	if (ret)
		printk(KERN_NOTICE "ib_modify_qp(IB_QP_MIN_RNR_TIMER): err=%d\n", -ret);
}
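
/*
 * For reference, IB_RNR_TIMER_000_32 is one of the IBTA-defined RNR NAK
 * timer encodings; the enum names in ib_verbs.h spell milliseconds with
 * the underscore as a decimal point, so this asks the peer to wait
 * roughly 0.32 ms before retrying after a receiver-not-ready NAK.
 */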

/*
 * Connection established.
 * We get here for both outgoing and incoming connections.
 */
void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)
{
	const struct rds_ib_connect_private *dp = NULL;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_qp_attr qp_attr;
	int err;

	if (event->param.conn.private_data_len >= sizeof(*dp)) {
		dp = event->param.conn.private_data;

		/* make sure it isn't empty data */
		if (dp->dp_protocol_major) {
			rds_ib_set_protocol(conn,
				RDS_PROTOCOL(dp->dp_protocol_major,
				dp->dp_protocol_minor));
			rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
		}
	}

	if (conn->c_version < RDS_PROTOCOL(3, 1)) {
		pr_notice("RDS/IB: Connection <%pI4,%pI4> version %u.%u no longer supported\n",
			  &conn->c_laddr, &conn->c_faddr,
			  RDS_PROTOCOL_MAJOR(conn->c_version),
			  RDS_PROTOCOL_MINOR(conn->c_version));
		set_bit(RDS_DESTROY_PENDING, &conn->c_path[0].cp_flags);
		rds_conn_destroy(conn);
		return;
	} else {
		pr_notice("RDS/IB: %s conn connected <%pI4,%pI4> version %u.%u%s\n",
			  ic->i_active_side ? "Active" : "Passive",
			  &conn->c_laddr, &conn->c_faddr,
			  RDS_PROTOCOL_MAJOR(conn->c_version),
			  RDS_PROTOCOL_MINOR(conn->c_version),
			  ic->i_flowctl ? ", flow control" : "");
	}

	atomic_set(&ic->i_cq_quiesce, 0);

	/* Init rings and fill recv. This needs to wait until protocol
	 * negotiation is complete, since the ring layout is different
	 * from 3.1 to 4.1.
	 */
	rds_ib_send_init_ring(ic);
	rds_ib_recv_init_ring(ic);
	/* Post receive buffers - as a side effect, this will update
	 * the posted credit count. */
	rds_ib_recv_refill(conn, 1, GFP_KERNEL);

	/* Tune RNR behavior */
	rds_ib_tune_rnr(ic, &qp_attr);

	qp_attr.qp_state = IB_QPS_RTS;
	err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE);
	if (err)
		printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err);

	/* update ib_device with this local ipaddr */
	err = rds_ib_update_ipaddr(ic->rds_ibdev, conn->c_laddr);
	if (err)
		printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n",
			err);

	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp) {
		/* The start of the dp structure is not guaranteed to be
		 * 8-byte aligned. Since dp_ack_seq is 64 bits wide, extended
		 * load operations may be used for it, so go through
		 * get_unaligned to avoid unaligned access errors.
		 */
		__be64 dp_ack_seq = get_unaligned(&dp->dp_ack_seq);

		if (dp_ack_seq)
			rds_send_drop_acked(conn, be64_to_cpu(dp_ack_seq),
					    NULL);
	}

	rds_connect_complete(conn);
}

static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
			struct rdma_conn_param *conn_param,
			struct rds_ib_connect_private *dp,
			u32 protocol_version,
			u32 max_responder_resources,
			u32 max_initiator_depth)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;

	memset(conn_param, 0, sizeof(struct rdma_conn_param));

	conn_param->responder_resources =
		min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources);
	conn_param->initiator_depth =
		min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth);
	conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
	conn_param->rnr_retry_count = 7;

	if (dp) {
		memset(dp, 0, sizeof(*dp));
		dp->dp_saddr = conn->c_laddr;
		dp->dp_daddr = conn->c_faddr;
		dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
		dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
		dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
		dp->dp_ack_seq = cpu_to_be64(rds_ib_piggyb_ack(ic));

		/* Advertise flow control */
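		/* A note on the encoding (see the IB_GET/SET_*_CREDITS
		 * helpers in ib.h): ic->i_credits packs two 16-bit counters
		 * into one atomic, send credits in the low half and newly
		 * posted receive buffers in the high half. The posted count
		 * is moved into the advertisement and subtracted from the
		 * atomic in one step, so a concurrent refill cannot be
		 * advertised twice.
		 */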
		if (ic->i_flowctl) {
			unsigned int credits;

			credits = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits));
			dp->dp_credit = cpu_to_be32(credits);
			atomic_sub(IB_SET_POST_CREDITS(credits), &ic->i_credits);
		}

		conn_param->private_data = dp;
		conn_param->private_data_len = sizeof(*dp);
	}
}

static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
{
	rdsdebug("event %u (%s) data %p\n",
		 event->event, ib_event_msg(event->event), data);
}

/* Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring.  Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_evt_handler_call);

	tasklet_schedule(&ic->i_recv_tasklet);
}

static void poll_scq(struct rds_ib_connection *ic, struct ib_cq *cq,
		     struct ib_wc *wcs)
{
	int nr, i;
	struct ib_wc *wc;

	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
		for (i = 0; i < nr; i++) {
			wc = wcs + i;
			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
				 (unsigned long long)wc->wr_id, wc->status,
				 wc->byte_len, be32_to_cpu(wc->ex.imm_data));

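			/* Send ring slots appear to use their ring index as
			 * wr_id (hence the <= w_nr check) and the dedicated
			 * ACK WR uses the RDS_IB_ACK_WR_ID sentinel; anything
			 * else completing on this CQ is taken to be an FRMR
			 * registration or invalidation work request.
			 */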
			if (wc->wr_id <= ic->i_send_ring.w_nr ||
			    wc->wr_id == RDS_IB_ACK_WR_ID)
				rds_ib_send_cqe_handler(ic, wc);
			else
				rds_ib_mr_cqe_handler(ic, wc);
		}
	}
}

static void rds_ib_tasklet_fn_send(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
	struct rds_connection *conn = ic->conn;

	rds_ib_stats_inc(s_ib_tasklet_call);

	/* if the cq has already been reaped, ignore incoming cq events */
	if (atomic_read(&ic->i_cq_quiesce))
		return;

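	/* Drain the CQ, re-arm the completion notification, then drain once
	 * more: a completion that arrives between the first drain and the
	 * re-arm would otherwise sit unnoticed until the next event. The
	 * receive tasklet below follows the same drain/re-arm/drain pattern.
	 */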
	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
	ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);

	if (rds_conn_up(conn) &&
	    (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
	    test_bit(0, &conn->c_map_queued)))
		rds_send_xmit(&ic->conn->c_path[0]);
}

static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq,
		     struct ib_wc *wcs,
		     struct rds_ib_ack_state *ack_state)
{
	int nr, i;
	struct ib_wc *wc;

	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
		for (i = 0; i < nr; i++) {
			wc = wcs + i;
			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
				 (unsigned long long)wc->wr_id, wc->status,
				 wc->byte_len, be32_to_cpu(wc->ex.imm_data));

			rds_ib_recv_cqe_handler(ic, wc, ack_state);
		}
	}
}

static void rds_ib_tasklet_fn_recv(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
	struct rds_ib_ack_state state;

	if (!rds_ibdev)
		rds_conn_drop(conn);

	rds_ib_stats_inc(s_ib_tasklet_call);

	/* if the cq has already been reaped, ignore incoming cq events */
	if (atomic_read(&ic->i_cq_quiesce))
		return;

	memset(&state, 0, sizeof(state));
	poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);

	if (state.ack_next_valid)
		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
		rds_send_drop_acked(conn, state.ack_recv, NULL);
		ic->i_ack_recv = state.ack_recv;
	}

	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);
}

static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
{
	struct rds_connection *conn = data;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event,
		 ib_event_msg(event->event));

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
		break;
	default:
		rdsdebug("Fatal QP Event %u (%s) "
			"- connection %pI4->%pI4, reconnecting\n",
			event->event, ib_event_msg(event->event),
			&conn->c_laddr, &conn->c_faddr);
		rds_conn_drop(conn);
		break;
	}
}

static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_evt_handler_call);

	tasklet_schedule(&ic->i_send_tasklet);
}

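/*
 * Completion vector load balancing: each CQ is bound to the least loaded
 * interrupt vector at creation time and drops its claim via
 * ibdev_put_vector() when it is destroyed. For example, with per-vector
 * loads of {2, 1, 3}, a new connection's CQ lands on vector 1.
 */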
static inline int ibdev_get_unused_vector(struct rds_ib_device *rds_ibdev)
{
	int min = rds_ibdev->vector_load[rds_ibdev->dev->num_comp_vectors - 1];
	int index = rds_ibdev->dev->num_comp_vectors - 1;
	int i;

	for (i = rds_ibdev->dev->num_comp_vectors - 1; i >= 0; i--) {
		if (rds_ibdev->vector_load[i] < min) {
			index = i;
			min = rds_ibdev->vector_load[i];
		}
	}

	rds_ibdev->vector_load[index]++;
	return index;
}

static inline void ibdev_put_vector(struct rds_ib_device *rds_ibdev, int index)
{
	rds_ibdev->vector_load[index]--;
}

/*
 * This needs to be very careful to not leave IS_ERR pointers around for
 * cleanup to trip over.
 */
static int rds_ib_setup_qp(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct ib_qp_init_attr attr;
	struct ib_cq_init_attr cq_attr = {};
	struct rds_ib_device *rds_ibdev;
	int ret, fr_queue_space;

	/*
	 * It's normal to see a null device if an incoming connection races
	 * with device removal, so we don't print a warning.
	 */
	rds_ibdev = rds_ib_get_client_data(dev);
	if (!rds_ibdev)
		return -EOPNOTSUPP;

	/* The fr_queue_space is currently set to 512, to add extra space on
	 * the completion queue and send queue. This extra space is used for
	 * FRMR registration and invalidation work requests.
	 */
	fr_queue_space = rds_ibdev->use_fastreg ?
			 (RDS_IB_DEFAULT_FR_WR + 1) +
			 (RDS_IB_DEFAULT_FR_INV_WR + 1)
			 : 0;

	/* add the conn now so that connection establishment has the dev */
	rds_ib_add_conn(rds_ibdev, conn);

	if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
		rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
	if (rds_ibdev->max_wrs < ic->i_recv_ring.w_nr + 1)
		rds_ib_ring_resize(&ic->i_recv_ring, rds_ibdev->max_wrs - 1);

	/* Protection domain and memory range */
	ic->i_pd = rds_ibdev->pd;

	ic->i_scq_vector = ibdev_get_unused_vector(rds_ibdev);
	cq_attr.cqe = ic->i_send_ring.w_nr + fr_queue_space + 1;
	cq_attr.comp_vector = ic->i_scq_vector;
	ic->i_send_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_send,
				     rds_ib_cq_event_handler, conn,
				     &cq_attr);
	if (IS_ERR(ic->i_send_cq)) {
		ret = PTR_ERR(ic->i_send_cq);
		ic->i_send_cq = NULL;
		ibdev_put_vector(rds_ibdev, ic->i_scq_vector);
		rdsdebug("ib_create_cq send failed: %d\n", ret);
		goto rds_ibdev_out;
	}

	ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev);
	cq_attr.cqe = ic->i_recv_ring.w_nr;
	cq_attr.comp_vector = ic->i_rcq_vector;
	ic->i_recv_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv,
				     rds_ib_cq_event_handler, conn,
				     &cq_attr);
	if (IS_ERR(ic->i_recv_cq)) {
		ret = PTR_ERR(ic->i_recv_cq);
		ic->i_recv_cq = NULL;
		ibdev_put_vector(rds_ibdev, ic->i_rcq_vector);
		rdsdebug("ib_create_cq recv failed: %d\n", ret);
		goto send_cq_out;
	}

	ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
	if (ret) {
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
		goto recv_cq_out;
	}

	ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	if (ret) {
		rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
		goto recv_cq_out;
	}

	/* XXX negotiate max send/recv with remote? */
	memset(&attr, 0, sizeof(attr));
	attr.event_handler = rds_ib_qp_event_handler;
	attr.qp_context = conn;
	/* + 1 to allow for the single ack message */
	attr.cap.max_send_wr = ic->i_send_ring.w_nr + fr_queue_space + 1;
	attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1;
	attr.cap.max_send_sge = rds_ibdev->max_sge;
	attr.cap.max_recv_sge = RDS_IB_RECV_SGE;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	attr.send_cq = ic->i_send_cq;
	attr.recv_cq = ic->i_recv_cq;
	atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR);
	atomic_set(&ic->i_fastunreg_wrs, RDS_IB_DEFAULT_FR_INV_WR);

	/*
	 * XXX this can fail if max_*_wr is too large?  Are we supposed
	 * to back off until we get a value that the hardware can support?
	 */
	ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
	if (ret) {
		rdsdebug("rdma_create_qp failed: %d\n", ret);
		goto recv_cq_out;
	}

	ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
					   ic->i_send_ring.w_nr *
						sizeof(struct rds_header),
					   &ic->i_send_hdrs_dma, GFP_KERNEL);
	if (!ic->i_send_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent send failed\n");
		goto qp_out;
	}

	ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
					   ic->i_recv_ring.w_nr *
						sizeof(struct rds_header),
					   &ic->i_recv_hdrs_dma, GFP_KERNEL);
	if (!ic->i_recv_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent recv failed\n");
		goto send_hdrs_dma_out;
	}

	ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
				       &ic->i_ack_dma, GFP_KERNEL);
	if (!ic->i_ack) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent ack failed\n");
		goto recv_hdrs_dma_out;
	}

	ic->i_sends = vzalloc_node(array_size(sizeof(struct rds_ib_send_work),
					      ic->i_send_ring.w_nr),
				   ibdev_to_node(dev));
	if (!ic->i_sends) {
		ret = -ENOMEM;
		rdsdebug("send allocation failed\n");
		goto ack_dma_out;
	}

	ic->i_recvs = vzalloc_node(array_size(sizeof(struct rds_ib_recv_work),
					      ic->i_recv_ring.w_nr),
				   ibdev_to_node(dev));
	if (!ic->i_recvs) {
		ret = -ENOMEM;
		rdsdebug("recv allocation failed\n");
		goto sends_out;
	}

	rds_ib_recv_init_ack(ic);

	rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
		 ic->i_send_cq, ic->i_recv_cq);

	goto out;

sends_out:
	vfree(ic->i_sends);
ack_dma_out:
	ib_dma_free_coherent(dev, sizeof(struct rds_header),
			     ic->i_ack, ic->i_ack_dma);
recv_hdrs_dma_out:
	ib_dma_free_coherent(dev, ic->i_recv_ring.w_nr *
					sizeof(struct rds_header),
					ic->i_recv_hdrs, ic->i_recv_hdrs_dma);
send_hdrs_dma_out:
	ib_dma_free_coherent(dev, ic->i_send_ring.w_nr *
					sizeof(struct rds_header),
					ic->i_send_hdrs, ic->i_send_hdrs_dma);
qp_out:
	rdma_destroy_qp(ic->i_cm_id);
recv_cq_out:
	if (!ib_destroy_cq(ic->i_recv_cq))
		ic->i_recv_cq = NULL;
send_cq_out:
	if (!ib_destroy_cq(ic->i_send_cq))
		ic->i_send_cq = NULL;
rds_ibdev_out:
	rds_ib_remove_conn(rds_ibdev, conn);
out:
	rds_ib_dev_put(rds_ibdev);

	return ret;
}

static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
{
	const struct rds_ib_connect_private *dp = event->param.conn.private_data;
	u16 common;
	u32 version = 0;

	/*
	 * rdma_cm private data is odd - when there is any private data in the
	 * request, we will be given a pretty large buffer without telling us the
	 * original size. The only way to tell the difference is by looking at
	 * the contents, which are initialized to zero.
	 * If the protocol version fields aren't set, this is a connection attempt
	 * from an older version. This could be 3.0 or 2.0 - we can't tell.
	 * We really should have changed this for OFED 1.3 :-(
	 */

	/* Be paranoid. RDS always has privdata */
	if (!event->param.conn.private_data_len) {
		printk(KERN_NOTICE "RDS incoming connection has no private data, "
			"rejecting\n");
		return 0;
	}

	/* Even if len is crap *now* I still want to check it. -ASG */
	if (event->param.conn.private_data_len < sizeof (*dp) ||
	    dp->dp_protocol_major == 0)
		return RDS_PROTOCOL_3_0;

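	/*
	 * Worked example: if the peer's minor mask and ours share the bits
	 * 0x06 (minors 1 and 2), the loop below shifts common right twice
	 * before it empties, bumping version from RDS_PROTOCOL_3_0 to 3.2 -
	 * the highest minor version supported by both sides.
	 */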
	common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS;
	if (dp->dp_protocol_major == 3 && common) {
		version = RDS_PROTOCOL_3_0;
		while ((common >>= 1) != 0)
			version++;
	} else
		printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n",
				&dp->dp_saddr,
				dp->dp_protocol_major,
				dp->dp_protocol_minor);
	return version;
}

int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
				    struct rdma_cm_event *event)
{
	__be64 lguid = cm_id->route.path_rec->sgid.global.interface_id;
	__be64 fguid = cm_id->route.path_rec->dgid.global.interface_id;
	const struct rds_ib_connect_private *dp = event->param.conn.private_data;
	struct rds_ib_connect_private dp_rep;
	struct rds_connection *conn = NULL;
	struct rds_ib_connection *ic = NULL;
	struct rdma_conn_param conn_param;
	u32 version;
	int err = 1, destroy = 1;

	/* Check whether the remote protocol version matches ours. */
	version = rds_ib_protocol_compatible(event);
	if (!version)
		goto out;

	rdsdebug("saddr %pI4 daddr %pI4 RDSv%u.%u lguid 0x%llx fguid "
		 "0x%llx\n", &dp->dp_saddr, &dp->dp_daddr,
		 RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version),
		 (unsigned long long)be64_to_cpu(lguid),
		 (unsigned long long)be64_to_cpu(fguid));

	/* RDS/IB is not currently netns aware, thus init_net */
	conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr,
			       &rds_ib_transport, GFP_KERNEL);
	if (IS_ERR(conn)) {
		rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
		conn = NULL;
		goto out;
	}

	/*
	 * The connection request may occur while a previous connection
	 * still exists, e.g. in case of failover. But as connections may
	 * be initiated simultaneously by both hosts, we have a random
	 * backoff mechanism - see the comment above rds_queue_reconnect().
	 */
	mutex_lock(&conn->c_cm_lock);
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
		if (rds_conn_state(conn) == RDS_CONN_UP) {
			rdsdebug("incoming connect while connecting\n");
			rds_conn_drop(conn);
			rds_ib_stats_inc(s_ib_listen_closed_stale);
		} else if (rds_conn_state(conn) == RDS_CONN_CONNECTING) {
			/* Wait and see - our connect may still be succeeding */
			rds_ib_stats_inc(s_ib_connect_raced);
		}
		goto out;
	}

	ic = conn->c_transport_data;

	rds_ib_set_protocol(conn, version);
	rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));

	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp->dp_ack_seq)
		rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);

	BUG_ON(cm_id->context);
	BUG_ON(ic->i_cm_id);

	ic->i_cm_id = cm_id;
	cm_id->context = conn;

	/* We are halfway through setting up the ib_connection; if we
	 * fail now, we have to take the long route out of this mess. */
	destroy = 0;

	err = rds_ib_setup_qp(conn);
	if (err) {
		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
		goto out;
	}

	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version,
		event->param.conn.responder_resources,
		event->param.conn.initiator_depth);

	/* rdma_accept() calls rdma_reject() internally if it fails */
	if (rdma_accept(cm_id, &conn_param))
		rds_ib_conn_error(conn, "rdma_accept failed\n");

out:
	if (conn)
		mutex_unlock(&conn->c_cm_lock);
	if (err)
		rdma_reject(cm_id, NULL, 0);
	return destroy;
}

int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id)
{
	struct rds_connection *conn = cm_id->context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rdma_conn_param conn_param;
	struct rds_ib_connect_private dp;
	int ret;

	/* If the peer doesn't do protocol negotiation, we must
	 * default to RDSv3.0 */
	rds_ib_set_protocol(conn, RDS_PROTOCOL_3_0);
	ic->i_flowctl = rds_ib_sysctl_flow_control;	/* advertise flow control */

	ret = rds_ib_setup_qp(conn);
	if (ret) {
		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", ret);
		goto out;
	}

	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION,
		UINT_MAX, UINT_MAX);
	ret = rdma_connect(cm_id, &conn_param);
	if (ret)
		rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);

out:
	/* Beware - returning non-zero tells the rdma_cm to destroy
	 * the cm_id. We should certainly not do it as long as we still
	 * "own" the cm_id. */
	if (ret) {
		if (ic->i_cm_id == cm_id)
			ret = 0;
	}
	ic->i_active_side = true;
	return ret;
}

int rds_ib_conn_path_connect(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct sockaddr_in src, dest;
	int ret;

	/* XXX I wonder what effect the port space has */
	/* delegate cm event handler to rdma_transport */
	ic->i_cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler, conn,
				     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ic->i_cm_id)) {
		ret = PTR_ERR(ic->i_cm_id);
		ic->i_cm_id = NULL;
		rdsdebug("rdma_create_id() failed: %d\n", ret);
		goto out;
	}

	rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn);

	src.sin_family = AF_INET;
	src.sin_addr.s_addr = (__force u32)conn->c_laddr;
	src.sin_port = (__force u16)htons(0);

	dest.sin_family = AF_INET;
	dest.sin_addr.s_addr = (__force u32)conn->c_faddr;
	dest.sin_port = (__force u16)htons(RDS_PORT);

	ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src,
				(struct sockaddr *)&dest,
				RDS_RDMA_RESOLVE_TIMEOUT_MS);
	if (ret) {
		rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id,
			 ret);
		rdma_destroy_id(ic->i_cm_id);
		ic->i_cm_id = NULL;
	}

out:
	return ret;
}

/*
 * This is so careful about only cleaning up resources that were built up
 * so that it can be called at any point during startup.  In fact it
 * can be called multiple times for a given connection.
 */
void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_ib_connection *ic = conn->c_transport_data;
	int err = 0;

	rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id,
		 ic->i_pd, ic->i_send_cq, ic->i_recv_cq,
		 ic->i_cm_id ? ic->i_cm_id->qp : NULL);

	if (ic->i_cm_id) {
		struct ib_device *dev = ic->i_cm_id->device;

		rdsdebug("disconnecting cm %p\n", ic->i_cm_id);
		err = rdma_disconnect(ic->i_cm_id);
		if (err) {
			/* Actually this may happen quite frequently, when
			 * an outgoing connect raced with an incoming connect.
			 */
			rdsdebug("failed to disconnect, cm: %p err %d\n",
				ic->i_cm_id, err);
		}

		/*
		 * We want to wait for tx and rx completion to finish
		 * before we tear down the connection, but we have to be
		 * careful not to get stuck waiting on a send ring that
		 * only has unsignaled sends in it.  We've shut down new
		 * sends before getting here, so by waiting for signaled
		 * sends to complete we're ensured that there will be no
		 * more tx processing.
		 */
		wait_event(rds_ib_ring_empty_wait,
			   rds_ib_ring_empty(&ic->i_recv_ring) &&
			   (atomic_read(&ic->i_signaled_sends) == 0) &&
			   (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR) &&
			   (atomic_read(&ic->i_fastunreg_wrs) == RDS_IB_DEFAULT_FR_INV_WR));
		tasklet_kill(&ic->i_send_tasklet);
		tasklet_kill(&ic->i_recv_tasklet);

		atomic_set(&ic->i_cq_quiesce, 1);

		/* first destroy the ib state that generates callbacks */
		if (ic->i_cm_id->qp)
			rdma_destroy_qp(ic->i_cm_id);
		if (ic->i_send_cq) {
			if (ic->rds_ibdev)
				ibdev_put_vector(ic->rds_ibdev, ic->i_scq_vector);
			ib_destroy_cq(ic->i_send_cq);
		}

		if (ic->i_recv_cq) {
			if (ic->rds_ibdev)
				ibdev_put_vector(ic->rds_ibdev, ic->i_rcq_vector);
			ib_destroy_cq(ic->i_recv_cq);
		}

		/* then free the resources that ib callbacks use */
		if (ic->i_send_hdrs)
			ib_dma_free_coherent(dev,
					   ic->i_send_ring.w_nr *
						sizeof(struct rds_header),
					   ic->i_send_hdrs,
					   ic->i_send_hdrs_dma);

		if (ic->i_recv_hdrs)
			ib_dma_free_coherent(dev,
					   ic->i_recv_ring.w_nr *
						sizeof(struct rds_header),
					   ic->i_recv_hdrs,
					   ic->i_recv_hdrs_dma);

		if (ic->i_ack)
			ib_dma_free_coherent(dev, sizeof(struct rds_header),
					     ic->i_ack, ic->i_ack_dma);

		if (ic->i_sends)
			rds_ib_send_clear_ring(ic);
		if (ic->i_recvs)
			rds_ib_recv_clear_ring(ic);

		rdma_destroy_id(ic->i_cm_id);

		/*
		 * Move connection back to the nodev list.
		 */
		if (ic->rds_ibdev)
			rds_ib_remove_conn(ic->rds_ibdev, conn);

		ic->i_cm_id = NULL;
		ic->i_pd = NULL;
		ic->i_send_cq = NULL;
		ic->i_recv_cq = NULL;
		ic->i_send_hdrs = NULL;
		ic->i_recv_hdrs = NULL;
		ic->i_ack = NULL;
	}
	BUG_ON(ic->rds_ibdev);

	/* Clear pending transmit */
	if (ic->i_data_op) {
		struct rds_message *rm;

		rm = container_of(ic->i_data_op, struct rds_message, data);
		rds_message_put(rm);
		ic->i_data_op = NULL;
	}

	/* Clear the ACK state */
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_set(&ic->i_ack_next, 0);
#else
	ic->i_ack_next = 0;
#endif
	ic->i_ack_recv = 0;

	/* Clear flow control state */
	ic->i_flowctl = 0;
	atomic_set(&ic->i_credits, 0);

	rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
	rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

	if (ic->i_ibinc) {
		rds_inc_put(&ic->i_ibinc->ii_inc);
		ic->i_ibinc = NULL;
	}

	vfree(ic->i_sends);
	ic->i_sends = NULL;
	vfree(ic->i_recvs);
	ic->i_recvs = NULL;
	ic->i_active_side = false;
}

int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
	struct rds_ib_connection *ic;
	unsigned long flags;
	int ret;

	/* XXX too lazy? */
	ic = kzalloc(sizeof(struct rds_ib_connection), gfp);
	if (!ic)
		return -ENOMEM;

	ret = rds_ib_recv_alloc_caches(ic);
	if (ret) {
		kfree(ic);
		return ret;
	}

	INIT_LIST_HEAD(&ic->ib_node);
	tasklet_init(&ic->i_send_tasklet, rds_ib_tasklet_fn_send,
		     (unsigned long)ic);
	tasklet_init(&ic->i_recv_tasklet, rds_ib_tasklet_fn_recv,
		     (unsigned long)ic);
	mutex_init(&ic->i_recv_mutex);
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&ic->i_ack_lock);
#endif
	atomic_set(&ic->i_signaled_sends, 0);

	/*
	 * rds_ib_conn_shutdown() waits for these to be emptied so they
	 * must be initialized before it can be called.
	 */
	rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
	rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

	ic->conn = conn;
	conn->c_transport_data = ic;

	spin_lock_irqsave(&ib_nodev_conns_lock, flags);
	list_add_tail(&ic->ib_node, &ib_nodev_conns);
	spin_unlock_irqrestore(&ib_nodev_conns_lock, flags);

	rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data);
	return 0;
}

/*
 * Free a connection. Connection must be shut down and not set for reconnect.
 */
void rds_ib_conn_free(void *arg)
{
	struct rds_ib_connection *ic = arg;
	spinlock_t	*lock_ptr;

	rdsdebug("ic %p\n", ic);

	/*
	 * Conn is either on a dev's list or on the nodev list.
	 * A race with shutdown() or connect() would cause problems
	 * (since rds_ibdev would change) but that should never happen.
	 */
	lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock;

	spin_lock_irq(lock_ptr);
	list_del(&ic->ib_node);
	spin_unlock_irq(lock_ptr);

	rds_ib_recv_free_caches(ic);

	kfree(ic);
}

/*
 * An error occurred on the connection
 */
void
__rds_ib_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	rds_conn_drop(conn);

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);
}
1029