xref: /openbmc/linux/drivers/infiniband/sw/siw/siw_cm.c (revision c4f7ac64)
1 // SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
2 
3 /* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
4 /*          Fredy Neeser */
5 /*          Greg Joyce <greg@opengridcomputing.com> */
6 /* Copyright (c) 2008-2019, IBM Corporation */
7 /* Copyright (c) 2017, Open Grid Computing, Inc. */
8 
9 #include <linux/errno.h>
10 #include <linux/types.h>
11 #include <linux/net.h>
12 #include <linux/inetdevice.h>
13 #include <net/addrconf.h>
14 #include <linux/workqueue.h>
15 #include <net/sock.h>
16 #include <net/tcp.h>
17 #include <linux/inet.h>
18 #include <linux/tcp.h>
19 
20 #include <rdma/iw_cm.h>
21 #include <rdma/ib_verbs.h>
22 #include <rdma/ib_user_verbs.h>
23 
24 #include "siw.h"
25 #include "siw_cm.h"
26 
27 /*
28  * Set to any combination of
29  * MPA_V2_RDMA_NO_RTR, MPA_V2_RDMA_READ_RTR, MPA_V2_RDMA_WRITE_RTR
30  */
31 static __be16 rtr_type = MPA_V2_RDMA_READ_RTR | MPA_V2_RDMA_WRITE_RTR;
32 static const bool relaxed_ird_negotiation = true;
33 
34 static void siw_cm_llp_state_change(struct sock *s);
35 static void siw_cm_llp_data_ready(struct sock *s);
36 static void siw_cm_llp_write_space(struct sock *s);
37 static void siw_cm_llp_error_report(struct sock *s);
38 static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason,
39 			 int status);
40 
41 static void siw_sk_assign_cm_upcalls(struct sock *sk)
42 {
43 	write_lock_bh(&sk->sk_callback_lock);
44 	sk->sk_state_change = siw_cm_llp_state_change;
45 	sk->sk_data_ready = siw_cm_llp_data_ready;
46 	sk->sk_write_space = siw_cm_llp_write_space;
47 	sk->sk_error_report = siw_cm_llp_error_report;
48 	write_unlock_bh(&sk->sk_callback_lock);
49 }
50 
51 static void siw_sk_save_upcalls(struct sock *sk)
52 {
53 	struct siw_cep *cep = sk_to_cep(sk);
54 
55 	write_lock_bh(&sk->sk_callback_lock);
56 	cep->sk_state_change = sk->sk_state_change;
57 	cep->sk_data_ready = sk->sk_data_ready;
58 	cep->sk_write_space = sk->sk_write_space;
59 	cep->sk_error_report = sk->sk_error_report;
60 	write_unlock_bh(&sk->sk_callback_lock);
61 }
62 
63 static void siw_sk_restore_upcalls(struct sock *sk, struct siw_cep *cep)
64 {
65 	sk->sk_state_change = cep->sk_state_change;
66 	sk->sk_data_ready = cep->sk_data_ready;
67 	sk->sk_write_space = cep->sk_write_space;
68 	sk->sk_error_report = cep->sk_error_report;
69 	sk->sk_user_data = NULL;
70 }
71 
72 static void siw_qp_socket_assoc(struct siw_cep *cep, struct siw_qp *qp)
73 {
74 	struct socket *s = cep->sock;
75 	struct sock *sk = s->sk;
76 
77 	write_lock_bh(&sk->sk_callback_lock);
78 
79 	qp->attrs.sk = s;
80 	sk->sk_data_ready = siw_qp_llp_data_ready;
81 	sk->sk_write_space = siw_qp_llp_write_space;
82 
83 	write_unlock_bh(&sk->sk_callback_lock);
84 }
85 
86 static void siw_socket_disassoc(struct socket *s)
87 {
88 	struct sock *sk = s->sk;
89 	struct siw_cep *cep;
90 
91 	if (sk) {
92 		write_lock_bh(&sk->sk_callback_lock);
93 		cep = sk_to_cep(sk);
94 		if (cep) {
95 			siw_sk_restore_upcalls(sk, cep);
96 			siw_cep_put(cep);
97 		} else {
98 			pr_warn("siw: cannot restore sk callbacks: no ep\n");
99 		}
100 		write_unlock_bh(&sk->sk_callback_lock);
101 	} else {
102 		pr_warn("siw: cannot restore sk callbacks: no sk\n");
103 	}
104 }
105 
106 static void siw_rtr_data_ready(struct sock *sk)
107 {
108 	struct siw_cep *cep;
109 	struct siw_qp *qp = NULL;
110 	read_descriptor_t rd_desc;
111 
112 	read_lock(&sk->sk_callback_lock);
113 
114 	cep = sk_to_cep(sk);
115 	if (!cep) {
116 		WARN(1, "No connection endpoint\n");
117 		goto out;
118 	}
119 	qp = sk_to_qp(sk);
120 
121 	memset(&rd_desc, 0, sizeof(rd_desc));
122 	rd_desc.arg.data = qp;
123 	rd_desc.count = 1;
124 
125 	tcp_read_sock(sk, &rd_desc, siw_tcp_rx_data);
126 	/*
127 	 * Check if first frame was successfully processed.
128 	 * Signal connection full establishment if yes.
129 	 * Failed data processing would have already scheduled
130 	 * connection drop.
131 	 */
132 	if (!qp->rx_stream.rx_suspend)
133 		siw_cm_upcall(cep, IW_CM_EVENT_ESTABLISHED, 0);
134 out:
135 	read_unlock(&sk->sk_callback_lock);
136 	if (qp)
137 		siw_qp_socket_assoc(cep, qp);
138 }
139 
140 static void siw_sk_assign_rtr_upcalls(struct siw_cep *cep)
141 {
142 	struct sock *sk = cep->sock->sk;
143 
144 	write_lock_bh(&sk->sk_callback_lock);
145 	sk->sk_data_ready = siw_rtr_data_ready;
146 	sk->sk_write_space = siw_qp_llp_write_space;
147 	write_unlock_bh(&sk->sk_callback_lock);
148 }
149 
150 static void siw_cep_socket_assoc(struct siw_cep *cep, struct socket *s)
151 {
152 	cep->sock = s;
153 	siw_cep_get(cep);
154 	s->sk->sk_user_data = cep;
155 
156 	siw_sk_save_upcalls(s->sk);
157 	siw_sk_assign_cm_upcalls(s->sk);
158 }
159 
160 static struct siw_cep *siw_cep_alloc(struct siw_device *sdev)
161 {
162 	struct siw_cep *cep = kzalloc(sizeof(*cep), GFP_KERNEL);
163 	unsigned long flags;
164 
165 	if (!cep)
166 		return NULL;
167 
168 	INIT_LIST_HEAD(&cep->listenq);
169 	INIT_LIST_HEAD(&cep->devq);
170 	INIT_LIST_HEAD(&cep->work_freelist);
171 
172 	kref_init(&cep->ref);
173 	cep->state = SIW_EPSTATE_IDLE;
174 	init_waitqueue_head(&cep->waitq);
175 	spin_lock_init(&cep->lock);
176 	cep->sdev = sdev;
177 	cep->enhanced_rdma_conn_est = false;
178 
179 	spin_lock_irqsave(&sdev->lock, flags);
180 	list_add_tail(&cep->devq, &sdev->cep_list);
181 	spin_unlock_irqrestore(&sdev->lock, flags);
182 
183 	siw_dbg_cep(cep, "new endpoint\n");
184 	return cep;
185 }
186 
187 static void siw_cm_free_work(struct siw_cep *cep)
188 {
189 	struct list_head *w, *tmp;
190 	struct siw_cm_work *work;
191 
192 	list_for_each_safe(w, tmp, &cep->work_freelist) {
193 		work = list_entry(w, struct siw_cm_work, list);
194 		list_del(&work->list);
195 		kfree(work);
196 	}
197 }
198 
199 static void siw_cancel_mpatimer(struct siw_cep *cep)
200 {
201 	spin_lock_bh(&cep->lock);
202 	if (cep->mpa_timer) {
203 		if (cancel_delayed_work(&cep->mpa_timer->work)) {
204 			siw_cep_put(cep);
205 			kfree(cep->mpa_timer); /* not needed again */
206 		}
207 		cep->mpa_timer = NULL;
208 	}
209 	spin_unlock_bh(&cep->lock);
210 }
211 
212 static void siw_put_work(struct siw_cm_work *work)
213 {
214 	INIT_LIST_HEAD(&work->list);
215 	spin_lock_bh(&work->cep->lock);
216 	list_add(&work->list, &work->cep->work_freelist);
217 	spin_unlock_bh(&work->cep->lock);
218 }
219 
220 static void siw_cep_set_inuse(struct siw_cep *cep)
221 {
222 	unsigned long flags;
223 retry:
224 	spin_lock_irqsave(&cep->lock, flags);
225 
226 	if (cep->in_use) {
227 		spin_unlock_irqrestore(&cep->lock, flags);
228 		wait_event_interruptible(cep->waitq, !cep->in_use);
229 		if (signal_pending(current))
230 			flush_signals(current);
231 		goto retry;
232 	} else {
233 		cep->in_use = 1;
234 		spin_unlock_irqrestore(&cep->lock, flags);
235 	}
236 }
237 
238 static void siw_cep_set_free(struct siw_cep *cep)
239 {
240 	unsigned long flags;
241 
242 	spin_lock_irqsave(&cep->lock, flags);
243 	cep->in_use = 0;
244 	spin_unlock_irqrestore(&cep->lock, flags);
245 
246 	wake_up(&cep->waitq);
247 }
248 
249 static void __siw_cep_dealloc(struct kref *ref)
250 {
251 	struct siw_cep *cep = container_of(ref, struct siw_cep, ref);
252 	struct siw_device *sdev = cep->sdev;
253 	unsigned long flags;
254 
255 	WARN_ON(cep->listen_cep);
256 
257 	/* kfree(NULL) is safe */
258 	kfree(cep->mpa.pdata);
259 	spin_lock_bh(&cep->lock);
260 	if (!list_empty(&cep->work_freelist))
261 		siw_cm_free_work(cep);
262 	spin_unlock_bh(&cep->lock);
263 
264 	spin_lock_irqsave(&sdev->lock, flags);
265 	list_del(&cep->devq);
266 	spin_unlock_irqrestore(&sdev->lock, flags);
267 
268 	siw_dbg_cep(cep, "free endpoint\n");
269 	kfree(cep);
270 }
271 
272 static struct siw_cm_work *siw_get_work(struct siw_cep *cep)
273 {
274 	struct siw_cm_work *work = NULL;
275 
276 	spin_lock_bh(&cep->lock);
277 	if (!list_empty(&cep->work_freelist)) {
278 		work = list_entry(cep->work_freelist.next, struct siw_cm_work,
279 				  list);
280 		list_del_init(&work->list);
281 	}
282 	spin_unlock_bh(&cep->lock);
283 	return work;
284 }
285 
286 static int siw_cm_alloc_work(struct siw_cep *cep, int num)
287 {
288 	struct siw_cm_work *work;
289 
290 	while (num--) {
291 		work = kmalloc(sizeof(*work), GFP_KERNEL);
292 		if (!work) {
293 			if (!(list_empty(&cep->work_freelist)))
294 				siw_cm_free_work(cep);
295 			return -ENOMEM;
296 		}
297 		work->cep = cep;
298 		INIT_LIST_HEAD(&work->list);
299 		list_add(&work->list, &cep->work_freelist);
300 	}
301 	return 0;
302 }
303 
304 /*
305  * siw_cm_upcall()
306  *
307  * Upcall to IWCM to inform about async connection events
308  */
309 static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason,
310 			 int status)
311 {
312 	struct iw_cm_event event;
313 	struct iw_cm_id *id;
314 
315 	memset(&event, 0, sizeof(event));
316 	event.status = status;
317 	event.event = reason;
318 
319 	if (reason == IW_CM_EVENT_CONNECT_REQUEST) {
320 		event.provider_data = cep;
321 		id = cep->listen_cep->cm_id;
322 	} else {
323 		id = cep->cm_id;
324 	}
325 	/* Signal IRD and ORD */
326 	if (reason == IW_CM_EVENT_ESTABLISHED ||
327 	    reason == IW_CM_EVENT_CONNECT_REPLY) {
328 		/* Signal negotiated IRD/ORD values we will use */
329 		event.ird = cep->ird;
330 		event.ord = cep->ord;
331 	} else if (reason == IW_CM_EVENT_CONNECT_REQUEST) {
332 		event.ird = cep->ord;
333 		event.ord = cep->ird;
334 	}
335 	/* Signal private data and address information */
336 	if (reason == IW_CM_EVENT_CONNECT_REQUEST ||
337 	    reason == IW_CM_EVENT_CONNECT_REPLY) {
338 		u16 pd_len = be16_to_cpu(cep->mpa.hdr.params.pd_len);
339 
340 		if (pd_len) {
341 			/*
342 			 * hand over MPA private data
343 			 */
344 			event.private_data_len = pd_len;
345 			event.private_data = cep->mpa.pdata;
346 
347 			/* Hide MPA V2 IRD/ORD control */
348 			if (cep->enhanced_rdma_conn_est) {
349 				event.private_data_len -=
350 					sizeof(struct mpa_v2_data);
351 				event.private_data +=
352 					sizeof(struct mpa_v2_data);
353 			}
354 		}
355 		getname_local(cep->sock, &event.local_addr);
356 		getname_peer(cep->sock, &event.remote_addr);
357 	}
358 	siw_dbg_cep(cep, "[QP %u]: reason=%d, status=%d\n",
359 		    cep->qp ? qp_id(cep->qp) : UINT_MAX, reason, status);
360 
361 	return id->event_handler(id, &event);
362 }
363 
364 /*
365  * siw_qp_cm_drop()
366  *
367  * Drops established LLP connection if present and not already
368  * scheduled for dropping. Called from user context, SQ workqueue
369  * or receive IRQ. Caller signals if socket can be immediately
370  * closed (basically, if not in IRQ).
371  */
372 void siw_qp_cm_drop(struct siw_qp *qp, int schedule)
373 {
374 	struct siw_cep *cep = qp->cep;
375 
376 	qp->rx_stream.rx_suspend = 1;
377 	qp->tx_ctx.tx_suspend = 1;
378 
379 	if (!qp->cep)
380 		return;
381 
382 	if (schedule) {
383 		siw_cm_queue_work(cep, SIW_CM_WORK_CLOSE_LLP);
384 	} else {
385 		siw_cep_set_inuse(cep);
386 
387 		if (cep->state == SIW_EPSTATE_CLOSED) {
388 			siw_dbg_cep(cep, "already closed\n");
389 			goto out;
390 		}
391 		siw_dbg_cep(cep, "immediate close, state %d\n", cep->state);
392 
393 		if (qp->term_info.valid)
394 			siw_send_terminate(qp);
395 
396 		if (cep->cm_id) {
397 			switch (cep->state) {
398 			case SIW_EPSTATE_AWAIT_MPAREP:
399 				siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
400 					      -EINVAL);
401 				break;
402 
403 			case SIW_EPSTATE_RDMA_MODE:
404 				siw_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
405 				break;
406 
407 			case SIW_EPSTATE_IDLE:
408 			case SIW_EPSTATE_LISTENING:
409 			case SIW_EPSTATE_CONNECTING:
410 			case SIW_EPSTATE_AWAIT_MPAREQ:
411 			case SIW_EPSTATE_RECVD_MPAREQ:
412 			case SIW_EPSTATE_CLOSED:
413 			default:
414 				break;
415 			}
416 			cep->cm_id->rem_ref(cep->cm_id);
417 			cep->cm_id = NULL;
418 			siw_cep_put(cep);
419 		}
420 		cep->state = SIW_EPSTATE_CLOSED;
421 
422 		if (cep->sock) {
423 			siw_socket_disassoc(cep->sock);
424 			/*
425 			 * Immediately close socket
426 			 */
427 			sock_release(cep->sock);
428 			cep->sock = NULL;
429 		}
430 		if (cep->qp) {
431 			cep->qp = NULL;
432 			siw_qp_put(qp);
433 		}
434 out:
435 		siw_cep_set_free(cep);
436 	}
437 }
438 
439 void siw_cep_put(struct siw_cep *cep)
440 {
441 	WARN_ON(kref_read(&cep->ref) < 1);
442 	kref_put(&cep->ref, __siw_cep_dealloc);
443 }
444 
445 void siw_cep_get(struct siw_cep *cep)
446 {
447 	kref_get(&cep->ref);
448 }
449 
450 /*
451  * Expects params->pd_len in host byte order
452  */
453 static int siw_send_mpareqrep(struct siw_cep *cep, const void *pdata, u8 pd_len)
454 {
455 	struct socket *s = cep->sock;
456 	struct mpa_rr *rr = &cep->mpa.hdr;
457 	struct kvec iov[3];
458 	struct msghdr msg;
459 	int rv;
460 	int iovec_num = 0;
461 	int mpa_len;
462 
463 	memset(&msg, 0, sizeof(msg));
464 
465 	iov[iovec_num].iov_base = rr;
466 	iov[iovec_num].iov_len = sizeof(*rr);
467 	mpa_len = sizeof(*rr);
468 
469 	if (cep->enhanced_rdma_conn_est) {
470 		iovec_num++;
471 		iov[iovec_num].iov_base = &cep->mpa.v2_ctrl;
472 		iov[iovec_num].iov_len = sizeof(cep->mpa.v2_ctrl);
473 		mpa_len += sizeof(cep->mpa.v2_ctrl);
474 	}
475 	if (pd_len) {
476 		iovec_num++;
477 		iov[iovec_num].iov_base = (char *)pdata;
478 		iov[iovec_num].iov_len = pd_len;
479 		mpa_len += pd_len;
480 	}
481 	if (cep->enhanced_rdma_conn_est)
482 		pd_len += sizeof(cep->mpa.v2_ctrl);
483 
484 	rr->params.pd_len = cpu_to_be16(pd_len);
485 
486 	rv = kernel_sendmsg(s, &msg, iov, iovec_num + 1, mpa_len);
487 
488 	return rv < 0 ? rv : 0;
489 }
490 
491 /*
492  * Receive MPA Request/Reply header.
493  *
494  * Returns 0 if complete MPA Request/Reply header including
495  * eventual private data was received. Returns -EAGAIN if
496  * header was partially received or negative error code otherwise.
497  *
498  * Context: May be called in process context only
499  */
500 static int siw_recv_mpa_rr(struct siw_cep *cep)
501 {
502 	struct mpa_rr *hdr = &cep->mpa.hdr;
503 	struct socket *s = cep->sock;
504 	u16 pd_len;
505 	int rcvd, to_rcv;
506 
507 	if (cep->mpa.bytes_rcvd < sizeof(struct mpa_rr)) {
508 		rcvd = ksock_recv(s, (char *)hdr + cep->mpa.bytes_rcvd,
509 				  sizeof(struct mpa_rr) - cep->mpa.bytes_rcvd,
510 				  0);
511 		if (rcvd <= 0)
512 			return -ECONNABORTED;
513 
514 		cep->mpa.bytes_rcvd += rcvd;
515 
516 		if (cep->mpa.bytes_rcvd < sizeof(struct mpa_rr))
517 			return -EAGAIN;
518 
519 		if (be16_to_cpu(hdr->params.pd_len) > MPA_MAX_PRIVDATA)
520 			return -EPROTO;
521 	}
522 	pd_len = be16_to_cpu(hdr->params.pd_len);
523 
524 	/*
525 	 * At least the MPA Request/Reply header (frame not including
526 	 * private data) has been received.
527 	 * Receive (or continue receiving) any private data.
528 	 */
529 	to_rcv = pd_len - (cep->mpa.bytes_rcvd - sizeof(struct mpa_rr));
530 
531 	if (!to_rcv) {
532 		/*
533 		 * We must have hdr->params.pd_len == 0 and thus received a
534 		 * complete MPA Request/Reply frame.
535 		 * Check against peer protocol violation.
536 		 */
537 		u32 word;
538 
539 		rcvd = ksock_recv(s, (char *)&word, sizeof(word), MSG_DONTWAIT);
540 		if (rcvd == -EAGAIN)
541 			return 0;
542 
543 		if (rcvd == 0) {
544 			siw_dbg_cep(cep, "peer EOF\n");
545 			return -EPIPE;
546 		}
547 		if (rcvd < 0) {
548 			siw_dbg_cep(cep, "error: %d\n", rcvd);
549 			return rcvd;
550 		}
551 		siw_dbg_cep(cep, "peer sent extra data: %d\n", rcvd);
552 
553 		return -EPROTO;
554 	}
555 
556 	/*
557 	 * At this point, we must have hdr->params.pd_len != 0.
558 	 * A private data buffer gets allocated if hdr->params.pd_len != 0.
559 	 */
560 	if (!cep->mpa.pdata) {
561 		cep->mpa.pdata = kmalloc(pd_len + 4, GFP_KERNEL);
562 		if (!cep->mpa.pdata)
563 			return -ENOMEM;
564 	}
565 	rcvd = ksock_recv(
566 		s, cep->mpa.pdata + cep->mpa.bytes_rcvd - sizeof(struct mpa_rr),
567 		to_rcv + 4, MSG_DONTWAIT);
568 
569 	if (rcvd < 0)
570 		return rcvd;
571 
572 	if (rcvd > to_rcv)
573 		return -EPROTO;
574 
575 	cep->mpa.bytes_rcvd += rcvd;
576 
577 	if (to_rcv == rcvd) {
578 		siw_dbg_cep(cep, "%d bytes private data received\n", pd_len);
579 		return 0;
580 	}
581 	return -EAGAIN;
582 }
583 
584 /*
585  * siw_proc_mpareq()
586  *
587  * Read MPA Request from socket and signal new connection to IWCM
588  * if success. Caller must hold lock on corresponding listening CEP.
589  */
590 static int siw_proc_mpareq(struct siw_cep *cep)
591 {
592 	struct mpa_rr *req;
593 	int version, rv;
594 	u16 pd_len;
595 
596 	rv = siw_recv_mpa_rr(cep);
597 	if (rv)
598 		return rv;
599 
600 	req = &cep->mpa.hdr;
601 
602 	version = __mpa_rr_revision(req->params.bits);
603 	pd_len = be16_to_cpu(req->params.pd_len);
604 
605 	if (version > MPA_REVISION_2)
606 		/* allow for 0, 1, and 2 only */
607 		return -EPROTO;
608 
609 	if (memcmp(req->key, MPA_KEY_REQ, 16))
610 		return -EPROTO;
611 
612 	/* Prepare for sending MPA reply */
613 	memcpy(req->key, MPA_KEY_REP, 16);
614 
615 	if (version == MPA_REVISION_2 &&
616 	    (req->params.bits & MPA_RR_FLAG_ENHANCED)) {
617 		/*
618 		 * MPA version 2 must signal IRD/ORD values and P2P mode
619 		 * in private data if header flag MPA_RR_FLAG_ENHANCED
620 		 * is set.
621 		 */
622 		if (pd_len < sizeof(struct mpa_v2_data))
623 			goto reject_conn;
624 
625 		cep->enhanced_rdma_conn_est = true;
626 	}
627 
628 	/* MPA Markers: currently not supported. Marker TX to be added. */
629 	if (req->params.bits & MPA_RR_FLAG_MARKERS)
630 		goto reject_conn;
631 
632 	if (req->params.bits & MPA_RR_FLAG_CRC) {
633 		/*
634 		 * RFC 5044, page 27: CRC MUST be used if peer requests it.
635 		 * siw specific: 'mpa_crc_strict' parameter to reject
636 		 * connection with CRC if local CRC off enforced by
637 		 * 'mpa_crc_strict' module parameter.
638 		 */
639 		if (!mpa_crc_required && mpa_crc_strict)
640 			goto reject_conn;
641 
642 		/* Enable CRC if requested by module parameter */
643 		if (mpa_crc_required)
644 			req->params.bits |= MPA_RR_FLAG_CRC;
645 	}
646 	if (cep->enhanced_rdma_conn_est) {
647 		struct mpa_v2_data *v2 = (struct mpa_v2_data *)cep->mpa.pdata;
648 
649 		/*
650 		 * Peer requested ORD becomes requested local IRD,
651 		 * peer requested IRD becomes requested local ORD.
652 		 * IRD and ORD get limited by global maximum values.
653 		 */
654 		cep->ord = ntohs(v2->ird) & MPA_IRD_ORD_MASK;
655 		cep->ord = min(cep->ord, SIW_MAX_ORD_QP);
656 		cep->ird = ntohs(v2->ord) & MPA_IRD_ORD_MASK;
657 		cep->ird = min(cep->ird, SIW_MAX_IRD_QP);
658 
659 		/* May get overwritten by locally negotiated values */
660 		cep->mpa.v2_ctrl.ird = htons(cep->ird);
661 		cep->mpa.v2_ctrl.ord = htons(cep->ord);
662 
663 		/*
664 		 * Support for peer sent zero length Write or Read to
665 		 * let local side enter RTS. Writes are preferred.
666 		 * Sends would require pre-posting a Receive and are
667 		 * not supported.
668 		 * Propose zero length Write if none of Read and Write
669 		 * is indicated.
670 		 */
671 		if (v2->ird & MPA_V2_PEER_TO_PEER) {
672 			cep->mpa.v2_ctrl.ird |= MPA_V2_PEER_TO_PEER;
673 
674 			if (v2->ord & MPA_V2_RDMA_WRITE_RTR)
675 				cep->mpa.v2_ctrl.ord |= MPA_V2_RDMA_WRITE_RTR;
676 			else if (v2->ord & MPA_V2_RDMA_READ_RTR)
677 				cep->mpa.v2_ctrl.ord |= MPA_V2_RDMA_READ_RTR;
678 			else
679 				cep->mpa.v2_ctrl.ord |= MPA_V2_RDMA_WRITE_RTR;
680 		}
681 	}
682 
683 	cep->state = SIW_EPSTATE_RECVD_MPAREQ;
684 
685 	/* Keep reference until IWCM accepts/rejects */
686 	siw_cep_get(cep);
687 	rv = siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REQUEST, 0);
688 	if (rv)
689 		siw_cep_put(cep);
690 
691 	return rv;
692 
693 reject_conn:
694 	siw_dbg_cep(cep, "reject: crc %d:%d:%d, m %d:%d\n",
695 		    req->params.bits & MPA_RR_FLAG_CRC ? 1 : 0,
696 		    mpa_crc_required, mpa_crc_strict,
697 		    req->params.bits & MPA_RR_FLAG_MARKERS ? 1 : 0, 0);
698 
699 	req->params.bits &= ~MPA_RR_FLAG_MARKERS;
700 	req->params.bits |= MPA_RR_FLAG_REJECT;
701 
702 	if (!mpa_crc_required && mpa_crc_strict)
703 		req->params.bits &= ~MPA_RR_FLAG_CRC;
704 
705 	if (pd_len)
706 		kfree(cep->mpa.pdata);
707 
708 	cep->mpa.pdata = NULL;
709 
710 	siw_send_mpareqrep(cep, NULL, 0);
711 
712 	return -EOPNOTSUPP;
713 }
714 
715 static int siw_proc_mpareply(struct siw_cep *cep)
716 {
717 	struct siw_qp_attrs qp_attrs;
718 	enum siw_qp_attr_mask qp_attr_mask;
719 	struct siw_qp *qp = cep->qp;
720 	struct mpa_rr *rep;
721 	int rv;
722 	u16 rep_ord;
723 	u16 rep_ird;
724 	bool ird_insufficient = false;
725 	enum mpa_v2_ctrl mpa_p2p_mode = MPA_V2_RDMA_NO_RTR;
726 
727 	rv = siw_recv_mpa_rr(cep);
728 	if (rv != -EAGAIN)
729 		siw_cancel_mpatimer(cep);
730 	if (rv)
731 		goto out_err;
732 
733 	rep = &cep->mpa.hdr;
734 
735 	if (__mpa_rr_revision(rep->params.bits) > MPA_REVISION_2) {
736 		/* allow for 0, 1,  and 2 only */
737 		rv = -EPROTO;
738 		goto out_err;
739 	}
740 	if (memcmp(rep->key, MPA_KEY_REP, 16)) {
741 		siw_init_terminate(qp, TERM_ERROR_LAYER_LLP, LLP_ETYPE_MPA,
742 				   LLP_ECODE_INVALID_REQ_RESP, 0);
743 		siw_send_terminate(qp);
744 		rv = -EPROTO;
745 		goto out_err;
746 	}
747 	if (rep->params.bits & MPA_RR_FLAG_REJECT) {
748 		siw_dbg_cep(cep, "got mpa reject\n");
749 		siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -ECONNRESET);
750 
751 		return -ECONNRESET;
752 	}
753 	if (try_gso && rep->params.bits & MPA_RR_FLAG_GSO_EXP) {
754 		siw_dbg_cep(cep, "peer allows GSO on TX\n");
755 		qp->tx_ctx.gso_seg_limit = 0;
756 	}
757 	if ((rep->params.bits & MPA_RR_FLAG_MARKERS) ||
758 	    (mpa_crc_required && !(rep->params.bits & MPA_RR_FLAG_CRC)) ||
759 	    (mpa_crc_strict && !mpa_crc_required &&
760 	     (rep->params.bits & MPA_RR_FLAG_CRC))) {
761 		siw_dbg_cep(cep, "reply unsupp: crc %d:%d:%d, m %d:%d\n",
762 			    rep->params.bits & MPA_RR_FLAG_CRC ? 1 : 0,
763 			    mpa_crc_required, mpa_crc_strict,
764 			    rep->params.bits & MPA_RR_FLAG_MARKERS ? 1 : 0, 0);
765 
766 		siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -ECONNREFUSED);
767 
768 		return -EINVAL;
769 	}
770 	if (cep->enhanced_rdma_conn_est) {
771 		struct mpa_v2_data *v2;
772 
773 		if (__mpa_rr_revision(rep->params.bits) < MPA_REVISION_2 ||
774 		    !(rep->params.bits & MPA_RR_FLAG_ENHANCED)) {
775 			/*
776 			 * Protocol failure: The responder MUST reply with
777 			 * MPA version 2 and MUST set MPA_RR_FLAG_ENHANCED.
778 			 */
779 			siw_dbg_cep(cep, "mpa reply error: vers %d, enhcd %d\n",
780 				    __mpa_rr_revision(rep->params.bits),
781 				    rep->params.bits & MPA_RR_FLAG_ENHANCED ?
782 					    1 :
783 					    0);
784 
785 			siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
786 				      -ECONNRESET);
787 			return -EINVAL;
788 		}
789 		v2 = (struct mpa_v2_data *)cep->mpa.pdata;
790 		rep_ird = ntohs(v2->ird) & MPA_IRD_ORD_MASK;
791 		rep_ord = ntohs(v2->ord) & MPA_IRD_ORD_MASK;
792 
793 		if (cep->ird < rep_ord &&
794 		    (relaxed_ird_negotiation == false ||
795 		     rep_ord > cep->sdev->attrs.max_ird)) {
796 			siw_dbg_cep(cep, "ird %d, rep_ord %d, max_ord %d\n",
797 				    cep->ird, rep_ord,
798 				    cep->sdev->attrs.max_ord);
799 			ird_insufficient = true;
800 		}
801 		if (cep->ord > rep_ird && relaxed_ird_negotiation == false) {
802 			siw_dbg_cep(cep, "ord %d, rep_ird %d\n", cep->ord,
803 				    rep_ird);
804 			ird_insufficient = true;
805 		}
806 		/*
807 		 * Always report negotiated peer values to user,
808 		 * even if IRD/ORD negotiation failed
809 		 */
810 		cep->ird = rep_ord;
811 		cep->ord = rep_ird;
812 
813 		if (ird_insufficient) {
814 			/*
815 			 * If the initiator IRD is insuffient for the
816 			 * responder ORD, send a TERM.
817 			 */
818 			siw_init_terminate(qp, TERM_ERROR_LAYER_LLP,
819 					   LLP_ETYPE_MPA,
820 					   LLP_ECODE_INSUFFICIENT_IRD, 0);
821 			siw_send_terminate(qp);
822 			rv = -ENOMEM;
823 			goto out_err;
824 		}
825 		if (cep->mpa.v2_ctrl_req.ird & MPA_V2_PEER_TO_PEER)
826 			mpa_p2p_mode =
827 				cep->mpa.v2_ctrl_req.ord &
828 				(MPA_V2_RDMA_WRITE_RTR | MPA_V2_RDMA_READ_RTR);
829 
830 		/*
831 		 * Check if we requested P2P mode, and if peer agrees
832 		 */
833 		if (mpa_p2p_mode != MPA_V2_RDMA_NO_RTR) {
834 			if ((mpa_p2p_mode & v2->ord) == 0) {
835 				/*
836 				 * We requested RTR mode(s), but the peer
837 				 * did not pick any mode we support.
838 				 */
839 				siw_dbg_cep(cep,
840 					    "rtr mode:  req %2x, got %2x\n",
841 					    mpa_p2p_mode,
842 					    v2->ord & (MPA_V2_RDMA_WRITE_RTR |
843 						       MPA_V2_RDMA_READ_RTR));
844 
845 				siw_init_terminate(qp, TERM_ERROR_LAYER_LLP,
846 						   LLP_ETYPE_MPA,
847 						   LLP_ECODE_NO_MATCHING_RTR,
848 						   0);
849 				siw_send_terminate(qp);
850 				rv = -EPROTO;
851 				goto out_err;
852 			}
853 			mpa_p2p_mode = v2->ord & (MPA_V2_RDMA_WRITE_RTR |
854 						  MPA_V2_RDMA_READ_RTR);
855 		}
856 	}
857 	memset(&qp_attrs, 0, sizeof(qp_attrs));
858 
859 	if (rep->params.bits & MPA_RR_FLAG_CRC)
860 		qp_attrs.flags = SIW_MPA_CRC;
861 
862 	qp_attrs.irq_size = cep->ird;
863 	qp_attrs.orq_size = cep->ord;
864 	qp_attrs.sk = cep->sock;
865 	qp_attrs.state = SIW_QP_STATE_RTS;
866 
867 	qp_attr_mask = SIW_QP_ATTR_STATE | SIW_QP_ATTR_LLP_HANDLE |
868 		       SIW_QP_ATTR_ORD | SIW_QP_ATTR_IRD | SIW_QP_ATTR_MPA;
869 
870 	/* Move socket RX/TX under QP control */
871 	down_write(&qp->state_lock);
872 	if (qp->attrs.state > SIW_QP_STATE_RTR) {
873 		rv = -EINVAL;
874 		up_write(&qp->state_lock);
875 		goto out_err;
876 	}
877 	rv = siw_qp_modify(qp, &qp_attrs, qp_attr_mask);
878 
879 	siw_qp_socket_assoc(cep, qp);
880 
881 	up_write(&qp->state_lock);
882 
883 	/* Send extra RDMA frame to trigger peer RTS if negotiated */
884 	if (mpa_p2p_mode != MPA_V2_RDMA_NO_RTR) {
885 		rv = siw_qp_mpa_rts(qp, mpa_p2p_mode);
886 		if (rv)
887 			goto out_err;
888 	}
889 	if (!rv) {
890 		rv = siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, 0);
891 		if (!rv)
892 			cep->state = SIW_EPSTATE_RDMA_MODE;
893 
894 		return 0;
895 	}
896 
897 out_err:
898 	siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL);
899 
900 	return rv;
901 }
902 
903 /*
904  * siw_accept_newconn - accept an incoming pending connection
905  *
906  */
907 static void siw_accept_newconn(struct siw_cep *cep)
908 {
909 	struct socket *s = cep->sock;
910 	struct socket *new_s = NULL;
911 	struct siw_cep *new_cep = NULL;
912 	int rv = 0; /* debug only. should disappear */
913 
914 	if (cep->state != SIW_EPSTATE_LISTENING)
915 		goto error;
916 
917 	new_cep = siw_cep_alloc(cep->sdev);
918 	if (!new_cep)
919 		goto error;
920 
921 	/*
922 	 * 4: Allocate a sufficient number of work elements
923 	 * to allow concurrent handling of local + peer close
924 	 * events, MPA header processing + MPA timeout.
925 	 */
926 	if (siw_cm_alloc_work(new_cep, 4) != 0)
927 		goto error;
928 
929 	/*
930 	 * Copy saved socket callbacks from listening CEP
931 	 * and assign new socket with new CEP
932 	 */
933 	new_cep->sk_state_change = cep->sk_state_change;
934 	new_cep->sk_data_ready = cep->sk_data_ready;
935 	new_cep->sk_write_space = cep->sk_write_space;
936 	new_cep->sk_error_report = cep->sk_error_report;
937 
938 	rv = kernel_accept(s, &new_s, O_NONBLOCK);
939 	if (rv != 0) {
940 		/*
941 		 * Connection already aborted by peer..?
942 		 */
943 		siw_dbg_cep(cep, "kernel_accept() error: %d\n", rv);
944 		goto error;
945 	}
946 	new_cep->sock = new_s;
947 	siw_cep_get(new_cep);
948 	new_s->sk->sk_user_data = new_cep;
949 
950 	if (siw_tcp_nagle == false)
951 		tcp_sock_set_nodelay(new_s->sk);
952 	new_cep->state = SIW_EPSTATE_AWAIT_MPAREQ;
953 
954 	rv = siw_cm_queue_work(new_cep, SIW_CM_WORK_MPATIMEOUT);
955 	if (rv)
956 		goto error;
957 	/*
958 	 * See siw_proc_mpareq() etc. for the use of new_cep->listen_cep.
959 	 */
960 	new_cep->listen_cep = cep;
961 	siw_cep_get(cep);
962 
963 	if (atomic_read(&new_s->sk->sk_rmem_alloc)) {
964 		/*
965 		 * MPA REQ already queued
966 		 */
967 		siw_dbg_cep(cep, "immediate mpa request\n");
968 
969 		siw_cep_set_inuse(new_cep);
970 		rv = siw_proc_mpareq(new_cep);
971 		siw_cep_set_free(new_cep);
972 
973 		if (rv != -EAGAIN) {
974 			siw_cep_put(cep);
975 			new_cep->listen_cep = NULL;
976 			if (rv)
977 				goto error;
978 		}
979 	}
980 	return;
981 
982 error:
983 	if (new_cep)
984 		siw_cep_put(new_cep);
985 
986 	if (new_s) {
987 		siw_socket_disassoc(new_s);
988 		sock_release(new_s);
989 		new_cep->sock = NULL;
990 	}
991 	siw_dbg_cep(cep, "error %d\n", rv);
992 }
993 
994 static void siw_cm_work_handler(struct work_struct *w)
995 {
996 	struct siw_cm_work *work;
997 	struct siw_cep *cep;
998 	int release_cep = 0, rv = 0;
999 
1000 	work = container_of(w, struct siw_cm_work, work.work);
1001 	cep = work->cep;
1002 
1003 	siw_dbg_cep(cep, "[QP %u]: work type: %d, state %d\n",
1004 		    cep->qp ? qp_id(cep->qp) : UINT_MAX,
1005 		    work->type, cep->state);
1006 
1007 	siw_cep_set_inuse(cep);
1008 
1009 	switch (work->type) {
1010 	case SIW_CM_WORK_ACCEPT:
1011 		siw_accept_newconn(cep);
1012 		break;
1013 
1014 	case SIW_CM_WORK_READ_MPAHDR:
1015 		if (cep->state == SIW_EPSTATE_AWAIT_MPAREQ) {
1016 			if (cep->listen_cep) {
1017 				siw_cep_set_inuse(cep->listen_cep);
1018 
1019 				if (cep->listen_cep->state ==
1020 				    SIW_EPSTATE_LISTENING)
1021 					rv = siw_proc_mpareq(cep);
1022 				else
1023 					rv = -EFAULT;
1024 
1025 				siw_cep_set_free(cep->listen_cep);
1026 
1027 				if (rv != -EAGAIN) {
1028 					siw_cep_put(cep->listen_cep);
1029 					cep->listen_cep = NULL;
1030 					if (rv)
1031 						siw_cep_put(cep);
1032 				}
1033 			}
1034 		} else if (cep->state == SIW_EPSTATE_AWAIT_MPAREP) {
1035 			rv = siw_proc_mpareply(cep);
1036 		} else {
1037 			/*
1038 			 * CEP already moved out of MPA handshake.
1039 			 * any connection management already done.
1040 			 * silently ignore the mpa packet.
1041 			 */
1042 			if (cep->state == SIW_EPSTATE_RDMA_MODE) {
1043 				cep->sock->sk->sk_data_ready(cep->sock->sk);
1044 				siw_dbg_cep(cep, "already in RDMA mode");
1045 			} else {
1046 				siw_dbg_cep(cep, "out of state: %d\n",
1047 					    cep->state);
1048 			}
1049 		}
1050 		if (rv && rv != -EAGAIN)
1051 			release_cep = 1;
1052 		break;
1053 
1054 	case SIW_CM_WORK_CLOSE_LLP:
1055 		/*
1056 		 * QP scheduled LLP close
1057 		 */
1058 		if (cep->qp && cep->qp->term_info.valid)
1059 			siw_send_terminate(cep->qp);
1060 
1061 		if (cep->cm_id)
1062 			siw_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
1063 
1064 		release_cep = 1;
1065 		break;
1066 
1067 	case SIW_CM_WORK_PEER_CLOSE:
1068 		if (cep->cm_id) {
1069 			if (cep->state == SIW_EPSTATE_AWAIT_MPAREP) {
1070 				/*
1071 				 * MPA reply not received, but connection drop
1072 				 */
1073 				siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
1074 					      -ECONNRESET);
1075 			} else if (cep->state == SIW_EPSTATE_RDMA_MODE) {
1076 				/*
1077 				 * NOTE: IW_CM_EVENT_DISCONNECT is given just
1078 				 *       to transition IWCM into CLOSING.
1079 				 */
1080 				siw_cm_upcall(cep, IW_CM_EVENT_DISCONNECT, 0);
1081 				siw_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
1082 			}
1083 			/*
1084 			 * for other states there is no connection
1085 			 * known to the IWCM.
1086 			 */
1087 		} else {
1088 			if (cep->state == SIW_EPSTATE_RECVD_MPAREQ) {
1089 				/*
1090 				 * Wait for the ulp/CM to call accept/reject
1091 				 */
1092 				siw_dbg_cep(cep,
1093 					    "mpa req recvd, wait for ULP\n");
1094 			} else if (cep->state == SIW_EPSTATE_AWAIT_MPAREQ) {
1095 				/*
1096 				 * Socket close before MPA request received.
1097 				 */
1098 				siw_dbg_cep(cep, "no mpareq: drop listener\n");
1099 				siw_cep_put(cep->listen_cep);
1100 				cep->listen_cep = NULL;
1101 			}
1102 		}
1103 		release_cep = 1;
1104 		break;
1105 
1106 	case SIW_CM_WORK_MPATIMEOUT:
1107 		cep->mpa_timer = NULL;
1108 
1109 		if (cep->state == SIW_EPSTATE_AWAIT_MPAREP) {
1110 			/*
1111 			 * MPA request timed out:
1112 			 * Hide any partially received private data and signal
1113 			 * timeout
1114 			 */
1115 			cep->mpa.hdr.params.pd_len = 0;
1116 
1117 			if (cep->cm_id)
1118 				siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
1119 					      -ETIMEDOUT);
1120 			release_cep = 1;
1121 
1122 		} else if (cep->state == SIW_EPSTATE_AWAIT_MPAREQ) {
1123 			/*
1124 			 * No MPA request received after peer TCP stream setup.
1125 			 */
1126 			if (cep->listen_cep) {
1127 				siw_cep_put(cep->listen_cep);
1128 				cep->listen_cep = NULL;
1129 			}
1130 			release_cep = 1;
1131 		}
1132 		break;
1133 
1134 	default:
1135 		WARN(1, "Undefined CM work type: %d\n", work->type);
1136 	}
1137 	if (release_cep) {
1138 		siw_dbg_cep(cep,
1139 			    "release: timer=%s, QP[%u]\n",
1140 			    cep->mpa_timer ? "y" : "n",
1141 			    cep->qp ? qp_id(cep->qp) : UINT_MAX);
1142 
1143 		siw_cancel_mpatimer(cep);
1144 
1145 		cep->state = SIW_EPSTATE_CLOSED;
1146 
1147 		if (cep->qp) {
1148 			struct siw_qp *qp = cep->qp;
1149 			/*
1150 			 * Serialize a potential race with application
1151 			 * closing the QP and calling siw_qp_cm_drop()
1152 			 */
1153 			siw_qp_get(qp);
1154 			siw_cep_set_free(cep);
1155 
1156 			siw_qp_llp_close(qp);
1157 			siw_qp_put(qp);
1158 
1159 			siw_cep_set_inuse(cep);
1160 			cep->qp = NULL;
1161 			siw_qp_put(qp);
1162 		}
1163 		if (cep->sock) {
1164 			siw_socket_disassoc(cep->sock);
1165 			sock_release(cep->sock);
1166 			cep->sock = NULL;
1167 		}
1168 		if (cep->cm_id) {
1169 			cep->cm_id->rem_ref(cep->cm_id);
1170 			cep->cm_id = NULL;
1171 			siw_cep_put(cep);
1172 		}
1173 	}
1174 	siw_cep_set_free(cep);
1175 	siw_put_work(work);
1176 	siw_cep_put(cep);
1177 }
1178 
1179 static struct workqueue_struct *siw_cm_wq;
1180 
1181 int siw_cm_queue_work(struct siw_cep *cep, enum siw_work_type type)
1182 {
1183 	struct siw_cm_work *work = siw_get_work(cep);
1184 	unsigned long delay = 0;
1185 
1186 	if (!work) {
1187 		siw_dbg_cep(cep, "failed with no work available\n");
1188 		return -ENOMEM;
1189 	}
1190 	work->type = type;
1191 	work->cep = cep;
1192 
1193 	siw_cep_get(cep);
1194 
1195 	INIT_DELAYED_WORK(&work->work, siw_cm_work_handler);
1196 
1197 	if (type == SIW_CM_WORK_MPATIMEOUT) {
1198 		cep->mpa_timer = work;
1199 
1200 		if (cep->state == SIW_EPSTATE_AWAIT_MPAREP)
1201 			delay = MPAREQ_TIMEOUT;
1202 		else
1203 			delay = MPAREP_TIMEOUT;
1204 	}
1205 	siw_dbg_cep(cep, "[QP %u]: work type: %d, timeout %lu\n",
1206 		    cep->qp ? qp_id(cep->qp) : -1, type, delay);
1207 
1208 	queue_delayed_work(siw_cm_wq, &work->work, delay);
1209 
1210 	return 0;
1211 }
1212 
1213 static void siw_cm_llp_data_ready(struct sock *sk)
1214 {
1215 	struct siw_cep *cep;
1216 
1217 	read_lock(&sk->sk_callback_lock);
1218 
1219 	cep = sk_to_cep(sk);
1220 	if (!cep)
1221 		goto out;
1222 
1223 	siw_dbg_cep(cep, "state: %d\n", cep->state);
1224 
1225 	switch (cep->state) {
1226 	case SIW_EPSTATE_RDMA_MODE:
1227 	case SIW_EPSTATE_LISTENING:
1228 		break;
1229 
1230 	case SIW_EPSTATE_AWAIT_MPAREQ:
1231 	case SIW_EPSTATE_AWAIT_MPAREP:
1232 		siw_cm_queue_work(cep, SIW_CM_WORK_READ_MPAHDR);
1233 		break;
1234 
1235 	default:
1236 		siw_dbg_cep(cep, "unexpected data, state %d\n", cep->state);
1237 		break;
1238 	}
1239 out:
1240 	read_unlock(&sk->sk_callback_lock);
1241 }
1242 
1243 static void siw_cm_llp_write_space(struct sock *sk)
1244 {
1245 	struct siw_cep *cep = sk_to_cep(sk);
1246 
1247 	if (cep)
1248 		siw_dbg_cep(cep, "state: %d\n", cep->state);
1249 }
1250 
1251 static void siw_cm_llp_error_report(struct sock *sk)
1252 {
1253 	struct siw_cep *cep = sk_to_cep(sk);
1254 
1255 	if (cep) {
1256 		siw_dbg_cep(cep, "error %d, socket state: %d, cep state: %d\n",
1257 			    sk->sk_err, sk->sk_state, cep->state);
1258 		cep->sk_error_report(sk);
1259 	}
1260 }
1261 
1262 static void siw_cm_llp_state_change(struct sock *sk)
1263 {
1264 	struct siw_cep *cep;
1265 	void (*orig_state_change)(struct sock *s);
1266 
1267 	read_lock(&sk->sk_callback_lock);
1268 
1269 	cep = sk_to_cep(sk);
1270 	if (!cep) {
1271 		/* endpoint already disassociated */
1272 		read_unlock(&sk->sk_callback_lock);
1273 		return;
1274 	}
1275 	orig_state_change = cep->sk_state_change;
1276 
1277 	siw_dbg_cep(cep, "state: %d\n", cep->state);
1278 
1279 	switch (sk->sk_state) {
1280 	case TCP_ESTABLISHED:
1281 		/*
1282 		 * handle accepting socket as special case where only
1283 		 * new connection is possible
1284 		 */
1285 		siw_cm_queue_work(cep, SIW_CM_WORK_ACCEPT);
1286 		break;
1287 
1288 	case TCP_CLOSE:
1289 	case TCP_CLOSE_WAIT:
1290 		if (cep->qp)
1291 			cep->qp->tx_ctx.tx_suspend = 1;
1292 		siw_cm_queue_work(cep, SIW_CM_WORK_PEER_CLOSE);
1293 		break;
1294 
1295 	default:
1296 		siw_dbg_cep(cep, "unexpected socket state %d\n", sk->sk_state);
1297 	}
1298 	read_unlock(&sk->sk_callback_lock);
1299 	orig_state_change(sk);
1300 }
1301 
1302 static int kernel_bindconnect(struct socket *s, struct sockaddr *laddr,
1303 			      struct sockaddr *raddr, bool afonly)
1304 {
1305 	int rv, flags = 0;
1306 	size_t size = laddr->sa_family == AF_INET ?
1307 		sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);
1308 
1309 	/*
1310 	 * Make address available again asap.
1311 	 */
1312 	sock_set_reuseaddr(s->sk);
1313 
1314 	if (afonly) {
1315 		rv = ip6_sock_set_v6only(s->sk);
1316 		if (rv)
1317 			return rv;
1318 	}
1319 
1320 	rv = s->ops->bind(s, laddr, size);
1321 	if (rv < 0)
1322 		return rv;
1323 
1324 	rv = s->ops->connect(s, raddr, size, flags);
1325 
1326 	return rv < 0 ? rv : 0;
1327 }
1328 
1329 int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1330 {
1331 	struct siw_device *sdev = to_siw_dev(id->device);
1332 	struct siw_qp *qp;
1333 	struct siw_cep *cep = NULL;
1334 	struct socket *s = NULL;
1335 	struct sockaddr *laddr = (struct sockaddr *)&id->local_addr,
1336 			*raddr = (struct sockaddr *)&id->remote_addr;
1337 	bool p2p_mode = peer_to_peer, v4 = true;
1338 	u16 pd_len = params->private_data_len;
1339 	int version = mpa_version, rv;
1340 
1341 	if (pd_len > MPA_MAX_PRIVDATA)
1342 		return -EINVAL;
1343 
1344 	if (params->ird > sdev->attrs.max_ird ||
1345 	    params->ord > sdev->attrs.max_ord)
1346 		return -ENOMEM;
1347 
1348 	if (laddr->sa_family == AF_INET6)
1349 		v4 = false;
1350 	else if (laddr->sa_family != AF_INET)
1351 		return -EAFNOSUPPORT;
1352 
1353 	/*
1354 	 * Respect any iwarp port mapping: Use mapped remote address
1355 	 * if valid. Local address must not be mapped, since siw
1356 	 * uses kernel TCP stack.
1357 	 */
1358 	if ((v4 && to_sockaddr_in(id->remote_addr).sin_port != 0) ||
1359 	     to_sockaddr_in6(id->remote_addr).sin6_port != 0)
1360 		raddr = (struct sockaddr *)&id->m_remote_addr;
1361 
1362 	qp = siw_qp_id2obj(sdev, params->qpn);
1363 	if (!qp) {
1364 		WARN(1, "[QP %u] does not exist\n", params->qpn);
1365 		rv = -EINVAL;
1366 		goto error;
1367 	}
1368 	siw_dbg_qp(qp, "pd_len %d, laddr %pISp, raddr %pISp\n", pd_len, laddr,
1369 		   raddr);
1370 
1371 	rv = sock_create(v4 ? AF_INET : AF_INET6, SOCK_STREAM, IPPROTO_TCP, &s);
1372 	if (rv < 0)
1373 		goto error;
1374 
1375 	/*
1376 	 * NOTE: For simplification, connect() is called in blocking
1377 	 * mode. Might be reconsidered for async connection setup at
1378 	 * TCP level.
1379 	 */
1380 	rv = kernel_bindconnect(s, laddr, raddr, id->afonly);
1381 	if (rv != 0) {
1382 		siw_dbg_qp(qp, "kernel_bindconnect: error %d\n", rv);
1383 		goto error;
1384 	}
1385 	if (siw_tcp_nagle == false)
1386 		tcp_sock_set_nodelay(s->sk);
1387 	cep = siw_cep_alloc(sdev);
1388 	if (!cep) {
1389 		rv = -ENOMEM;
1390 		goto error;
1391 	}
1392 	siw_cep_set_inuse(cep);
1393 
1394 	/* Associate QP with CEP */
1395 	siw_cep_get(cep);
1396 	qp->cep = cep;
1397 
1398 	/* siw_qp_get(qp) already done by QP lookup */
1399 	cep->qp = qp;
1400 
1401 	id->add_ref(id);
1402 	cep->cm_id = id;
1403 
1404 	/*
1405 	 * 4: Allocate a sufficient number of work elements
1406 	 * to allow concurrent handling of local + peer close
1407 	 * events, MPA header processing + MPA timeout.
1408 	 */
1409 	rv = siw_cm_alloc_work(cep, 4);
1410 	if (rv != 0) {
1411 		rv = -ENOMEM;
1412 		goto error;
1413 	}
1414 	cep->ird = params->ird;
1415 	cep->ord = params->ord;
1416 
1417 	if (p2p_mode && cep->ord == 0)
1418 		cep->ord = 1;
1419 
1420 	cep->state = SIW_EPSTATE_CONNECTING;
1421 
1422 	/*
1423 	 * Associate CEP with socket
1424 	 */
1425 	siw_cep_socket_assoc(cep, s);
1426 
1427 	cep->state = SIW_EPSTATE_AWAIT_MPAREP;
1428 
1429 	/*
1430 	 * Set MPA Request bits: CRC if required, no MPA Markers,
1431 	 * MPA Rev. according to module parameter 'mpa_version', Key 'Request'.
1432 	 */
1433 	cep->mpa.hdr.params.bits = 0;
1434 	if (version > MPA_REVISION_2) {
1435 		pr_warn("Setting MPA version to %u\n", MPA_REVISION_2);
1436 		version = MPA_REVISION_2;
1437 		/* Adjust also module parameter */
1438 		mpa_version = MPA_REVISION_2;
1439 	}
1440 	__mpa_rr_set_revision(&cep->mpa.hdr.params.bits, version);
1441 
1442 	if (try_gso)
1443 		cep->mpa.hdr.params.bits |= MPA_RR_FLAG_GSO_EXP;
1444 
1445 	if (mpa_crc_required)
1446 		cep->mpa.hdr.params.bits |= MPA_RR_FLAG_CRC;
1447 
1448 	/*
1449 	 * If MPA version == 2:
1450 	 * o Include ORD and IRD.
1451 	 * o Indicate peer-to-peer mode, if required by module
1452 	 *   parameter 'peer_to_peer'.
1453 	 */
1454 	if (version == MPA_REVISION_2) {
1455 		cep->enhanced_rdma_conn_est = true;
1456 		cep->mpa.hdr.params.bits |= MPA_RR_FLAG_ENHANCED;
1457 
1458 		cep->mpa.v2_ctrl.ird = htons(cep->ird);
1459 		cep->mpa.v2_ctrl.ord = htons(cep->ord);
1460 
1461 		if (p2p_mode) {
1462 			cep->mpa.v2_ctrl.ird |= MPA_V2_PEER_TO_PEER;
1463 			cep->mpa.v2_ctrl.ord |= rtr_type;
1464 		}
1465 		/* Remember own P2P mode requested */
1466 		cep->mpa.v2_ctrl_req.ird = cep->mpa.v2_ctrl.ird;
1467 		cep->mpa.v2_ctrl_req.ord = cep->mpa.v2_ctrl.ord;
1468 	}
1469 	memcpy(cep->mpa.hdr.key, MPA_KEY_REQ, 16);
1470 
1471 	rv = siw_send_mpareqrep(cep, params->private_data, pd_len);
1472 	/*
1473 	 * Reset private data.
1474 	 */
1475 	cep->mpa.hdr.params.pd_len = 0;
1476 
1477 	if (rv >= 0) {
1478 		rv = siw_cm_queue_work(cep, SIW_CM_WORK_MPATIMEOUT);
1479 		if (!rv) {
1480 			siw_dbg_cep(cep, "[QP %u]: exit\n", qp_id(qp));
1481 			siw_cep_set_free(cep);
1482 			return 0;
1483 		}
1484 	}
1485 error:
1486 	siw_dbg(id->device, "failed: %d\n", rv);
1487 
1488 	if (cep) {
1489 		siw_socket_disassoc(s);
1490 		sock_release(s);
1491 		cep->sock = NULL;
1492 
1493 		cep->qp = NULL;
1494 
1495 		cep->cm_id = NULL;
1496 		id->rem_ref(id);
1497 		siw_cep_put(cep);
1498 
1499 		qp->cep = NULL;
1500 		siw_cep_put(cep);
1501 
1502 		cep->state = SIW_EPSTATE_CLOSED;
1503 
1504 		siw_cep_set_free(cep);
1505 
1506 		siw_cep_put(cep);
1507 
1508 	} else if (s) {
1509 		sock_release(s);
1510 	}
1511 	if (qp)
1512 		siw_qp_put(qp);
1513 
1514 	return rv;
1515 }
1516 
1517 /*
1518  * siw_accept - Let SoftiWARP accept an RDMA connection request
1519  *
1520  * @id:		New connection management id to be used for accepted
1521  *		connection request
1522  * @params:	Connection parameters provided by ULP for accepting connection
1523  *
1524  * Transition QP to RTS state, associate new CM id @id with accepted CEP
1525  * and get prepared for TCP input by installing socket callbacks.
1526  * Then send MPA Reply and generate the "connection established" event.
1527  * Socket callbacks must be installed before sending MPA Reply, because
1528  * the latter may cause a first RDMA message to arrive from the RDMA Initiator
1529  * side very quickly, at which time the socket callbacks must be ready.
1530  */
1531 int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1532 {
1533 	struct siw_device *sdev = to_siw_dev(id->device);
1534 	struct siw_cep *cep = (struct siw_cep *)id->provider_data;
1535 	struct siw_qp *qp;
1536 	struct siw_qp_attrs qp_attrs;
1537 	int rv, max_priv_data = MPA_MAX_PRIVDATA;
1538 	bool wait_for_peer_rts = false;
1539 
1540 	siw_cep_set_inuse(cep);
1541 	siw_cep_put(cep);
1542 
1543 	/* Free lingering inbound private data */
1544 	if (cep->mpa.hdr.params.pd_len) {
1545 		cep->mpa.hdr.params.pd_len = 0;
1546 		kfree(cep->mpa.pdata);
1547 		cep->mpa.pdata = NULL;
1548 	}
1549 	siw_cancel_mpatimer(cep);
1550 
1551 	if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
1552 		siw_dbg_cep(cep, "out of state\n");
1553 
1554 		siw_cep_set_free(cep);
1555 		siw_cep_put(cep);
1556 
1557 		return -ECONNRESET;
1558 	}
1559 	qp = siw_qp_id2obj(sdev, params->qpn);
1560 	if (!qp) {
1561 		WARN(1, "[QP %d] does not exist\n", params->qpn);
1562 		siw_cep_set_free(cep);
1563 		siw_cep_put(cep);
1564 
1565 		return -EINVAL;
1566 	}
1567 	down_write(&qp->state_lock);
1568 	if (qp->attrs.state > SIW_QP_STATE_RTR) {
1569 		rv = -EINVAL;
1570 		up_write(&qp->state_lock);
1571 		goto error;
1572 	}
1573 	siw_dbg_cep(cep, "[QP %d]\n", params->qpn);
1574 
1575 	if (try_gso && cep->mpa.hdr.params.bits & MPA_RR_FLAG_GSO_EXP) {
1576 		siw_dbg_cep(cep, "peer allows GSO on TX\n");
1577 		qp->tx_ctx.gso_seg_limit = 0;
1578 	}
1579 	if (params->ord > sdev->attrs.max_ord ||
1580 	    params->ird > sdev->attrs.max_ird) {
1581 		siw_dbg_cep(
1582 			cep,
1583 			"[QP %u]: ord %d (max %d), ird %d (max %d)\n",
1584 			qp_id(qp), params->ord, sdev->attrs.max_ord,
1585 			params->ird, sdev->attrs.max_ird);
1586 		rv = -EINVAL;
1587 		up_write(&qp->state_lock);
1588 		goto error;
1589 	}
1590 	if (cep->enhanced_rdma_conn_est)
1591 		max_priv_data -= sizeof(struct mpa_v2_data);
1592 
1593 	if (params->private_data_len > max_priv_data) {
1594 		siw_dbg_cep(
1595 			cep,
1596 			"[QP %u]: private data length: %d (max %d)\n",
1597 			qp_id(qp), params->private_data_len, max_priv_data);
1598 		rv = -EINVAL;
1599 		up_write(&qp->state_lock);
1600 		goto error;
1601 	}
1602 	if (cep->enhanced_rdma_conn_est) {
1603 		if (params->ord > cep->ord) {
1604 			if (relaxed_ird_negotiation) {
1605 				params->ord = cep->ord;
1606 			} else {
1607 				cep->ird = params->ird;
1608 				cep->ord = params->ord;
1609 				rv = -EINVAL;
1610 				up_write(&qp->state_lock);
1611 				goto error;
1612 			}
1613 		}
1614 		if (params->ird < cep->ird) {
1615 			if (relaxed_ird_negotiation &&
1616 			    cep->ird <= sdev->attrs.max_ird)
1617 				params->ird = cep->ird;
1618 			else {
1619 				rv = -ENOMEM;
1620 				up_write(&qp->state_lock);
1621 				goto error;
1622 			}
1623 		}
1624 		if (cep->mpa.v2_ctrl.ord &
1625 		    (MPA_V2_RDMA_WRITE_RTR | MPA_V2_RDMA_READ_RTR))
1626 			wait_for_peer_rts = true;
1627 		/*
1628 		 * Signal back negotiated IRD and ORD values
1629 		 */
1630 		cep->mpa.v2_ctrl.ord =
1631 			htons(params->ord & MPA_IRD_ORD_MASK) |
1632 			(cep->mpa.v2_ctrl.ord & ~MPA_V2_MASK_IRD_ORD);
1633 		cep->mpa.v2_ctrl.ird =
1634 			htons(params->ird & MPA_IRD_ORD_MASK) |
1635 			(cep->mpa.v2_ctrl.ird & ~MPA_V2_MASK_IRD_ORD);
1636 	}
1637 	cep->ird = params->ird;
1638 	cep->ord = params->ord;
1639 
1640 	cep->cm_id = id;
1641 	id->add_ref(id);
1642 
1643 	memset(&qp_attrs, 0, sizeof(qp_attrs));
1644 	qp_attrs.orq_size = cep->ord;
1645 	qp_attrs.irq_size = cep->ird;
1646 	qp_attrs.sk = cep->sock;
1647 	if (cep->mpa.hdr.params.bits & MPA_RR_FLAG_CRC)
1648 		qp_attrs.flags = SIW_MPA_CRC;
1649 	qp_attrs.state = SIW_QP_STATE_RTS;
1650 
1651 	siw_dbg_cep(cep, "[QP%u]: moving to rts\n", qp_id(qp));
1652 
1653 	/* Associate QP with CEP */
1654 	siw_cep_get(cep);
1655 	qp->cep = cep;
1656 
1657 	/* siw_qp_get(qp) already done by QP lookup */
1658 	cep->qp = qp;
1659 
1660 	cep->state = SIW_EPSTATE_RDMA_MODE;
1661 
1662 	/* Move socket RX/TX under QP control */
1663 	rv = siw_qp_modify(qp, &qp_attrs,
1664 			   SIW_QP_ATTR_STATE | SIW_QP_ATTR_LLP_HANDLE |
1665 				   SIW_QP_ATTR_ORD | SIW_QP_ATTR_IRD |
1666 				   SIW_QP_ATTR_MPA);
1667 	up_write(&qp->state_lock);
1668 
1669 	if (rv)
1670 		goto error;
1671 
1672 	siw_dbg_cep(cep, "[QP %u]: send mpa reply, %d byte pdata\n",
1673 		    qp_id(qp), params->private_data_len);
1674 
1675 	rv = siw_send_mpareqrep(cep, params->private_data,
1676 				params->private_data_len);
1677 	if (rv != 0)
1678 		goto error;
1679 
1680 	if (wait_for_peer_rts) {
1681 		siw_sk_assign_rtr_upcalls(cep);
1682 	} else {
1683 		siw_qp_socket_assoc(cep, qp);
1684 		rv = siw_cm_upcall(cep, IW_CM_EVENT_ESTABLISHED, 0);
1685 		if (rv)
1686 			goto error;
1687 	}
1688 	siw_cep_set_free(cep);
1689 
1690 	return 0;
1691 error:
1692 	siw_socket_disassoc(cep->sock);
1693 	sock_release(cep->sock);
1694 	cep->sock = NULL;
1695 
1696 	cep->state = SIW_EPSTATE_CLOSED;
1697 
1698 	if (cep->cm_id) {
1699 		cep->cm_id->rem_ref(id);
1700 		cep->cm_id = NULL;
1701 	}
1702 	if (qp->cep) {
1703 		siw_cep_put(cep);
1704 		qp->cep = NULL;
1705 	}
1706 	cep->qp = NULL;
1707 	siw_qp_put(qp);
1708 
1709 	siw_cep_set_free(cep);
1710 	siw_cep_put(cep);
1711 
1712 	return rv;
1713 }
1714 
1715 /*
1716  * siw_reject()
1717  *
1718  * Local connection reject case. Send private data back to peer,
1719  * close connection and dereference connection id.
1720  */
1721 int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len)
1722 {
1723 	struct siw_cep *cep = (struct siw_cep *)id->provider_data;
1724 
1725 	siw_cep_set_inuse(cep);
1726 	siw_cep_put(cep);
1727 
1728 	siw_cancel_mpatimer(cep);
1729 
1730 	if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
1731 		siw_dbg_cep(cep, "out of state\n");
1732 
1733 		siw_cep_set_free(cep);
1734 		siw_cep_put(cep); /* put last reference */
1735 
1736 		return -ECONNRESET;
1737 	}
1738 	siw_dbg_cep(cep, "cep->state %d, pd_len %d\n", cep->state,
1739 		    pd_len);
1740 
1741 	if (__mpa_rr_revision(cep->mpa.hdr.params.bits) >= MPA_REVISION_1) {
1742 		cep->mpa.hdr.params.bits |= MPA_RR_FLAG_REJECT; /* reject */
1743 		siw_send_mpareqrep(cep, pdata, pd_len);
1744 	}
1745 	siw_socket_disassoc(cep->sock);
1746 	sock_release(cep->sock);
1747 	cep->sock = NULL;
1748 
1749 	cep->state = SIW_EPSTATE_CLOSED;
1750 
1751 	siw_cep_set_free(cep);
1752 	siw_cep_put(cep);
1753 
1754 	return 0;
1755 }
1756 
1757 /*
1758  * siw_create_listen - Create resources for a listener's IWCM ID @id
1759  *
1760  * Starts listen on the socket address id->local_addr.
1761  *
1762  */
1763 int siw_create_listen(struct iw_cm_id *id, int backlog)
1764 {
1765 	struct socket *s;
1766 	struct siw_cep *cep = NULL;
1767 	struct siw_device *sdev = to_siw_dev(id->device);
1768 	int addr_family = id->local_addr.ss_family;
1769 	int rv = 0;
1770 
1771 	if (addr_family != AF_INET && addr_family != AF_INET6)
1772 		return -EAFNOSUPPORT;
1773 
1774 	rv = sock_create(addr_family, SOCK_STREAM, IPPROTO_TCP, &s);
1775 	if (rv < 0)
1776 		return rv;
1777 
1778 	/*
1779 	 * Allow binding local port when still in TIME_WAIT from last close.
1780 	 */
1781 	sock_set_reuseaddr(s->sk);
1782 
1783 	if (addr_family == AF_INET) {
1784 		struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);
1785 
1786 		/* For wildcard addr, limit binding to current device only */
1787 		if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
1788 			s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
1789 
1790 		rv = s->ops->bind(s, (struct sockaddr *)laddr,
1791 				  sizeof(struct sockaddr_in));
1792 	} else {
1793 		struct sockaddr_in6 *laddr = &to_sockaddr_in6(id->local_addr);
1794 
1795 		if (id->afonly) {
1796 			rv = ip6_sock_set_v6only(s->sk);
1797 			if (rv) {
1798 				siw_dbg(id->device,
1799 					"ip6_sock_set_v6only erro: %d\n", rv);
1800 				goto error;
1801 			}
1802 		}
1803 
1804 		/* For wildcard addr, limit binding to current device only */
1805 		if (ipv6_addr_any(&laddr->sin6_addr))
1806 			s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
1807 
1808 		rv = s->ops->bind(s, (struct sockaddr *)laddr,
1809 				  sizeof(struct sockaddr_in6));
1810 	}
1811 	if (rv) {
1812 		siw_dbg(id->device, "socket bind error: %d\n", rv);
1813 		goto error;
1814 	}
1815 	cep = siw_cep_alloc(sdev);
1816 	if (!cep) {
1817 		rv = -ENOMEM;
1818 		goto error;
1819 	}
1820 	siw_cep_socket_assoc(cep, s);
1821 
1822 	rv = siw_cm_alloc_work(cep, backlog);
1823 	if (rv) {
1824 		siw_dbg(id->device,
1825 			"alloc_work error %d, backlog %d\n",
1826 			rv, backlog);
1827 		goto error;
1828 	}
1829 	rv = s->ops->listen(s, backlog);
1830 	if (rv) {
1831 		siw_dbg(id->device, "listen error %d\n", rv);
1832 		goto error;
1833 	}
1834 	cep->cm_id = id;
1835 	id->add_ref(id);
1836 
1837 	/*
1838 	 * In case of a wildcard rdma_listen on a multi-homed device,
1839 	 * a listener's IWCM id is associated with more than one listening CEP.
1840 	 *
1841 	 * We currently use id->provider_data in three different ways:
1842 	 *
1843 	 * o For a listener's IWCM id, id->provider_data points to
1844 	 *   the list_head of the list of listening CEPs.
1845 	 *   Uses: siw_create_listen(), siw_destroy_listen()
1846 	 *
1847 	 * o For each accepted passive-side IWCM id, id->provider_data
1848 	 *   points to the CEP itself. This is a consequence of
1849 	 *   - siw_cm_upcall() setting event.provider_data = cep and
1850 	 *   - the IWCM's cm_conn_req_handler() setting provider_data of the
1851 	 *     new passive-side IWCM id equal to event.provider_data
1852 	 *   Uses: siw_accept(), siw_reject()
1853 	 *
1854 	 * o For an active-side IWCM id, id->provider_data is not used at all.
1855 	 *
1856 	 */
1857 	if (!id->provider_data) {
1858 		id->provider_data =
1859 			kmalloc(sizeof(struct list_head), GFP_KERNEL);
1860 		if (!id->provider_data) {
1861 			rv = -ENOMEM;
1862 			goto error;
1863 		}
1864 		INIT_LIST_HEAD((struct list_head *)id->provider_data);
1865 	}
1866 	list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
1867 	cep->state = SIW_EPSTATE_LISTENING;
1868 
1869 	siw_dbg(id->device, "Listen at laddr %pISp\n", &id->local_addr);
1870 
1871 	return 0;
1872 
1873 error:
1874 	siw_dbg(id->device, "failed: %d\n", rv);
1875 
1876 	if (cep) {
1877 		siw_cep_set_inuse(cep);
1878 
1879 		if (cep->cm_id) {
1880 			cep->cm_id->rem_ref(cep->cm_id);
1881 			cep->cm_id = NULL;
1882 		}
1883 		cep->sock = NULL;
1884 		siw_socket_disassoc(s);
1885 		cep->state = SIW_EPSTATE_CLOSED;
1886 
1887 		siw_cep_set_free(cep);
1888 		siw_cep_put(cep);
1889 	}
1890 	sock_release(s);
1891 
1892 	return rv;
1893 }
1894 
1895 static void siw_drop_listeners(struct iw_cm_id *id)
1896 {
1897 	struct list_head *p, *tmp;
1898 
1899 	/*
1900 	 * In case of a wildcard rdma_listen on a multi-homed device,
1901 	 * a listener's IWCM id is associated with more than one listening CEP.
1902 	 */
1903 	list_for_each_safe(p, tmp, (struct list_head *)id->provider_data) {
1904 		struct siw_cep *cep = list_entry(p, struct siw_cep, listenq);
1905 
1906 		list_del(p);
1907 
1908 		siw_dbg_cep(cep, "drop cep, state %d\n", cep->state);
1909 
1910 		siw_cep_set_inuse(cep);
1911 
1912 		if (cep->cm_id) {
1913 			cep->cm_id->rem_ref(cep->cm_id);
1914 			cep->cm_id = NULL;
1915 		}
1916 		if (cep->sock) {
1917 			siw_socket_disassoc(cep->sock);
1918 			sock_release(cep->sock);
1919 			cep->sock = NULL;
1920 		}
1921 		cep->state = SIW_EPSTATE_CLOSED;
1922 		siw_cep_set_free(cep);
1923 		siw_cep_put(cep);
1924 	}
1925 }
1926 
1927 int siw_destroy_listen(struct iw_cm_id *id)
1928 {
1929 	if (!id->provider_data) {
1930 		siw_dbg(id->device, "no cep(s)\n");
1931 		return 0;
1932 	}
1933 	siw_drop_listeners(id);
1934 	kfree(id->provider_data);
1935 	id->provider_data = NULL;
1936 
1937 	return 0;
1938 }
1939 
1940 int siw_cm_init(void)
1941 {
1942 	/*
1943 	 * create_single_workqueue for strict ordering
1944 	 */
1945 	siw_cm_wq = create_singlethread_workqueue("siw_cm_wq");
1946 	if (!siw_cm_wq)
1947 		return -ENOMEM;
1948 
1949 	return 0;
1950 }
1951 
1952 void siw_cm_exit(void)
1953 {
1954 	if (siw_cm_wq) {
1955 		flush_workqueue(siw_cm_wq);
1956 		destroy_workqueue(siw_cm_wq);
1957 	}
1958 }
1959