/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <libcxgb_cm.h>
#include "cxgbit.h"
#include "clip_tbl.h"

static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp)
{
	wr_waitp->ret = 0;
	reinit_completion(&wr_waitp->completion);
}

static void
cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret)
{
	if (ret == CPL_ERR_NONE)
		wr_waitp->ret = 0;
	else
		wr_waitp->ret = -EIO;

	if (wr_waitp->ret)
		pr_err("%s: err:%u\n", func, ret);

	complete(&wr_waitp->completion);
}

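/*
 * Block until cxgbit_wake_up() completes the waiter with the firmware's
 * reply status, or the timeout (in seconds) expires.
 * Returns 0 on success or a negative errno.
 */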
static int
cxgbit_wait_for_reply(struct cxgbit_device *cdev,
		      struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout,
		      const char *func)
{
	int ret;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
		wr_waitp->ret = -EIO;
		goto out;
	}

	ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ);
	if (!ret) {
		pr_info("%s - Device %s not responding tid %u\n",
			func, pci_name(cdev->lldi.pdev), tid);
		wr_waitp->ret = -ETIMEDOUT;
	}
out:
	if (wr_waitp->ret)
		pr_info("%s: FW reply %d tid %u\n",
			pci_name(cdev->lldi.pdev), wr_waitp->ret, tid);
	return wr_waitp->ret;
}

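/*
 * Hash over the cxgbit_np pointer; indexes the per-device np_hash_tab,
 * which maps a listening endpoint to its server TID (stid).
 */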
static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
{
	return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
}

static struct np_info *
cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp,
		   unsigned int stid)
{
	struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		int bucket = cxgbit_np_hashfn(cnp);

		p->cnp = cnp;
		p->stid = stid;
		spin_lock(&cdev->np_lock);
		p->next = cdev->np_hash_tab[bucket];
		cdev->np_hash_tab[bucket] = p;
		spin_unlock(&cdev->np_lock);
	}

	return p;
}

static int
cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid = -1, bucket = cxgbit_np_hashfn(cnp);
	struct np_info *p;

	spin_lock(&cdev->np_lock);
	for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
		if (p->cnp == cnp) {
			stid = p->stid;
			break;
		}
	}
	spin_unlock(&cdev->np_lock);

	return stid;
}

static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid = -1, bucket = cxgbit_np_hashfn(cnp);
	struct np_info *p, **prev = &cdev->np_hash_tab[bucket];

	spin_lock(&cdev->np_lock);
	for (p = *prev; p; prev = &p->next, p = p->next) {
		if (p->cnp == cnp) {
			stid = p->stid;
			*prev = p->next;
			kfree(p);
			break;
		}
	}
	spin_unlock(&cdev->np_lock);

	return stid;
}

void _cxgbit_free_cnp(struct kref *kref)
{
	struct cxgbit_np *cnp;

	cnp = container_of(kref, struct cxgbit_np, kref);
	kfree(cnp);
}

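/*
 * Create an IPv6 hardware listening server on port 0 of the adapter.
 * A non-wildcard address must first be inserted into the CLIP
 * (Compressed Local IP) table so the hardware can match on it.
 */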
static int
cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid,
		      struct cxgbit_np *cnp)
{
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
				     &cnp->com.local_addr;
	int addr_type;
	int ret;

	pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n",
		 __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port);

	addr_type = ipv6_addr_type((const struct in6_addr *)
				   &sin6->sin6_addr);
	if (addr_type != IPV6_ADDR_ANY) {
		ret = cxgb4_clip_get(cdev->lldi.ports[0],
				     (const u32 *)&sin6->sin6_addr.s6_addr, 1);
		if (ret) {
			pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n",
			       sin6->sin6_addr.s6_addr, ret);
			return -ENOMEM;
		}
	}

	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);

	ret = cxgb4_create_server6(cdev->lldi.ports[0],
				   stid, &sin6->sin6_addr,
				   sin6->sin6_port,
				   cdev->lldi.rxq_ids[0]);
	if (!ret)
		ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
					    0, 10, __func__);
	else if (ret > 0)
		ret = net_xmit_errno(ret);
	else
		cxgbit_put_cnp(cnp);

	if (ret) {
		if (ret != -ETIMEDOUT)
			cxgb4_clip_release(cdev->lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);

		pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n",
		       ret, stid, sin6->sin6_addr.s6_addr,
		       ntohs(sin6->sin6_port));
	}

	return ret;
}

static int
cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid,
		      struct cxgbit_np *cnp)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)
				   &cnp->com.local_addr;
	int ret;

	pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n",
		 __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port);

	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);

	ret = cxgb4_create_server(cdev->lldi.ports[0],
				  stid, sin->sin_addr.s_addr,
				  sin->sin_port, 0,
				  cdev->lldi.rxq_ids[0]);
	if (!ret)
		ret = cxgbit_wait_for_reply(cdev,
					    &cnp->com.wr_wait,
					    0, 10, __func__);
	else if (ret > 0)
		ret = net_xmit_errno(ret);
	else
		cxgbit_put_cnp(cnp);

	if (ret)
		pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n",
		       ret, stid, &sin->sin_addr, ntohs(sin->sin_port));
	return ret;
}

struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id)
{
	struct cxgbit_device *cdev;
	u8 i;

	list_for_each_entry(cdev, &cdev_list_head, list) {
		struct cxgb4_lld_info *lldi = &cdev->lldi;

		for (i = 0; i < lldi->nports; i++) {
			if (lldi->ports[i] == ndev) {
				if (port_id)
					*port_id = i;
				return cdev;
			}
		}
	}

	return NULL;
}

static struct net_device *cxgbit_get_real_dev(struct net_device *ndev)
{
	if (ndev->priv_flags & IFF_BONDING) {
		pr_err("Bond devices are not supported. Interface:%s\n",
		       ndev->name);
		return NULL;
	}

	if (is_vlan_dev(ndev))
		return vlan_dev_real_dev(ndev);

	return ndev;
}

static struct net_device *cxgbit_ipv4_netdev(__be32 saddr)
{
	struct net_device *ndev;

	ndev = __ip_dev_find(&init_net, saddr, false);
	if (!ndev)
		return NULL;

	return cxgbit_get_real_dev(ndev);
}

static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6)
{
	struct net_device *ndev = NULL;
	bool found = false;

	if (IS_ENABLED(CONFIG_IPV6)) {
		for_each_netdev_rcu(&init_net, ndev)
			if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) {
				found = true;
				break;
			}
	}
	if (!found)
		return NULL;
	return cxgbit_get_real_dev(ndev);
}

static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp)
{
	struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
	int ss_family = sockaddr->ss_family;
	struct net_device *ndev = NULL;
	struct cxgbit_device *cdev = NULL;

	rcu_read_lock();
	if (ss_family == AF_INET) {
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)sockaddr;
		ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr);
	} else if (ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)sockaddr;
		ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr);
	}
	if (!ndev)
		goto out;

	cdev = cxgbit_find_device(ndev, NULL);
out:
	rcu_read_unlock();
	return cdev;
}

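/*
 * Return true if the listening endpoint is bound to the IPv4 or IPv6
 * wildcard address, in which case servers must be created on every
 * registered adapter rather than a single one.
 */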
static bool cxgbit_inaddr_any(struct cxgbit_np *cnp)
{
	struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
	int ss_family = sockaddr->ss_family;
	int addr_type;

	if (ss_family == AF_INET) {
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)sockaddr;
		if (sin->sin_addr.s_addr == htonl(INADDR_ANY))
			return true;
	} else if (ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)sockaddr;
		addr_type = ipv6_addr_type((const struct in6_addr *)
				&sin6->sin6_addr);
		if (addr_type == IPV6_ADDR_ANY)
			return true;
	}
	return false;
}

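/*
 * Allocate a server TID on one adapter, record the (cnp, stid) mapping
 * and create the hardware listening server for the address family.
 * Note that on -ETIMEDOUT the stid is not freed, since a late firmware
 * reply may still reference it.
 */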
static int
__cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid, ret;
	int ss_family = cnp->com.local_addr.ss_family;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags))
		return -EINVAL;

	stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
	if (stid < 0)
		return -EINVAL;

	if (!cxgbit_np_hash_add(cdev, cnp, stid)) {
		cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
		return -EINVAL;
	}

	if (ss_family == AF_INET)
		ret = cxgbit_create_server4(cdev, stid, cnp);
	else
		ret = cxgbit_create_server6(cdev, stid, cnp);

	if (ret) {
		if (ret != -ETIMEDOUT)
			cxgb4_free_stid(cdev->lldi.tids, stid,
					ss_family);
		cxgbit_np_hash_del(cdev, cnp);
		return ret;
	}
	return ret;
}

static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	int ret = -1;

	mutex_lock(&cdev_list_lock);
	cdev = cxgbit_find_np_cdev(cnp);
	if (!cdev)
		goto out;

	if (cxgbit_np_hash_find(cdev, cnp) >= 0)
		goto out;

	if (__cxgbit_setup_cdev_np(cdev, cnp))
		goto out;

	cnp->com.cdev = cdev;
	ret = 0;
out:
	mutex_unlock(&cdev_list_lock);
	return ret;
}

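/*
 * Wildcard listen: create a hardware server on every registered
 * adapter. Succeeds if at least one adapter accepted the server.
 */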
static int cxgbit_setup_all_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	int ret;
	u32 count = 0;

	mutex_lock(&cdev_list_lock);
	list_for_each_entry(cdev, &cdev_list_head, list) {
		if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
			mutex_unlock(&cdev_list_lock);
			return -1;
		}
	}

	list_for_each_entry(cdev, &cdev_list_head, list) {
		ret = __cxgbit_setup_cdev_np(cdev, cnp);
		if (ret == -ETIMEDOUT)
			break;
		if (ret != 0)
			continue;
		count++;
	}
	mutex_unlock(&cdev_list_lock);

	return count ? 0 : -1;
}

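/*
 * iSCSI target entry point for setting up a network portal: allocate
 * the cxgbit_np, copy the listen address and create the hardware
 * server(s), either on the adapter owning the address or, for wildcard
 * addresses, on all adapters.
 */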
int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)
{
	struct cxgbit_np *cnp;
	int ret;

	if ((ksockaddr->ss_family != AF_INET) &&
	    (ksockaddr->ss_family != AF_INET6))
		return -EINVAL;

	cnp = kzalloc(sizeof(*cnp), GFP_KERNEL);
	if (!cnp)
		return -ENOMEM;

	init_waitqueue_head(&cnp->accept_wait);
	init_completion(&cnp->com.wr_wait.completion);
	init_completion(&cnp->accept_comp);
	INIT_LIST_HEAD(&cnp->np_accept_list);
	spin_lock_init(&cnp->np_accept_lock);
	kref_init(&cnp->kref);
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct sockaddr_storage));
	memcpy(&cnp->com.local_addr, &np->np_sockaddr,
	       sizeof(cnp->com.local_addr));

	cnp->np = np;
	cnp->com.cdev = NULL;

	if (cxgbit_inaddr_any(cnp))
		ret = cxgbit_setup_all_np(cnp);
	else
		ret = cxgbit_setup_cdev_np(cnp);

	if (ret) {
		cxgbit_put_cnp(cnp);
		return -EINVAL;
	}

	np->np_context = cnp;
	cnp->com.state = CSK_STATE_LISTEN;
	return 0;
}

static void
cxgbit_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		     struct cxgbit_sock *csk)
{
	conn->login_family = np->np_sockaddr.ss_family;
	conn->login_sockaddr = csk->com.remote_addr;
	conn->local_sockaddr = csk->com.local_addr;
}

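/*
 * iSCSI target entry point for accepting a new connection: sleep until
 * cxgbit_pass_establish() queues an established socket on the accept
 * list, then bind it to the iSCSI connection.
 */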
int cxgbit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct cxgbit_np *cnp = np->np_context;
	struct cxgbit_sock *csk;
	int ret = 0;

accept_wait:
	ret = wait_for_completion_interruptible(&cnp->accept_comp);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		/*
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 */
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	spin_lock_bh(&cnp->np_accept_lock);
	if (list_empty(&cnp->np_accept_list)) {
		spin_unlock_bh(&cnp->np_accept_lock);
		goto accept_wait;
	}

	csk = list_first_entry(&cnp->np_accept_list,
			       struct cxgbit_sock,
			       accept_node);

	list_del_init(&csk->accept_node);
	spin_unlock_bh(&cnp->np_accept_lock);
	conn->context = csk;
	csk->conn = conn;

	cxgbit_set_conn_info(np, conn, csk);
	return 0;
}

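/*
 * Tear down the hardware listening server on one adapter: remove the
 * (cnp, stid) mapping, ask the firmware to remove the server, wait for
 * its reply, then release the CLIP entry (IPv6) and the stid.
 */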
static int
__cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid, ret;
	bool ipv6 = false;

	stid = cxgbit_np_hash_del(cdev, cnp);
	if (stid < 0)
		return -EINVAL;
	if (!test_bit(CDEV_STATE_UP, &cdev->flags))
		return -EINVAL;

	if (cnp->np->np_sockaddr.ss_family == AF_INET6)
		ipv6 = true;

	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);
	ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
				  cdev->lldi.rxq_ids[0], ipv6);

	if (ret > 0)
		ret = net_xmit_errno(ret);

	if (ret) {
		cxgbit_put_cnp(cnp);
		return ret;
	}

	ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
				    0, 10, __func__);
	if (ret == -ETIMEDOUT)
		return ret;

	if (ipv6 && cnp->com.cdev) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr;
		cxgb4_clip_release(cdev->lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr,
				   1);
	}

	cxgb4_free_stid(cdev->lldi.tids, stid,
			cnp->com.local_addr.ss_family);
	return 0;
}

static void cxgbit_free_all_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	int ret;

	mutex_lock(&cdev_list_lock);
	list_for_each_entry(cdev, &cdev_list_head, list) {
		ret = __cxgbit_free_cdev_np(cdev, cnp);
		if (ret == -ETIMEDOUT)
			break;
	}
	mutex_unlock(&cdev_list_lock);
}

static void cxgbit_free_cdev_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	bool found = false;

	mutex_lock(&cdev_list_lock);
	list_for_each_entry(cdev, &cdev_list_head, list) {
		if (cdev == cnp->com.cdev) {
			found = true;
			break;
		}
	}
	if (!found)
		goto out;

	__cxgbit_free_cdev_np(cdev, cnp);
out:
	mutex_unlock(&cdev_list_lock);
}

static void __cxgbit_free_conn(struct cxgbit_sock *csk);

void cxgbit_free_np(struct iscsi_np *np)
{
	struct cxgbit_np *cnp = np->np_context;
	struct cxgbit_sock *csk, *tmp;

	cnp->com.state = CSK_STATE_DEAD;
	if (cnp->com.cdev)
		cxgbit_free_cdev_np(cnp);
	else
		cxgbit_free_all_np(cnp);

	spin_lock_bh(&cnp->np_accept_lock);
	list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
		list_del_init(&csk->accept_node);
		__cxgbit_free_conn(csk);
	}
	spin_unlock_bh(&cnp->np_accept_lock);

	np->np_context = NULL;
	cxgbit_put_cnp(cnp);
}

static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len = roundup(sizeof(struct cpl_close_con_req), 16);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
			      NULL, NULL);

	cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
	__skb_queue_tail(&csk->txq, skb);
	cxgbit_push_tx_frames(csk);
}

static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
{
	pr_debug("%s cxgbit_device %p\n", __func__, handle);
	kfree_skb(skb);
}

static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbit_device *cdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	pr_debug("%s cdev %p\n", __func__, cdev);
	req->cmd = CPL_ABORT_NO_RST;
	cxgbit_ofld_send(cdev, skb);
}

static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len = roundup(sizeof(struct cpl_abort_req), 16);

	pr_debug("%s: csk %p tid %u; state %d\n",
		 __func__, csk, csk->tid, csk->com.state);

	__skb_queue_purge(&csk->txq);

	if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
		cxgbit_send_tx_flowc_wr(csk);

	skb = __skb_dequeue(&csk->skbq);
	cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx,
			  csk->com.cdev, cxgbit_abort_arp_failure);

	return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

static void
__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	__kfree_skb(skb);

	if (csk->com.state != CSK_STATE_ESTABLISHED)
		goto no_abort;

	set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
	csk->com.state = CSK_STATE_ABORTING;

	cxgbit_send_abort_req(csk);

	return;

no_abort:
	cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
	cxgbit_put_csk(csk);
}

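/*
 * Abort an established connection and wait for the abort reply. If the
 * socket is currently owned by another context, defer the abort through
 * the backlog queue instead of running it inline.
 */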
void cxgbit_abort_conn(struct cxgbit_sock *csk)
{
	struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);

	cxgbit_get_csk(csk);
	cxgbit_init_wr_wait(&csk->com.wr_wait);

	spin_lock_bh(&csk->lock);
	if (csk->lock_owner) {
		cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
		__skb_queue_tail(&csk->backlogq, skb);
	} else {
		__cxgbit_abort_conn(csk, skb);
	}
	spin_unlock_bh(&csk->lock);

	cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
			      csk->tid, 600, __func__);
}

static void __cxgbit_free_conn(struct cxgbit_sock *csk)
{
	struct iscsi_conn *conn = csk->conn;
	bool release = false;

	pr_debug("%s: state %d\n",
		 __func__, csk->com.state);

	spin_lock_bh(&csk->lock);
	switch (csk->com.state) {
	case CSK_STATE_ESTABLISHED:
		if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) {
			csk->com.state = CSK_STATE_CLOSING;
			cxgbit_send_halfclose(csk);
		} else {
			csk->com.state = CSK_STATE_ABORTING;
			cxgbit_send_abort_req(csk);
		}
		break;
	case CSK_STATE_CLOSING:
		csk->com.state = CSK_STATE_MORIBUND;
		cxgbit_send_halfclose(csk);
		break;
	case CSK_STATE_DEAD:
		release = true;
		break;
	default:
		pr_err("%s: csk %p; state %d\n",
		       __func__, csk, csk->com.state);
	}
	spin_unlock_bh(&csk->lock);

	if (release)
		cxgbit_put_csk(csk);
}

void cxgbit_free_conn(struct iscsi_conn *conn)
{
	__cxgbit_free_conn(conn->context);
}

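/*
 * Derive the effective MSS from the firmware-negotiated MTU index,
 * subtracting IP/TCP header overhead and the TCP timestamp option if
 * negotiated, with a floor of 128 bytes.
 */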
static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
{
	csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
			((csk->com.remote_addr.ss_family == AF_INET) ?
			sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
			sizeof(struct tcphdr);
	csk->mss = csk->emss;
	if (TCPOPT_TSTAMP_G(opt))
		csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
	if (csk->emss < 128)
		csk->emss = 128;
	if (csk->emss & 7)
		pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n",
			TCPOPT_MSS_G(opt), csk->mss, csk->emss);
	pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
		 csk->mss, csk->emss);
}

static void cxgbit_free_skb(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;

	__skb_queue_purge(&csk->txq);
	__skb_queue_purge(&csk->rxq);
	__skb_queue_purge(&csk->backlogq);
	__skb_queue_purge(&csk->ppodq);
	__skb_queue_purge(&csk->skbq);

	while ((skb = cxgbit_sock_dequeue_wr(csk)))
		kfree_skb(skb);

	__kfree_skb(csk->lro_hskb);
}

void _cxgbit_free_csk(struct kref *kref)
{
	struct cxgbit_sock *csk;
	struct cxgbit_device *cdev;

	csk = container_of(kref, struct cxgbit_sock, kref);

	pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state);

	if (csk->com.local_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
					     &csk->com.local_addr;
		cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
				   (const u32 *)
				   &sin6->sin6_addr.s6_addr, 1);
	}

	cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid,
			 csk->com.local_addr.ss_family);
	dst_release(csk->dst);
	cxgb4_l2t_release(csk->l2t);

	cdev = csk->com.cdev;
	spin_lock_bh(&cdev->cskq.lock);
	list_del(&csk->list);
	spin_unlock_bh(&cdev->cskq.lock);

	cxgbit_free_skb(csk);
	cxgbit_put_cnp(csk->cnp);
	cxgbit_put_cdev(cdev);

	kfree(csk);
}

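/*
 * Scale the default 256KB send/receive windows linearly with link
 * speed, in multiples of 10Gbps.
 */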
static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
{
	unsigned int linkspeed;
	u8 scale;

	linkspeed = pi->link_cfg.speed;
	scale = linkspeed / SPEED_10000;

#define CXGBIT_10G_RCV_WIN (256 * 1024)
	csk->rcv_win = CXGBIT_10G_RCV_WIN;
	if (scale)
		csk->rcv_win *= scale;

#define CXGBIT_10G_SND_WIN (256 * 1024)
	csk->snd_win = CXGBIT_10G_SND_WIN;
	if (scale)
		csk->snd_win *= scale;

	pr_debug("%s snd_win %d rcv_win %d\n",
		 __func__, csk->snd_win, csk->rcv_win);
}

#ifdef CONFIG_CHELSIO_T4_DCB
static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev)
{
	return ndev->dcbnl_ops->getstate(ndev);
}

static int cxgbit_select_priority(int pri_mask)
{
	if (!pri_mask)
		return 0;

	return (ffs(pri_mask) - 1);
}

static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
{
	int ret;
	u8 caps;

	struct dcb_app iscsi_dcb_app = {
		.protocol = local_port
	};

	ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);

	if (ret)
		return 0;

	if (caps & DCB_CAP_DCBX_VER_IEEE) {
		iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;

		ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);

	} else if (caps & DCB_CAP_DCBX_VER_CEE) {
		iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;

		ret = dcb_getapp(ndev, &iscsi_dcb_app);
	}

	pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret));

	return cxgbit_select_priority(ret);
}
#endif

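/*
 * Resolve the L2 neighbour for the peer and fill in the per-connection
 * offload parameters (L2T entry, MTU, tx channel, SMT index and the
 * tx/rx queue indices). Loopback and regular ports take separate paths
 * because their queue selection differs.
 */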
static int
cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
		    u16 local_port, struct dst_entry *dst,
		    struct cxgbit_device *cdev)
{
	struct neighbour *n;
	int ret, step;
	struct net_device *ndev;
	u16 rxq_idx, port_id;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 priority = 0;
#endif

	n = dst_neigh_lookup(dst, peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	if (!(n->nud_state & NUD_VALID))
		neigh_event_send(n, NULL);

	ret = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		if (iptype == 4)
			ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip);
		else if (IS_ENABLED(CONFIG_IPV6))
			ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip);
		else
			ndev = NULL;

		if (!ndev) {
			ret = -ENODEV;
			goto out;
		}

		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
					 n, ndev, 0);
		if (!csk->l2t)
			goto out;
		csk->mtu = ndev->mtu;
		csk->tx_chan = cxgb4_port_chan(ndev);
		csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type,
						 cxgb4_port_viid(ndev));
		step = cdev->lldi.ntxq /
			cdev->lldi.nchan;
		csk->txq_idx = cxgb4_port_idx(ndev) * step;
		step = cdev->lldi.nrxq /
			cdev->lldi.nchan;
		csk->ctrlq_idx = cxgb4_port_idx(ndev);
		csk->rss_qid = cdev->lldi.rxq_ids[
				cxgb4_port_idx(ndev) * step];
		csk->port_id = cxgb4_port_idx(ndev);
		cxgbit_set_tcp_window(csk,
				      (struct port_info *)netdev_priv(ndev));
	} else {
		ndev = cxgbit_get_real_dev(n->dev);
		if (!ndev) {
			ret = -ENODEV;
			goto out;
		}

#ifdef CONFIG_CHELSIO_T4_DCB
		if (cxgbit_get_iscsi_dcb_state(ndev))
			priority = cxgbit_get_iscsi_dcb_priority(ndev,
								 local_port);

		csk->dcb_priority = priority;

		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
#else
		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
#endif
		if (!csk->l2t)
			goto out;
		port_id = cxgb4_port_idx(ndev);
		csk->mtu = dst_mtu(dst);
		csk->tx_chan = cxgb4_port_chan(ndev);
		csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type,
						 cxgb4_port_viid(ndev));
		step = cdev->lldi.ntxq /
			cdev->lldi.nports;
		csk->txq_idx = (port_id * step) +
				(cdev->selectq[port_id][0]++ % step);
		csk->ctrlq_idx = cxgb4_port_idx(ndev);
		step = cdev->lldi.nrxq /
			cdev->lldi.nports;
		rxq_idx = (port_id * step) +
				(cdev->selectq[port_id][1]++ % step);
		csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
		csk->port_id = port_id;
		cxgbit_set_tcp_window(csk,
				      (struct port_info *)netdev_priv(ndev));
	}
	ret = 0;
out:
	rcu_read_unlock();
	neigh_release(n);
	return ret;
}

int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	int ret = 0;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
		kfree_skb(skb);
		pr_err("%s - device not up - dropping\n", __func__);
		return -EIO;
	}

	ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
	if (ret < 0)
		kfree_skb(skb);
	return ret < 0 ? ret : 0;
}

static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
{
	u32 len = roundup(sizeof(struct cpl_tid_release), 16);
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	cxgb_mk_tid_release(skb, len, tid, 0);
	cxgbit_ofld_send(cdev, skb);
}

int
cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
		struct l2t_entry *l2e)
{
	int ret = 0;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
		kfree_skb(skb);
		pr_err("%s - device not up - dropping\n", __func__);
		return -EIO;
	}

	ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
	if (ret < 0)
		kfree_skb(skb);
	return ret < 0 ? ret : 0;
}

static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	if (csk->com.state != CSK_STATE_ESTABLISHED) {
		__kfree_skb(skb);
		return;
	}

	cxgbit_ofld_send(csk->com.cdev, skb);
}

/*
 * CPL connection rx data ack: host -> HW.
 * Send RX credits through an RX_DATA_ACK CPL message.
 * Returns 0 on success, -1 if the skb allocation fails.
 */
int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len = roundup(sizeof(struct cpl_rx_data_ack), 16);
	u32 credit_dack;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -1;

	credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
		      RX_CREDITS_V(csk->rx_credits);

	cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
			    credit_dack);

	csk->rx_credits = 0;

	spin_lock_bh(&csk->lock);
	if (csk->lock_owner) {
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits;
		__skb_queue_tail(&csk->backlogq, skb);
		spin_unlock_bh(&csk->lock);
		return 0;
	}

	cxgbit_send_rx_credits(csk, skb);
	spin_unlock_bh(&csk->lock);

	return 0;
}

#define FLOWC_WR_NPARAMS_MIN    9
#define FLOWC_WR_NPARAMS_MAX	11
static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len, flowclen;
	u8 i;

	flowclen = offsetof(struct fw_flowc_wr,
			    mnemval[FLOWC_WR_NPARAMS_MAX]);

	len = max_t(u32, sizeof(struct cpl_abort_req),
		    sizeof(struct cpl_abort_rpl));

	len = max(len, flowclen);
	len = roundup(len, 16);

	for (i = 0; i < 3; i++) {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (!skb)
			goto out;
		__skb_queue_tail(&csk->skbq, skb);
	}

	skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC);
	if (!skb)
		goto out;

	memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
	csk->lro_hskb = skb;

	return 0;
out:
	__skb_queue_purge(&csk->skbq);
	return -ENOMEM;
}

static void
cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
{
	struct sk_buff *skb;
	const struct tcphdr *tcph;
	struct cpl_t5_pass_accept_rpl *rpl5;
	struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
	unsigned int len = roundup(sizeof(*rpl5), 16);
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2, hlen;
	u32 wscale;
	u32 win;

	pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb) {
		cxgbit_put_csk(csk);
		return;
	}

	rpl5 = __skb_put_zero(skb, len);

	INIT_TP_WR(rpl5, csk->tid);
	OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						     csk->tid));
	cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
		      req->tcpopt.tstamp,
		      (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
	wscale = cxgb_compute_wscale(csk->rcv_win);
	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = csk->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;
	opt0 =  TCAM_BYPASS_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(mtu_idx) |
		L2T_IDX_V(csk->l2t->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		DSCP_V(csk->tos >> 2) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(win);

	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);

	if (!is_t5(lldi->adapter_type))
		opt2 |= RX_FC_DISABLE_F;

	if (req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN_F;
	if (req->tcpopt.sack)
		opt2 |= SACK_EN_F;
	if (wscale)
		opt2 |= WND_SCALE_EN_F;

	hlen = ntohl(req->hdr_len);

	if (is_t5(lldi->adapter_type))
		tcph = (struct tcphdr *)((u8 *)(req + 1) +
		       ETH_HDR_LEN_G(hlen) + IP_HDR_LEN_G(hlen));
	else
		tcph = (struct tcphdr *)((u8 *)(req + 1) +
		       T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));

	if (tcph->ece && tcph->cwr)
		opt2 |= CCTRL_ECN_V(1);

	opt2 |= RX_COALESCE_V(3);
	opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);

	opt2 |= T5_ISS_F;
	rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);

	opt2 |= T5_OPT_2_VALID_F;

	rpl5->opt0 = cpu_to_be64(opt0);
	rpl5->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
	t4_set_arp_err_handler(skb, NULL, cxgbit_arp_failure_discard);
	cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

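/*
 * Handle CPL_PASS_ACCEPT_REQ: a SYN arrived on one of our hardware
 * servers. Validate the parent listener, find a route back to the peer,
 * allocate and initialize the connection socket, and send the accept
 * reply. Failures fall through to a TID release.
 */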
static void
cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk = NULL;
	struct cxgbit_np *cnp;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	struct tid_info *t = cdev->lldi.tids;
	unsigned int tid = GET_TID(req);
	u16 peer_mss = ntohs(req->tcpopt.mss);
	unsigned short hdrs;

	struct dst_entry *dst;
	__u8 local_ip[16], peer_ip[16];
	__be16 local_port, peer_port;
	int ret;
	int iptype;

	pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
		 __func__, cdev, stid, tid);

	cnp = lookup_stid(t, stid);
	if (!cnp) {
		pr_err("%s connect request on invalid stid %d\n",
		       __func__, stid);
		goto rel_skb;
	}

	if (cnp->com.state != CSK_STATE_LISTEN) {
		pr_err("%s - listening parent not in CSK_STATE_LISTEN\n",
		       __func__);
		goto reject;
	}

	csk = lookup_tid(t, tid);
	if (csk) {
		pr_err("%s csk not null tid %u\n",
		       __func__, tid);
		goto rel_skb;
	}

	cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip,
			peer_ip, &local_port, &peer_port);

	/* Find output route */
	if (iptype == 4)  {
		pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 "
			 "lport %d rport %d peer_mss %d\n"
			 , __func__, cnp, tid,
			 local_ip, peer_ip, ntohs(local_port),
			 ntohs(peer_port), peer_mss);
		dst = cxgb_find_route(&cdev->lldi, cxgbit_get_real_dev,
				      *(__be32 *)local_ip,
				      *(__be32 *)peer_ip,
				      local_port, peer_port,
				      PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
	} else {
		pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 "
			 "lport %d rport %d peer_mss %d\n"
			 , __func__, cnp, tid,
			 local_ip, peer_ip, ntohs(local_port),
			 ntohs(peer_port), peer_mss);
		dst = cxgb_find_route6(&cdev->lldi, cxgbit_get_real_dev,
				       local_ip, peer_ip,
				       local_port, peer_port,
				       PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
				       ((struct sockaddr_in6 *)
					&cnp->com.local_addr)->sin6_scope_id);
	}
	if (!dst) {
		pr_err("%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}

	csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
	if (!csk) {
		dst_release(dst);
		goto rel_skb;
	}

	ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port),
				  dst, cdev);
	if (ret) {
		pr_err("%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		kfree(csk);
		goto reject;
	}

	kref_init(&csk->kref);
	init_completion(&csk->com.wr_wait.completion);

	INIT_LIST_HEAD(&csk->accept_node);

	hdrs = (iptype == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
		sizeof(struct tcphdr) + (req->tcpopt.tstamp ? 12 : 0);
	if (peer_mss && csk->mtu > (peer_mss + hdrs))
		csk->mtu = peer_mss + hdrs;

	csk->com.state = CSK_STATE_CONNECTING;
	csk->com.cdev = cdev;
	csk->cnp = cnp;
	csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
	csk->dst = dst;
	csk->tid = tid;
	csk->wr_cred = cdev->lldi.wr_cred -
			DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
	csk->wr_max_cred = csk->wr_cred;
	csk->wr_una_cred = 0;

	if (iptype == 4) {
		struct sockaddr_in *sin = (struct sockaddr_in *)
					  &csk->com.local_addr;
		sin->sin_family = AF_INET;
		sin->sin_port = local_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&csk->com.remote_addr;
		sin->sin_family = AF_INET;
		sin->sin_port = peer_port;
		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
	} else {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
					    &csk->com.local_addr;

		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = local_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
		cxgb4_clip_get(cdev->lldi.ports[0],
			       (const u32 *)&sin6->sin6_addr.s6_addr,
			       1);

		sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = peer_port;
		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
	}

	skb_queue_head_init(&csk->rxq);
	skb_queue_head_init(&csk->txq);
	skb_queue_head_init(&csk->ppodq);
	skb_queue_head_init(&csk->backlogq);
	skb_queue_head_init(&csk->skbq);
	cxgbit_sock_reset_wr_list(csk);
	spin_lock_init(&csk->lock);
	init_waitqueue_head(&csk->waitq);
	init_waitqueue_head(&csk->ack_waitq);
	csk->lock_owner = false;

	if (cxgbit_alloc_csk_skb(csk)) {
		dst_release(dst);
		kfree(csk);
		goto rel_skb;
	}

	cxgbit_get_cnp(cnp);
	cxgbit_get_cdev(cdev);

	spin_lock(&cdev->cskq.lock);
	list_add_tail(&csk->list, &cdev->cskq.list);
	spin_unlock(&cdev->cskq.lock);
	cxgb4_insert_tid(t, csk, tid, csk->com.local_addr.ss_family);
	cxgbit_pass_accept_rpl(csk, req);
	goto rel_skb;

reject:
	cxgbit_release_tid(cdev, tid);
rel_skb:
	__kfree_skb(skb);
}

static u32
cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp,
			   u32 *flowclenp)
{
	u32 nparams, flowclen16, flowclen;

	nparams = FLOWC_WR_NPARAMS_MIN;

	if (csk->snd_wscale)
		nparams++;

#ifdef CONFIG_CHELSIO_T4_DCB
	nparams++;
#endif
	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;
	/*
	 * Return the number of 16-byte credits used by the flowc request.
	 * Pass back the nparams and actual flowc length if requested.
	 */
	if (nparamsp)
		*nparamsp = nparams;
	if (flowclenp)
		*flowclenp = flowclen;
	return flowclen16;
}

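/*
 * Send the FW_FLOWC_WR that must precede TX data on an offloaded
 * connection; it tells the firmware the queue, window, MSS and
 * (optionally) DCB priority for the flow. Returns the number of
 * 16-byte credits consumed.
 */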
u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
{
	struct cxgbit_device *cdev = csk->com.cdev;
	struct fw_flowc_wr *flowc;
	u32 nparams, flowclen16, flowclen;
	struct sk_buff *skb;
	u8 index;

#ifdef CONFIG_CHELSIO_T4_DCB
	u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
#endif

	flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);

	skb = __skb_dequeue(&csk->skbq);
	flowc = __skb_put_zero(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
					  FW_WR_FLOWID_V(csk->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
					    (csk->com.cdev->lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(csk->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(csk->emss);

	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
	if (test_bit(CDEV_ISO_ENABLE, &cdev->flags))
		flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD);
	else
		flowc->mnemval[8].val = cpu_to_be32(16384);

	index = 9;

	if (csk->snd_wscale) {
		flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
		flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale);
		index++;
	}

#ifdef CONFIG_CHELSIO_T4_DCB
	flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
	if (vlan == VLAN_NONE) {
		pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid);
		flowc->mnemval[index].val = cpu_to_be32(0);
	} else
		flowc->mnemval[index].val = cpu_to_be32(
				(vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
#endif

	pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;"
		 " rcv_seq = %u; snd_win = %u; emss = %u\n",
		 __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt,
		 csk->rcv_nxt, csk->snd_win, csk->emss);
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
	cxgbit_ofld_send(csk->com.cdev, skb);
	return flowclen16;
}

int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;
	u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC;
	u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC;
	unsigned int len = roundup(sizeof(*req), 16);
	int ret;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/*  set up ulp submode */
	req = __skb_put_zero(skb, len);

	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);

	cxgbit_get_csk(csk);
	cxgbit_init_wr_wait(&csk->com.wr_wait);

	cxgbit_ofld_send(csk->com.cdev, skb);

	ret = cxgbit_wait_for_reply(csk->com.cdev,
				    &csk->com.wr_wait,
				    csk->tid, 5, __func__);
	if (ret)
		return -1;

	return 0;
}

int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;
	unsigned int len = roundup(sizeof(*req), 16);
	int ret;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = __skb_put_zero(skb, len);

	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);

	cxgbit_get_csk(csk);
	cxgbit_init_wr_wait(&csk->com.wr_wait);

	cxgbit_ofld_send(csk->com.cdev, skb);

	ret = cxgbit_wait_for_reply(csk->com.cdev,
				    &csk->com.wr_wait,
				    csk->tid, 5, __func__);
	if (ret)
		return -1;

	return 0;
}

static void
cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	struct tid_info *t = cdev->lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct cxgbit_np *cnp = lookup_stid(t, stid);

	pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
		 __func__, cnp, stid, rpl->status);

	if (!cnp) {
		pr_info("%s stid %d lookup failure\n", __func__, stid);
		goto rel_skb;
	}

	cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
	cxgbit_put_cnp(cnp);
rel_skb:
	__kfree_skb(skb);
}

static void
cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	struct tid_info *t = cdev->lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct cxgbit_np *cnp = lookup_stid(t, stid);

	pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
		 __func__, cnp, stid, rpl->status);

	if (!cnp) {
		pr_info("%s stid %d lookup failure\n", __func__, stid);
		goto rel_skb;
	}

	cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
	cxgbit_put_cnp(cnp);
rel_skb:
	__kfree_skb(skb);
}

static void
cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cpl_pass_establish *req = cplhdr(skb);
	struct tid_info *t = cdev->lldi.tids;
	unsigned int tid = GET_TID(req);
	struct cxgbit_sock *csk;
	struct cxgbit_np *cnp;
	u16 tcp_opt = be16_to_cpu(req->tcp_opt);
	u32 snd_isn = be32_to_cpu(req->snd_isn);
	u32 rcv_isn = be32_to_cpu(req->rcv_isn);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	cnp = csk->cnp;

	pr_debug("%s: csk %p; tid %u; cnp %p\n",
		 __func__, csk, tid, cnp);

	csk->write_seq = snd_isn;
	csk->snd_una = snd_isn;
	csk->snd_nxt = snd_isn;

	csk->rcv_nxt = rcv_isn;

	if (csk->rcv_win > (RCV_BUFSIZ_M << 10))
		csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10));

	csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
	cxgbit_set_emss(csk, tcp_opt);
	dst_confirm(csk->dst);
	csk->com.state = CSK_STATE_ESTABLISHED;
	spin_lock_bh(&cnp->np_accept_lock);
	list_add_tail(&csk->accept_node, &cnp->np_accept_list);
	spin_unlock_bh(&cnp->np_accept_lock);
	complete(&cnp->accept_comp);
rel_skb:
	__kfree_skb(skb);
}

static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	cxgbit_skcb_flags(skb) = 0;
	spin_lock_bh(&csk->rxq.lock);
	__skb_queue_tail(&csk->rxq, skb);
	spin_unlock_bh(&csk->rxq.lock);
	wake_up(&csk->waitq);
}

static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	pr_debug("%s: csk %p; tid %u; state %d\n",
		 __func__, csk, csk->tid, csk->com.state);

	switch (csk->com.state) {
	case CSK_STATE_ESTABLISHED:
		csk->com.state = CSK_STATE_CLOSING;
		cxgbit_queue_rx_skb(csk, skb);
		return;
	case CSK_STATE_CLOSING:
		/* simultaneous close */
		csk->com.state = CSK_STATE_MORIBUND;
		break;
	case CSK_STATE_MORIBUND:
		csk->com.state = CSK_STATE_DEAD;
		cxgbit_put_csk(csk);
		break;
	case CSK_STATE_ABORTING:
		break;
	default:
		pr_info("%s: cpl_peer_close in bad state %d\n",
			__func__, csk->com.state);
	}

	__kfree_skb(skb);
}

static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	pr_debug("%s: csk %p; tid %u; state %d\n",
		 __func__, csk, csk->tid, csk->com.state);

	switch (csk->com.state) {
	case CSK_STATE_CLOSING:
		csk->com.state = CSK_STATE_MORIBUND;
		break;
	case CSK_STATE_MORIBUND:
		csk->com.state = CSK_STATE_DEAD;
		cxgbit_put_csk(csk);
		break;
	case CSK_STATE_ABORTING:
	case CSK_STATE_DEAD:
		break;
	default:
		pr_info("%s: cpl_close_con_rpl in bad state %d\n",
			__func__, csk->com.state);
	}

	__kfree_skb(skb);
}

static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *hdr = cplhdr(skb);
	unsigned int tid = GET_TID(hdr);
	struct sk_buff *rpl_skb;
	bool release = false;
	bool wakeup_thread = false;
	u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);

	pr_debug("%s: csk %p; tid %u; state %d\n",
		 __func__, csk, tid, csk->com.state);

	if (cxgb_is_neg_adv(hdr->status)) {
		pr_err("%s: got neg advise %d on tid %u\n",
		       __func__, hdr->status, tid);
		goto rel_skb;
	}

	switch (csk->com.state) {
	case CSK_STATE_CONNECTING:
	case CSK_STATE_MORIBUND:
		csk->com.state = CSK_STATE_DEAD;
		release = true;
		break;
	case CSK_STATE_ESTABLISHED:
		csk->com.state = CSK_STATE_DEAD;
		wakeup_thread = true;
		break;
	case CSK_STATE_CLOSING:
		csk->com.state = CSK_STATE_DEAD;
		if (!csk->conn)
			release = true;
		break;
	case CSK_STATE_ABORTING:
		break;
	default:
		pr_info("%s: cpl_abort_req_rss in bad state %d\n",
			__func__, csk->com.state);
		csk->com.state = CSK_STATE_DEAD;
	}

	__skb_queue_purge(&csk->txq);

	if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
		cxgbit_send_tx_flowc_wr(csk);

	rpl_skb = __skb_dequeue(&csk->skbq);

	cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx);
	cxgbit_ofld_send(csk->com.cdev, rpl_skb);

	if (wakeup_thread) {
		cxgbit_queue_rx_skb(csk, skb);
		return;
	}

	if (release)
		cxgbit_put_csk(csk);
rel_skb:
	__kfree_skb(skb);
}

static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);

	pr_debug("%s: csk %p; tid %u; state %d\n",
		 __func__, csk, csk->tid, csk->com.state);

	switch (csk->com.state) {
	case CSK_STATE_ABORTING:
		csk->com.state = CSK_STATE_DEAD;
		if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
			cxgbit_wake_up(&csk->com.wr_wait, __func__,
				       rpl->status);
		cxgbit_put_csk(csk);
		break;
	default:
		pr_info("%s: cpl_abort_rpl_rss in state %d\n",
			__func__, csk->com.state);
	}

	__kfree_skb(skb);
}

static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
{
	const struct sk_buff *skb = csk->wr_pending_head;
	u32 credit = 0;

	if (unlikely(csk->wr_cred > csk->wr_max_cred)) {
		pr_err("csk 0x%p, tid %u, credit %u > %u\n",
		       csk, csk->tid, csk->wr_cred, csk->wr_max_cred);
		return true;
	}

	while (skb) {
		credit += (__force u32)skb->csum;
		skb = cxgbit_skcb_tx_wr_next(skb);
	}

	if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) {
		pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
		       csk, csk->tid, csk->wr_cred,
		       credit, csk->wr_max_cred);

		return true;
	}

	return false;
}

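/*
 * Handle CPL_FW4_ACK: the firmware returned TX credits. Credit them
 * back to the socket, free fully-acked WR skbs, advance snd_una and
 * restart transmission if data is pending.
 */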
static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb);
	u32 credits = rpl->credits;
	u32 snd_una = ntohl(rpl->snd_una);

	csk->wr_cred += credits;
	if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred))
		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

	while (credits) {
		struct sk_buff *p = cxgbit_sock_peek_wr(csk);
		u32 csum;

		if (unlikely(!p)) {
			pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
			       csk, csk->tid, credits,
			       csk->wr_cred, csk->wr_una_cred);
			break;
		}

		/* dereference the skb only after the NULL check above */
		csum = (__force u32)p->csum;

		if (unlikely(credits < csum)) {
			pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
				csk,  csk->tid,
				credits, csk->wr_cred, csk->wr_una_cred,
				csum);
			p->csum = (__force __wsum)(csum - credits);
			break;
		}

		cxgbit_sock_dequeue_wr(csk);
		credits -= csum;
		kfree_skb(p);
	}

	if (unlikely(cxgbit_credit_err(csk))) {
		cxgbit_queue_rx_skb(csk, skb);
		return;
	}

	if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
		if (unlikely(before(snd_una, csk->snd_una))) {
			pr_warn("csk 0x%p,%u, snd_una %u/%u.",
				csk, csk->tid, snd_una,
				csk->snd_una);
			goto rel_skb;
		}

		if (csk->snd_una != snd_una) {
			csk->snd_una = snd_una;
			dst_confirm(csk->dst);
			wake_up(&csk->ack_waitq);
		}
	}

	if (skb_queue_len(&csk->txq))
		cxgbit_push_tx_frames(csk);

rel_skb:
	__kfree_skb(skb);
}

static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk;
	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	} else {
		cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
	}

	cxgbit_put_csk(csk);
rel_skb:
	__kfree_skb(skb);
}

static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk;
	struct cpl_rx_data *cpl = cplhdr(skb);
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	cxgbit_queue_rx_skb(csk, skb);
	return;
rel_skb:
	__kfree_skb(skb);
}

static void
__cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	spin_lock(&csk->lock);
	if (csk->lock_owner) {
		__skb_queue_tail(&csk->backlogq, skb);
		spin_unlock(&csk->lock);
		return;
	}

	cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
	spin_unlock(&csk->lock);
}

static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	cxgbit_get_csk(csk);
	__cxgbit_process_rx_cpl(csk, skb);
	cxgbit_put_csk(csk);
}

static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk;
	struct cpl_tx_data *cpl = cplhdr(skb);
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct tid_info *t = lldi->tids;
	unsigned int tid = GET_TID(cpl);
	u8 opcode = cxgbit_skcb_rx_opcode(skb);
	bool ref = true;

	switch (opcode) {
	case CPL_FW4_ACK:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack;
		ref = false;
		break;
	case CPL_PEER_CLOSE:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close;
		break;
	case CPL_CLOSE_CON_RPL:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl;
		break;
	case CPL_ABORT_REQ_RSS:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss;
		break;
	case CPL_ABORT_RPL_RSS:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss;
		break;
	default:
		goto rel_skb;
	}

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	if (ref)
		cxgbit_process_rx_cpl(csk, skb);
	else
		__cxgbit_process_rx_cpl(csk, skb);

	return;
rel_skb:
	__kfree_skb(skb);
}

cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_PASS_OPEN_RPL]	= cxgbit_pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = cxgbit_close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ]	= cxgbit_pass_accept_req,
	[CPL_PASS_ESTABLISH]	= cxgbit_pass_establish,
	[CPL_SET_TCB_RPL]	= cxgbit_set_tcb_rpl,
	[CPL_RX_DATA]		= cxgbit_rx_data,
	[CPL_FW4_ACK]		= cxgbit_rx_cpl,
	[CPL_PEER_CLOSE]	= cxgbit_rx_cpl,
	[CPL_CLOSE_CON_RPL]	= cxgbit_rx_cpl,
	[CPL_ABORT_REQ_RSS]	= cxgbit_rx_cpl,
	[CPL_ABORT_RPL_RSS]	= cxgbit_rx_cpl,
};