xref: /openbmc/linux/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c (revision 588b48ca)
1 /*
2  * cxgb4i.c: Chelsio T4 iSCSI driver.
3  *
4  * Copyright (c) 2010 Chelsio Communications, Inc.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  *
10  * Written by:	Karen Xie (kxie@chelsio.com)
11  *		Rakesh Ranjan (rranjan@chelsio.com)
12  */
13 
14 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
15 
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <scsi/scsi_host.h>
19 #include <net/tcp.h>
20 #include <net/dst.h>
21 #include <linux/netdevice.h>
22 #include <net/addrconf.h>
23 
24 #include "t4_regs.h"
25 #include "t4_msg.h"
26 #include "cxgb4.h"
27 #include "cxgb4_uld.h"
28 #include "t4fw_api.h"
29 #include "l2t.h"
30 #include "cxgb4i.h"
31 
32 static unsigned int dbg_level;
33 
34 #include "../libcxgbi.h"
35 
36 #define	DRV_MODULE_NAME		"cxgb4i"
37 #define DRV_MODULE_DESC		"Chelsio T4/T5 iSCSI Driver"
38 #define	DRV_MODULE_VERSION	"0.9.4"
39 
40 static char version[] =
41 	DRV_MODULE_DESC " " DRV_MODULE_NAME
42 	" v" DRV_MODULE_VERSION "\n";
43 
44 MODULE_AUTHOR("Chelsio Communications, Inc.");
45 MODULE_DESCRIPTION(DRV_MODULE_DESC);
46 MODULE_VERSION(DRV_MODULE_VERSION);
47 MODULE_LICENSE("GPL");
48 
49 module_param(dbg_level, uint, 0644);
50 MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");
51 
52 static int cxgb4i_rcv_win = 256 * 1024;
53 module_param(cxgb4i_rcv_win, int, 0644);
54 MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");
55 
56 static int cxgb4i_snd_win = 128 * 1024;
57 module_param(cxgb4i_snd_win, int, 0644);
58 MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");
59 
60 static int cxgb4i_rx_credit_thres = 10 * 1024;
61 module_param(cxgb4i_rx_credit_thres, int, 0644);
62 MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
63 		"RX credits return threshold in bytes (default=10KB)");
64 
65 static unsigned int cxgb4i_max_connect = (8 * 1024);
66 module_param(cxgb4i_max_connect, uint, 0644);
67 MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");
68 
69 static unsigned short cxgb4i_sport_base = 20000;
70 module_param(cxgb4i_sport_base, ushort, 0644);
71 MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");
72 
73 typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);
74 
75 static void *t4_uld_add(const struct cxgb4_lld_info *);
76 static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
77 static int t4_uld_state_change(void *, enum cxgb4_state state);
78 
79 static const struct cxgb4_uld_info cxgb4i_uld_info = {
80 	.name = DRV_MODULE_NAME,
81 	.add = t4_uld_add,
82 	.rx_handler = t4_uld_rx_handler,
83 	.state_change = t4_uld_state_change,
84 };
85 
86 static struct scsi_host_template cxgb4i_host_template = {
87 	.module		= THIS_MODULE,
88 	.name		= DRV_MODULE_NAME,
89 	.proc_name	= DRV_MODULE_NAME,
90 	.can_queue	= CXGB4I_SCSI_HOST_QDEPTH,
91 	.queuecommand	= iscsi_queuecommand,
92 	.change_queue_depth = iscsi_change_queue_depth,
93 	.sg_tablesize	= SG_ALL,
94 	.max_sectors	= 0xFFFF,
95 	.cmd_per_lun	= ISCSI_DEF_CMD_PER_LUN,
96 	.eh_abort_handler = iscsi_eh_abort,
97 	.eh_device_reset_handler = iscsi_eh_device_reset,
98 	.eh_target_reset_handler = iscsi_eh_recover_target,
99 	.target_alloc	= iscsi_target_alloc,
100 	.use_clustering	= DISABLE_CLUSTERING,
101 	.this_id	= -1,
102 };
103 
104 static struct iscsi_transport cxgb4i_iscsi_transport = {
105 	.owner		= THIS_MODULE,
106 	.name		= DRV_MODULE_NAME,
107 	.caps		= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
108 				CAP_DATADGST | CAP_DIGEST_OFFLOAD |
109 				CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
110 	.attr_is_visible	= cxgbi_attr_is_visible,
111 	.get_host_param	= cxgbi_get_host_param,
112 	.set_host_param	= cxgbi_set_host_param,
113 	/* session management */
114 	.create_session	= cxgbi_create_session,
115 	.destroy_session	= cxgbi_destroy_session,
116 	.get_session_param = iscsi_session_get_param,
117 	/* connection management */
118 	.create_conn	= cxgbi_create_conn,
119 	.bind_conn		= cxgbi_bind_conn,
120 	.destroy_conn	= iscsi_tcp_conn_teardown,
121 	.start_conn		= iscsi_conn_start,
122 	.stop_conn		= iscsi_conn_stop,
123 	.get_conn_param	= iscsi_conn_get_param,
124 	.set_param	= cxgbi_set_conn_param,
125 	.get_stats	= cxgbi_get_conn_stats,
126 	/* pdu xmit req from user space */
127 	.send_pdu	= iscsi_conn_send_pdu,
128 	/* task */
129 	.init_task	= iscsi_tcp_task_init,
130 	.xmit_task	= iscsi_tcp_task_xmit,
131 	.cleanup_task	= cxgbi_cleanup_task,
132 	/* pdu */
133 	.alloc_pdu	= cxgbi_conn_alloc_pdu,
134 	.init_pdu	= cxgbi_conn_init_pdu,
135 	.xmit_pdu	= cxgbi_conn_xmit_pdu,
136 	.parse_pdu_itt	= cxgbi_parse_pdu_itt,
137 	/* TCP connect/disconnect */
138 	.get_ep_param	= cxgbi_get_ep_param,
139 	.ep_connect	= cxgbi_ep_connect,
140 	.ep_poll	= cxgbi_ep_poll,
141 	.ep_disconnect	= cxgbi_ep_disconnect,
142 	/* Error recovery timeout call */
143 	.session_recovery_timedout = iscsi_session_recovery_timedout,
144 };
145 
146 static struct scsi_transport_template *cxgb4i_stt;
147 
148 /*
149  * CPL (Chelsio Protocol Language) defines a message passing interface between
150  * the host driver and the Chelsio ASIC.
151  * The section below implements the CPLs related to iSCSI TCP connection
152  * open/close/abort and data send/receive.
153  */
154 
155 #define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
156 #define RCV_BUFSIZ_MASK		0x3FFU
157 #define MAX_IMM_TX_PKT_LEN	128
158 
159 static inline void set_queue(struct sk_buff *skb, unsigned int queue,
160 				const struct cxgbi_sock *csk)
161 {
162 	skb->queue_mapping = queue;
163 }
164 
165 static int push_tx_frames(struct cxgbi_sock *, int);
166 
167 /*
168  * is_ofld_imm - check whether a packet can be sent as immediate data
169  * @skb: the packet
170  *
171  * Returns true if a packet can be sent as an offload WR with immediate
172  * data.  We currently use the same limit as for Ethernet packets.
173  */
174 static inline int is_ofld_imm(const struct sk_buff *skb)
175 {
176 	return skb->len <= (MAX_IMM_TX_PKT_LEN -
177 			sizeof(struct fw_ofld_tx_data_wr));
178 }
179 
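/*
 * send_act_open_req - send an active open request for an IPv4 connection
 * Builds a CPL_ACT_OPEN_REQ (T4) or CPL_T5_ACT_OPEN_REQ (T5) carrying the
 * 4-tuple, the atid/rss-queue pairing and the opt0/opt2 connection options,
 * and sends it through the connection's L2T entry.
 */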
180 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
181 				struct l2t_entry *e)
182 {
183 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
184 	int t4 = is_t4(lldi->adapter_type);
185 	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
186 	unsigned long long opt0;
187 	unsigned int opt2;
188 	unsigned int qid_atid = ((unsigned int)csk->atid) |
189 				 (((unsigned int)csk->rss_qid) << 14);
190 
191 	opt0 = KEEP_ALIVE(1) |
192 		WND_SCALE(wscale) |
193 		MSS_IDX(csk->mss_idx) |
194 		L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) |
195 		TX_CHAN(csk->tx_chan) |
196 		SMAC_SEL(csk->smac_idx) |
197 		ULP_MODE(ULP_MODE_ISCSI) |
198 		RCV_BUFSIZ(cxgb4i_rcv_win >> 10);
199 	opt2 = RX_CHANNEL(0) |
200 		RSS_QUEUE_VALID |
201 		(1 << 20) |
202 		RSS_QUEUE(csk->rss_qid);
203 
204 	if (is_t4(lldi->adapter_type)) {
205 		struct cpl_act_open_req *req =
206 				(struct cpl_act_open_req *)skb->head;
207 
208 		INIT_TP_WR(req, 0);
209 		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
210 					qid_atid));
211 		req->local_port = csk->saddr.sin_port;
212 		req->peer_port = csk->daddr.sin_port;
213 		req->local_ip = csk->saddr.sin_addr.s_addr;
214 		req->peer_ip = csk->daddr.sin_addr.s_addr;
215 		req->opt0 = cpu_to_be64(opt0);
216 		req->params = cpu_to_be32(cxgb4_select_ntuple(
217 					csk->cdev->ports[csk->port_id],
218 					csk->l2t));
219 		opt2 |= 1 << 22;
220 		req->opt2 = cpu_to_be32(opt2);
221 
222 		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
223 			"csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
224 			csk, &req->local_ip, ntohs(req->local_port),
225 			&req->peer_ip, ntohs(req->peer_port),
226 			csk->atid, csk->rss_qid);
227 	} else {
228 		struct cpl_t5_act_open_req *req =
229 				(struct cpl_t5_act_open_req *)skb->head;
230 
231 		INIT_TP_WR(req, 0);
232 		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
233 					qid_atid));
234 		req->local_port = csk->saddr.sin_port;
235 		req->peer_port = csk->daddr.sin_port;
236 		req->local_ip = csk->saddr.sin_addr.s_addr;
237 		req->peer_ip = csk->daddr.sin_addr.s_addr;
238 		req->opt0 = cpu_to_be64(opt0);
239 		req->params = cpu_to_be64(V_FILTER_TUPLE(
240 				cxgb4_select_ntuple(
241 					csk->cdev->ports[csk->port_id],
242 					csk->l2t)));
243 		opt2 |= 1 << 31;
244 		req->opt2 = cpu_to_be32(opt2);
245 
246 		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
247 			"csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
248 			csk, &req->local_ip, ntohs(req->local_port),
249 			&req->peer_ip, ntohs(req->peer_port),
250 			csk->atid, csk->rss_qid);
251 	}
252 
253 	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
254 
255 	pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
256 		       (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk,
257 		       csk->state, csk->flags, csk->atid, csk->rss_qid);
258 
259 	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
260 }
261 
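/*
 * send_act_open_req6 - IPv6 counterpart of send_act_open_req()
 * Builds a CPL_ACT_OPEN_REQ6 (T4) or CPL_T5_ACT_OPEN_REQ6 (T5) message and
 * sends it through the connection's L2T entry.
 */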
262 static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
263 			       struct l2t_entry *e)
264 {
265 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
266 	int t4 = is_t4(lldi->adapter_type);
267 	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
268 	unsigned long long opt0;
269 	unsigned int opt2;
270 	unsigned int qid_atid = ((unsigned int)csk->atid) |
271 				 (((unsigned int)csk->rss_qid) << 14);
272 
273 	opt0 = KEEP_ALIVE(1) |
274 		WND_SCALE(wscale) |
275 		MSS_IDX(csk->mss_idx) |
276 		L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) |
277 		TX_CHAN(csk->tx_chan) |
278 		SMAC_SEL(csk->smac_idx) |
279 		ULP_MODE(ULP_MODE_ISCSI) |
280 		RCV_BUFSIZ(cxgb4i_rcv_win >> 10);
281 
282 	opt2 = RX_CHANNEL(0) |
283 		RSS_QUEUE_VALID |
284 		RX_FC_DISABLE |
285 		RSS_QUEUE(csk->rss_qid);
286 
287 	if (t4) {
288 		struct cpl_act_open_req6 *req =
289 			    (struct cpl_act_open_req6 *)skb->head;
290 
291 		INIT_TP_WR(req, 0);
292 		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
293 							    qid_atid));
294 		req->local_port = csk->saddr6.sin6_port;
295 		req->peer_port = csk->daddr6.sin6_port;
296 
297 		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
298 		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
299 								    8);
300 		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
301 		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
302 								    8);
303 
304 		req->opt0 = cpu_to_be64(opt0);
305 
306 		opt2 |= RX_FC_VALID;
307 		req->opt2 = cpu_to_be32(opt2);
308 
309 		req->params = cpu_to_be32(cxgb4_select_ntuple(
310 					  csk->cdev->ports[csk->port_id],
311 					  csk->l2t));
312 	} else {
313 		struct cpl_t5_act_open_req6 *req =
314 				(struct cpl_t5_act_open_req6 *)skb->head;
315 
316 		INIT_TP_WR(req, 0);
317 		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
318 							    qid_atid));
319 		req->local_port = csk->saddr6.sin6_port;
320 		req->peer_port = csk->daddr6.sin6_port;
321 		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
322 		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
323 									8);
324 		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
325 		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
326 									8);
327 		req->opt0 = cpu_to_be64(opt0);
328 
329 		opt2 |= T5_OPT_2_VALID;
330 		req->opt2 = cpu_to_be32(opt2);
331 
332 		req->params = cpu_to_be64(V_FILTER_TUPLE(cxgb4_select_ntuple(
333 					  csk->cdev->ports[csk->port_id],
334 					  csk->l2t)));
335 	}
336 
337 	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
338 
339 	pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
340 		t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid,
341 		&csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
342 		&csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
343 		csk->rss_qid);
344 
345 	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
346 }
347 
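/*
 * send_close_req - initiate a graceful close
 * Queues the pre-allocated CPL_CLOSE_CON_REQ on the connection's write queue
 * and pushes it out immediately if the connection is already established.
 */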
348 static void send_close_req(struct cxgbi_sock *csk)
349 {
350 	struct sk_buff *skb = csk->cpl_close;
351 	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
352 	unsigned int tid = csk->tid;
353 
354 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
355 		"csk 0x%p,%u,0x%lx, tid %u.\n",
356 		csk, csk->state, csk->flags, csk->tid);
357 	csk->cpl_close = NULL;
358 	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
359 	INIT_TP_WR(req, tid);
360 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
361 	req->rsvd = 0;
362 
363 	cxgbi_sock_skb_entail(csk, skb);
364 	if (csk->state >= CTP_ESTABLISHED)
365 		push_tx_frames(csk, 1);
366 }
367 
368 static void abort_arp_failure(void *handle, struct sk_buff *skb)
369 {
370 	struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
371 	struct cpl_abort_req *req;
372 
373 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
374 		"csk 0x%p,%u,0x%lx, tid %u, abort.\n",
375 		csk, csk->state, csk->flags, csk->tid);
376 	req = (struct cpl_abort_req *)skb->data;
377 	req->cmd = CPL_ABORT_NO_RST;
378 	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
379 }
380 
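/*
 * send_abort_req - abort the connection
 * Moves the socket to the ABORTING state, purges any pending tx data and
 * sends a CPL_ABORT_REQ with ABORT_SEND_RST so the hardware resets the peer.
 */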
381 static void send_abort_req(struct cxgbi_sock *csk)
382 {
383 	struct cpl_abort_req *req;
384 	struct sk_buff *skb = csk->cpl_abort_req;
385 
386 	if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
387 		return;
388 	cxgbi_sock_set_state(csk, CTP_ABORTING);
389 	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
390 	cxgbi_sock_purge_write_queue(csk);
391 
392 	csk->cpl_abort_req = NULL;
393 	req = (struct cpl_abort_req *)skb->head;
394 	set_queue(skb, CPL_PRIORITY_DATA, csk);
395 	req->cmd = CPL_ABORT_SEND_RST;
396 	t4_set_arp_err_handler(skb, csk, abort_arp_failure);
397 	INIT_TP_WR(req, csk->tid);
398 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
399 	req->rsvd0 = htonl(csk->snd_nxt);
400 	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);
401 
402 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
403 		"csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
404 		csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
405 		req->rsvd1);
406 
407 	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
408 }
409 
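/*
 * send_abort_rpl - acknowledge a peer abort
 * Sends a CPL_ABORT_RPL telling the hardware whether to send a RST to the
 * peer (rst_status).
 */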
410 static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
411 {
412 	struct sk_buff *skb = csk->cpl_abort_rpl;
413 	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;
414 
415 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
416 		"csk 0x%p,%u,0x%lx,%u, status %d.\n",
417 		csk, csk->state, csk->flags, csk->tid, rst_status);
418 
419 	csk->cpl_abort_rpl = NULL;
420 	set_queue(skb, CPL_PRIORITY_DATA, csk);
421 	INIT_TP_WR(rpl, csk->tid);
422 	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
423 	rpl->cmd = rst_status;
424 	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
425 }
426 
427 /*
428  * CPL connection rx data ack: host -> adapter.
429  * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
430  * credits sent.
431  */
432 static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
433 {
434 	struct sk_buff *skb;
435 	struct cpl_rx_data_ack *req;
436 
437 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
438 		"csk 0x%p,%u,0x%lx,%u, credit %u.\n",
439 		csk, csk->state, csk->flags, csk->tid, credits);
440 
441 	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
442 	if (!skb) {
443 		pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
444 		return 0;
445 	}
446 	req = (struct cpl_rx_data_ack *)skb->head;
447 
448 	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
449 	INIT_TP_WR(req, csk->tid);
450 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
451 				      csk->tid));
452 	req->credit_dack = cpu_to_be32(RX_CREDITS(credits) | RX_FORCE_ACK(1));
453 	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
454 	return credits;
455 }
456 
457 /*
458  * sgl_len - calculates the size of an SGL of the given capacity
459  * @n: the number of SGL entries
460  * Calculates the number of flits needed for a scatter/gather list that
461  * can hold the given number of entries.
462  */
463 static inline unsigned int sgl_len(unsigned int n)
464 {
465 	n--;
466 	return (3 * n) / 2 + (n & 1) + 2;
467 }
468 
469 /*
470  * calc_tx_flits_ofld - calculate # of flits for an offload packet
471  * @skb: the packet
472  *
473  * Returns the number of flits needed for the given offload packet.
474  * These packets are already fully constructed and no additional headers
475  * will be added.
476  */
477 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
478 {
479 	unsigned int flits, cnt;
480 
481 	if (is_ofld_imm(skb))
482 		return DIV_ROUND_UP(skb->len, 8);
483 	flits = skb_transport_offset(skb) / 8;
484 	cnt = skb_shinfo(skb)->nr_frags;
485 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
486 		cnt++;
487 	return flits + sgl_len(cnt);
488 }
489 
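/*
 * send_tx_flowc_wr - send a FW_FLOWC_WR for the connection
 * Programs the firmware with the per-flow parameters (pf/vf, channel, port,
 * ingress queue, send/receive sequence numbers, send buffer size and MSS)
 * before any payload is transmitted on the connection.
 */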
490 static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
491 {
492 	struct sk_buff *skb;
493 	struct fw_flowc_wr *flowc;
494 	int flowclen, i;
495 
496 	flowclen = 80;
497 	skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
498 	flowc = (struct fw_flowc_wr *)skb->head;
499 	flowc->op_to_nparams =
500 		htonl(FW_WR_OP(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS(8));
501 	flowc->flowid_len16 =
502 		htonl(FW_WR_LEN16(DIV_ROUND_UP(72, 16)) |
503 				FW_WR_FLOWID(csk->tid));
504 	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
505 	flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
506 	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
507 	flowc->mnemval[1].val = htonl(csk->tx_chan);
508 	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
509 	flowc->mnemval[2].val = htonl(csk->tx_chan);
510 	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
511 	flowc->mnemval[3].val = htonl(csk->rss_qid);
512 	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
513 	flowc->mnemval[4].val = htonl(csk->snd_nxt);
514 	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
515 	flowc->mnemval[5].val = htonl(csk->rcv_nxt);
516 	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
517 	flowc->mnemval[6].val = htonl(cxgb4i_snd_win);
518 	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
519 	flowc->mnemval[7].val = htonl(csk->advmss);
520 	flowc->mnemval[8].mnemonic = 0;
521 	flowc->mnemval[8].val = 0;
522 	for (i = 0; i < 9; i++) {
523 		flowc->mnemval[i].r4[0] = 0;
524 		flowc->mnemval[i].r4[1] = 0;
525 		flowc->mnemval[i].r4[2] = 0;
526 	}
527 	set_queue(skb, CPL_PRIORITY_DATA, csk);
528 
529 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
530 		"csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
531 		csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
532 		csk->snd_nxt, csk->rcv_nxt, cxgb4i_snd_win,
533 		csk->advmss);
534 
535 	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
536 }
537 
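/*
 * make_tx_data_wr - prepend a FW_OFLD_TX_DATA_WR header to an outgoing skb
 * Marks whether the payload is carried as immediate data and which iSCSI ULP
 * submode (header/data digest insertion) the hardware should apply.
 */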
538 static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
539 				   int dlen, int len, u32 credits, int compl)
540 {
541 	struct fw_ofld_tx_data_wr *req;
542 	unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
543 	unsigned int wr_ulp_mode = 0;
544 
545 	req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));
546 
547 	if (is_ofld_imm(skb)) {
548 		req->op_to_immdlen = htonl(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
549 					FW_WR_COMPL(1) |
550 					FW_WR_IMMDLEN(dlen));
551 		req->flowid_len16 = htonl(FW_WR_FLOWID(csk->tid) |
552 						FW_WR_LEN16(credits));
553 	} else {
554 		req->op_to_immdlen =
555 			cpu_to_be32(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
556 					FW_WR_COMPL(1) |
557 					FW_WR_IMMDLEN(0));
558 		req->flowid_len16 =
559 			cpu_to_be32(FW_WR_FLOWID(csk->tid) |
560 					FW_WR_LEN16(credits));
561 	}
562 	if (submode)
563 		wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE(ULP2_MODE_ISCSI) |
564 				FW_OFLD_TX_DATA_WR_ULPSUBMODE(submode);
565 	req->tunnel_to_proxy = htonl(wr_ulp_mode |
566 		 FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 0 : 1));
567 	req->plen = htonl(len);
568 	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
569 		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
570 }
571 
572 static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
573 {
574 	kfree_skb(skb);
575 }
576 
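/*
 * push_tx_frames - drain the connection's write queue
 * Sends as many queued skbs as the available work-request credits allow,
 * sending a FW_FLOWC_WR first for the initial payload of a connection.
 * Returns the total truesize of the skbs handed to the hardware.
 */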
577 static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
578 {
579 	int total_size = 0;
580 	struct sk_buff *skb;
581 
582 	if (unlikely(csk->state < CTP_ESTABLISHED ||
583 		csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
584 		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
585 			 1 << CXGBI_DBG_PDU_TX,
586 			"csk 0x%p,%u,0x%lx,%u, in closing state.\n",
587 			csk, csk->state, csk->flags, csk->tid);
588 		return 0;
589 	}
590 
591 	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
592 		int dlen = skb->len;
593 		int len = skb->len;
594 		unsigned int credits_needed;
595 
596 		skb_reset_transport_header(skb);
597 		if (is_ofld_imm(skb))
598 			credits_needed = DIV_ROUND_UP(dlen +
599 					sizeof(struct fw_ofld_tx_data_wr), 16);
600 		else
601 			credits_needed = DIV_ROUND_UP(8*calc_tx_flits_ofld(skb)
602 					+ sizeof(struct fw_ofld_tx_data_wr),
603 					16);
604 
605 		if (csk->wr_cred < credits_needed) {
606 			log_debug(1 << CXGBI_DBG_PDU_TX,
607 				"csk 0x%p, skb %u/%u, wr %d < %u.\n",
608 				csk, skb->len, skb->data_len,
609 				credits_needed, csk->wr_cred);
610 			break;
611 		}
612 		__skb_unlink(skb, &csk->write_queue);
613 		set_queue(skb, CPL_PRIORITY_DATA, csk);
614 		skb->csum = credits_needed;
615 		csk->wr_cred -= credits_needed;
616 		csk->wr_una_cred += credits_needed;
617 		cxgbi_sock_enqueue_wr(csk, skb);
618 
619 		log_debug(1 << CXGBI_DBG_PDU_TX,
620 			"csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
621 			csk, skb->len, skb->data_len, credits_needed,
622 			csk->wr_cred, csk->wr_una_cred);
623 
624 		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
625 			if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
626 				send_tx_flowc_wr(csk);
627 				skb->csum += 5;
628 				csk->wr_cred -= 5;
629 				csk->wr_una_cred += 5;
630 			}
631 			len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
632 			make_tx_data_wr(csk, skb, dlen, len, credits_needed,
633 					req_completion);
634 			csk->snd_nxt += len;
635 			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
636 		}
637 		total_size += skb->truesize;
638 		t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);
639 
640 		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
641 			"csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
642 			csk, csk->state, csk->flags, csk->tid, skb, len);
643 
644 		cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
645 	}
646 	return total_size;
647 }
648 
649 static inline void free_atid(struct cxgbi_sock *csk)
650 {
651 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
652 
653 	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
654 		cxgb4_free_atid(lldi->tids, csk->atid);
655 		cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
656 		cxgbi_sock_put(csk);
657 	}
658 }
659 
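/*
 * do_act_establish - handle CPL_ACT_ESTABLISH (active open completed)
 * Moves the connection from its atid to the assigned hardware tid, records
 * the initial receive sequence number and negotiated MSS, and pushes out any
 * tx data queued while the connection was being set up.
 */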
660 static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
661 {
662 	struct cxgbi_sock *csk;
663 	struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
664 	unsigned short tcp_opt = ntohs(req->tcp_opt);
665 	unsigned int tid = GET_TID(req);
666 	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
667 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
668 	struct tid_info *t = lldi->tids;
669 	u32 rcv_isn = be32_to_cpu(req->rcv_isn);
670 
671 	csk = lookup_atid(t, atid);
672 	if (unlikely(!csk)) {
673 		pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
674 		goto rel_skb;
675 	}
676 
677 	if (csk->atid != atid) {
678 		pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
679 			atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
680 		goto rel_skb;
681 	}
682 
683 	pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
684 		       (&csk->saddr), (&csk->daddr),
685 		       atid, tid, csk, csk->state, csk->flags, rcv_isn);
686 
687 	module_put(THIS_MODULE);
688 
689 	cxgbi_sock_get(csk);
690 	csk->tid = tid;
691 	cxgb4_insert_tid(lldi->tids, csk, tid);
692 	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
693 
694 	free_atid(csk);
695 
696 	spin_lock_bh(&csk->lock);
697 	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
698 		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
699 			csk, csk->state, csk->flags, csk->tid);
700 
701 	if (csk->retry_timer.function) {
702 		del_timer(&csk->retry_timer);
703 		csk->retry_timer.function = NULL;
704 	}
705 
706 	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
707 	/*
708 	 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
709 	 * pass through opt0.
710 	 */
711 	if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
712 		csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);
713 
714 	csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40;
715 	if (GET_TCPOPT_TSTAMP(tcp_opt))
716 		csk->advmss -= 12;
717 	if (csk->advmss < 128)
718 		csk->advmss = 128;
719 
720 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
721 		"csk 0x%p, mss_idx %u, advmss %u.\n",
722 			csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss);
723 
724 	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
725 
726 	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
727 		send_abort_req(csk);
728 	else {
729 		if (skb_queue_len(&csk->write_queue))
730 			push_tx_frames(csk, 0);
731 		cxgbi_conn_tx_open(csk);
732 	}
733 	spin_unlock_bh(&csk->lock);
734 
735 rel_skb:
736 	__kfree_skb(skb);
737 }
738 
739 static int act_open_rpl_status_to_errno(int status)
740 {
741 	switch (status) {
742 	case CPL_ERR_CONN_RESET:
743 		return -ECONNREFUSED;
744 	case CPL_ERR_ARP_MISS:
745 		return -EHOSTUNREACH;
746 	case CPL_ERR_CONN_TIMEDOUT:
747 		return -ETIMEDOUT;
748 	case CPL_ERR_TCAM_FULL:
749 		return -ENOMEM;
750 	case CPL_ERR_CONN_EXIST:
751 		return -EADDRINUSE;
752 	default:
753 		return -EIO;
754 	}
755 }
756 
757 static void csk_act_open_retry_timer(unsigned long data)
758 {
759 	struct sk_buff *skb;
760 	struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
761 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
762 	void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
763 				   struct l2t_entry *);
764 	int t4 = is_t4(lldi->adapter_type), size, size6;
765 
766 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
767 		"csk 0x%p,%u,0x%lx,%u.\n",
768 		csk, csk->state, csk->flags, csk->tid);
769 
770 	cxgbi_sock_get(csk);
771 	spin_lock_bh(&csk->lock);
772 
773 	if (t4) {
774 		size = sizeof(struct cpl_act_open_req);
775 		size6 = sizeof(struct cpl_act_open_req6);
776 	} else {
777 		size = sizeof(struct cpl_t5_act_open_req);
778 		size6 = sizeof(struct cpl_t5_act_open_req6);
779 	}
780 
781 	if (csk->csk_family == AF_INET) {
782 		send_act_open_func = send_act_open_req;
783 		skb = alloc_wr(size, 0, GFP_ATOMIC);
784 	} else {
785 		send_act_open_func = send_act_open_req6;
786 		skb = alloc_wr(size6, 0, GFP_ATOMIC);
787 	}
788 
789 	if (!skb)
790 		cxgbi_sock_fail_act_open(csk, -ENOMEM);
791 	else {
792 		skb->sk = (struct sock *)csk;
793 		t4_set_arp_err_handler(skb, csk,
794 				       cxgbi_sock_act_open_req_arp_failure);
795 		send_act_open_func(csk, skb, csk->l2t);
796 	}
797 
798 	spin_unlock_bh(&csk->lock);
799 	cxgbi_sock_put(csk);
800 
801 }
802 
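/*
 * do_act_open_rpl - handle CPL_ACT_OPEN_RPL (active open failed)
 * Negative advice is ignored, CPL_ERR_CONN_EXIST arms a retry timer, and any
 * other status is mapped to an errno and fails the connection setup.
 */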
803 static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
804 {
805 	struct cxgbi_sock *csk;
806 	struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
807 	unsigned int tid = GET_TID(rpl);
808 	unsigned int atid =
809 		GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status)));
810 	unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
811 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
812 	struct tid_info *t = lldi->tids;
813 
814 	csk = lookup_atid(t, atid);
815 	if (unlikely(!csk)) {
816 		pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
817 		goto rel_skb;
818 	}
819 
820 	pr_info_ipaddr("tid %u/%u, status %u.\n"
821 		       "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
822 		       atid, tid, status, csk, csk->state, csk->flags);
823 
824 	if (status == CPL_ERR_RTX_NEG_ADVICE)
825 		goto rel_skb;
826 
827 	if (status && status != CPL_ERR_TCAM_FULL &&
828 	    status != CPL_ERR_CONN_EXIST &&
829 	    status != CPL_ERR_ARP_MISS)
830 		cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl));
831 
832 	cxgbi_sock_get(csk);
833 	spin_lock_bh(&csk->lock);
834 
835 	if (status == CPL_ERR_CONN_EXIST &&
836 	    csk->retry_timer.function != csk_act_open_retry_timer) {
837 		csk->retry_timer.function = csk_act_open_retry_timer;
838 		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
839 	} else
840 		cxgbi_sock_fail_act_open(csk,
841 					act_open_rpl_status_to_errno(status));
842 
843 	spin_unlock_bh(&csk->lock);
844 	cxgbi_sock_put(csk);
845 rel_skb:
846 	__kfree_skb(skb);
847 }
848 
849 static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
850 {
851 	struct cxgbi_sock *csk;
852 	struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
853 	unsigned int tid = GET_TID(req);
854 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
855 	struct tid_info *t = lldi->tids;
856 
857 	csk = lookup_tid(t, tid);
858 	if (unlikely(!csk)) {
859 		pr_err("can't find connection for tid %u.\n", tid);
860 		goto rel_skb;
861 	}
862 	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
863 		       (&csk->saddr), (&csk->daddr),
864 		       csk, csk->state, csk->flags, csk->tid);
865 	cxgbi_sock_rcv_peer_close(csk);
866 rel_skb:
867 	__kfree_skb(skb);
868 }
869 
870 static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
871 {
872 	struct cxgbi_sock *csk;
873 	struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
874 	unsigned int tid = GET_TID(rpl);
875 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
876 	struct tid_info *t = lldi->tids;
877 
878 	csk = lookup_tid(t, tid);
879 	if (unlikely(!csk)) {
880 		pr_err("can't find connection for tid %u.\n", tid);
881 		goto rel_skb;
882 	}
883 	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
884 		       (&csk->saddr), (&csk->daddr),
885 		       csk, csk->state, csk->flags, csk->tid);
886 	cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
887 rel_skb:
888 	__kfree_skb(skb);
889 }
890 
891 static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
892 								int *need_rst)
893 {
894 	switch (abort_reason) {
895 	case CPL_ERR_BAD_SYN: /* fall through */
896 	case CPL_ERR_CONN_RESET:
897 		return csk->state > CTP_ESTABLISHED ?
898 			-EPIPE : -ECONNRESET;
899 	case CPL_ERR_XMIT_TIMEDOUT:
900 	case CPL_ERR_PERSIST_TIMEDOUT:
901 	case CPL_ERR_FINWAIT2_TIMEDOUT:
902 	case CPL_ERR_KEEPALIVE_TIMEDOUT:
903 		return -ETIMEDOUT;
904 	default:
905 		return -EIO;
906 	}
907 }
908 
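/*
 * do_abort_req_rss - handle a peer-initiated CPL_ABORT_REQ_RSS
 * Negative advice is ignored; otherwise an abort reply is sent and, if no
 * abort of our own is pending, the connection is closed with the mapped
 * errno.
 */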
909 static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
910 {
911 	struct cxgbi_sock *csk;
912 	struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
913 	unsigned int tid = GET_TID(req);
914 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
915 	struct tid_info *t = lldi->tids;
916 	int rst_status = CPL_ABORT_NO_RST;
917 
918 	csk = lookup_tid(t, tid);
919 	if (unlikely(!csk)) {
920 		pr_err("can't find connection for tid %u.\n", tid);
921 		goto rel_skb;
922 	}
923 
924 	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
925 		       (&csk->saddr), (&csk->daddr),
926 		       csk, csk->state, csk->flags, csk->tid, req->status);
927 
928 	if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
929 	    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
930 		goto rel_skb;
931 
932 	cxgbi_sock_get(csk);
933 	spin_lock_bh(&csk->lock);
934 
935 	if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
936 		cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
937 		cxgbi_sock_set_state(csk, CTP_ABORTING);
938 		goto done;
939 	}
940 
941 	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
942 	send_abort_rpl(csk, rst_status);
943 
944 	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
945 		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
946 		cxgbi_sock_closed(csk);
947 	}
948 done:
949 	spin_unlock_bh(&csk->lock);
950 	cxgbi_sock_put(csk);
951 rel_skb:
952 	__kfree_skb(skb);
953 }
954 
955 static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
956 {
957 	struct cxgbi_sock *csk;
958 	struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
959 	unsigned int tid = GET_TID(rpl);
960 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
961 	struct tid_info *t = lldi->tids;
962 
963 	csk = lookup_tid(t, tid);
964 	if (!csk)
965 		goto rel_skb;
966 
967 	if (csk)
968 		pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
969 			       (&csk->saddr), (&csk->daddr), csk,
970 			       csk->state, csk->flags, csk->tid, rpl->status);
971 
972 	if (rpl->status == CPL_ERR_ABORT_FAILED)
973 		goto rel_skb;
974 
975 	cxgbi_sock_rcv_abort_rpl(csk);
976 rel_skb:
977 	__kfree_skb(skb);
978 }
979 
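/*
 * do_rx_iscsi_hdr - handle CPL_ISCSI_HDR/CPL_ISCSI_DATA
 * The first message of a PDU carries the BHS and becomes the "lhdr" skb;
 * follow-on messages carry non-DDP'ed payload. The skbs are queued on the
 * receive queue until the RX_DATA_DDP completion arrives.
 */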
980 static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
981 {
982 	struct cxgbi_sock *csk;
983 	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
984 	unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
985 	unsigned int tid = GET_TID(cpl);
986 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
987 	struct tid_info *t = lldi->tids;
988 
989 	csk = lookup_tid(t, tid);
990 	if (unlikely(!csk)) {
991 		pr_err("can't find conn. for tid %u.\n", tid);
992 		goto rel_skb;
993 	}
994 
995 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
996 		"csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
997 		csk, csk->state, csk->flags, csk->tid, skb, skb->len,
998 		pdu_len_ddp);
999 
1000 	spin_lock_bh(&csk->lock);
1001 
1002 	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1003 		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1004 			"csk 0x%p,%u,0x%lx,%u, bad state.\n",
1005 			csk, csk->state, csk->flags, csk->tid);
1006 		if (csk->state != CTP_ABORTING)
1007 			goto abort_conn;
1008 		else
1009 			goto discard;
1010 	}
1011 
1012 	cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
1013 	cxgbi_skcb_flags(skb) = 0;
1014 
1015 	skb_reset_transport_header(skb);
1016 	__skb_pull(skb, sizeof(*cpl));
1017 	__pskb_trim(skb, ntohs(cpl->len));
1018 
1019 	if (!csk->skb_ulp_lhdr) {
1020 		unsigned char *bhs;
1021 		unsigned int hlen, dlen, plen;
1022 
1023 		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1024 			"csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
1025 			csk, csk->state, csk->flags, csk->tid, skb);
1026 		csk->skb_ulp_lhdr = skb;
1027 		cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
1028 
1029 		if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
1030 			pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
1031 				csk->tid, cxgbi_skcb_tcp_seq(skb),
1032 				csk->rcv_nxt);
1033 			goto abort_conn;
1034 		}
1035 
1036 		bhs = skb->data;
1037 		hlen = ntohs(cpl->len);
1038 		dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;
1039 
1040 		plen = ISCSI_PDU_LEN(pdu_len_ddp);
1041 		if (is_t4(lldi->adapter_type))
1042 			plen -= 40;
1043 
1044 		if ((hlen + dlen) != plen) {
1045 			pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
1046 				"mismatch %u != %u + %u, seq 0x%x.\n",
1047 				csk->tid, plen, hlen, dlen,
1048 				cxgbi_skcb_tcp_seq(skb));
1049 			goto abort_conn;
1050 		}
1051 
1052 		cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
1053 		if (dlen)
1054 			cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
1055 		csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);
1056 
1057 		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1058 			"csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
1059 			csk, skb, *bhs, hlen, dlen,
1060 			ntohl(*((unsigned int *)(bhs + 16))),
1061 			ntohl(*((unsigned int *)(bhs + 24))));
1062 
1063 	} else {
1064 		struct sk_buff *lskb = csk->skb_ulp_lhdr;
1065 
1066 		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
1067 		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1068 			"csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
1069 			csk, csk->state, csk->flags, skb, lskb);
1070 	}
1071 
1072 	__skb_queue_tail(&csk->receive_queue, skb);
1073 	spin_unlock_bh(&csk->lock);
1074 	return;
1075 
1076 abort_conn:
1077 	send_abort_req(csk);
1078 discard:
1079 	spin_unlock_bh(&csk->lock);
1080 rel_skb:
1081 	__kfree_skb(skb);
1082 }
1083 
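/*
 * do_rx_data_ddp - handle CPL_RX_DATA_DDP/CPL_RX_ISCSI_DDP (PDU completion)
 * Records the data digest and any HCRC/DCRC/padding errors on the header
 * skb, marks the PDU complete and notifies libcxgbi that PDUs are ready.
 */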
1084 static void do_rx_data_ddp(struct cxgbi_device *cdev,
1085 				  struct sk_buff *skb)
1086 {
1087 	struct cxgbi_sock *csk;
1088 	struct sk_buff *lskb;
1089 	struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
1090 	unsigned int tid = GET_TID(rpl);
1091 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1092 	struct tid_info *t = lldi->tids;
1093 	unsigned int status = ntohl(rpl->ddpvld);
1094 
1095 	csk = lookup_tid(t, tid);
1096 	if (unlikely(!csk)) {
1097 		pr_err("can't find connection for tid %u.\n", tid);
1098 		goto rel_skb;
1099 	}
1100 
1101 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1102 		"csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
1103 		csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr);
1104 
1105 	spin_lock_bh(&csk->lock);
1106 
1107 	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1108 		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1109 			"csk 0x%p,%u,0x%lx,%u, bad state.\n",
1110 			csk, csk->state, csk->flags, csk->tid);
1111 		if (csk->state != CTP_ABORTING)
1112 			goto abort_conn;
1113 		else
1114 			goto discard;
1115 	}
1116 
1117 	if (!csk->skb_ulp_lhdr) {
1118 		pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
1119 		goto abort_conn;
1120 	}
1121 
1122 	lskb = csk->skb_ulp_lhdr;
1123 	csk->skb_ulp_lhdr = NULL;
1124 
1125 	cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);
1126 
1127 	if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
1128 		pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
1129 			csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));
1130 
1131 	if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
1132 		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
1133 			csk, lskb, status, cxgbi_skcb_flags(lskb));
1134 		cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
1135 	}
1136 	if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
1137 		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
1138 			csk, lskb, status, cxgbi_skcb_flags(lskb));
1139 		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
1140 	}
1141 	if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
1142 		log_debug(1 << CXGBI_DBG_PDU_RX,
1143 			"csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
1144 			csk, lskb, status);
1145 		cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
1146 	}
1147 	if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
1148 		!cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
1149 		log_debug(1 << CXGBI_DBG_PDU_RX,
1150 			"csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
1151 			csk, lskb, status);
1152 		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
1153 	}
1154 	log_debug(1 << CXGBI_DBG_PDU_RX,
1155 		"csk 0x%p, lskb 0x%p, f 0x%lx.\n",
1156 		csk, lskb, cxgbi_skcb_flags(lskb));
1157 
1158 	cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
1159 	cxgbi_conn_pdu_ready(csk);
1160 	spin_unlock_bh(&csk->lock);
1161 	goto rel_skb;
1162 
1163 abort_conn:
1164 	send_abort_req(csk);
1165 discard:
1166 	spin_unlock_bh(&csk->lock);
1167 rel_skb:
1168 	__kfree_skb(skb);
1169 }
1170 
1171 static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
1172 {
1173 	struct cxgbi_sock *csk;
1174 	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
1175 	unsigned int tid = GET_TID(rpl);
1176 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1177 	struct tid_info *t = lldi->tids;
1178 
1179 	csk = lookup_tid(t, tid);
1180 	if (unlikely(!csk))
1181 		pr_err("can't find connection for tid %u.\n", tid);
1182 	else {
1183 		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1184 			"csk 0x%p,%u,0x%lx,%u.\n",
1185 			csk, csk->state, csk->flags, csk->tid);
1186 		cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
1187 					rpl->seq_vld);
1188 	}
1189 	__kfree_skb(skb);
1190 }
1191 
1192 static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
1193 {
1194 	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
1195 	unsigned int tid = GET_TID(rpl);
1196 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1197 	struct tid_info *t = lldi->tids;
1198 	struct cxgbi_sock *csk;
1199 
1200 	csk = lookup_tid(t, tid);
1201 	if (!csk) {
1202 		pr_err("can't find conn. for tid %u.\n", tid);
		__kfree_skb(skb);
		return;
	}
1203 
1204 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1205 		"csk 0x%p,%u,%lx,%u, status 0x%x.\n",
1206 		csk, csk->state, csk->flags, csk->tid, rpl->status);
1207 
1208 	if (rpl->status != CPL_ERR_NONE)
1209 		pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
1210 			csk, tid, rpl->status);
1211 
1212 	__kfree_skb(skb);
1213 }
1214 
1215 static int alloc_cpls(struct cxgbi_sock *csk)
1216 {
1217 	csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
1218 					0, GFP_KERNEL);
1219 	if (!csk->cpl_close)
1220 		return -ENOMEM;
1221 
1222 	csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
1223 					0, GFP_KERNEL);
1224 	if (!csk->cpl_abort_req)
1225 		goto free_cpls;
1226 
1227 	csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
1228 					0, GFP_KERNEL);
1229 	if (!csk->cpl_abort_rpl)
1230 		goto free_cpls;
1231 	return 0;
1232 
1233 free_cpls:
1234 	cxgbi_sock_free_cpl_skbs(csk);
1235 	return -ENOMEM;
1236 }
1237 
1238 static inline void l2t_put(struct cxgbi_sock *csk)
1239 {
1240 	if (csk->l2t) {
1241 		cxgb4_l2t_release(csk->l2t);
1242 		csk->l2t = NULL;
1243 		cxgbi_sock_put(csk);
1244 	}
1245 }
1246 
1247 static void release_offload_resources(struct cxgbi_sock *csk)
1248 {
1249 	struct cxgb4_lld_info *lldi;
1250 
1251 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1252 		"csk 0x%p,%u,0x%lx,%u.\n",
1253 		csk, csk->state, csk->flags, csk->tid);
1254 
1255 	cxgbi_sock_free_cpl_skbs(csk);
1256 	if (csk->wr_cred != csk->wr_max_cred) {
1257 		cxgbi_sock_purge_wr_queue(csk);
1258 		cxgbi_sock_reset_wr_list(csk);
1259 	}
1260 
1261 	l2t_put(csk);
1262 	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
1263 		free_atid(csk);
1264 	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
1265 		lldi = cxgbi_cdev_priv(csk->cdev);
1266 		cxgb4_remove_tid(lldi->tids, 0, csk->tid);
1267 		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
1268 		cxgbi_sock_put(csk);
1269 	}
1270 	csk->dst = NULL;
1271 	csk->cdev = NULL;
1272 }
1273 
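/*
 * init_act_open - start an active open on the connection
 * Allocates the atid, L2T entry and request skb, derives the tx channel,
 * queue ids and work-request credits from the egress port, and sends the
 * initial active open request.
 */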
1274 static int init_act_open(struct cxgbi_sock *csk)
1275 {
1276 	struct cxgbi_device *cdev = csk->cdev;
1277 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1278 	struct net_device *ndev = cdev->ports[csk->port_id];
1279 	struct sk_buff *skb = NULL;
1280 	struct neighbour *n = NULL;
1281 	void *daddr;
1282 	unsigned int step;
1283 	unsigned int size, size6;
1284 	int t4 = is_t4(lldi->adapter_type);
1285 
1286 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1287 		"csk 0x%p,%u,0x%lx,%u.\n",
1288 		csk, csk->state, csk->flags, csk->tid);
1289 
1290 	if (csk->csk_family == AF_INET)
1291 		daddr = &csk->daddr.sin_addr.s_addr;
1292 #if IS_ENABLED(CONFIG_IPV6)
1293 	else if (csk->csk_family == AF_INET6)
1294 		daddr = &csk->daddr6.sin6_addr;
1295 #endif
1296 	else {
1297 		pr_err("address family 0x%x not supported\n", csk->csk_family);
1298 		goto rel_resource;
1299 	}
1300 
1301 	n = dst_neigh_lookup(csk->dst, daddr);
1302 
1303 	if (!n) {
1304 		pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
1305 		goto rel_resource;
1306 	}
1307 
1308 	csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
1309 	if (csk->atid < 0) {
1310 		pr_err("%s, NO atid available.\n", ndev->name);
1311 		goto rel_resource;
1312 	}
1313 	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
1314 	cxgbi_sock_get(csk);
1315 
1321 	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
1322 	if (!csk->l2t) {
1323 		pr_err("%s, cannot alloc l2t.\n", ndev->name);
1324 		goto rel_resource;
1325 	}
1326 	cxgbi_sock_get(csk);
1327 
1328 	if (t4) {
1329 		size = sizeof(struct cpl_act_open_req);
1330 		size6 = sizeof(struct cpl_act_open_req6);
1331 	} else {
1332 		size = sizeof(struct cpl_t5_act_open_req);
1333 		size6 = sizeof(struct cpl_t5_act_open_req6);
1334 	}
1335 
1336 	if (csk->csk_family == AF_INET)
1337 		skb = alloc_wr(size, 0, GFP_NOIO);
1338 	else
1339 		skb = alloc_wr(size6, 0, GFP_NOIO);
1340 
1341 	if (!skb)
1342 		goto rel_resource;
1343 	skb->sk = (struct sock *)csk;
1344 	t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);
1345 
1346 	if (!csk->mtu)
1347 		csk->mtu = dst_mtu(csk->dst);
1348 	cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
1349 	csk->tx_chan = cxgb4_port_chan(ndev);
1350 	/* SMT two entries per row */
1351 	csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1;
1352 	step = lldi->ntxq / lldi->nchan;
1353 	csk->txq_idx = cxgb4_port_idx(ndev) * step;
1354 	step = lldi->nrxq / lldi->nchan;
1355 	csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
1356 	csk->wr_cred = lldi->wr_cred -
1357 		       DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
1358 	csk->wr_max_cred = csk->wr_cred;
1359 	csk->wr_una_cred = 0;
1360 	cxgbi_sock_reset_wr_list(csk);
1361 	csk->err = 0;
1362 
1363 	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
1364 		       (&csk->saddr), (&csk->daddr), csk, csk->state,
1365 		       csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
1366 		       csk->mtu, csk->mss_idx, csk->smac_idx);
1367 
1368 	/* must wait for either an act_open_rpl or an act_establish */
1369 	try_module_get(THIS_MODULE);
1370 	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
1371 	if (csk->csk_family == AF_INET)
1372 		send_act_open_req(csk, skb, csk->l2t);
1373 	else
1374 		send_act_open_req6(csk, skb, csk->l2t);
1375 	neigh_release(n);
1376 
1377 	return 0;
1378 
1379 rel_resource:
1380 	if (n)
1381 		neigh_release(n);
1382 	if (skb)
1383 		__kfree_skb(skb);
1384 	return -EINVAL;
1385 }
1386 
1387 cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
1388 	[CPL_ACT_ESTABLISH] = do_act_establish,
1389 	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
1390 	[CPL_PEER_CLOSE] = do_peer_close,
1391 	[CPL_ABORT_REQ_RSS] = do_abort_req_rss,
1392 	[CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
1393 	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
1394 	[CPL_FW4_ACK] = do_fw4_ack,
1395 	[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
1396 	[CPL_ISCSI_DATA] = do_rx_iscsi_hdr,
1397 	[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
1398 	[CPL_RX_DATA_DDP] = do_rx_data_ddp,
1399 	[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
1400 };
1401 
1402 int cxgb4i_ofld_init(struct cxgbi_device *cdev)
1403 {
1404 	int rc;
1405 
1406 	if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
1407 		cxgb4i_max_connect = CXGB4I_MAX_CONN;
1408 
1409 	rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
1410 					cxgb4i_max_connect);
1411 	if (rc < 0)
1412 		return rc;
1413 
1414 	cdev->csk_release_offload_resources = release_offload_resources;
1415 	cdev->csk_push_tx_frames = push_tx_frames;
1416 	cdev->csk_send_abort_req = send_abort_req;
1417 	cdev->csk_send_close_req = send_close_req;
1418 	cdev->csk_send_rx_credits = send_rx_credits;
1419 	cdev->csk_alloc_cpls = alloc_cpls;
1420 	cdev->csk_init_act_open = init_act_open;
1421 
1422 	pr_info("cdev 0x%p, offload up, added.\n", cdev);
1423 	return 0;
1424 }
1425 
1426 /*
1427  * functions to program the pagepod in h/w
1428  */
1429 #define ULPMEM_IDATA_MAX_NPPODS	4 /* 256/PPOD_SIZE */
1430 static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi,
1431 				struct ulp_mem_io *req,
1432 				unsigned int wr_len, unsigned int dlen,
1433 				unsigned int pm_addr)
1434 {
1435 	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);
1436 
1437 	INIT_ULPTX_WR(req, wr_len, 0, 0);
1438 	if (is_t4(lldi->adapter_type))
1439 		req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) |
1440 					(ULP_MEMIO_ORDER(1)));
1441 	else
1442 		req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) |
1443 					(V_T5_ULP_MEMIO_IMM(1)));
1444 	req->dlen = htonl(ULP_MEMIO_DATA_LEN(dlen >> 5));
1445 	req->lock_addr = htonl(ULP_MEMIO_ADDR(pm_addr >> 5));
1446 	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
1447 
1448 	idata->cmd_more = htonl(ULPTX_CMD(ULP_TX_SC_IMM));
1449 	idata->len = htonl(dlen);
1450 }
1451 
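/*
 * ddp_ppod_write_idata - write a block of pagepods into adapter memory
 * Issues a single ULP_TX_MEM_WRITE work request carrying up to
 * ULPMEM_IDATA_MAX_NPPODS pagepods as immediate data.
 */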
1452 static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
1453 				struct cxgbi_pagepod_hdr *hdr, unsigned int idx,
1454 				unsigned int npods,
1455 				struct cxgbi_gather_list *gl,
1456 				unsigned int gl_pidx)
1457 {
1458 	struct cxgbi_ddp_info *ddp = cdev->ddp;
1459 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1460 	struct sk_buff *skb;
1461 	struct ulp_mem_io *req;
1462 	struct ulptx_idata *idata;
1463 	struct cxgbi_pagepod *ppod;
1464 	unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit;
1465 	unsigned int dlen = PPOD_SIZE * npods;
1466 	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
1467 				sizeof(struct ulptx_idata) + dlen, 16);
1468 	unsigned int i;
1469 
1470 	skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
1471 	if (!skb) {
1472 		pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n",
1473 			cdev, idx, npods);
1474 		return -ENOMEM;
1475 	}
1476 	req = (struct ulp_mem_io *)skb->head;
1477 	set_queue(skb, CPL_PRIORITY_CONTROL, NULL);
1478 
1479 	ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr);
1480 	idata = (struct ulptx_idata *)(req + 1);
1481 	ppod = (struct cxgbi_pagepod *)(idata + 1);
1482 
1483 	for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) {
1484 		if (!hdr && !gl)
1485 			cxgbi_ddp_ppod_clear(ppod);
1486 		else
1487 			cxgbi_ddp_ppod_set(ppod, hdr, gl, gl_pidx);
1488 	}
1489 
1490 	cxgb4_ofld_send(cdev->ports[port_id], skb);
1491 	return 0;
1492 }
1493 
1494 static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
1495 			unsigned int idx, unsigned int npods,
1496 			struct cxgbi_gather_list *gl)
1497 {
1498 	unsigned int i, cnt;
1499 	int err = 0;
1500 
1501 	for (i = 0; i < npods; i += cnt, idx += cnt) {
1502 		cnt = npods - i;
1503 		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
1504 			cnt = ULPMEM_IDATA_MAX_NPPODS;
1505 		err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr,
1506 					idx, cnt, gl, 4 * i);
1507 		if (err < 0)
1508 			break;
1509 	}
1510 	return err;
1511 }
1512 
1513 static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
1514 			  unsigned int idx, unsigned int npods)
1515 {
1516 	unsigned int i, cnt;
1517 	int err;
1518 
1519 	for (i = 0; i < npods; i += cnt, idx += cnt) {
1520 		cnt = npods - i;
1521 		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
1522 			cnt = ULPMEM_IDATA_MAX_NPPODS;
1523 		err = ddp_ppod_write_idata(chba->cdev, chba->port_id, NULL,
1524 					idx, cnt, NULL, 0);
1525 		if (err < 0)
1526 			break;
1527 	}
1528 }
1529 
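/*
 * ddp_setup_conn_pgidx - set the connection's DDP page-size index
 * Programs the TCB via CPL_SET_TCB_FIELD with the page-size index to be used
 * for direct data placement on this connection.
 */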
1530 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
1531 				int pg_idx, bool reply)
1532 {
1533 	struct sk_buff *skb;
1534 	struct cpl_set_tcb_field *req;
1535 
1536 	if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
1537 		return 0;
1538 
1539 	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
1540 	if (!skb)
1541 		return -ENOMEM;
1542 
1543 	/*  set up ulp page size */
1544 	req = (struct cpl_set_tcb_field *)skb->head;
1545 	INIT_TP_WR(req, csk->tid);
1546 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
1547 	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
1548 	req->word_cookie = htons(0);
1549 	req->mask = cpu_to_be64(0x3 << 8);
1550 	req->val = cpu_to_be64(pg_idx << 8);
1551 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
1552 
1553 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1554 		"csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
1555 
1556 	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
1557 	return 0;
1558 }
1559 
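/*
 * ddp_setup_conn_digest - set the connection's iSCSI digest mode
 * Programs the TCB via CPL_SET_TCB_FIELD with the negotiated header and data
 * digest (CRC32c) settings.
 */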
1560 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
1561 				 int hcrc, int dcrc, int reply)
1562 {
1563 	struct sk_buff *skb;
1564 	struct cpl_set_tcb_field *req;
1565 
1566 	if (!hcrc && !dcrc)
1567 		return 0;
1568 
1569 	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
1570 	if (!skb)
1571 		return -ENOMEM;
1572 
1573 	csk->hcrc_len = (hcrc ? 4 : 0);
1574 	csk->dcrc_len = (dcrc ? 4 : 0);
1575 	/*  set up ulp submode */
1576 	req = (struct cpl_set_tcb_field *)skb->head;
1577 	INIT_TP_WR(req, tid);
1578 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
1579 	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
1580 	req->word_cookie = htons(0);
1581 	req->mask = cpu_to_be64(0x3 << 4);
1582 	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
1583 				(dcrc ? ULP_CRC_DATA : 0)) << 4);
1584 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
1585 
1586 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1587 		"csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
1588 
1589 	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
1590 	return 0;
1591 }
1592 
1593 static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
1594 {
1595 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1596 	struct cxgbi_ddp_info *ddp = cdev->ddp;
1597 	unsigned int tagmask, pgsz_factor[4];
1598 	int err;
1599 
1600 	if (ddp) {
1601 		kref_get(&ddp->refcnt);
1602 		pr_warn("cdev 0x%p, ddp 0x%p already set up.\n",
1603 			cdev, cdev->ddp);
1604 		return -EALREADY;
1605 	}
1606 
1607 	err = cxgbi_ddp_init(cdev, lldi->vr->iscsi.start,
1608 			lldi->vr->iscsi.start + lldi->vr->iscsi.size - 1,
1609 			lldi->iscsi_iolen, lldi->iscsi_iolen);
1610 	if (err < 0)
1611 		return err;
1612 
1613 	ddp = cdev->ddp;
1614 
1615 	tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
1616 	cxgbi_ddp_page_size_factor(pgsz_factor);
1617 	cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor);
1618 
1619 	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
1620 	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
1621 	cdev->csk_ddp_set = ddp_set_map;
1622 	cdev->csk_ddp_clear = ddp_clear_map;
1623 
1624 	pr_info("cxgb4i 0x%p tag: sw %u, rsvd %u,%u, mask 0x%x.\n",
1625 		cdev, cdev->tag_format.sw_bits, cdev->tag_format.rsvd_bits,
1626 		cdev->tag_format.rsvd_shift, cdev->tag_format.rsvd_mask);
1627 	pr_info("cxgb4i 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, "
1628 		" %u/%u.\n",
1629 		cdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask,
1630 		ddp->rsvd_tag_mask, ddp->max_txsz, lldi->iscsi_iolen,
1631 		ddp->max_rxsz, lldi->iscsi_iolen);
1632 	pr_info("cxgb4i 0x%p max payload size: %u/%u, %u/%u.\n",
1633 		cdev, cdev->tx_max_size, ddp->max_txsz, cdev->rx_max_size,
1634 		ddp->max_rxsz);
1635 	return 0;
1636 }
1637 
1638 #if IS_ENABLED(CONFIG_IPV6)
1639 static int cxgbi_inet6addr_handler(struct notifier_block *this,
1640 				   unsigned long event, void *data)
1641 {
1642 	struct inet6_ifaddr *ifa = data;
1643 	struct net_device *event_dev = ifa->idev->dev;
1644 	struct cxgbi_device *cdev;
1645 	int ret = NOTIFY_DONE;
1646 
1647 	if (event_dev->priv_flags & IFF_802_1Q_VLAN)
1648 		event_dev = vlan_dev_real_dev(event_dev);
1649 
1650 	cdev = cxgbi_device_find_by_netdev(event_dev, NULL);
1651 
1652 	if (!cdev)
1653 		return ret;
1654 
1655 	switch (event) {
1656 	case NETDEV_UP:
1657 		ret = cxgb4_clip_get(event_dev,
1658 				     (const struct in6_addr *)
1659 				     ((ifa)->addr.s6_addr));
1660 		if (ret < 0)
1661 			return ret;
1662 
1663 		ret = NOTIFY_OK;
1664 		break;
1665 
1666 	case NETDEV_DOWN:
1667 		cxgb4_clip_release(event_dev,
1668 				   (const struct in6_addr *)
1669 				   ((ifa)->addr.s6_addr));
1670 		ret = NOTIFY_OK;
1671 		break;
1672 
1673 	default:
1674 		break;
1675 	}
1676 
1677 	return ret;
1678 }
1679 
1680 static struct notifier_block cxgbi_inet6addr_notifier = {
1681 	.notifier_call = cxgbi_inet6addr_handler
1682 };
1683 
1684 /* Retrieve IPv6 addresses from a root device (bond, vlan) associated with
1685  * a physical device.
1686  * The physical device reference is needed to send the actual CLIP command.
1687  */
1688 static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
1689 {
1690 	struct inet6_dev *idev = NULL;
1691 	struct inet6_ifaddr *ifa;
1692 	int ret = 0;
1693 
1694 	idev = __in6_dev_get(root_dev);
1695 	if (!idev)
1696 		return ret;
1697 
1698 	read_lock_bh(&idev->lock);
1699 	list_for_each_entry(ifa, &idev->addr_list, if_list) {
1700 		pr_info("updating the clip for addr %pI6\n",
1701 			ifa->addr.s6_addr);
1702 		ret = cxgb4_clip_get(dev, (const struct in6_addr *)
1703 				     ifa->addr.s6_addr);
1704 		if (ret < 0)
1705 			break;
1706 	}
1707 
1708 	read_unlock_bh(&idev->lock);
1709 	return ret;
1710 }
1711 
1712 static int update_root_dev_clip(struct net_device *dev)
1713 {
1714 	struct net_device *root_dev = NULL;
1715 	int i, ret = 0;
1716 
1717 	/* First populate the real net device's IPv6 address */
1718 	ret = update_dev_clip(dev, dev);
1719 	if (ret)
1720 		return ret;
1721 
1722 	/* Parse all bond and vlan devices layered on top of the physical dev */
1723 	root_dev = netdev_master_upper_dev_get(dev);
1724 	if (root_dev) {
1725 		ret = update_dev_clip(root_dev, dev);
1726 		if (ret)
1727 			return ret;
1728 	}
1729 
1730 	for (i = 0; i < VLAN_N_VID; i++) {
1731 		root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
1732 		if (!root_dev)
1733 			continue;
1734 
1735 		ret = update_dev_clip(root_dev, dev);
1736 		if (ret)
1737 			break;
1738 	}
1739 	return ret;
1740 }
1741 
1742 static void cxgbi_update_clip(struct cxgbi_device *cdev)
1743 {
1744 	int i;
1745 
1746 	rcu_read_lock();
1747 
1748 	for (i = 0; i < cdev->nports; i++) {
1749 		struct net_device *dev = cdev->ports[i];
1750 		int ret = 0;
1751 
1752 		if (dev)
1753 			ret = update_root_dev_clip(dev);
1754 		if (ret < 0)
1755 			break;
1756 	}
1757 	rcu_read_unlock();
1758 }
1759 #endif /* IS_ENABLED(CONFIG_IPV6) */
1760 
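/*
 * t4_uld_add - cxgb4 ULD "add" callback
 * Registers a cxgbi_device for the adapter, copies the lower-level driver
 * info, sets up DDP and offload support, and creates one iSCSI host per port.
 */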
1761 static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
1762 {
1763 	struct cxgbi_device *cdev;
1764 	struct port_info *pi;
1765 	int i, rc;
1766 
1767 	cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
1768 	if (!cdev) {
1769 		pr_info("t4 device 0x%p, register failed.\n", lldi);
1770 		return NULL;
1771 	}
1772 	pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
1773 		cdev, lldi->adapter_type, lldi->nports,
1774 		lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
1775 		lldi->nrxq, lldi->wr_cred);
1776 	for (i = 0; i < lldi->nrxq; i++)
1777 		log_debug(1 << CXGBI_DBG_DEV,
1778 			"t4 0x%p, rxq id #%d: %u.\n",
1779 			cdev, i, lldi->rxq_ids[i]);
1780 
1781 	memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
1782 	cdev->flags = CXGBI_FLAG_DEV_T4;
1783 	cdev->pdev = lldi->pdev;
1784 	cdev->ports = lldi->ports;
1785 	cdev->nports = lldi->nports;
1786 	cdev->mtus = lldi->mtus;
1787 	cdev->nmtus = NMTUS;
1788 	cdev->snd_win = cxgb4i_snd_win;
1789 	cdev->rcv_win = cxgb4i_rcv_win;
1790 	cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
1791 	cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
1792 	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
1793 	cdev->itp = &cxgb4i_iscsi_transport;
1794 
1795 	cdev->pfvf = FW_VIID_PFN_GET(cxgb4_port_viid(lldi->ports[0])) << 8;
1796 	pr_info("cdev 0x%p,%s, pfvf %u.\n",
1797 		cdev, lldi->ports[0]->name, cdev->pfvf);
1798 
1799 	rc = cxgb4i_ddp_init(cdev);
1800 	if (rc) {
1801 		pr_info("t4 0x%p ddp init failed.\n", cdev);
1802 		goto err_out;
1803 	}
1804 	rc = cxgb4i_ofld_init(cdev);
1805 	if (rc) {
1806 		pr_info("t4 0x%p ofld init failed.\n", cdev);
1807 		goto err_out;
1808 	}
1809 
1810 	rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
1811 				&cxgb4i_host_template, cxgb4i_stt);
1812 	if (rc)
1813 		goto err_out;
1814 
1815 	for (i = 0; i < cdev->nports; i++) {
1816 		pi = netdev_priv(lldi->ports[i]);
1817 		cdev->hbas[i]->port_id = pi->port_id;
1818 	}
1819 	return cdev;
1820 
1821 err_out:
1822 	cxgbi_device_unregister(cdev);
1823 	return ERR_PTR(-ENOMEM);
1824 }
1825 
1826 #define RX_PULL_LEN	128
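/*
 * t4_uld_rx_handler - cxgb4 ULD rx callback
 * Copies (or converts) the incoming CPL message into an skb and dispatches
 * it to the handler registered for its opcode in cxgb4i_cplhandlers[].
 */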
1827 static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
1828 				const struct pkt_gl *pgl)
1829 {
1830 	const struct cpl_act_establish *rpl;
1831 	struct sk_buff *skb;
1832 	unsigned int opc;
1833 	struct cxgbi_device *cdev = handle;
1834 
1835 	if (pgl == NULL) {
1836 		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;
1837 
1838 		skb = alloc_wr(len, 0, GFP_ATOMIC);
1839 		if (!skb)
1840 			goto nomem;
1841 		skb_copy_to_linear_data(skb, &rsp[1], len);
1842 	} else {
1843 		if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
1844 			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
1845 				pgl->va, be64_to_cpu(*rsp),
1846 				be64_to_cpu(*(u64 *)pgl->va),
1847 				pgl->tot_len);
1848 			return 0;
1849 		}
1850 		skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
1851 		if (unlikely(!skb))
1852 			goto nomem;
1853 	}
1854 
1855 	rpl = (struct cpl_act_establish *)skb->data;
1856 	opc = rpl->ot.opcode;
1857 	log_debug(1 << CXGBI_DBG_TOE,
1858 		"cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
1859 		 cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
1860 	if (cxgb4i_cplhandlers[opc])
1861 		cxgb4i_cplhandlers[opc](cdev, skb);
1862 	else {
1863 		pr_err("No handler for opcode 0x%x.\n", opc);
1864 		__kfree_skb(skb);
1865 	}
1866 	return 0;
1867 nomem:
1868 	log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
1869 	return 1;
1870 }
1871 
1872 static int t4_uld_state_change(void *handle, enum cxgb4_state state)
1873 {
1874 	struct cxgbi_device *cdev = handle;
1875 
1876 	switch (state) {
1877 	case CXGB4_STATE_UP:
1878 		pr_info("cdev 0x%p, UP.\n", cdev);
1879 #if IS_ENABLED(CONFIG_IPV6)
1880 		cxgbi_update_clip(cdev);
1881 #endif
1882 		/* re-initialize */
1883 		break;
1884 	case CXGB4_STATE_START_RECOVERY:
1885 		pr_info("cdev 0x%p, RECOVERY.\n", cdev);
1886 		/* close all connections */
1887 		break;
1888 	case CXGB4_STATE_DOWN:
1889 		pr_info("cdev 0x%p, DOWN.\n", cdev);
1890 		break;
1891 	case CXGB4_STATE_DETACH:
1892 		pr_info("cdev 0x%p, DETACH.\n", cdev);
1893 		cxgbi_device_unregister(cdev);
1894 		break;
1895 	default:
1896 		pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
1897 		break;
1898 	}
1899 	return 0;
1900 }
1901 
1902 static int __init cxgb4i_init_module(void)
1903 {
1904 	int rc;
1905 
1906 	printk(KERN_INFO "%s", version);
1907 
1908 	rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
1909 	if (rc < 0)
1910 		return rc;
1911 	cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
1912 
1913 #if IS_ENABLED(CONFIG_IPV6)
1914 	register_inet6addr_notifier(&cxgbi_inet6addr_notifier);
1915 #endif
1916 	return 0;
1917 }
1918 
1919 static void __exit cxgb4i_exit_module(void)
1920 {
1921 #if IS_ENABLED(CONFIG_IPV6)
1922 	unregister_inet6addr_notifier(&cxgbi_inet6addr_notifier);
1923 #endif
1924 	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
1925 	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
1926 	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
1927 }
1928 
1929 module_init(cxgb4i_init_module);
1930 module_exit(cxgb4i_exit_module);
1931