/*
 * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management
 *
 * Copyright (C) 2003-2015 Chelsio Communications.  All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
 * release for licensing terms and conditions.
 *
 * Written by:	Dimitris Michailidis (dm@chelsio.com)
 *		Karen Xie (kxie@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>

#include "common.h"
#include "t3_cpl.h"
#include "t3cdev.h"
#include "cxgb3_defs.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_offload.h"
#include "firmware_exports.h"
#include "cxgb3i.h"

static unsigned int dbg_level;
#include "../libcxgbi.h"

#define DRV_MODULE_NAME         "cxgb3i"
#define DRV_MODULE_DESC         "Chelsio T3 iSCSI Driver"
#define DRV_MODULE_VERSION	"2.0.1-ko"
#define DRV_MODULE_RELDATE	"Apr. 2015"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "debug flag (default=0)");

static int cxgb3i_rcv_win = 256 * 1024;
module_param(cxgb3i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb3i_rcv_win, "TCP receive window in bytes (default=256KB)");

static int cxgb3i_snd_win = 128 * 1024;
module_param(cxgb3i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb3i_snd_win, "TCP send window in bytes (default=128KB)");

static int cxgb3i_rx_credit_thres = 10 * 1024;
module_param(cxgb3i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb3i_rx_credit_thres,
		 "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb3i_max_connect = 8 * 1024;
module_param(cxgb3i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb3i_max_connect, "Max. # of connections (default=8192)");

static unsigned int cxgb3i_sport_base = 20000;
module_param(cxgb3i_sport_base, uint, 0644);
MODULE_PARM_DESC(cxgb3i_sport_base, "starting port number (default=20000)");

static void cxgb3i_dev_open(struct t3cdev *);
static void cxgb3i_dev_close(struct t3cdev *);
static void cxgb3i_dev_event_handler(struct t3cdev *, u32, u32);

static struct cxgb3_client t3_client = {
	.name = DRV_MODULE_NAME,
	.handlers = cxgb3i_cpl_handlers,
	.add = cxgb3i_dev_open,
	.remove = cxgb3i_dev_close,
	.event_handler = cxgb3i_dev_event_handler,
};

static struct scsi_host_template cxgb3i_host_template = {
	.module		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.proc_name	= DRV_MODULE_NAME,
	.can_queue	= CXGB3I_SCSI_HOST_QDEPTH,
	.queuecommand	= iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.sg_tablesize	= SG_ALL,
	.max_sectors	= 0xFFFF,
	.cmd_per_lun	= ISCSI_DEF_CMD_PER_LUN,
	.eh_timed_out	= iscsi_eh_cmd_timed_out,
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc	= iscsi_target_alloc,
	.dma_boundary	= PAGE_SIZE - 1,
	.this_id	= -1,
	.track_queue_depth = 1,
};

static struct iscsi_transport cxgb3i_iscsi_transport = {
	.owner		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	/* owner and name should be set already */
	.caps		= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
				| CAP_DATADGST | CAP_DIGEST_OFFLOAD |
				CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
	.attr_is_visible	= cxgbi_attr_is_visible,
	.get_host_param	= cxgbi_get_host_param,
	.set_host_param	= cxgbi_set_host_param,
	/* session management */
	.create_session	= cxgbi_create_session,
	.destroy_session	= cxgbi_destroy_session,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn	= cxgbi_create_conn,
	.bind_conn	= cxgbi_bind_conn,
	.destroy_conn	= iscsi_tcp_conn_teardown,
	.start_conn	= iscsi_conn_start,
	.stop_conn	= iscsi_conn_stop,
	.get_conn_param	= iscsi_conn_get_param,
	.set_param	= cxgbi_set_conn_param,
	.get_stats	= cxgbi_get_conn_stats,
	/* pdu xmit req from user space */
	.send_pdu	= iscsi_conn_send_pdu,
	/* task */
	.init_task	= iscsi_tcp_task_init,
	.xmit_task	= iscsi_tcp_task_xmit,
	.cleanup_task	= cxgbi_cleanup_task,
	/* pdu */
	.alloc_pdu	= cxgbi_conn_alloc_pdu,
	.init_pdu	= cxgbi_conn_init_pdu,
	.xmit_pdu	= cxgbi_conn_xmit_pdu,
	.parse_pdu_itt	= cxgbi_parse_pdu_itt,
	/* TCP connect/disconnect */
	.get_ep_param	= cxgbi_get_ep_param,
	.ep_connect	= cxgbi_ep_connect,
	.ep_poll	= cxgbi_ep_poll,
	.ep_disconnect	= cxgbi_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

static struct scsi_transport_template *cxgb3i_stt;

/*
 * CPL (Chelsio Protocol Language) defines a message passing interface between
 * the host driver and the Chelsio ASIC.
 * The section below implements the CPLs related to iscsi tcp connection
 * open/close/abort and data send/receive.
 */

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion);

static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
			      const struct l2t_entry *e)
{
	unsigned int wscale = cxgbi_sock_compute_wscale(csk->rcv_win);
	struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head;

	skb->priority = CPL_PRIORITY_SETUP;

	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, csk->atid));
	req->local_port = csk->saddr.sin_port;
	req->peer_port = csk->daddr.sin_port;
	req->local_ip = csk->saddr.sin_addr.s_addr;
	req->peer_ip = csk->daddr.sin_addr.s_addr;

	req->opt0h = htonl(V_KEEP_ALIVE(1) | F_TCAM_BYPASS |
			V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) |
			V_L2T_IDX(e->idx) | V_TX_CHANNEL(e->smt_idx));
	req->opt0l = htonl(V_ULP_MODE(ULP2_MODE_ISCSI) |
			V_RCV_BUFSIZ(csk->rcv_win >> 10));

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, %pI4:%u-%pI4:%u, %u,%u,%u.\n",
		csk, csk->state, csk->flags, csk->atid,
		&req->local_ip, ntohs(req->local_port),
		&req->peer_ip, ntohs(req->peer_port),
		csk->mss_idx, e->idx, e->smt_idx);

	l2t_send(csk->cdev->lldev, skb, csk->l2t);
}

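/*
 * Note on the window scale programmed in opt0h above: the TCP window
 * field is only 16 bits wide, so receive windows larger than 64KB need
 * a scaling shift.  cxgbi_sock_compute_wscale() in libcxgbi.h picks the
 * smallest shift that covers rcv_win; a sketch of the idea (not the
 * exact library code):
 *
 *	unsigned int wscale = 0;
 *
 *	while (wscale < 14 && (65535 << wscale) < rcv_win)
 *		wscale++;
 *
 * e.g. the default 256KB receive window computes to wscale = 3.
 */
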
static inline void act_open_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	cxgbi_sock_act_open_req_arp_failure(NULL, skb);
}

/*
 * CPL connection close request: host ->
 *
 * Close a connection by sending a CPL_CLOSE_CON_REQ message and queueing it
 * to the write queue (i.e., after any unsent TX data).
 */
static void send_close_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = csk->tid;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	csk->cpl_close = NULL;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(tid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = htonl(csk->write_seq);

	cxgbi_sock_skb_entail(csk, skb);
	if (csk->state >= CTP_ESTABLISHED)
		push_tx_frames(csk, 1);
}

/*
 * CPL connection abort request: host ->
 *
 * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs
 * for the same connection and also that we do not try to send a message
 * after the connection has closed.
 */
static void abort_arp_failure(struct t3cdev *tdev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"t3dev 0x%p, tid %u, skb 0x%p.\n",
		tdev, GET_TID(req), skb);
	req->cmd = CPL_ABORT_NO_RST;
	cxgb3_ofld_send(tdev, skb);
}

static void send_abort_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_abort_req;
	struct cpl_abort_req *req;

	if (unlikely(csk->state == CTP_ABORTING || !skb))
		return;
	cxgbi_sock_set_state(csk, CTP_ABORTING);
	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
	/* Purge the send queue so we don't send anything after an abort. */
	cxgbi_sock_purge_write_queue(csk);

	csk->cpl_abort_req = NULL;
	req = (struct cpl_abort_req *)skb->head;
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(csk->tid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
	req->rsvd0 = htonl(csk->snd_nxt);
	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);
	req->cmd = CPL_ABORT_SEND_RST;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
		csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
		req->rsvd1);

	l2t_send(csk->cdev->lldev, skb, csk->l2t);
}

/*
 * CPL connection abort reply: host ->
 *
 * Send an ABORT_RPL message in response to the ABORT_REQ received.
 */
static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
	struct sk_buff *skb = csk->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, status %d.\n",
		csk, csk->state, csk->flags, csk->tid, rst_status);

	csk->cpl_abort_rpl = NULL;
	skb->priority = CPL_PRIORITY_DATA;
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(csk->tid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
	rpl->cmd = rst_status;
	cxgb3_ofld_send(csk->cdev->lldev, skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;
	u32 dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, credit %u, dack %u.\n",
		csk, csk->state, csk->flags, csk->tid, credits, dack);

	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
	if (!skb) {
		pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
		return 0;
	}
	req = (struct cpl_rx_data_ack *)skb->head;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid));
	req->credit_dack = htonl(dack | V_RX_CREDITS(credits));
	skb->priority = CPL_PRIORITY_ACK;
	cxgb3_ofld_send(csk->cdev->lldev, skb);
	return credits;
}

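/*
 * How the hook above gets used (a sketch of the caller's logic in
 * libcxgbi, shown here only for orientation): libcxgbi tracks how far
 * the application has consumed the receive stream (copied_seq) versus
 * the last window update sent to the chip (rcv_wup), and once the
 * difference crosses the threshold (cxgb3i_rx_credit_thres, 10KB by
 * default, plumbed into cdev->rx_credit_thres in cxgb3i_dev_open())
 * it returns the credits, conceptually:
 *
 *	u32 must_send = csk->copied_seq - csk->rcv_wup;
 *
 *	if (must_send >= cdev->rx_credit_thres)
 *		csk->rcv_wup += cdev->csk_send_rx_credits(csk, must_send);
 */
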
/*
 * CPL connection tx data: host ->
 *
 * Send iscsi PDUs via TX_DATA CPL messages.  Returns the number of
 * credits sent.
 * Each TX_DATA consumes work request credits (WRs), so we need to keep track
 * of how many we have used so far and how many are pending (i.e., not yet
 * acked by T3).
 */

static unsigned int wrlen __read_mostly;
static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;

static void init_wr_tab(unsigned int wr_len)
{
	int i;

	if (skb_wrs[1])		/* already initialized */
		return;
	for (i = 1; i < SKB_WR_LIST_SIZE; i++) {
		int sgl_len = (3 * i) / 2 + (i & 1);

		sgl_len += 3;
		skb_wrs[i] = (sgl_len <= wr_len
			      ? 1 : 1 + (sgl_len - 2) / (wr_len - 1));
	}
	wrlen = wr_len * 8;
}

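/*
 * Worked example for the table above (numbers are illustrative): an skb
 * with i = 4 fragments needs sgl_len = (3 * 4) / 2 + (4 & 1) + 3 = 9
 * flits.  If the adapter reported wr_len = 8 flits per work request
 * (wr_len comes from the GET_WR_LEN ioctl, so 8 is an assumption here),
 * 9 does not fit in one WR and skb_wrs[4] = 1 + (9 - 2) / (8 - 1) = 2.
 * An skb whose SGL fits entirely in one WR simply gets skb_wrs[i] = 1.
 */
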
static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
				   int len, int req_completion)
{
	struct tx_data_wr *req;
	struct l2t_entry *l2t = csk->l2t;

	skb_reset_transport_header(skb);
	req = __skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) |
			(req_completion ? F_WR_COMPL : 0));
	req->wr_lo = htonl(V_WR_TID(csk->tid));
	/* len includes the length of any HW ULP additions */
	req->len = htonl(len);
	/* V_TX_ULP_SUBMODE sets both the mode and submode */
	req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_tx_ulp_mode(skb)) |
			   V_TX_SHOVE((skb_peek(&csk->write_queue) ? 0 : 1)));
	req->sndseq = htonl(csk->snd_nxt);
	req->param = htonl(V_TX_PORT(l2t->smt_idx));

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT |
				    V_TX_CPU_IDX(csk->rss_qid));
		/* sendbuffer is in units of 32KB. */
		req->param |= htonl(V_TX_SNDBUF(csk->snd_win >> 15));
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}
}

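/*
 * Units used in the WR above, with the driver defaults as worked
 * numbers: V_TX_SNDBUF takes the send buffer size in 32KB units, so the
 * default 128KB send window becomes 128KB >> 15 = 4.  TX_SHOVE is set
 * only when the write queue is empty, i.e. when there is no more data
 * queued behind this skb, so the chip may push it out immediately
 * rather than wait to coalesce with further TX data.
 */
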
/*
 * push_tx_frames -- start transmit
 *
 * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a
 * connection's send queue and sends them on to T3.  Must be called with the
 * connection's lock held.  Returns the amount of send buffer space that was
 * freed as a result of sending queued data to T3.
 */
static void arp_failure_skb_discard(struct t3cdev *dev, struct sk_buff *skb)
{
	kfree_skb(skb);
}

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;

	if (unlikely(csk->state < CTP_ESTABLISHED ||
		csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, in closing state.\n",
			csk, csk->state, csk->flags, csk->tid);
		return 0;
	}

	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
		int len = skb->len;	/* length before skb_push */
		int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len);
		int wrs_needed = skb_wrs[frags];

		if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen)
			wrs_needed = 1;

		WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1);

		if (csk->wr_cred < wrs_needed) {
			log_debug(1 << CXGBI_DBG_PDU_TX,
				"csk 0x%p, skb len %u/%u, frag %u, wr %d<%u.\n",
				csk, skb->len, skb->data_len, frags,
				wrs_needed, csk->wr_cred);
			break;
		}

		__skb_unlink(skb, &csk->write_queue);
		skb->priority = CPL_PRIORITY_DATA;
		skb->csum = wrs_needed;	/* remember this until the WR_ACK */
		csk->wr_cred -= wrs_needed;
		csk->wr_una_cred += wrs_needed;
		cxgbi_sock_enqueue_wr(csk, skb);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p, enqueue, skb len %u/%u, frag %u, wr %d, "
			"left %u, unack %u.\n",
			csk, skb->len, skb->data_len, frags, skb->csum,
			csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
			if ((req_completion &&
				csk->wr_una_cred == wrs_needed) ||
			     csk->wr_una_cred >= csk->wr_max_cred / 2) {
				req_completion = 1;
				csk->wr_una_cred = 0;
			}
			len += cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb));
			make_tx_data_wr(csk, skb, len, req_completion);
			csk->snd_nxt += len;
			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
		}
		total_size += skb->truesize;
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p, tid 0x%x, send skb 0x%p.\n",
			csk, csk->tid, skb);
		set_arp_failure_handler(skb, arp_failure_skb_discard);
		l2t_send(csk->cdev->lldev, skb, csk->l2t);
	}
	return total_size;
}

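/*
 * The completion-request heuristic above, pulled out for clarity: each
 * in-flight skb holds its WR credits until the chip acks it (see
 * do_wr_ack() below).  To bound how long credits stay outstanding, the
 * loop forces F_WR_COMPL once at least half of wr_max_cred is
 * unacknowledged:
 *
 *	if (csk->wr_una_cred >= csk->wr_max_cred / 2) {
 *		req_completion = 1;
 *		csk->wr_una_cred = 0;
 *	}
 *
 * which makes the chip generate a TX_DATA_ACK and return the credits.
 */
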
/*
 * Process a CPL_ACT_ESTABLISH message: -> host
 * Updates connection state from an active establish CPL message.  Runs with
 * the connection lock held.
 */
static inline void free_atid(struct cxgbi_sock *csk)
{
	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
		cxgb3_free_atid(csk->cdev->lldev, csk->atid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
		cxgbi_sock_put(csk);
	}
}

static int do_act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	u32 rcv_isn = ntohl(req->rcv_isn);	/* real RCV_ISN + 1 */

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"atid 0x%x,tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
		atid, tid, csk, csk->state, csk->flags, rcv_isn);

	cxgbi_sock_get(csk);
	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
	csk->tid = tid;
	cxgb3_insert_tid(csk->cdev->lldev, &t3_client, csk, tid);

	free_atid(csk);

	csk->rss_qid = G_QNUM(ntohs(skb->csum));

	spin_lock_bh(&csk->lock);
	if (csk->retry_timer.function) {
		del_timer(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}

	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
			csk, csk->state, csk->flags, csk->tid);

	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
	if (csk->rcv_win > (M_RCV_BUFSIZ << 10))
		csk->rcv_wup -= csk->rcv_win - (M_RCV_BUFSIZ << 10);

	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
		/* upper layer has requested closing */
		send_abort_req(csk);
	else {
		if (skb_queue_len(&csk->write_queue))
			push_tx_frames(csk, 1);
		cxgbi_conn_tx_open(csk);
	}

	spin_unlock_bh(&csk->lock);
	__kfree_skb(skb);
	return 0;
}

/*
 * Process a CPL_ACT_OPEN_RPL message: -> host
 * Handle active open failures.
 */
static int act_open_rpl_status_to_errno(int status)
{
	switch (status) {
	case CPL_ERR_CONN_RESET:
		return -ECONNREFUSED;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

static void act_open_retry_timer(struct timer_list *t)
{
	struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
	struct sk_buff *skb;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC);
	if (!skb)
		cxgbi_sock_fail_act_open(csk, -ENOMEM);
	else {
		skb->sk = (struct sock *)csk;
		set_arp_failure_handler(skb, act_open_arp_failure);
		send_act_open_req(csk, skb, csk->l2t);
	}
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}

static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	pr_info("csk 0x%p,%u,0x%lx,%u, status %u, %pI4:%u-%pI4:%u.\n",
		csk, csk->state, csk->flags, csk->atid, rpl->status,
		&csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));

	if (rpl->status != CPL_ERR_TCAM_FULL &&
	    rpl->status != CPL_ERR_CONN_EXIST &&
	    rpl->status != CPL_ERR_ARP_MISS)
		cxgb3_queue_tid_release(tdev, GET_TID(rpl));

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (rpl->status == CPL_ERR_CONN_EXIST &&
	    csk->retry_timer.function != act_open_retry_timer) {
		csk->retry_timer.function = act_open_retry_timer;
		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
	} else
		cxgbi_sock_fail_act_open(csk,
				act_open_rpl_status_to_errno(rpl->status));

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
	__kfree_skb(skb);
	return 0;
}

/*
 * Process PEER_CLOSE CPL messages: -> host
 * Handle peer FIN.
 */
static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_rcv_peer_close(csk);
	__kfree_skb(skb);
	return 0;
}

/*
 * Process CLOSE_CONN_RPL CPL message: -> host
 * Process a peer ACK to our FIN.
 */
static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb,
			    void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_close_con_rpl *rpl = cplhdr(skb);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, snxt %u.\n",
		csk, csk->state, csk->flags, csk->tid, ntohl(rpl->snd_nxt));

	cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
	__kfree_skb(skb);
	return 0;
}

/*
 * Process ABORT_REQ_RSS CPL message: -> host
 * Process abort requests.  If we are waiting for an ABORT_RPL we ignore this
 * request except that we need to reply to it.
 */
static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
				 int *need_rst)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN:
	case CPL_ERR_CONN_RESET:
		return csk->state > CTP_ESTABLISHED ? -EPIPE : -ECONNRESET;
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return -ETIMEDOUT;
	default:
		return -EIO;
	}
}

static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	const struct cpl_abort_req_rss *req = cplhdr(skb);
	struct cxgbi_sock *csk = ctx;
	int rst_status = CPL_ABORT_NO_RST;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
	    req->status == CPL_ERR_PERSIST_NEG_ADVICE) {
		goto done;
	}

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
		cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
		cxgbi_sock_set_state(csk, CTP_ABORTING);
		goto out;
	}

	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
	send_abort_rpl(csk, rst_status);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}

out:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
done:
	__kfree_skb(skb);
	return 0;
}

/*
 * Process ABORT_RPL_RSS CPL message: -> host
 * Process abort replies.  We only process these messages if we anticipate
 * them as the coordination between SW and HW in this area is somewhat lacking
 * and sometimes we get ABORT_RPLs after we are done with the connection that
 * originated the ABORT_REQ.
 */
static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	struct cxgbi_sock *csk = ctx;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"status 0x%x, csk 0x%p, s %u, 0x%lx.\n",
		rpl->status, csk, csk ? csk->state : 0,
		csk ? csk->flags : 0UL);
	/*
	 * Ignore replies to post-close aborts indicating that the abort was
	 * requested too late.  These connections are terminated when we get
	 * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss
	 * arrives the TID is either no longer used or it has been recycled.
	 */
	if (rpl->status == CPL_ERR_ABORT_FAILED)
		goto rel_skb;
	/*
	 * Sometimes we've already closed the connection, e.g., a post-close
	 * abort races with ABORT_REQ_RSS, the latter frees the connection
	 * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED,
	 * but FW turns the ABORT_REQ into a regular one and so we get
	 * ABORT_RPL_RSS with status 0 and no connection.
	 */
	if (csk)
		cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
	__kfree_skb(skb);
	return 0;
}

/*
 * Process RX_ISCSI_HDR CPL message: -> host
 * Handle received PDUs; the payload could be DDP'ed.  If not, the payload
 * follows the BHS.
 */
static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb);
	struct cpl_iscsi_hdr_norss data_cpl;
	struct cpl_rx_data_ddp_norss ddp_cpl;
	unsigned int hdr_len, data_len, status;
	unsigned int len;
	int err;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, skb 0x%p,%u.\n",
		csk, csk->state, csk->flags, csk->tid, skb, skb->len);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = ntohl(hdr_cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(struct cpl_iscsi_hdr));

	len = hdr_len = ntohs(hdr_cpl->len);
	/* msg coalesce is off or not enough data received */
	if (skb->len <= hdr_len) {
		pr_err("%s: tid %u, CPL_ISCSI_HDR, skb len %u < %u.\n",
			csk->cdev->ports[csk->port_id]->name, csk->tid,
			skb->len, hdr_len);
		goto abort_conn;
	}
	cxgbi_skcb_set_flag(skb, SKCBF_RX_COALESCED);

	err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl,
			    sizeof(ddp_cpl));
	if (err < 0) {
		pr_err("%s: tid %u, copy cpl_ddp %u-%zu failed %d.\n",
			csk->cdev->ports[csk->port_id]->name, csk->tid,
			skb->len, sizeof(ddp_cpl), err);
		goto abort_conn;
	}

	cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
	cxgbi_skcb_rx_pdulen(skb) = ntohs(ddp_cpl.len);
	cxgbi_skcb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
	status = ntohl(ddp_cpl.ddp_status);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p, skb 0x%p,%u, pdulen %u, status 0x%x.\n",
		csk, skb, skb->len, cxgbi_skcb_rx_pdulen(skb), status);

	if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
	if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
	if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);

	if (skb->len > (hdr_len + sizeof(ddp_cpl))) {
		err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl));
		if (err < 0) {
			pr_err("%s: tid %u, cp %zu/%u failed %d.\n",
				csk->cdev->ports[csk->port_id]->name,
				csk->tid, sizeof(data_cpl), skb->len, err);
			goto abort_conn;
		}
		data_len = ntohs(data_cpl.len);
		log_debug(1 << CXGBI_DBG_DDP | 1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, pdu not ddp'ed %u/%u, status 0x%x.\n",
			skb, data_len, cxgbi_skcb_rx_pdulen(skb), status);
		len += sizeof(data_cpl) + data_len;
	} else if (status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);

	csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb);
	__pskb_trim(skb, len);
	__skb_queue_tail(&csk->receive_queue, skb);
	cxgbi_conn_pdu_ready(csk);

	spin_unlock_bh(&csk->lock);
	return 0;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
	__kfree_skb(skb);
	return 0;
}

/*
 * Process TX_DATA_ACK CPL messages: -> host
 * Process an acknowledgment of WR completion.  Advance snd_una and send the
 * next batch of work requests from the write queue.
 */
static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_wr_ack *hdr = cplhdr(skb);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, cr %u.\n",
		csk, csk->state, csk->flags, csk->tid, ntohs(hdr->credits));

	cxgbi_sock_rcv_wr_ack(csk, ntohs(hdr->credits), ntohl(hdr->snd_una), 1);
	__kfree_skb(skb);
	return 0;
}

/*
 * For each connection, pre-allocate the skbs needed for close/abort requests,
 * so that we can service the request right away.
 */
static int alloc_cpls(struct cxgbi_sock *csk)
{
	csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), 0,
					GFP_KERNEL);
	if (!csk->cpl_close)
		return -ENOMEM;
	csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), 0,
					GFP_KERNEL);
	if (!csk->cpl_abort_req)
		goto free_cpl_skbs;

	csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), 0,
					GFP_KERNEL);
	if (!csk->cpl_abort_rpl)
		goto free_cpl_skbs;

	return 0;

free_cpl_skbs:
	cxgbi_sock_free_cpl_skbs(csk);
	return -ENOMEM;
}

static void l2t_put(struct cxgbi_sock *csk)
{
	struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;

	if (csk->l2t) {
		l2t_release(t3dev, csk->l2t);
		csk->l2t = NULL;
		cxgbi_sock_put(csk);
	}
}

/*
 * release_offload_resources - release offload resources
 * Release resources held by an offload connection (TID, L2T entry, etc.)
 */
static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	csk->rss_qid = 0;
	cxgbi_sock_free_cpl_skbs(csk);

	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}
	l2t_put(csk);
	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		cxgb3_remove_tid(t3dev, (void *)csk, csk->tid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
	csk->cdev = NULL;
}

static void update_address(struct cxgbi_hba *chba)
{
	if (chba->ipv4addr) {
		if (chba->vdev &&
		    chba->ipv4addr != cxgb3i_get_private_ipv4addr(chba->vdev)) {
			cxgb3i_set_private_ipv4addr(chba->vdev, chba->ipv4addr);
			cxgb3i_set_private_ipv4addr(chba->ndev, 0);
			pr_info("%s set %pI4.\n",
				chba->vdev->name, &chba->ipv4addr);
		} else if (chba->ipv4addr !=
				cxgb3i_get_private_ipv4addr(chba->ndev)) {
			cxgb3i_set_private_ipv4addr(chba->ndev, chba->ipv4addr);
			pr_info("%s set %pI4.\n",
				chba->ndev->name, &chba->ipv4addr);
		}
	} else if (cxgb3i_get_private_ipv4addr(chba->ndev)) {
		if (chba->vdev)
			cxgb3i_set_private_ipv4addr(chba->vdev, 0);
		cxgb3i_set_private_ipv4addr(chba->ndev, 0);
	}
}

static int init_act_open(struct cxgbi_sock *csk)
{
	struct dst_entry *dst = csk->dst;
	struct cxgbi_device *cdev = csk->cdev;
	struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct cxgbi_hba *chba = cdev->hbas[csk->port_id];
	struct sk_buff *skb = NULL;
	int atid;
	int ret;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags);

	update_address(chba);
	if (chba->ipv4addr)
		csk->saddr.sin_addr.s_addr = chba->ipv4addr;

	csk->rss_qid = 0;
	csk->l2t = t3_l2t_get(t3dev, dst, ndev,
			      &csk->daddr.sin_addr.s_addr);
	if (!csk->l2t) {
		pr_err("NO l2t available.\n");
		return -EINVAL;
	}
	cxgbi_sock_get(csk);

	/* csk->atid is unsigned; check the allocation via a signed local */
	atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
	if (atid < 0) {
		pr_err("NO atid available.\n");
		ret = -EINVAL;
		goto put_sock;
	}
	csk->atid = atid;
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);

	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto free_atid;
	}
	skb->sk = (struct sock *)csk;
	set_arp_failure_handler(skb, act_open_arp_failure);
	csk->snd_win = cxgb3i_snd_win;
	csk->rcv_win = cxgb3i_rcv_win;

	csk->wr_max_cred = csk->wr_cred = T3C_DATA(t3dev)->max_wrs - 1;
	csk->wr_una_cred = 0;
	csk->mss_idx = cxgbi_sock_select_mss(csk, dst_mtu(dst));
	cxgbi_sock_reset_wr_list(csk);
	csk->err = 0;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, %pI4:%u-%pI4:%u.\n",
		csk, csk->state, csk->flags,
		&csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));

	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	send_act_open_req(csk, skb, csk->l2t);
	return 0;

free_atid:
	cxgb3_free_atid(t3dev, csk->atid);
put_sock:
	cxgbi_sock_put(csk);
	l2t_release(t3dev, csk->l2t);
	csk->l2t = NULL;

	return ret;
}

cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_TX_DMA_ACK] = do_wr_ack,
	[CPL_ISCSI_HDR] = do_iscsi_hdr,
};

/**
 * cxgb3i_ofld_init - allocate and initialize resources for each adapter found
 * @cdev:	cxgbi adapter
 */
static int cxgb3i_ofld_init(struct cxgbi_device *cdev)
{
	struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
	struct adap_ports port;
	struct ofld_page_info rx_page_info;
	unsigned int wr_len;
	int rc;

	if (t3dev->ctl(t3dev, GET_WR_LEN, &wr_len) < 0 ||
	    t3dev->ctl(t3dev, GET_PORTS, &port) < 0 ||
	    t3dev->ctl(t3dev, GET_RX_PAGE_INFO, &rx_page_info) < 0) {
		pr_warn("t3 0x%p, offload up, ioctl failed.\n", t3dev);
		return -EINVAL;
	}

	if (cxgb3i_max_connect > CXGBI_MAX_CONN)
		cxgb3i_max_connect = CXGBI_MAX_CONN;

	rc = cxgbi_device_portmap_create(cdev, cxgb3i_sport_base,
					cxgb3i_max_connect);
	if (rc < 0)
		return rc;

	init_wr_tab(wr_len);
	cdev->csk_release_offload_resources = release_offload_resources;
	cdev->csk_push_tx_frames = push_tx_frames;
	cdev->csk_send_abort_req = send_abort_req;
	cdev->csk_send_close_req = send_close_req;
	cdev->csk_send_rx_credits = send_rx_credits;
	cdev->csk_alloc_cpls = alloc_cpls;
	cdev->csk_init_act_open = init_act_open;

	pr_info("cdev 0x%p, offload up, added.\n", cdev);
	return 0;
}

/*
 * functions to program the pagepod in h/w
 */
static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
{
	struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;

	memset(req, 0, sizeof(*req));

	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
	req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
				   V_ULPTX_CMD(ULP_MEM_WRITE));
	req->len = htonl(V_ULP_MEMIO_DATA_LEN(IPPOD_SIZE >> 5) |
			 V_ULPTX_NFLITS((IPPOD_SIZE >> 3) + 1));
}

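/*
 * Sizing of the header built above, assuming the usual 64-byte
 * IPPOD_SIZE: the ULP memory-write address and data length are carried
 * in 32-byte units, so one pagepod is IPPOD_SIZE >> 5 = 2 units, and
 * the request occupies (IPPOD_SIZE >> 3) + 1 = 9 flits of 8 bytes:
 * 8 flits of pagepod data plus one flit for the ULP_TX command/length
 * words themselves.
 */
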
static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
{
	return ((struct t3cdev *)cdev->lldev)->ulp_iscsi;
}

static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
		       struct cxgbi_task_tag_info *ttinfo)
{
	unsigned int idx = ttinfo->idx;
	unsigned int npods = ttinfo->npods;
	struct scatterlist *sg = ttinfo->sgl;
	struct cxgbi_pagepod *ppod;
	struct ulp_mem_io *req;
	unsigned int sg_off;
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	int i;

	for (i = 0; i < npods; i++, idx++, pm_addr += IPPOD_SIZE) {
		struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
					       IPPOD_SIZE, 0, GFP_ATOMIC);

		if (!skb)
			return -ENOMEM;
		ulp_mem_io_set_hdr(skb, pm_addr);
		req = (struct ulp_mem_io *)skb->head;
		ppod = (struct cxgbi_pagepod *)(req + 1);
		sg_off = i * PPOD_PAGES_MAX;
		cxgbi_ddp_set_one_ppod(ppod, ttinfo, &sg,
				       &sg_off);
		skb->priority = CPL_PRIORITY_CONTROL;
		cxgb3_ofld_send(ppm->lldev, skb);
	}
	return 0;
}

static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm,
			  struct cxgbi_task_tag_info *ttinfo)
{
	unsigned int idx = ttinfo->idx;
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	unsigned int npods = ttinfo->npods;
	int i;

	log_debug(1 << CXGBI_DBG_DDP,
		  "cdev 0x%p, clear idx %u, npods %u.\n",
		  cdev, idx, npods);

	for (i = 0; i < npods; i++, idx++, pm_addr += IPPOD_SIZE) {
		struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
					       IPPOD_SIZE, 0, GFP_ATOMIC);

		if (!skb) {
			pr_err("cdev 0x%p, clear ddp, %u,%d/%u, skb OOM.\n",
			       cdev, idx, i, npods);
			continue;
		}
		ulp_mem_io_set_hdr(skb, pm_addr);
		skb->priority = CPL_PRIORITY_CONTROL;
		cxgb3_ofld_send(ppm->lldev, skb);
	}
}

static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
				unsigned int tid, int pg_idx)
{
	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
					GFP_KERNEL);
	struct cpl_set_tcb_field *req;
	u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;

	log_debug(1 << CXGBI_DBG_DDP,
		"csk 0x%p, tid %u, pg_idx %d.\n", csk, tid, pg_idx);
	if (!skb)
		return -ENOMEM;

	/* set up ulp submode and page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(1);
	req->cpu_idx = 0;
	req->word = htons(31);
	req->mask = cpu_to_be64(0xF0000000);
	req->val = cpu_to_be64(val << 28);
	skb->priority = CPL_PRIORITY_CONTROL;

	cxgb3_ofld_send(csk->cdev->lldev, skb);
	return 0;
}

/**
 * ddp_setup_conn_digest - setup conn. digest setting
 * @csk: cxgb tcp socket
 * @tid: connection id
 * @hcrc: header digest enabled
 * @dcrc: data digest enabled
 * set up the iscsi digest settings for a connection identified by tid
 */
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				 int hcrc, int dcrc)
{
	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
					GFP_KERNEL);
	struct cpl_set_tcb_field *req;
	u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);

	log_debug(1 << CXGBI_DBG_DDP,
		"csk 0x%p, tid %u, crc %d,%d.\n", csk, tid, hcrc, dcrc);
	if (!skb)
		return -ENOMEM;

	/* set up ulp submode and page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(1);
	req->cpu_idx = 0;
	req->word = htons(31);
	req->mask = cpu_to_be64(0x0F000000);
	req->val = cpu_to_be64(val << 24);
	skb->priority = CPL_PRIORITY_CONTROL;

	cxgb3_ofld_send(csk->cdev->lldev, skb);
	return 0;
}

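/*
 * Both SET_TCB_FIELD writes above poke TCB word 31, which as used here
 * carries the per-connection iscsi ULP controls (this split is an
 * inference from the masks used, not from a documented register map):
 * bits 31:28 hold the DDP page-size index (mask 0xF0000000, val << 28
 * in ddp_setup_conn_pgidx()), and bits 27:24 hold the digest submode
 * (mask 0x0F000000, val << 24 here), bit 0 of the submode enabling
 * header CRC and bit 1 data CRC.  Enabling both digests gives val = 3,
 * i.e. a TCB write of 0x03000000.
 */
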
/**
 * cxgb3i_ddp_init - initialize the cxgb3 adapter's ddp resource
 * @cdev: cxgb3i adapter
 * initialize the ddp pagepod manager for a given adapter
 */
static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
{
	struct t3cdev *tdev = (struct t3cdev *)cdev->lldev;
	struct net_device *ndev = cdev->ports[0];
	struct cxgbi_tag_format tformat;
	unsigned int ppmax, tagmask = 0;
	struct ulp_iscsi_info uinfo;
	int i, err;

	err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
	if (err < 0) {
		pr_err("%s, failed to get iscsi param %d.\n",
		       ndev->name, err);
		return err;
	}
	if (uinfo.llimit >= uinfo.ulimit) {
		pr_warn("T3 %s, iscsi NOT enabled %u ~ %u!\n",
			ndev->name, uinfo.llimit, uinfo.ulimit);
		return -EACCES;
	}

	ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT;
	tagmask = cxgbi_tagmask_set(ppmax);

	pr_info("T3 %s: 0x%x~0x%x, 0x%x, tagmask 0x%x -> 0x%x.\n",
		ndev->name, uinfo.llimit, uinfo.ulimit, ppmax, uinfo.tagmask,
		tagmask);

	memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
	for (i = 0; i < 4; i++)
		tformat.pgsz_order[i] = uinfo.pgsz_factor[i];
	cxgbi_tagmask_check(tagmask, &tformat);

	err = cxgbi_ddp_ppm_setup(&tdev->ulp_iscsi, cdev, &tformat,
				  (uinfo.ulimit - uinfo.llimit + 1),
				  uinfo.llimit, uinfo.llimit, 0, 0, 0);
	if (err)
		return err;

	if (!(cdev->flags & CXGBI_FLAG_DDP_OFF)) {
		uinfo.tagmask = tagmask;
		uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT);

		err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
		if (err < 0) {
			pr_err("T3 %s fail to set iscsi param %d.\n",
			       ndev->name, err);
			cdev->flags |= CXGBI_FLAG_DDP_OFF;
		}
		err = 0;
	}

	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
	cdev->csk_ddp_set_map = ddp_set_map;
	cdev->csk_ddp_clear_map = ddp_clear_map;
	cdev->cdev2ppm = cdev2ppm;
	cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  uinfo.max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  uinfo.max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);

	return 0;
}

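/*
 * Worked example for the setup above (sizes are illustrative): if the
 * adapter reserves a 16MB pagepod region, ulimit - llimit + 1 =
 * 0x1000000 and ppmax = 0x1000000 >> PPOD_SIZE_SHIFT = 256K pagepods
 * (with the usual 64-byte pods, PPOD_SIZE_SHIFT = 6).
 * cxgbi_tagmask_set() then sizes the index portion of the iscsi tag to
 * cover that many pods, and the resulting tagmask is programmed back
 * into the chip via ULP_ISCSI_SET_PARAMS.
 */
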
static void cxgb3i_dev_close(struct t3cdev *t3dev)
{
	struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);

	if (!cdev || cdev->flags & CXGBI_FLAG_ADAPTER_RESET) {
		pr_info("0x%p close, f 0x%x.\n", cdev, cdev ? cdev->flags : 0);
		return;
	}

	cxgbi_device_unregister(cdev);
}

/**
 * cxgb3i_dev_open - init a t3 adapter structure and any h/w settings
 * @t3dev: t3cdev adapter
 */
static void cxgb3i_dev_open(struct t3cdev *t3dev)
{
	struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);
	struct adapter *adapter = tdev2adap(t3dev);
	int i, err;

	if (cdev) {
		pr_info("0x%p, updating.\n", cdev);
		return;
	}

	cdev = cxgbi_device_register(0, adapter->params.nports);
	if (!cdev) {
		pr_warn("device 0x%p register failed.\n", t3dev);
		return;
	}

	cdev->flags = CXGBI_FLAG_DEV_T3 | CXGBI_FLAG_IPV4_SET;
	cdev->lldev = t3dev;
	cdev->pdev = adapter->pdev;
	cdev->ports = adapter->port;
	cdev->nports = adapter->params.nports;
	cdev->mtus = adapter->params.mtus;
	cdev->nmtus = NMTUS;
	cdev->rx_credit_thres = cxgb3i_rx_credit_thres;
	cdev->skb_tx_rsvd = CXGB3I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr_norss);
	cdev->itp = &cxgb3i_iscsi_transport;

	err = cxgb3i_ddp_init(cdev);
	if (err) {
		pr_info("0x%p ddp init failed %d\n", cdev, err);
		goto err_out;
	}

	err = cxgb3i_ofld_init(cdev);
	if (err) {
		pr_info("0x%p offload init failed\n", cdev);
		goto err_out;
	}

	err = cxgbi_hbas_add(cdev, CXGB3I_MAX_LUN, CXGBI_MAX_CONN,
				&cxgb3i_host_template, cxgb3i_stt);
	if (err)
		goto err_out;

	for (i = 0; i < cdev->nports; i++)
		cdev->hbas[i]->ipv4addr =
			cxgb3i_get_private_ipv4addr(cdev->ports[i]);

	pr_info("cdev 0x%p, f 0x%x, t3dev 0x%p open, err %d.\n",
		cdev, cdev ? cdev->flags : 0, t3dev, err);
	return;

err_out:
	cxgbi_device_unregister(cdev);
}

static void cxgb3i_dev_event_handler(struct t3cdev *t3dev, u32 event, u32 port)
{
	struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);

	log_debug(1 << CXGBI_DBG_TOE,
		"0x%p, cdev 0x%p, event 0x%x, port 0x%x.\n",
		t3dev, cdev, event, port);
	if (!cdev)
		return;

	switch (event) {
	case OFFLOAD_STATUS_DOWN:
		cdev->flags |= CXGBI_FLAG_ADAPTER_RESET;
		break;
	case OFFLOAD_STATUS_UP:
		cdev->flags &= ~CXGBI_FLAG_ADAPTER_RESET;
		break;
	}
}

/**
 * cxgb3i_init_module - module init entry point
 *
 * initialize any driver wide global data structures and register itself
 *	with the cxgb3 module
 */
static int __init cxgb3i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb3i_iscsi_transport, &cxgb3i_stt);
	if (rc < 0)
		return rc;

	cxgb3_register_client(&t3_client);
	return 0;
}

/**
 * cxgb3i_exit_module - module cleanup/exit entry point
 *
 * go through the driver's hba list and, for each hba, release any resources
 *	held; then unregister the iscsi transport and the cxgb3 client
 */
static void __exit cxgb3i_exit_module(void)
{
	cxgb3_unregister_client(&t3_client);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T3);
	cxgbi_iscsi_cleanup(&cxgb3i_iscsi_transport, &cxgb3i_stt);
}

module_init(cxgb3i_init_module);
module_exit(cxgb3i_exit_module);