/*
 * cxgb4i.c: Chelsio T4 iSCSI driver.
 *
 * Copyright (c) 2010 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by:	Karen Xie (kxie@chelsio.com)
 *		Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <linux/netdevice.h>

#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4fw_api.h"
#include "l2t.h"
#include "cxgb4i.h"

static unsigned int dbg_level;

#include "../libcxgbi.h"

#define	DRV_MODULE_NAME		"cxgb4i"
#define	DRV_MODULE_DESC		"Chelsio T4/T5 iSCSI Driver"
#define	DRV_MODULE_VERSION	"0.9.4"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");

static int cxgb4i_rcv_win = 256 * 1024;
module_param(cxgb4i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes (default=256KB)");

static int cxgb4i_snd_win = 128 * 1024;
module_param(cxgb4i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes (default=128KB)");

static int cxgb4i_rx_credit_thres = 10 * 1024;
module_param(cxgb4i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
		 "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb4i_max_connect = (8 * 1024);
module_param(cxgb4i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");

static unsigned short cxgb4i_sport_base = 20000;
module_param(cxgb4i_sport_base, ushort, 0644);
MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");

typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);

static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);

static const struct cxgb4_uld_info cxgb4i_uld_info = {
	.name		= DRV_MODULE_NAME,
	.add		= t4_uld_add,
	.rx_handler	= t4_uld_rx_handler,
	.state_change	= t4_uld_state_change,
};

static struct scsi_host_template cxgb4i_host_template = {
	.module		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.proc_name	= DRV_MODULE_NAME,
	.can_queue	= CXGB4I_SCSI_HOST_QDEPTH,
	.queuecommand	= iscsi_queuecommand,
	.change_queue_depth = iscsi_change_queue_depth,
	.sg_tablesize	= SG_ALL,
	.max_sectors	= 0xFFFF,
	.cmd_per_lun	= ISCSI_DEF_CMD_PER_LUN,
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc	= iscsi_target_alloc,
	.use_clustering	= DISABLE_CLUSTERING,
	.this_id	= -1,
};

static struct iscsi_transport cxgb4i_iscsi_transport = {
	.owner		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.caps		= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
				CAP_DATADGST | CAP_DIGEST_OFFLOAD |
				CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
	.attr_is_visible	= cxgbi_attr_is_visible,
	.get_host_param	= cxgbi_get_host_param,
	.set_host_param	= cxgbi_set_host_param,
	/* session management */
	.create_session	= cxgbi_create_session,
	.destroy_session	= cxgbi_destroy_session,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn	= cxgbi_create_conn,
	.bind_conn	= cxgbi_bind_conn,
	.destroy_conn	= iscsi_tcp_conn_teardown,
	.start_conn	= iscsi_conn_start,
	.stop_conn	= iscsi_conn_stop,
	.get_conn_param	= iscsi_conn_get_param,
	.set_param	= cxgbi_set_conn_param,
	.get_stats	= cxgbi_get_conn_stats,
	/* pdu xmit req from user space */
	.send_pdu	= iscsi_conn_send_pdu,
	/* task */
	.init_task	= iscsi_tcp_task_init,
	.xmit_task	= iscsi_tcp_task_xmit,
	.cleanup_task	= cxgbi_cleanup_task,
	/* pdu */
	.alloc_pdu	= cxgbi_conn_alloc_pdu,
	.init_pdu	= cxgbi_conn_init_pdu,
	.xmit_pdu	= cxgbi_conn_xmit_pdu,
	.parse_pdu_itt	= cxgbi_parse_pdu_itt,
	/* TCP connect/disconnect */
	.get_ep_param	= cxgbi_get_ep_param,
	.ep_connect	= cxgbi_ep_connect,
	.ep_poll	= cxgbi_ep_poll,
	.ep_disconnect	= cxgbi_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

static struct scsi_transport_template *cxgb4i_stt;

/*
 * CPL (Chelsio Protocol Language) defines a message passing interface between
 * the host driver and the Chelsio ASIC.
 * The section below implements the CPLs related to iSCSI TCP connection
 * open/close/abort and data send/receive.
 */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define RCV_BUFSIZ_MASK		0x3FFU
#define MAX_IMM_TX_PKT_LEN	128

static inline void set_queue(struct sk_buff *skb, unsigned int queue,
			     const struct cxgbi_sock *csk)
{
	skb->queue_mapping = queue;
}

static int push_tx_frames(struct cxgbi_sock *, int);

/*
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data. We currently use the same limit as for Ethernet packets.
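 * (Note: with MAX_IMM_TX_PKT_LEN at 128 bytes, anything that fits in those
 * 128 bytes minus the fw_ofld_tx_data_wr header is carried inline in the
 * work request itself; larger payloads are passed by reference via an SGL.)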
171 */ 172 static inline int is_ofld_imm(const struct sk_buff *skb) 173 { 174 return skb->len <= (MAX_IMM_TX_PKT_LEN - 175 sizeof(struct fw_ofld_tx_data_wr)); 176 } 177 178 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, 179 struct l2t_entry *e) 180 { 181 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); 182 int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); 183 unsigned long long opt0; 184 unsigned int opt2; 185 unsigned int qid_atid = ((unsigned int)csk->atid) | 186 (((unsigned int)csk->rss_qid) << 14); 187 188 opt0 = KEEP_ALIVE(1) | 189 WND_SCALE(wscale) | 190 MSS_IDX(csk->mss_idx) | 191 L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) | 192 TX_CHAN(csk->tx_chan) | 193 SMAC_SEL(csk->smac_idx) | 194 ULP_MODE(ULP_MODE_ISCSI) | 195 RCV_BUFSIZ(cxgb4i_rcv_win >> 10); 196 opt2 = RX_CHANNEL(0) | 197 RSS_QUEUE_VALID | 198 (1 << 20) | 199 RSS_QUEUE(csk->rss_qid); 200 201 if (is_t4(lldi->adapter_type)) { 202 struct cpl_act_open_req *req = 203 (struct cpl_act_open_req *)skb->head; 204 205 INIT_TP_WR(req, 0); 206 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, 207 qid_atid)); 208 req->local_port = csk->saddr.sin_port; 209 req->peer_port = csk->daddr.sin_port; 210 req->local_ip = csk->saddr.sin_addr.s_addr; 211 req->peer_ip = csk->daddr.sin_addr.s_addr; 212 req->opt0 = cpu_to_be64(opt0); 213 req->params = cpu_to_be32(cxgb4_select_ntuple( 214 csk->cdev->ports[csk->port_id], 215 csk->l2t)); 216 opt2 |= 1 << 22; 217 req->opt2 = cpu_to_be32(opt2); 218 219 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 220 "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n", 221 csk, &req->local_ip, ntohs(req->local_port), 222 &req->peer_ip, ntohs(req->peer_port), 223 csk->atid, csk->rss_qid); 224 } else { 225 struct cpl_t5_act_open_req *req = 226 (struct cpl_t5_act_open_req *)skb->head; 227 228 INIT_TP_WR(req, 0); 229 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, 230 qid_atid)); 231 req->local_port = csk->saddr.sin_port; 232 req->peer_port = csk->daddr.sin_port; 233 req->local_ip = csk->saddr.sin_addr.s_addr; 234 req->peer_ip = csk->daddr.sin_addr.s_addr; 235 req->opt0 = cpu_to_be64(opt0); 236 req->params = cpu_to_be64(V_FILTER_TUPLE( 237 cxgb4_select_ntuple( 238 csk->cdev->ports[csk->port_id], 239 csk->l2t))); 240 opt2 |= 1 << 31; 241 req->opt2 = cpu_to_be32(opt2); 242 243 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 244 "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n", 245 csk, &req->local_ip, ntohs(req->local_port), 246 &req->peer_ip, ntohs(req->peer_port), 247 csk->atid, csk->rss_qid); 248 } 249 250 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); 251 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); 252 } 253 254 static void send_close_req(struct cxgbi_sock *csk) 255 { 256 struct sk_buff *skb = csk->cpl_close; 257 struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head; 258 unsigned int tid = csk->tid; 259 260 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 261 "csk 0x%p,%u,0x%lx, tid %u.\n", 262 csk, csk->state, csk->flags, csk->tid); 263 csk->cpl_close = NULL; 264 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); 265 INIT_TP_WR(req, tid); 266 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); 267 req->rsvd = 0; 268 269 cxgbi_sock_skb_entail(csk, skb); 270 if (csk->state >= CTP_ESTABLISHED) 271 push_tx_frames(csk, 1); 272 } 273 274 static void abort_arp_failure(void *handle, struct sk_buff *skb) 275 { 276 struct cxgbi_sock *csk = (struct cxgbi_sock *)handle; 277 struct cpl_abort_req 
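	/* L2T/ARP resolution failed for this ABORT_REQ: hand it to the
	 * hardware anyway, but ask that no RST be sent on the wire.
	 */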
*req; 278 279 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 280 "csk 0x%p,%u,0x%lx, tid %u, abort.\n", 281 csk, csk->state, csk->flags, csk->tid); 282 req = (struct cpl_abort_req *)skb->data; 283 req->cmd = CPL_ABORT_NO_RST; 284 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); 285 } 286 287 static void send_abort_req(struct cxgbi_sock *csk) 288 { 289 struct cpl_abort_req *req; 290 struct sk_buff *skb = csk->cpl_abort_req; 291 292 if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev) 293 return; 294 cxgbi_sock_set_state(csk, CTP_ABORTING); 295 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING); 296 cxgbi_sock_purge_write_queue(csk); 297 298 csk->cpl_abort_req = NULL; 299 req = (struct cpl_abort_req *)skb->head; 300 set_queue(skb, CPL_PRIORITY_DATA, csk); 301 req->cmd = CPL_ABORT_SEND_RST; 302 t4_set_arp_err_handler(skb, csk, abort_arp_failure); 303 INIT_TP_WR(req, csk->tid); 304 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid)); 305 req->rsvd0 = htonl(csk->snd_nxt); 306 req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT); 307 308 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 309 "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n", 310 csk, csk->state, csk->flags, csk->tid, csk->snd_nxt, 311 req->rsvd1); 312 313 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); 314 } 315 316 static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status) 317 { 318 struct sk_buff *skb = csk->cpl_abort_rpl; 319 struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head; 320 321 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 322 "csk 0x%p,%u,0x%lx,%u, status %d.\n", 323 csk, csk->state, csk->flags, csk->tid, rst_status); 324 325 csk->cpl_abort_rpl = NULL; 326 set_queue(skb, CPL_PRIORITY_DATA, csk); 327 INIT_TP_WR(rpl, csk->tid); 328 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid)); 329 rpl->cmd = rst_status; 330 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); 331 } 332 333 /* 334 * CPL connection rx data ack: host -> 335 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of 336 * credits sent. 337 */ 338 static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits) 339 { 340 struct sk_buff *skb; 341 struct cpl_rx_data_ack *req; 342 343 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, 344 "csk 0x%p,%u,0x%lx,%u, credit %u.\n", 345 csk, csk->state, csk->flags, csk->tid, credits); 346 347 skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC); 348 if (!skb) { 349 pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits); 350 return 0; 351 } 352 req = (struct cpl_rx_data_ack *)skb->head; 353 354 set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id); 355 INIT_TP_WR(req, csk->tid); 356 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, 357 csk->tid)); 358 req->credit_dack = cpu_to_be32(RX_CREDITS(credits) | RX_FORCE_ACK(1)); 359 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); 360 return credits; 361 } 362 363 /* 364 * sgl_len - calculates the size of an SGL of the given capacity 365 * @n: the number of SGL entries 366 * Calculates the number of flits needed for a scatter/gather list that 367 * can hold the given number of entries. 368 */ 369 static inline unsigned int sgl_len(unsigned int n) 370 { 371 n--; 372 return (3 * n) / 2 + (n & 1) + 2; 373 } 374 375 /* 376 * calc_tx_flits_ofld - calculate # of flits for an offload packet 377 * @skb: the packet 378 * 379 * Returns the number of flits needed for the given offload packet. 
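 * Immediate packets need only DIV_ROUND_UP(skb->len, 8) flits; anything
 * larger is counted as the pre-built header flits plus an SGL covering the
 * page fragments.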
380 * These packets are already fully constructed and no additional headers 381 * will be added. 382 */ 383 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) 384 { 385 unsigned int flits, cnt; 386 387 if (is_ofld_imm(skb)) 388 return DIV_ROUND_UP(skb->len, 8); 389 flits = skb_transport_offset(skb) / 8; 390 cnt = skb_shinfo(skb)->nr_frags; 391 if (skb_tail_pointer(skb) != skb_transport_header(skb)) 392 cnt++; 393 return flits + sgl_len(cnt); 394 } 395 396 static inline void send_tx_flowc_wr(struct cxgbi_sock *csk) 397 { 398 struct sk_buff *skb; 399 struct fw_flowc_wr *flowc; 400 int flowclen, i; 401 402 flowclen = 80; 403 skb = alloc_wr(flowclen, 0, GFP_ATOMIC); 404 flowc = (struct fw_flowc_wr *)skb->head; 405 flowc->op_to_nparams = 406 htonl(FW_WR_OP(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS(8)); 407 flowc->flowid_len16 = 408 htonl(FW_WR_LEN16(DIV_ROUND_UP(72, 16)) | 409 FW_WR_FLOWID(csk->tid)); 410 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 411 flowc->mnemval[0].val = htonl(csk->cdev->pfvf); 412 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 413 flowc->mnemval[1].val = htonl(csk->tx_chan); 414 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 415 flowc->mnemval[2].val = htonl(csk->tx_chan); 416 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 417 flowc->mnemval[3].val = htonl(csk->rss_qid); 418 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; 419 flowc->mnemval[4].val = htonl(csk->snd_nxt); 420 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; 421 flowc->mnemval[5].val = htonl(csk->rcv_nxt); 422 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; 423 flowc->mnemval[6].val = htonl(cxgb4i_snd_win); 424 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; 425 flowc->mnemval[7].val = htonl(csk->advmss); 426 flowc->mnemval[8].mnemonic = 0; 427 flowc->mnemval[8].val = 0; 428 for (i = 0; i < 9; i++) { 429 flowc->mnemval[i].r4[0] = 0; 430 flowc->mnemval[i].r4[1] = 0; 431 flowc->mnemval[i].r4[2] = 0; 432 } 433 set_queue(skb, CPL_PRIORITY_DATA, csk); 434 435 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 436 "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n", 437 csk, csk->tid, 0, csk->tx_chan, csk->rss_qid, 438 csk->snd_nxt, csk->rcv_nxt, cxgb4i_snd_win, 439 csk->advmss); 440 441 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); 442 } 443 444 static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, 445 int dlen, int len, u32 credits, int compl) 446 { 447 struct fw_ofld_tx_data_wr *req; 448 unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3; 449 unsigned int wr_ulp_mode = 0; 450 451 req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req)); 452 453 if (is_ofld_imm(skb)) { 454 req->op_to_immdlen = htonl(FW_WR_OP(FW_OFLD_TX_DATA_WR) | 455 FW_WR_COMPL(1) | 456 FW_WR_IMMDLEN(dlen)); 457 req->flowid_len16 = htonl(FW_WR_FLOWID(csk->tid) | 458 FW_WR_LEN16(credits)); 459 } else { 460 req->op_to_immdlen = 461 cpu_to_be32(FW_WR_OP(FW_OFLD_TX_DATA_WR) | 462 FW_WR_COMPL(1) | 463 FW_WR_IMMDLEN(0)); 464 req->flowid_len16 = 465 cpu_to_be32(FW_WR_FLOWID(csk->tid) | 466 FW_WR_LEN16(credits)); 467 } 468 if (submode) 469 wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE(ULP2_MODE_ISCSI) | 470 FW_OFLD_TX_DATA_WR_ULPSUBMODE(submode); 471 req->tunnel_to_proxy = htonl(wr_ulp_mode | 472 FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 
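		/* shove (flush) the data only when nothing else is queued
		 * behind this skb; otherwise let the firmware coalesce.
		 */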
0 : 1)); 473 req->plen = htonl(len); 474 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) 475 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); 476 } 477 478 static void arp_failure_skb_discard(void *handle, struct sk_buff *skb) 479 { 480 kfree_skb(skb); 481 } 482 483 static int push_tx_frames(struct cxgbi_sock *csk, int req_completion) 484 { 485 int total_size = 0; 486 struct sk_buff *skb; 487 488 if (unlikely(csk->state < CTP_ESTABLISHED || 489 csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) { 490 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK | 491 1 << CXGBI_DBG_PDU_TX, 492 "csk 0x%p,%u,0x%lx,%u, in closing state.\n", 493 csk, csk->state, csk->flags, csk->tid); 494 return 0; 495 } 496 497 while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) { 498 int dlen = skb->len; 499 int len = skb->len; 500 unsigned int credits_needed; 501 502 skb_reset_transport_header(skb); 503 if (is_ofld_imm(skb)) 504 credits_needed = DIV_ROUND_UP(dlen + 505 sizeof(struct fw_ofld_tx_data_wr), 16); 506 else 507 credits_needed = DIV_ROUND_UP(8*calc_tx_flits_ofld(skb) 508 + sizeof(struct fw_ofld_tx_data_wr), 509 16); 510 511 if (csk->wr_cred < credits_needed) { 512 log_debug(1 << CXGBI_DBG_PDU_TX, 513 "csk 0x%p, skb %u/%u, wr %d < %u.\n", 514 csk, skb->len, skb->data_len, 515 credits_needed, csk->wr_cred); 516 break; 517 } 518 __skb_unlink(skb, &csk->write_queue); 519 set_queue(skb, CPL_PRIORITY_DATA, csk); 520 skb->csum = credits_needed; 521 csk->wr_cred -= credits_needed; 522 csk->wr_una_cred += credits_needed; 523 cxgbi_sock_enqueue_wr(csk, skb); 524 525 log_debug(1 << CXGBI_DBG_PDU_TX, 526 "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n", 527 csk, skb->len, skb->data_len, credits_needed, 528 csk->wr_cred, csk->wr_una_cred); 529 530 if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) { 531 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { 532 send_tx_flowc_wr(csk); 533 skb->csum += 5; 534 csk->wr_cred -= 5; 535 csk->wr_una_cred += 5; 536 } 537 len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb)); 538 make_tx_data_wr(csk, skb, dlen, len, credits_needed, 539 req_completion); 540 csk->snd_nxt += len; 541 cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR); 542 } 543 total_size += skb->truesize; 544 t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard); 545 546 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX, 547 "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n", 548 csk, csk->state, csk->flags, csk->tid, skb, len); 549 550 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); 551 } 552 return total_size; 553 } 554 555 static inline void free_atid(struct cxgbi_sock *csk) 556 { 557 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); 558 559 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) { 560 cxgb4_free_atid(lldi->tids, csk->atid); 561 cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID); 562 cxgbi_sock_put(csk); 563 } 564 } 565 566 static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb) 567 { 568 struct cxgbi_sock *csk; 569 struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data; 570 unsigned short tcp_opt = ntohs(req->tcp_opt); 571 unsigned int tid = GET_TID(req); 572 unsigned int atid = GET_TID_TID(ntohl(req->tos_atid)); 573 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 574 struct tid_info *t = lldi->tids; 575 u32 rcv_isn = be32_to_cpu(req->rcv_isn); 576 577 csk = lookup_atid(t, atid); 578 if (unlikely(!csk)) { 579 pr_err("NO conn. 
for atid %u, cdev 0x%p.\n", atid, cdev); 580 goto rel_skb; 581 } 582 583 if (csk->atid != atid) { 584 pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n", 585 atid, csk, csk->state, csk->flags, csk->tid, csk->atid); 586 goto rel_skb; 587 } 588 589 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 590 "csk 0x%p,%u,0x%lx, tid %u, atid %u, rseq %u.\n", 591 csk, csk->state, csk->flags, tid, atid, rcv_isn); 592 593 cxgbi_sock_get(csk); 594 csk->tid = tid; 595 cxgb4_insert_tid(lldi->tids, csk, tid); 596 cxgbi_sock_set_flag(csk, CTPF_HAS_TID); 597 598 free_atid(csk); 599 600 spin_lock_bh(&csk->lock); 601 if (unlikely(csk->state != CTP_ACTIVE_OPEN)) 602 pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n", 603 csk, csk->state, csk->flags, csk->tid); 604 605 if (csk->retry_timer.function) { 606 del_timer(&csk->retry_timer); 607 csk->retry_timer.function = NULL; 608 } 609 610 csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn; 611 /* 612 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't 613 * pass through opt0. 614 */ 615 if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10)) 616 csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10); 617 618 csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40; 619 if (GET_TCPOPT_TSTAMP(tcp_opt)) 620 csk->advmss -= 12; 621 if (csk->advmss < 128) 622 csk->advmss = 128; 623 624 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 625 "csk 0x%p, mss_idx %u, advmss %u.\n", 626 csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss); 627 628 cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt)); 629 630 if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED))) 631 send_abort_req(csk); 632 else { 633 if (skb_queue_len(&csk->write_queue)) 634 push_tx_frames(csk, 0); 635 cxgbi_conn_tx_open(csk); 636 } 637 spin_unlock_bh(&csk->lock); 638 639 rel_skb: 640 __kfree_skb(skb); 641 } 642 643 static int act_open_rpl_status_to_errno(int status) 644 { 645 switch (status) { 646 case CPL_ERR_CONN_RESET: 647 return -ECONNREFUSED; 648 case CPL_ERR_ARP_MISS: 649 return -EHOSTUNREACH; 650 case CPL_ERR_CONN_TIMEDOUT: 651 return -ETIMEDOUT; 652 case CPL_ERR_TCAM_FULL: 653 return -ENOMEM; 654 case CPL_ERR_CONN_EXIST: 655 return -EADDRINUSE; 656 default: 657 return -EIO; 658 } 659 } 660 661 static void csk_act_open_retry_timer(unsigned long data) 662 { 663 struct sk_buff *skb; 664 struct cxgbi_sock *csk = (struct cxgbi_sock *)data; 665 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); 666 667 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 668 "csk 0x%p,%u,0x%lx,%u.\n", 669 csk, csk->state, csk->flags, csk->tid); 670 671 cxgbi_sock_get(csk); 672 spin_lock_bh(&csk->lock); 673 skb = alloc_wr(is_t4(lldi->adapter_type) ? 
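		/* T4 and T5 use different active-open request layouts, so
		 * size the work request accordingly.
		 */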
674 sizeof(struct cpl_act_open_req) : 675 sizeof(struct cpl_t5_act_open_req), 676 0, GFP_ATOMIC); 677 if (!skb) 678 cxgbi_sock_fail_act_open(csk, -ENOMEM); 679 else { 680 skb->sk = (struct sock *)csk; 681 t4_set_arp_err_handler(skb, csk, 682 cxgbi_sock_act_open_req_arp_failure); 683 send_act_open_req(csk, skb, csk->l2t); 684 } 685 spin_unlock_bh(&csk->lock); 686 cxgbi_sock_put(csk); 687 } 688 689 static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) 690 { 691 struct cxgbi_sock *csk; 692 struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data; 693 unsigned int tid = GET_TID(rpl); 694 unsigned int atid = 695 GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status))); 696 unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status)); 697 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 698 struct tid_info *t = lldi->tids; 699 700 csk = lookup_atid(t, atid); 701 if (unlikely(!csk)) { 702 pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid); 703 goto rel_skb; 704 } 705 706 pr_info("%pI4:%u-%pI4:%u, atid %u,%u, status %u, csk 0x%p,%u,0x%lx.\n", 707 &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port), 708 &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port), 709 atid, tid, status, csk, csk->state, csk->flags); 710 711 if (status == CPL_ERR_RTX_NEG_ADVICE) 712 goto rel_skb; 713 714 if (status && status != CPL_ERR_TCAM_FULL && 715 status != CPL_ERR_CONN_EXIST && 716 status != CPL_ERR_ARP_MISS) 717 cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl)); 718 719 cxgbi_sock_get(csk); 720 spin_lock_bh(&csk->lock); 721 722 if (status == CPL_ERR_CONN_EXIST && 723 csk->retry_timer.function != csk_act_open_retry_timer) { 724 csk->retry_timer.function = csk_act_open_retry_timer; 725 mod_timer(&csk->retry_timer, jiffies + HZ / 2); 726 } else 727 cxgbi_sock_fail_act_open(csk, 728 act_open_rpl_status_to_errno(status)); 729 730 spin_unlock_bh(&csk->lock); 731 cxgbi_sock_put(csk); 732 rel_skb: 733 __kfree_skb(skb); 734 } 735 736 static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb) 737 { 738 struct cxgbi_sock *csk; 739 struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data; 740 unsigned int tid = GET_TID(req); 741 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 742 struct tid_info *t = lldi->tids; 743 744 csk = lookup_tid(t, tid); 745 if (unlikely(!csk)) { 746 pr_err("can't find connection for tid %u.\n", tid); 747 goto rel_skb; 748 } 749 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 750 "csk 0x%p,%u,0x%lx,%u.\n", 751 csk, csk->state, csk->flags, csk->tid); 752 cxgbi_sock_rcv_peer_close(csk); 753 rel_skb: 754 __kfree_skb(skb); 755 } 756 757 static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) 758 { 759 struct cxgbi_sock *csk; 760 struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data; 761 unsigned int tid = GET_TID(rpl); 762 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 763 struct tid_info *t = lldi->tids; 764 765 csk = lookup_tid(t, tid); 766 if (unlikely(!csk)) { 767 pr_err("can't find connection for tid %u.\n", tid); 768 goto rel_skb; 769 } 770 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 771 "csk 0x%p,%u,0x%lx,%u.\n", 772 csk, csk->state, csk->flags, csk->tid); 773 cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt)); 774 rel_skb: 775 __kfree_skb(skb); 776 } 777 778 static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason, 779 int *need_rst) 780 { 781 switch (abort_reason) { 782 case CPL_ERR_BAD_SYN: /* fall through */ 783 case 
CPL_ERR_CONN_RESET: 784 return csk->state > CTP_ESTABLISHED ? 785 -EPIPE : -ECONNRESET; 786 case CPL_ERR_XMIT_TIMEDOUT: 787 case CPL_ERR_PERSIST_TIMEDOUT: 788 case CPL_ERR_FINWAIT2_TIMEDOUT: 789 case CPL_ERR_KEEPALIVE_TIMEDOUT: 790 return -ETIMEDOUT; 791 default: 792 return -EIO; 793 } 794 } 795 796 static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb) 797 { 798 struct cxgbi_sock *csk; 799 struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data; 800 unsigned int tid = GET_TID(req); 801 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 802 struct tid_info *t = lldi->tids; 803 int rst_status = CPL_ABORT_NO_RST; 804 805 csk = lookup_tid(t, tid); 806 if (unlikely(!csk)) { 807 pr_err("can't find connection for tid %u.\n", tid); 808 goto rel_skb; 809 } 810 811 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 812 "csk 0x%p,%u,0x%lx, tid %u, status 0x%x.\n", 813 csk, csk->state, csk->flags, csk->tid, req->status); 814 815 if (req->status == CPL_ERR_RTX_NEG_ADVICE || 816 req->status == CPL_ERR_PERSIST_NEG_ADVICE) 817 goto rel_skb; 818 819 cxgbi_sock_get(csk); 820 spin_lock_bh(&csk->lock); 821 822 if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) { 823 cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD); 824 cxgbi_sock_set_state(csk, CTP_ABORTING); 825 goto done; 826 } 827 828 cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD); 829 send_abort_rpl(csk, rst_status); 830 831 if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { 832 csk->err = abort_status_to_errno(csk, req->status, &rst_status); 833 cxgbi_sock_closed(csk); 834 } 835 done: 836 spin_unlock_bh(&csk->lock); 837 cxgbi_sock_put(csk); 838 rel_skb: 839 __kfree_skb(skb); 840 } 841 842 static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb) 843 { 844 struct cxgbi_sock *csk; 845 struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data; 846 unsigned int tid = GET_TID(rpl); 847 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 848 struct tid_info *t = lldi->tids; 849 850 csk = lookup_tid(t, tid); 851 if (!csk) 852 goto rel_skb; 853 854 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 855 "status 0x%x, csk 0x%p, s %u, 0x%lx.\n", 856 rpl->status, csk, csk ? csk->state : 0, 857 csk ? csk->flags : 0UL); 858 859 if (rpl->status == CPL_ERR_ABORT_FAILED) 860 goto rel_skb; 861 862 cxgbi_sock_rcv_abort_rpl(csk); 863 rel_skb: 864 __kfree_skb(skb); 865 } 866 867 static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb) 868 { 869 struct cxgbi_sock *csk; 870 struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data; 871 unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp); 872 unsigned int tid = GET_TID(cpl); 873 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 874 struct tid_info *t = lldi->tids; 875 876 csk = lookup_tid(t, tid); 877 if (unlikely(!csk)) { 878 pr_err("can't find conn. 
for tid %u.\n", tid); 879 goto rel_skb; 880 } 881 882 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, 883 "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n", 884 csk, csk->state, csk->flags, csk->tid, skb, skb->len, 885 pdu_len_ddp); 886 887 spin_lock_bh(&csk->lock); 888 889 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { 890 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 891 "csk 0x%p,%u,0x%lx,%u, bad state.\n", 892 csk, csk->state, csk->flags, csk->tid); 893 if (csk->state != CTP_ABORTING) 894 goto abort_conn; 895 else 896 goto discard; 897 } 898 899 cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq); 900 cxgbi_skcb_flags(skb) = 0; 901 902 skb_reset_transport_header(skb); 903 __skb_pull(skb, sizeof(*cpl)); 904 __pskb_trim(skb, ntohs(cpl->len)); 905 906 if (!csk->skb_ulp_lhdr) { 907 unsigned char *bhs; 908 unsigned int hlen, dlen, plen; 909 910 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, 911 "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n", 912 csk, csk->state, csk->flags, csk->tid, skb); 913 csk->skb_ulp_lhdr = skb; 914 cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR); 915 916 if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) { 917 pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n", 918 csk->tid, cxgbi_skcb_tcp_seq(skb), 919 csk->rcv_nxt); 920 goto abort_conn; 921 } 922 923 bhs = skb->data; 924 hlen = ntohs(cpl->len); 925 dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF; 926 927 plen = ISCSI_PDU_LEN(pdu_len_ddp); 928 if (is_t4(lldi->adapter_type)) 929 plen -= 40; 930 931 if ((hlen + dlen) != plen) { 932 pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len " 933 "mismatch %u != %u + %u, seq 0x%x.\n", 934 csk->tid, plen, hlen, dlen, 935 cxgbi_skcb_tcp_seq(skb)); 936 goto abort_conn; 937 } 938 939 cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3); 940 if (dlen) 941 cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len; 942 csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb); 943 944 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, 945 "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n", 946 csk, skb, *bhs, hlen, dlen, 947 ntohl(*((unsigned int *)(bhs + 16))), 948 ntohl(*((unsigned int *)(bhs + 24)))); 949 950 } else { 951 struct sk_buff *lskb = csk->skb_ulp_lhdr; 952 953 cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA); 954 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, 955 "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n", 956 csk, csk->state, csk->flags, skb, lskb); 957 } 958 959 __skb_queue_tail(&csk->receive_queue, skb); 960 spin_unlock_bh(&csk->lock); 961 return; 962 963 abort_conn: 964 send_abort_req(csk); 965 discard: 966 spin_unlock_bh(&csk->lock); 967 rel_skb: 968 __kfree_skb(skb); 969 } 970 971 static void do_rx_data_ddp(struct cxgbi_device *cdev, 972 struct sk_buff *skb) 973 { 974 struct cxgbi_sock *csk; 975 struct sk_buff *lskb; 976 struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data; 977 unsigned int tid = GET_TID(rpl); 978 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 979 struct tid_info *t = lldi->tids; 980 unsigned int status = ntohl(rpl->ddpvld); 981 982 csk = lookup_tid(t, tid); 983 if (unlikely(!csk)) { 984 pr_err("can't find connection for tid %u.\n", tid); 985 goto rel_skb; 986 } 987 988 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, 989 "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n", 990 csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr); 991 992 spin_lock_bh(&csk->lock); 993 994 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { 995 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 996 "csk 0x%p,%u,0x%lx,%u, bad state.\n", 997 csk, csk->state, csk->flags, 
csk->tid); 998 if (csk->state != CTP_ABORTING) 999 goto abort_conn; 1000 else 1001 goto discard; 1002 } 1003 1004 if (!csk->skb_ulp_lhdr) { 1005 pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid); 1006 goto abort_conn; 1007 } 1008 1009 lskb = csk->skb_ulp_lhdr; 1010 csk->skb_ulp_lhdr = NULL; 1011 1012 cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc); 1013 1014 if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb)) 1015 pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n", 1016 csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb)); 1017 1018 if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) { 1019 pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n", 1020 csk, lskb, status, cxgbi_skcb_flags(lskb)); 1021 cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR); 1022 } 1023 if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) { 1024 pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n", 1025 csk, lskb, status, cxgbi_skcb_flags(lskb)); 1026 cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR); 1027 } 1028 if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) { 1029 log_debug(1 << CXGBI_DBG_PDU_RX, 1030 "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n", 1031 csk, lskb, status); 1032 cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR); 1033 } 1034 if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) && 1035 !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) { 1036 log_debug(1 << CXGBI_DBG_PDU_RX, 1037 "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n", 1038 csk, lskb, status); 1039 cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD); 1040 } 1041 log_debug(1 << CXGBI_DBG_PDU_RX, 1042 "csk 0x%p, lskb 0x%p, f 0x%lx.\n", 1043 csk, lskb, cxgbi_skcb_flags(lskb)); 1044 1045 cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS); 1046 cxgbi_conn_pdu_ready(csk); 1047 spin_unlock_bh(&csk->lock); 1048 goto rel_skb; 1049 1050 abort_conn: 1051 send_abort_req(csk); 1052 discard: 1053 spin_unlock_bh(&csk->lock); 1054 rel_skb: 1055 __kfree_skb(skb); 1056 } 1057 1058 static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb) 1059 { 1060 struct cxgbi_sock *csk; 1061 struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data; 1062 unsigned int tid = GET_TID(rpl); 1063 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 1064 struct tid_info *t = lldi->tids; 1065 1066 csk = lookup_tid(t, tid); 1067 if (unlikely(!csk)) 1068 pr_err("can't find connection for tid %u.\n", tid); 1069 else { 1070 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 1071 "csk 0x%p,%u,0x%lx,%u.\n", 1072 csk, csk->state, csk->flags, csk->tid); 1073 cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una), 1074 rpl->seq_vld); 1075 } 1076 __kfree_skb(skb); 1077 } 1078 1079 static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) 1080 { 1081 struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data; 1082 unsigned int tid = GET_TID(rpl); 1083 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 1084 struct tid_info *t = lldi->tids; 1085 struct cxgbi_sock *csk; 1086 1087 csk = lookup_tid(t, tid); 1088 if (!csk) 1089 pr_err("can't find conn. 
for tid %u.\n", tid); 1090 1091 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 1092 "csk 0x%p,%u,%lx,%u, status 0x%x.\n", 1093 csk, csk->state, csk->flags, csk->tid, rpl->status); 1094 1095 if (rpl->status != CPL_ERR_NONE) 1096 pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n", 1097 csk, tid, rpl->status); 1098 1099 __kfree_skb(skb); 1100 } 1101 1102 static int alloc_cpls(struct cxgbi_sock *csk) 1103 { 1104 csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), 1105 0, GFP_KERNEL); 1106 if (!csk->cpl_close) 1107 return -ENOMEM; 1108 1109 csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), 1110 0, GFP_KERNEL); 1111 if (!csk->cpl_abort_req) 1112 goto free_cpls; 1113 1114 csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), 1115 0, GFP_KERNEL); 1116 if (!csk->cpl_abort_rpl) 1117 goto free_cpls; 1118 return 0; 1119 1120 free_cpls: 1121 cxgbi_sock_free_cpl_skbs(csk); 1122 return -ENOMEM; 1123 } 1124 1125 static inline void l2t_put(struct cxgbi_sock *csk) 1126 { 1127 if (csk->l2t) { 1128 cxgb4_l2t_release(csk->l2t); 1129 csk->l2t = NULL; 1130 cxgbi_sock_put(csk); 1131 } 1132 } 1133 1134 static void release_offload_resources(struct cxgbi_sock *csk) 1135 { 1136 struct cxgb4_lld_info *lldi; 1137 1138 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 1139 "csk 0x%p,%u,0x%lx,%u.\n", 1140 csk, csk->state, csk->flags, csk->tid); 1141 1142 cxgbi_sock_free_cpl_skbs(csk); 1143 if (csk->wr_cred != csk->wr_max_cred) { 1144 cxgbi_sock_purge_wr_queue(csk); 1145 cxgbi_sock_reset_wr_list(csk); 1146 } 1147 1148 l2t_put(csk); 1149 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) 1150 free_atid(csk); 1151 else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) { 1152 lldi = cxgbi_cdev_priv(csk->cdev); 1153 cxgb4_remove_tid(lldi->tids, 0, csk->tid); 1154 cxgbi_sock_clear_flag(csk, CTPF_HAS_TID); 1155 cxgbi_sock_put(csk); 1156 } 1157 csk->dst = NULL; 1158 csk->cdev = NULL; 1159 } 1160 1161 static int init_act_open(struct cxgbi_sock *csk) 1162 { 1163 struct cxgbi_device *cdev = csk->cdev; 1164 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 1165 struct net_device *ndev = cdev->ports[csk->port_id]; 1166 struct port_info *pi = netdev_priv(ndev); 1167 struct sk_buff *skb = NULL; 1168 struct neighbour *n; 1169 unsigned int step; 1170 1171 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 1172 "csk 0x%p,%u,0x%lx,%u.\n", 1173 csk, csk->state, csk->flags, csk->tid); 1174 1175 csk->atid = cxgb4_alloc_atid(lldi->tids, csk); 1176 if (csk->atid < 0) { 1177 pr_err("%s, NO atid available.\n", ndev->name); 1178 return -EINVAL; 1179 } 1180 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); 1181 cxgbi_sock_get(csk); 1182 1183 n = dst_neigh_lookup(csk->dst, &csk->daddr.sin_addr.s_addr); 1184 if (!n) { 1185 pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name); 1186 goto rel_resource; 1187 } 1188 csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0); 1189 if (!csk->l2t) { 1190 pr_err("%s, cannot alloc l2t.\n", ndev->name); 1191 goto rel_resource; 1192 } 1193 cxgbi_sock_get(csk); 1194 1195 skb = alloc_wr(is_t4(lldi->adapter_type) ? 
1196 sizeof(struct cpl_act_open_req) : 1197 sizeof(struct cpl_t5_act_open_req), 1198 0, GFP_ATOMIC); 1199 if (!skb) 1200 goto rel_resource; 1201 skb->sk = (struct sock *)csk; 1202 t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure); 1203 1204 if (!csk->mtu) 1205 csk->mtu = dst_mtu(csk->dst); 1206 cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx); 1207 csk->tx_chan = cxgb4_port_chan(ndev); 1208 /* SMT two entries per row */ 1209 csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1; 1210 step = lldi->ntxq / lldi->nchan; 1211 csk->txq_idx = cxgb4_port_idx(ndev) * step; 1212 step = lldi->nrxq / lldi->nchan; 1213 csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step]; 1214 csk->wr_max_cred = csk->wr_cred = lldi->wr_cred; 1215 csk->wr_una_cred = 0; 1216 cxgbi_sock_reset_wr_list(csk); 1217 csk->err = 0; 1218 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 1219 "csk 0x%p,p%d,%s, %u,%u,%u, mss %u,%u, smac %u.\n", 1220 csk, pi->port_id, ndev->name, csk->tx_chan, 1221 csk->txq_idx, csk->rss_qid, csk->mtu, csk->mss_idx, 1222 csk->smac_idx); 1223 1224 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); 1225 send_act_open_req(csk, skb, csk->l2t); 1226 neigh_release(n); 1227 return 0; 1228 1229 rel_resource: 1230 if (n) 1231 neigh_release(n); 1232 if (skb) 1233 __kfree_skb(skb); 1234 return -EINVAL; 1235 } 1236 1237 #define CPL_ISCSI_DATA 0xB2 1238 #define CPL_RX_ISCSI_DDP 0x49 1239 cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = { 1240 [CPL_ACT_ESTABLISH] = do_act_establish, 1241 [CPL_ACT_OPEN_RPL] = do_act_open_rpl, 1242 [CPL_PEER_CLOSE] = do_peer_close, 1243 [CPL_ABORT_REQ_RSS] = do_abort_req_rss, 1244 [CPL_ABORT_RPL_RSS] = do_abort_rpl_rss, 1245 [CPL_CLOSE_CON_RPL] = do_close_con_rpl, 1246 [CPL_FW4_ACK] = do_fw4_ack, 1247 [CPL_ISCSI_HDR] = do_rx_iscsi_hdr, 1248 [CPL_ISCSI_DATA] = do_rx_iscsi_hdr, 1249 [CPL_SET_TCB_RPL] = do_set_tcb_rpl, 1250 [CPL_RX_DATA_DDP] = do_rx_data_ddp, 1251 [CPL_RX_ISCSI_DDP] = do_rx_data_ddp, 1252 }; 1253 1254 int cxgb4i_ofld_init(struct cxgbi_device *cdev) 1255 { 1256 int rc; 1257 1258 if (cxgb4i_max_connect > CXGB4I_MAX_CONN) 1259 cxgb4i_max_connect = CXGB4I_MAX_CONN; 1260 1261 rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base, 1262 cxgb4i_max_connect); 1263 if (rc < 0) 1264 return rc; 1265 1266 cdev->csk_release_offload_resources = release_offload_resources; 1267 cdev->csk_push_tx_frames = push_tx_frames; 1268 cdev->csk_send_abort_req = send_abort_req; 1269 cdev->csk_send_close_req = send_close_req; 1270 cdev->csk_send_rx_credits = send_rx_credits; 1271 cdev->csk_alloc_cpls = alloc_cpls; 1272 cdev->csk_init_act_open = init_act_open; 1273 1274 pr_info("cdev 0x%p, offload up, added.\n", cdev); 1275 return 0; 1276 } 1277 1278 /* 1279 * functions to program the pagepod in h/w 1280 */ 1281 #define ULPMEM_IDATA_MAX_NPPODS 4 /* 256/PPOD_SIZE */ 1282 static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi, 1283 struct ulp_mem_io *req, 1284 unsigned int wr_len, unsigned int dlen, 1285 unsigned int pm_addr) 1286 { 1287 struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1); 1288 1289 INIT_ULPTX_WR(req, wr_len, 0, 0); 1290 if (is_t4(lldi->adapter_type)) 1291 req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) | 1292 (ULP_MEMIO_ORDER(1))); 1293 else 1294 req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) | 1295 (V_T5_ULP_MEMIO_IMM(1))); 1296 req->dlen = htonl(ULP_MEMIO_DATA_LEN(dlen >> 5)); 1297 req->lock_addr = htonl(ULP_MEMIO_ADDR(pm_addr >> 5)); 1298 req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16)); 1299 1300 idata->cmd_more = 
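	/* the pagepod bytes follow inline as a ULP_TX_SC_IMM sub-command */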
htonl(ULPTX_CMD(ULP_TX_SC_IMM)); 1301 idata->len = htonl(dlen); 1302 } 1303 1304 static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id, 1305 struct cxgbi_pagepod_hdr *hdr, unsigned int idx, 1306 unsigned int npods, 1307 struct cxgbi_gather_list *gl, 1308 unsigned int gl_pidx) 1309 { 1310 struct cxgbi_ddp_info *ddp = cdev->ddp; 1311 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 1312 struct sk_buff *skb; 1313 struct ulp_mem_io *req; 1314 struct ulptx_idata *idata; 1315 struct cxgbi_pagepod *ppod; 1316 unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit; 1317 unsigned int dlen = PPOD_SIZE * npods; 1318 unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) + 1319 sizeof(struct ulptx_idata) + dlen, 16); 1320 unsigned int i; 1321 1322 skb = alloc_wr(wr_len, 0, GFP_ATOMIC); 1323 if (!skb) { 1324 pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n", 1325 cdev, idx, npods); 1326 return -ENOMEM; 1327 } 1328 req = (struct ulp_mem_io *)skb->head; 1329 set_queue(skb, CPL_PRIORITY_CONTROL, NULL); 1330 1331 ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr); 1332 idata = (struct ulptx_idata *)(req + 1); 1333 ppod = (struct cxgbi_pagepod *)(idata + 1); 1334 1335 for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) { 1336 if (!hdr && !gl) 1337 cxgbi_ddp_ppod_clear(ppod); 1338 else 1339 cxgbi_ddp_ppod_set(ppod, hdr, gl, gl_pidx); 1340 } 1341 1342 cxgb4_ofld_send(cdev->ports[port_id], skb); 1343 return 0; 1344 } 1345 1346 static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr, 1347 unsigned int idx, unsigned int npods, 1348 struct cxgbi_gather_list *gl) 1349 { 1350 unsigned int i, cnt; 1351 int err = 0; 1352 1353 for (i = 0; i < npods; i += cnt, idx += cnt) { 1354 cnt = npods - i; 1355 if (cnt > ULPMEM_IDATA_MAX_NPPODS) 1356 cnt = ULPMEM_IDATA_MAX_NPPODS; 1357 err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr, 1358 idx, cnt, gl, 4 * i); 1359 if (err < 0) 1360 break; 1361 } 1362 return err; 1363 } 1364 1365 static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag, 1366 unsigned int idx, unsigned int npods) 1367 { 1368 unsigned int i, cnt; 1369 int err; 1370 1371 for (i = 0; i < npods; i += cnt, idx += cnt) { 1372 cnt = npods - i; 1373 if (cnt > ULPMEM_IDATA_MAX_NPPODS) 1374 cnt = ULPMEM_IDATA_MAX_NPPODS; 1375 err = ddp_ppod_write_idata(chba->cdev, chba->port_id, NULL, 1376 idx, cnt, NULL, 0); 1377 if (err < 0) 1378 break; 1379 } 1380 } 1381 1382 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, 1383 int pg_idx, bool reply) 1384 { 1385 struct sk_buff *skb; 1386 struct cpl_set_tcb_field *req; 1387 1388 if (!pg_idx || pg_idx >= DDP_PGIDX_MAX) 1389 return 0; 1390 1391 skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL); 1392 if (!skb) 1393 return -ENOMEM; 1394 1395 /* set up ulp page size */ 1396 req = (struct cpl_set_tcb_field *)skb->head; 1397 INIT_TP_WR(req, csk->tid); 1398 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); 1399 req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid)); 1400 req->word_cookie = htons(0); 1401 req->mask = cpu_to_be64(0x3 << 8); 1402 req->val = cpu_to_be64(pg_idx << 8); 1403 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id); 1404 1405 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 1406 "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx); 1407 1408 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); 1409 return 0; 1410 } 1411 1412 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, 1413 int hcrc, int dcrc, int reply) 1414 { 
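	/* program the connection's iSCSI header/data digest settings into
	 * its TCB (ULP submode bits) with a SET_TCB_FIELD work request.
	 */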
1415 struct sk_buff *skb; 1416 struct cpl_set_tcb_field *req; 1417 1418 if (!hcrc && !dcrc) 1419 return 0; 1420 1421 skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL); 1422 if (!skb) 1423 return -ENOMEM; 1424 1425 csk->hcrc_len = (hcrc ? 4 : 0); 1426 csk->dcrc_len = (dcrc ? 4 : 0); 1427 /* set up ulp submode */ 1428 req = (struct cpl_set_tcb_field *)skb->head; 1429 INIT_TP_WR(req, tid); 1430 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); 1431 req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid)); 1432 req->word_cookie = htons(0); 1433 req->mask = cpu_to_be64(0x3 << 4); 1434 req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) | 1435 (dcrc ? ULP_CRC_DATA : 0)) << 4); 1436 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id); 1437 1438 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 1439 "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc); 1440 1441 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); 1442 return 0; 1443 } 1444 1445 static int cxgb4i_ddp_init(struct cxgbi_device *cdev) 1446 { 1447 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 1448 struct cxgbi_ddp_info *ddp = cdev->ddp; 1449 unsigned int tagmask, pgsz_factor[4]; 1450 int err; 1451 1452 if (ddp) { 1453 kref_get(&ddp->refcnt); 1454 pr_warn("cdev 0x%p, ddp 0x%p already set up.\n", 1455 cdev, cdev->ddp); 1456 return -EALREADY; 1457 } 1458 1459 err = cxgbi_ddp_init(cdev, lldi->vr->iscsi.start, 1460 lldi->vr->iscsi.start + lldi->vr->iscsi.size - 1, 1461 lldi->iscsi_iolen, lldi->iscsi_iolen); 1462 if (err < 0) 1463 return err; 1464 1465 ddp = cdev->ddp; 1466 1467 tagmask = ddp->idx_mask << PPOD_IDX_SHIFT; 1468 cxgbi_ddp_page_size_factor(pgsz_factor); 1469 cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor); 1470 1471 cdev->csk_ddp_setup_digest = ddp_setup_conn_digest; 1472 cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx; 1473 cdev->csk_ddp_set = ddp_set_map; 1474 cdev->csk_ddp_clear = ddp_clear_map; 1475 1476 pr_info("cxgb4i 0x%p tag: sw %u, rsvd %u,%u, mask 0x%x.\n", 1477 cdev, cdev->tag_format.sw_bits, cdev->tag_format.rsvd_bits, 1478 cdev->tag_format.rsvd_shift, cdev->tag_format.rsvd_mask); 1479 pr_info("cxgb4i 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, " 1480 " %u/%u.\n", 1481 cdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask, 1482 ddp->rsvd_tag_mask, ddp->max_txsz, lldi->iscsi_iolen, 1483 ddp->max_rxsz, lldi->iscsi_iolen); 1484 pr_info("cxgb4i 0x%p max payload size: %u/%u, %u/%u.\n", 1485 cdev, cdev->tx_max_size, ddp->max_txsz, cdev->rx_max_size, 1486 ddp->max_rxsz); 1487 return 0; 1488 } 1489 1490 static void *t4_uld_add(const struct cxgb4_lld_info *lldi) 1491 { 1492 struct cxgbi_device *cdev; 1493 struct port_info *pi; 1494 int i, rc; 1495 1496 cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports); 1497 if (!cdev) { 1498 pr_info("t4 device 0x%p, register failed.\n", lldi); 1499 return NULL; 1500 } 1501 pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n", 1502 cdev, lldi->adapter_type, lldi->nports, 1503 lldi->ports[0]->name, lldi->nchan, lldi->ntxq, 1504 lldi->nrxq, lldi->wr_cred); 1505 for (i = 0; i < lldi->nrxq; i++) 1506 log_debug(1 << CXGBI_DBG_DEV, 1507 "t4 0x%p, rxq id #%d: %u.\n", 1508 cdev, i, lldi->rxq_ids[i]); 1509 1510 memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi)); 1511 cdev->flags = CXGBI_FLAG_DEV_T4; 1512 cdev->pdev = lldi->pdev; 1513 cdev->ports = lldi->ports; 1514 cdev->nports = lldi->nports; 1515 cdev->mtus = lldi->mtus; 1516 cdev->nmtus = NMTUS; 1517 cdev->snd_win = cxgb4i_snd_win; 1518 cdev->rcv_win = cxgb4i_rcv_win; 1519 
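	/* the send/receive windows above and the RX credit return threshold
	 * below are taken from the module parameters.
	 */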
cdev->rx_credit_thres = cxgb4i_rx_credit_thres; 1520 cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN; 1521 cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr); 1522 cdev->itp = &cxgb4i_iscsi_transport; 1523 1524 cdev->pfvf = FW_VIID_PFN_GET(cxgb4_port_viid(lldi->ports[0])) << 8; 1525 pr_info("cdev 0x%p,%s, pfvf %u.\n", 1526 cdev, lldi->ports[0]->name, cdev->pfvf); 1527 1528 rc = cxgb4i_ddp_init(cdev); 1529 if (rc) { 1530 pr_info("t4 0x%p ddp init failed.\n", cdev); 1531 goto err_out; 1532 } 1533 rc = cxgb4i_ofld_init(cdev); 1534 if (rc) { 1535 pr_info("t4 0x%p ofld init failed.\n", cdev); 1536 goto err_out; 1537 } 1538 1539 rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN, 1540 &cxgb4i_host_template, cxgb4i_stt); 1541 if (rc) 1542 goto err_out; 1543 1544 for (i = 0; i < cdev->nports; i++) { 1545 pi = netdev_priv(lldi->ports[i]); 1546 cdev->hbas[i]->port_id = pi->port_id; 1547 } 1548 return cdev; 1549 1550 err_out: 1551 cxgbi_device_unregister(cdev); 1552 return ERR_PTR(-ENOMEM); 1553 } 1554 1555 #define RX_PULL_LEN 128 1556 static int t4_uld_rx_handler(void *handle, const __be64 *rsp, 1557 const struct pkt_gl *pgl) 1558 { 1559 const struct cpl_act_establish *rpl; 1560 struct sk_buff *skb; 1561 unsigned int opc; 1562 struct cxgbi_device *cdev = handle; 1563 1564 if (pgl == NULL) { 1565 unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8; 1566 1567 skb = alloc_wr(len, 0, GFP_ATOMIC); 1568 if (!skb) 1569 goto nomem; 1570 skb_copy_to_linear_data(skb, &rsp[1], len); 1571 } else { 1572 if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) { 1573 pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n", 1574 pgl->va, be64_to_cpu(*rsp), 1575 be64_to_cpu(*(u64 *)pgl->va), 1576 pgl->tot_len); 1577 return 0; 1578 } 1579 skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN); 1580 if (unlikely(!skb)) 1581 goto nomem; 1582 } 1583 1584 rpl = (struct cpl_act_establish *)skb->data; 1585 opc = rpl->ot.opcode; 1586 log_debug(1 << CXGBI_DBG_TOE, 1587 "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n", 1588 cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb); 1589 if (cxgb4i_cplhandlers[opc]) 1590 cxgb4i_cplhandlers[opc](cdev, skb); 1591 else { 1592 pr_err("No handler for opcode 0x%x.\n", opc); 1593 __kfree_skb(skb); 1594 } 1595 return 0; 1596 nomem: 1597 log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n"); 1598 return 1; 1599 } 1600 1601 static int t4_uld_state_change(void *handle, enum cxgb4_state state) 1602 { 1603 struct cxgbi_device *cdev = handle; 1604 1605 switch (state) { 1606 case CXGB4_STATE_UP: 1607 pr_info("cdev 0x%p, UP.\n", cdev); 1608 /* re-initialize */ 1609 break; 1610 case CXGB4_STATE_START_RECOVERY: 1611 pr_info("cdev 0x%p, RECOVERY.\n", cdev); 1612 /* close all connections */ 1613 break; 1614 case CXGB4_STATE_DOWN: 1615 pr_info("cdev 0x%p, DOWN.\n", cdev); 1616 break; 1617 case CXGB4_STATE_DETACH: 1618 pr_info("cdev 0x%p, DETACH.\n", cdev); 1619 cxgbi_device_unregister(cdev); 1620 break; 1621 default: 1622 pr_info("cdev 0x%p, unknown state %d.\n", cdev, state); 1623 break; 1624 } 1625 return 0; 1626 } 1627 1628 static int __init cxgb4i_init_module(void) 1629 { 1630 int rc; 1631 1632 printk(KERN_INFO "%s", version); 1633 1634 rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt); 1635 if (rc < 0) 1636 return rc; 1637 cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info); 1638 return 0; 1639 } 1640 1641 static void __exit cxgb4i_exit_module(void) 1642 { 1643 cxgb4_unregister_uld(CXGB4_ULD_ISCSI); 1644 cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4); 1645 cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, 
&cxgb4i_stt); 1646 } 1647 1648 module_init(cxgb4i_init_module); 1649 module_exit(cxgb4i_exit_module); 1650