/*
 * cxgb4i.c: Chelsio T4 iSCSI driver.
 *
 * Copyright (c) 2010-2015 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by:	Karen Xie (kxie@chelsio.com)
 *		Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4fw_api.h"
#include "l2t.h"
#include "cxgb4i.h"
#include "clip_tbl.h"

static unsigned int dbg_level;

#include "../libcxgbi.h"

#define	DRV_MODULE_NAME		"cxgb4i"
#define	DRV_MODULE_DESC		"Chelsio T4/T5 iSCSI Driver"
#define	DRV_MODULE_VERSION	"0.9.5-ko"
#define	DRV_MODULE_RELDATE	"Apr. 2015"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");

#define CXGB4I_DEFAULT_10G_RCV_WIN (256 * 1024)
static int cxgb4i_rcv_win = -1;
module_param(cxgb4i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");

#define CXGB4I_DEFAULT_10G_SND_WIN (128 * 1024)
static int cxgb4i_snd_win = -1;
module_param(cxgb4i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");

static int cxgb4i_rx_credit_thres = 10 * 1024;
module_param(cxgb4i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
		 "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb4i_max_connect = (8 * 1024);
module_param(cxgb4i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");

static unsigned short cxgb4i_sport_base = 20000;
module_param(cxgb4i_sport_base, ushort, 0644);
MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");

typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);

static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);
static inline int send_tx_flowc_wr(struct cxgbi_sock *);

static const struct cxgb4_uld_info cxgb4i_uld_info = {
	.name = DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 1024,
	.lro = false,
	.add = t4_uld_add,
	.rx_handler = t4_uld_rx_handler,
	.state_change = t4_uld_state_change,
};

static struct scsi_host_template cxgb4i_host_template = {
	.module		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.proc_name	= DRV_MODULE_NAME,
	.can_queue	= CXGB4I_SCSI_HOST_QDEPTH,
	.queuecommand	= iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.sg_tablesize	= SG_ALL,
	.max_sectors	= 0xFFFF,
	.cmd_per_lun	= ISCSI_DEF_CMD_PER_LUN,
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc	= iscsi_target_alloc,
	.use_clustering	= DISABLE_CLUSTERING,
	.this_id	= -1,
	.track_queue_depth = 1,
};

static struct iscsi_transport cxgb4i_iscsi_transport = {
	.owner		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.caps		= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
			  CAP_DATADGST | CAP_DIGEST_OFFLOAD |
			  CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
	.attr_is_visible = cxgbi_attr_is_visible,
	.get_host_param	= cxgbi_get_host_param,
	.set_host_param	= cxgbi_set_host_param,
	/* session management */
	.create_session	= cxgbi_create_session,
	.destroy_session = cxgbi_destroy_session,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn	= cxgbi_create_conn,
	.bind_conn	= cxgbi_bind_conn,
	.destroy_conn	= iscsi_tcp_conn_teardown,
	.start_conn	= iscsi_conn_start,
	.stop_conn	= iscsi_conn_stop,
	.get_conn_param	= iscsi_conn_get_param,
	.set_param	= cxgbi_set_conn_param,
	.get_stats	= cxgbi_get_conn_stats,
	/* pdu xmit req from user space */
	.send_pdu	= iscsi_conn_send_pdu,
	/* task */
	.init_task	= iscsi_tcp_task_init,
	.xmit_task	= iscsi_tcp_task_xmit,
	.cleanup_task	= cxgbi_cleanup_task,
	/* pdu */
	.alloc_pdu	= cxgbi_conn_alloc_pdu,
	.init_pdu	= cxgbi_conn_init_pdu,
	.xmit_pdu	= cxgbi_conn_xmit_pdu,
	.parse_pdu_itt	= cxgbi_parse_pdu_itt,
	/* TCP connect/disconnect */
	.get_ep_param	= cxgbi_get_ep_param,
	.ep_connect	= cxgbi_ep_connect,
	.ep_poll	= cxgbi_ep_poll,
	.ep_disconnect	= cxgbi_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

static struct scsi_transport_template *cxgb4i_stt;

/*
 * CPL (Chelsio Protocol Language) defines a message passing interface between
 * the host driver and the Chelsio ASIC.
 * The section below implements the CPLs related to iSCSI TCP connection
 * open/close/abort and data send/receive.
 */
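
/*
 * Roughly, the exchanges implemented below are: active open
 * (CPL_ACT_OPEN_REQ answered by CPL_ACT_OPEN_RPL or CPL_ACT_ESTABLISH),
 * close (CPL_CLOSE_CON_REQ / CPL_CLOSE_CON_RPL), abort (CPL_ABORT_REQ /
 * CPL_ABORT_RPL), transmit via FW_OFLD_TX_DATA_WR work requests credited
 * back through CPL_FW4_ACK, and receive via CPL_ISCSI_HDR, CPL_ISCSI_DATA
 * and CPL_RX_DATA_DDP (or CPL_RX_ISCSI_CMP).
 */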

#define RCV_BUFSIZ_MASK		0x3FFU
#define MAX_IMM_TX_PKT_LEN	256

static int push_tx_frames(struct cxgbi_sock *, int);

/*
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data. We currently use the same limit as for Ethernet packets.
 */
static inline bool is_ofld_imm(const struct sk_buff *skb)
{
	int len = skb->len;

	if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
		len += sizeof(struct fw_ofld_tx_data_wr);

	return len <= MAX_IMM_TX_PKT_LEN;
}

static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
			      struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				(((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(csk->mss_idx) |
		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(csk->rcv_win >> 10);

	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F |
		RSS_QUEUE_V(csk->rss_qid);

	if (is_t4(lldi->adapter_type)) {
		struct cpl_act_open_req *req =
				(struct cpl_act_open_req *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be32(cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t));
		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	} else if (is_t5(lldi->adapter_type)) {
		struct cpl_t5_act_open_req *req =
				(struct cpl_t5_act_open_req *)skb->head;
		u32 isn = (prandom_u32() & ~7UL) - 1;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be64(FILTER_TUPLE_V(
				cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t)));
		req->rsvd = cpu_to_be32(isn);
		opt2 |= T5_ISS_VALID;
		opt2 |= T5_OPT_2_VALID_F;

		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	} else {
		struct cpl_t6_act_open_req *req =
				(struct cpl_t6_act_open_req *)skb->head;
		u32 isn = (prandom_u32() & ~7UL) - 1;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be64(FILTER_TUPLE_V(
				cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t)));
		req->rsvd = cpu_to_be32(isn);

		opt2 |= T5_ISS_VALID;
		opt2 |= RX_FC_DISABLE_F;
		opt2 |= T5_OPT_2_VALID_F;

		req->opt2 = cpu_to_be32(opt2);
		req->rsvd2 = cpu_to_be32(0);
		req->opt3 = cpu_to_be32(0);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       CHELSIO_CHIP_VERSION(lldi->adapter_type), csk,
		       csk->state, csk->flags, csk->atid, csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

#if IS_ENABLED(CONFIG_IPV6)
static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
			       struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				(((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(csk->mss_idx) |
		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(csk->rcv_win >> 10);

	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F |
		RSS_QUEUE_V(csk->rss_qid);

	if (is_t4(lldi->adapter_type)) {
		struct cpl_act_open_req6 *req =
			    (struct cpl_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;

		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
									8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
									8);

		req->opt0 = cpu_to_be64(opt0);

		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be32(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t));
	} else if (is_t5(lldi->adapter_type)) {
		struct cpl_t5_act_open_req6 *req =
				(struct cpl_t5_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;
		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
									8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
									8);
		req->opt0 = cpu_to_be64(opt0);

		opt2 |= T5_OPT_2_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t)));
	} else {
		struct cpl_t6_act_open_req6 *req =
				(struct cpl_t6_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;
		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
									8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
									8);
		req->opt0 = cpu_to_be64(opt0);

		opt2 |= RX_FC_DISABLE_F;
		opt2 |= T5_OPT_2_VALID_F;

		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t)));

		req->rsvd2 = cpu_to_be32(0);
		req->opt3 = cpu_to_be32(0);
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
		CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state,
		csk->flags, csk->atid,
		&csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
		csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
#endif

static void send_close_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = csk->tid;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, tid %u.\n",
		  csk, csk->state, csk->flags, csk->tid);
	csk->cpl_close = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	cxgbi_sock_skb_entail(csk, skb);
	if (csk->state >= CTP_ESTABLISHED)
		push_tx_frames(csk, 1);
}

static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
	struct cpl_abort_req *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, tid %u, abort.\n",
		  csk, csk->state, csk->flags, csk->tid);
	req = (struct cpl_abort_req *)skb->data;
	req->cmd = CPL_ABORT_NO_RST;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

static void send_abort_req(struct cxgbi_sock *csk)
{
	struct cpl_abort_req *req;
	struct sk_buff *skb = csk->cpl_abort_req;

	if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
		return;

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_state(csk, CTP_ABORTING);
	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
	cxgbi_sock_purge_write_queue(csk);

	csk->cpl_abort_req = NULL;
	req = (struct cpl_abort_req *)skb->head;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	req->cmd = CPL_ABORT_SEND_RST;
	t4_set_arp_err_handler(skb, csk, abort_arp_failure);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
	req->rsvd0 = htonl(csk->snd_nxt);
	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
		  req->rsvd1);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
	struct sk_buff *skb = csk->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u, status %d.\n",
		  csk,
		  csk->state, csk->flags, csk->tid, rst_status);

	csk->cpl_abort_rpl = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(rpl, csk->tid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
	rpl->cmd = rst_status;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
		  csk, csk->state, csk->flags, csk->tid, credits);

	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
	if (!skb) {
		pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
		return 0;
	}
	req = (struct cpl_rx_data_ack *)skb->head;

	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
				      csk->tid));
	req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
				       | RX_FORCE_ACK_F);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return credits;
}

/*
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

/*
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);
	flits = skb_transport_offset(skb) / 8;
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}

#define FLOWC_WR_NPARAMS_MIN	9
static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
{
	int nparams, flowclen16, flowclen;

	nparams = FLOWC_WR_NPARAMS_MIN;
	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;
	/*
	 * Return the number of 16-byte credits used by the FlowC request.
	 * Pass back the nparams and actual FlowC length if requested.
	 */
	if (nparamsp)
		*nparamsp = nparams;
	if (flowclenp)
		*flowclenp = flowclen;

	return flowclen16;
}
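
/*
 * For reference, a worked example of the arithmetic above (assuming the
 * usual t4fw_api.h layout of an 8-byte fw_flowc_wr header followed by
 * 8-byte mnemval entries): with FLOWC_WR_NPARAMS_MIN = 9,
 * flowclen = offsetof(struct fw_flowc_wr, mnemval[9]) = 8 + 9 * 8 = 80
 * bytes, so flowclen16 = DIV_ROUND_UP(80, 16) = 5 16-byte credits and
 * flowclen is rounded back up to 5 * 16 = 80 bytes.
 */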

static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;
	struct fw_flowc_wr *flowc;
	int nparams, flowclen16, flowclen;

	flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
	skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
	flowc = (struct fw_flowc_wr *)skb->head;
	flowc->op_to_nparams =
		htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 =
		htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htonl(csk->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htonl(csk->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htonl(csk->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = htonl(csk->snd_nxt);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = htonl(csk->rcv_nxt);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = htonl(csk->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = htonl(csk->advmss);
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
	flowc->mnemval[8].val = 16384;

	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
		  csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
		  csk->snd_nxt, csk->rcv_nxt, csk->snd_win,
		  csk->advmss);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);

	return flowclen16;
}

static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
				   int dlen, int len, u32 credits, int compl)
{
	struct fw_ofld_tx_data_wr *req;
	unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
	unsigned int wr_ulp_mode = 0, val;
	bool imm = is_ofld_imm(skb);

	req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));

	if (imm) {
		req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
					FW_WR_COMPL_F |
					FW_WR_IMMDLEN_V(dlen));
		req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
					FW_WR_LEN16_V(credits));
	} else {
		req->op_to_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
					FW_WR_COMPL_F |
					FW_WR_IMMDLEN_V(0));
		req->flowid_len16 =
			cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
					FW_WR_LEN16_V(credits));
	}
	if (submode)
		wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) |
				FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
	val = skb_peek(&csk->write_queue) ?
				0 : 1;
	req->tunnel_to_proxy = htonl(wr_ulp_mode |
				     FW_OFLD_TX_DATA_WR_SHOVE_V(val));
	req->plen = htonl(len);
	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
}

static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);
}

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;

	if (unlikely(csk->state < CTP_ESTABLISHED ||
		csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
			  1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		return 0;
	}

	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
		int dlen = skb->len;
		int len = skb->len;
		unsigned int credits_needed;
		int flowclen16 = 0;

		skb_reset_transport_header(skb);
		if (is_ofld_imm(skb))
			credits_needed = DIV_ROUND_UP(dlen, 16);
		else
			credits_needed = DIV_ROUND_UP(
						8 * calc_tx_flits_ofld(skb),
						16);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
			credits_needed += DIV_ROUND_UP(
					sizeof(struct fw_ofld_tx_data_wr),
					16);

		/*
		 * Assumes the initial credits is large enough to support
		 * fw_flowc_wr plus largest possible first payload
		 */
		if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
			flowclen16 = send_tx_flowc_wr(csk);
			csk->wr_cred -= flowclen16;
			csk->wr_una_cred += flowclen16;
			cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
		}

		if (csk->wr_cred < credits_needed) {
			log_debug(1 << CXGBI_DBG_PDU_TX,
				  "csk 0x%p, skb %u/%u, wr %d < %u.\n",
				  csk, skb->len, skb->data_len,
				  credits_needed, csk->wr_cred);
			break;
		}
		__skb_unlink(skb, &csk->write_queue);
		set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
		skb->csum = credits_needed + flowclen16;
		csk->wr_cred -= credits_needed;
		csk->wr_una_cred += credits_needed;
		cxgbi_sock_enqueue_wr(csk, skb);

		log_debug(1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
			  csk, skb->len, skb->data_len, credits_needed,
			  csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
			len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
			make_tx_data_wr(csk, skb, dlen, len, credits_needed,
					req_completion);
			csk->snd_nxt += len;
			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
		} else if (cxgbi_skcb_test_flag(skb, SKCBF_TX_FLAG_COMPL) &&
			   (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
			struct cpl_close_con_req *req =
				(struct cpl_close_con_req *)skb->data;
			req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
		}
		total_size += skb->truesize;
		t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
			  csk, csk->state, csk->flags, csk->tid, skb, len);

		cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
	}
	return total_size;
}

static inline void free_atid(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);

	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
		cxgb4_free_atid(lldi->tids, csk->atid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
		cxgbi_sock_put(csk);
	}
}

static void
do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
	unsigned short tcp_opt = ntohs(req->tcp_opt);
	unsigned int tid = GET_TID(req);
	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 rcv_isn = be32_to_cpu(req->rcv_isn);

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
		goto rel_skb;
	}

	if (csk->atid != atid) {
		pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
		       atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
		goto rel_skb;
	}

	pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       atid, tid, csk, csk->state, csk->flags, rcv_isn);

	module_put(cdev->owner);

	cxgbi_sock_get(csk);
	csk->tid = tid;
	cxgb4_insert_tid(lldi->tids, csk, tid);
	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);

	free_atid(csk);

	spin_lock_bh(&csk->lock);
	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
			csk, csk->state, csk->flags, csk->tid);

	if (csk->retry_timer.function) {
		del_timer(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}

	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
	/*
	 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
	 * pass through opt0.
	 */
	if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10))
		csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10);

	csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
	if (TCPOPT_TSTAMP_G(tcp_opt))
		csk->advmss -= 12;
	if (csk->advmss < 128)
		csk->advmss = 128;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, mss_idx %u, advmss %u.\n",
		  csk, TCPOPT_MSS_G(tcp_opt), csk->advmss);

	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
		send_abort_req(csk);
	else {
		if (skb_queue_len(&csk->write_queue))
			push_tx_frames(csk, 0);
		cxgbi_conn_tx_open(csk);
	}
	spin_unlock_bh(&csk->lock);

rel_skb:
	__kfree_skb(skb);
}

static int act_open_rpl_status_to_errno(int status)
{
	switch (status) {
	case CPL_ERR_CONN_RESET:
		return -ECONNREFUSED;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

static void csk_act_open_retry_timer(unsigned long data)
{
	struct sk_buff *skb = NULL;
	struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
				   struct l2t_entry *);
	int t4 = is_t4(lldi->adapter_type), size, size6;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (t4) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	}

	if (csk->csk_family == AF_INET) {
		send_act_open_func = send_act_open_req;
		skb = alloc_wr(size, 0, GFP_ATOMIC);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		send_act_open_func = send_act_open_req6;
		skb = alloc_wr(size6, 0, GFP_ATOMIC);
#endif
	}

	if (!skb)
		cxgbi_sock_fail_act_open(csk, -ENOMEM);
	else {
		skb->sk = (struct sock *)csk;
		t4_set_arp_err_handler(skb, csk,
				       cxgbi_sock_act_open_req_arp_failure);
		send_act_open_func(csk, skb, csk->l2t);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);

}

static inline bool is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
		status == CPL_ERR_KEEPALV_NEG_ADVICE ||
		status == CPL_ERR_PERSIST_NEG_ADVICE;
}

static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	unsigned int atid =
		TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status)));
	unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
		goto rel_skb;
	}

	pr_info_ipaddr("tid %u/%u, status %u.\n"
		       "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
		       atid, tid, status, csk, csk->state, csk->flags);

	if (is_neg_adv(status))
		goto rel_skb;

	module_put(cdev->owner);

	if (status && status != CPL_ERR_TCAM_FULL &&
	    status != CPL_ERR_CONN_EXIST &&
	    status != CPL_ERR_ARP_MISS)
		cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl));

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (status == CPL_ERR_CONN_EXIST &&
	    csk->retry_timer.function != csk_act_open_retry_timer) {
		csk->retry_timer.function = csk_act_open_retry_timer;
		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
	} else
		cxgbi_sock_fail_act_open(csk,
					 act_open_rpl_status_to_errno(status));

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_peer_close(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
		       (&csk->saddr),
		       (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
rel_skb:
	__kfree_skb(skb);
}

static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
				 int *need_rst)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN: /* fall through */
	case CPL_ERR_CONN_RESET:
		return csk->state > CTP_ESTABLISHED ?
			-EPIPE : -ECONNRESET;
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return -ETIMEDOUT;
	default:
		return -EIO;
	}
}

static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	int rst_status = CPL_ABORT_NO_RST;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid, req->status);

	if (is_neg_adv(req->status))
		goto rel_skb;

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
	cxgbi_sock_set_state(csk, CTP_ABORTING);

	send_abort_rpl(csk, rst_status);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk)
		goto rel_skb;

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
		       (&csk->saddr), (&csk->daddr), csk,
		       csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status == CPL_ERR_ABORT_FAILED)
		goto rel_skb;

	cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data;
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk) {
		pr_err("can't find connection for tid %u.\n", tid);
	} else {
		/* not expecting this, reset the connection. */
		pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid);
		spin_lock_bh(&csk->lock);
		send_abort_req(csk);
		spin_unlock_bh(&csk->lock);
	}
	__kfree_skb(skb);
}

static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, skb, skb->len,
		  pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr) {
		unsigned char *bhs;
		unsigned int hlen, dlen, plen;

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
			  csk, csk->state, csk->flags, csk->tid, skb);
		csk->skb_ulp_lhdr = skb;
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);

		if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
			pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
				csk->tid, cxgbi_skcb_tcp_seq(skb),
				csk->rcv_nxt);
			goto abort_conn;
		}

		bhs = skb->data;
		hlen = ntohs(cpl->len);
		dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;

		plen = ISCSI_PDU_LEN_G(pdu_len_ddp);
		if (is_t4(lldi->adapter_type))
			plen -= 40;

		if ((hlen + dlen) != plen) {
			pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
				"mismatch %u != %u + %u, seq 0x%x.\n",
				csk->tid, plen, hlen, dlen,
				cxgbi_skcb_tcp_seq(skb));
			goto abort_conn;
		}

		cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
		if (dlen)
			cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
		csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
			  csk, skb, *bhs, hlen, dlen,
			  ntohl(*((unsigned int *)(bhs + 16))),
			  ntohl(*((unsigned int *)(bhs + 24))));

	} else {
		struct sk_buff *lskb = csk->skb_ulp_lhdr;

		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
			  csk, csk->state, csk->flags, skb, lskb);
	}

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
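
/*
 * A PDU normally arrives as a CPL_ISCSI_HDR carrying the BHS (tracked via
 * csk->skb_ulp_lhdr), optionally followed by CPL_ISCSI_DATA skbs for
 * payload that was not placed directly, and is completed by CPL_RX_DATA_DDP
 * or CPL_RX_ISCSI_CMP, which carries the ddpvld status and data digest and
 * marks the head skb SKCBF_RX_STATUS so that libcxgbi can process the
 * queued PDU.
 */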

static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct sk_buff *lskb;
	u32 tid = GET_TID(cpl);
	u16 pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, skb,
		  skb->len, pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);

		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr)
		csk->skb_ulp_lhdr = skb;

	lskb = csk->skb_ulp_lhdr;
	cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
		  csk, csk->state, csk->flags, skb, lskb);

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

static void
cxgb4i_process_ddpvld(struct cxgbi_sock *csk,
		      struct sk_buff *skb, u32 ddpvld)
{
	if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
			csk, skb, ddpvld, cxgbi_skcb_flags(skb));
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
	}

	if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
			csk, skb, ddpvld, cxgbi_skcb_flags(skb));
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
	}

	if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
			  csk, skb, ddpvld);
		cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
	}

	if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
	    !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
			  csk, skb, ddpvld);
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
	}
}

static void do_rx_data_ddp(struct cxgbi_device *cdev,
			   struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct sk_buff *lskb;
	struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 ddpvld = be32_to_cpu(rpl->ddpvld);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
		  csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	if (!csk->skb_ulp_lhdr) {
		pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
		goto abort_conn;
	}

	lskb = csk->skb_ulp_lhdr;
	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);

	if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
		pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
			csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));

	cxgb4i_process_ddpvld(csk, lskb, ddpvld);

	log_debug(1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
		  csk, lskb, cxgbi_skcb_flags(lskb));

	cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);
	goto rel_skb;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

static void
do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct sk_buff *data_skb = NULL;
	u32 tid = GET_TID(rpl);
	u32 ddpvld = be32_to_cpu(rpl->ddpvld);
	u32 seq = be32_to_cpu(rpl->seq);
	u16 pdu_len_ddp = be16_to_cpu(rpl->pdu_len_ddp);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, "
		  "pdu_len_ddp %u, status %u.\n",
		  csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr,
		  ntohs(rpl->len), pdu_len_ddp, rpl->status);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);

		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = seq;
	cxgbi_skcb_flags(skb) = 0;
	cxgbi_skcb_rx_pdulen(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*rpl));
	__pskb_trim(skb, be16_to_cpu(rpl->len));

	csk->rcv_nxt = seq + pdu_len_ddp;

	if (csk->skb_ulp_lhdr) {
		data_skb = skb_peek(&csk->receive_queue);
		if (!data_skb ||
		    !cxgbi_skcb_test_flag(data_skb, SKCBF_RX_DATA)) {
			pr_err("Error! freelist data not found 0x%p, tid %u\n",
			       data_skb, tid);

			goto abort_conn;
		}
		__skb_unlink(data_skb, &csk->receive_queue);

		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA);

		__skb_queue_tail(&csk->receive_queue, skb);
		__skb_queue_tail(&csk->receive_queue, data_skb);
	} else {
		__skb_queue_tail(&csk->receive_queue, skb);
	}

	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
	cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
	cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL);
	cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc);

	cxgb4i_process_ddpvld(csk, skb, ddpvld);

	log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n",
		  csk, skb, cxgbi_skcb_flags(skb));

	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);

	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk))
		pr_err("can't find connection for tid %u.\n", tid);
	else {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u.\n",
			  csk, csk->state, csk->flags, csk->tid);
		cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
				      rpl->seq_vld);
	}
	__kfree_skb(skb);
}

static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct cxgbi_sock *csk;

	csk = lookup_tid(t, tid);
	if (!csk) {
		pr_err("can't find conn. for tid %u.\n", tid);
		/* avoid dereferencing a NULL csk below */
		__kfree_skb(skb);
		return;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status != CPL_ERR_NONE)
		pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
		       csk, tid, rpl->status);

	__kfree_skb(skb);
}

static int alloc_cpls(struct cxgbi_sock *csk)
{
	csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
					0, GFP_KERNEL);
	if (!csk->cpl_close)
		return -ENOMEM;

	csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
					0, GFP_KERNEL);
	if (!csk->cpl_abort_req)
		goto free_cpls;

	csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
					0, GFP_KERNEL);
	if (!csk->cpl_abort_rpl)
		goto free_cpls;
	return 0;

free_cpls:
	cxgbi_sock_free_cpl_skbs(csk);
	return -ENOMEM;
}

static inline void l2t_put(struct cxgbi_sock *csk)
{
	if (csk->l2t) {
		cxgb4_l2t_release(csk->l2t);
		csk->l2t = NULL;
		cxgbi_sock_put(csk);
	}
}

static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi;
#if IS_ENABLED(CONFIG_IPV6)
	struct net_device *ndev = csk->cdev->ports[csk->port_id];
#endif

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_free_cpl_skbs(csk);
	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}

	l2t_put(csk);
#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_release(ndev,
				   (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif

	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		lldi = cxgbi_cdev_priv(csk->cdev);
		cxgb4_remove_tid(lldi->tids, 0, csk->tid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
	csk->cdev = NULL;
}

static int init_act_open(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct sk_buff *skb = NULL;
	struct neighbour *n = NULL;
	void *daddr;
	unsigned int step;
	unsigned int size, size6;
	unsigned int linkspeed;
	unsigned int rcv_winf, snd_winf;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	if (csk->csk_family == AF_INET)
		daddr = &csk->daddr.sin_addr.s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	else if (csk->csk_family == AF_INET6)
		daddr = &csk->daddr6.sin6_addr;
#endif
	else {
		pr_err("address family 0x%x not supported\n", csk->csk_family);
		goto rel_resource;
	}

	n = dst_neigh_lookup(csk->dst, daddr);

	if (!n) {
		pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
		goto rel_resource;
	}

	csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
	if (csk->atid < 0) {
		pr_err("%s, NO atid available.\n", ndev->name);
		goto rel_resource_without_clip;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);

	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
	if (!csk->l2t) {
		pr_err("%s, cannot alloc l2t.\n", ndev->name);
		goto rel_resource_without_clip;
	}
	cxgbi_sock_get(csk);

#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif

	if (is_t4(lldi->adapter_type)) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else if (is_t5(lldi->adapter_type)) {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	} else {
		size = sizeof(struct cpl_t6_act_open_req);
		size6 = sizeof(struct cpl_t6_act_open_req6);
	}

	if (csk->csk_family == AF_INET)
		skb = alloc_wr(size, 0, GFP_NOIO);
#if IS_ENABLED(CONFIG_IPV6)
	else
		skb = alloc_wr(size6, 0, GFP_NOIO);
#endif

	if (!skb)
		goto rel_resource;
	skb->sk = (struct sock *)csk;
	t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);

	if (!csk->mtu)
		csk->mtu = dst_mtu(csk->dst);
	cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
	csk->tx_chan = cxgb4_port_chan(ndev);
	csk->smac_idx = cxgb4_tp_smt_idx(lldi->adapter_type,
					 cxgb4_port_viid(ndev));
	step = lldi->ntxq / lldi->nchan;
	csk->txq_idx = cxgb4_port_idx(ndev) * step;
	step = lldi->nrxq / lldi->nchan;
	csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
	linkspeed = ((struct port_info *)netdev_priv(ndev))->link_cfg.speed;
	csk->snd_win = cxgb4i_snd_win;
	csk->rcv_win = cxgb4i_rcv_win;
	if (cxgb4i_rcv_win <= 0) {
		csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN;
		rcv_winf = linkspeed / SPEED_10000;
		if (rcv_winf)
			csk->rcv_win *= rcv_winf;
	}
	if (cxgb4i_snd_win <= 0) {
		csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN;
		snd_winf = linkspeed / SPEED_10000;
		if (snd_winf)
			csk->snd_win *= snd_winf;
	}
	csk->wr_cred = lldi->wr_cred -
		       DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
	csk->wr_max_cred = csk->wr_cred;
	csk->wr_una_cred = 0;
	cxgbi_sock_reset_wr_list(csk);
	csk->err = 0;

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
		       (&csk->saddr), (&csk->daddr), csk, csk->state,
		       csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
		       csk->mtu, csk->mss_idx, csk->smac_idx);

	/* must wait for either a act_open_rpl or act_open_establish */
	if (!try_module_get(cdev->owner)) {
		pr_err("%s, try_module_get failed.\n", ndev->name);
		goto rel_resource;
	}

	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	if (csk->csk_family == AF_INET)
		send_act_open_req(csk, skb, csk->l2t);
#if IS_ENABLED(CONFIG_IPV6)
	else
		send_act_open_req6(csk, skb, csk->l2t);
#endif
	neigh_release(n);

	return 0;

rel_resource:
#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_release(ndev,
				   (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif
rel_resource_without_clip:
	if (n)
		neigh_release(n);
	if (skb)
		__kfree_skb(skb);
	return -EINVAL;
}
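
/*
 * CPL handlers below, indexed by CPL opcode; t4_uld_rx_handler() (the
 * .rx_handler registered in cxgb4i_uld_info) presumably dispatches each
 * incoming message through this table.
 */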

static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req_rss,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_FW4_ACK] = do_fw4_ack,
	[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
	[CPL_ISCSI_DATA] = do_rx_iscsi_data,
	[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
	[CPL_RX_DATA_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_CMP] = do_rx_iscsi_cmp,
	[CPL_RX_DATA] = do_rx_data,
};

static int cxgb4i_ofld_init(struct cxgbi_device *cdev)
{
	int rc;

	if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
		cxgb4i_max_connect = CXGB4I_MAX_CONN;

	rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
					 cxgb4i_max_connect);
	if (rc < 0)
		return rc;

	cdev->csk_release_offload_resources = release_offload_resources;
	cdev->csk_push_tx_frames = push_tx_frames;
	cdev->csk_send_abort_req = send_abort_req;
	cdev->csk_send_close_req = send_close_req;
	cdev->csk_send_rx_credits = send_rx_credits;
	cdev->csk_alloc_cpls = alloc_cpls;
	cdev->csk_init_act_open = init_act_open;

	pr_info("cdev 0x%p, offload up, added.\n", cdev);
	return 0;
}

static inline void
ulp_mem_io_set_hdr(struct cxgbi_device *cdev,
		   struct ulp_mem_io *req,
		   unsigned int wr_len, unsigned int dlen,
		   unsigned int pm_addr,
		   int tid)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

	INIT_ULPTX_WR(req, wr_len, 0, tid);
	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
		FW_WR_ATOMIC_V(0));
	req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
		ULP_MEMIO_ORDER_V(is_t4(lldi->adapter_type)) |
		T5_ULP_MEMIO_IMM_V(!is_t4(lldi->adapter_type)));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);
}

static struct sk_buff *
ddp_ppod_init_idata(struct cxgbi_device *cdev,
		    struct cxgbi_ppm *ppm,
		    unsigned int idx, unsigned int npods,
		    unsigned int tid)
{
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	unsigned int dlen = npods << PPOD_SIZE_SHIFT;
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				sizeof(struct ulptx_idata) + dlen, 16);
	struct sk_buff *skb = alloc_wr(wr_len, 0, GFP_ATOMIC);

	if (!skb) {
		pr_err("%s: %s idx %u, npods %u, OOM.\n",
		       __func__, ppm->ndev->name, idx, npods);
		return NULL;
	}

	ulp_mem_io_set_hdr(cdev, (struct ulp_mem_io *)skb->head, wr_len, dlen,
			   pm_addr, tid);

	return skb;
}

static int ddp_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
				struct cxgbi_task_tag_info *ttinfo,
				unsigned int idx, unsigned int npods,
				struct scatterlist **sg_pp,
				unsigned int *sg_off)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct sk_buff *skb = ddp_ppod_init_idata(cdev, ppm, idx, npods,
						  csk->tid);
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct cxgbi_pagepod *ppod;
	int i;

	if (!skb)
		return -ENOMEM;

	req = (struct ulp_mem_io *)skb->head;
	idata = (struct ulptx_idata *)(req + 1);
	ppod = (struct cxgbi_pagepod *)(idata + 1);

	for (i = 0; i < npods; i++, ppod++)
		cxgbi_ddp_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);

	cxgbi_skcb_set_flag(skb, SKCBF_TX_MEM_WRITE);
static int ddp_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
				struct cxgbi_task_tag_info *ttinfo,
				unsigned int idx, unsigned int npods,
				struct scatterlist **sg_pp,
				unsigned int *sg_off)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct sk_buff *skb = ddp_ppod_init_idata(cdev, ppm, idx, npods,
						  csk->tid);
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct cxgbi_pagepod *ppod;
	int i;

	if (!skb)
		return -ENOMEM;

	req = (struct ulp_mem_io *)skb->head;
	idata = (struct ulptx_idata *)(req + 1);
	ppod = (struct cxgbi_pagepod *)(idata + 1);

	for (i = 0; i < npods; i++, ppod++)
		cxgbi_ddp_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);

	cxgbi_skcb_set_flag(skb, SKCBF_TX_MEM_WRITE);
	cxgbi_skcb_set_flag(skb, SKCBF_TX_FLAG_COMPL);
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);

	spin_lock_bh(&csk->lock);
	cxgbi_sock_skb_entail(csk, skb);
	spin_unlock_bh(&csk->lock);

	return 0;
}

static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
		       struct cxgbi_task_tag_info *ttinfo)
{
	unsigned int pidx = ttinfo->idx;
	unsigned int npods = ttinfo->npods;
	unsigned int i, cnt;
	int err = 0;
	struct scatterlist *sg = ttinfo->sgl;
	unsigned int offset = 0;

	ttinfo->cid = csk->port_id;

	for (i = 0; i < npods; i += cnt, pidx += cnt) {
		cnt = npods - i;

		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		err = ddp_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
					   &sg, &offset);
		if (err < 0)
			break;
	}

	return err;
}

static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
				int pg_idx, bool reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* set up ulp page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}

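/*
 * Enable iSCSI header/data digest (CRC) offload for a connection by
 * updating the ULP submode bits of its TCB with a SET_TCB_FIELD request.
 * A no-op when neither digest is requested.
 */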
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				 int hcrc, int dcrc, int reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!hcrc && !dcrc)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	csk->hcrc_len = (hcrc ? 4 : 0);
	csk->dcrc_len = (dcrc ? 4 : 0);
	/* set up ulp submode */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}

static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
{
	return (struct cxgbi_ppm *)(*((struct cxgb4_lld_info *)
				      (cxgbi_cdev_priv(cdev)))->iscsi_ppm);
}

static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[0];
	struct cxgbi_tag_format tformat;
	unsigned int ppmax;
	int i;

	if (!lldi->vr->iscsi.size) {
		pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
		return -EACCES;
	}

	cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ;
	ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;

	memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
	for (i = 0; i < 4; i++)
		tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
					& 0xF;
	cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);

	cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat, ppmax,
			    lldi->iscsi_llimit, lldi->vr->iscsi.start, 2);

	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
	cdev->csk_ddp_set_map = ddp_set_map;
	cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->cdev2ppm = cdev2ppm;

	return 0;
}

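/*
 * ULD attach callback, invoked by cxgb4 once per adapter: allocate a
 * cxgbi_device, copy the lower-layer driver info, initialize DDP and the
 * offload callbacks, and add one iSCSI host per port.
 */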
static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct cxgbi_device *cdev;
	struct port_info *pi;
	int i, rc;

	cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
	if (!cdev) {
		pr_info("t4 device 0x%p, register failed.\n", lldi);
		return NULL;
	}
	pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
		cdev, lldi->adapter_type, lldi->nports,
		lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
		lldi->nrxq, lldi->wr_cred);
	for (i = 0; i < lldi->nrxq; i++)
		log_debug(1 << CXGBI_DBG_DEV,
			  "t4 0x%p, rxq id #%d: %u.\n",
			  cdev, i, lldi->rxq_ids[i]);

	memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
	cdev->flags = CXGBI_FLAG_DEV_T4;
	cdev->pdev = lldi->pdev;
	cdev->ports = lldi->ports;
	cdev->nports = lldi->nports;
	cdev->mtus = lldi->mtus;
	cdev->nmtus = NMTUS;
	cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <=
				 CHELSIO_T5) ? cxgb4i_rx_credit_thres : 0;
	cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
	cdev->itp = &cxgb4i_iscsi_transport;
	cdev->owner = THIS_MODULE;

	cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
		     << FW_VIID_PFN_S;
	pr_info("cdev 0x%p,%s, pfvf %u.\n",
		cdev, lldi->ports[0]->name, cdev->pfvf);

	rc = cxgb4i_ddp_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ddp init failed.\n", cdev);
		goto err_out;
	}
	rc = cxgb4i_ofld_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ofld init failed.\n", cdev);
		goto err_out;
	}

	rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
			    &cxgb4i_host_template, cxgb4i_stt);
	if (rc)
		goto err_out;

	for (i = 0; i < cdev->nports; i++) {
		pi = netdev_priv(lldi->ports[i]);
		cdev->hbas[i]->port_id = pi->port_id;
	}
	return cdev;

err_out:
	cxgbi_device_unregister(cdev);
	return ERR_PTR(-ENOMEM);
}

#define RX_PULL_LEN 128
static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
			     const struct pkt_gl *pgl)
{
	const struct cpl_act_establish *rpl;
	struct sk_buff *skb;
	unsigned int opc;
	struct cxgbi_device *cdev = handle;

	if (pgl == NULL) {
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_wr(len, 0, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
		if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				pgl->va, be64_to_cpu(*rsp),
				be64_to_cpu(*(u64 *)pgl->va),
				pgl->tot_len);
			return 0;
		}
		skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = (struct cpl_act_establish *)skb->data;
	opc = rpl->ot.opcode;
	log_debug(1 << CXGBI_DBG_TOE,
		  "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		  cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
	if (cxgb4i_cplhandlers[opc])
		cxgb4i_cplhandlers[opc](cdev, skb);
	else {
		pr_err("No handler for opcode 0x%x.\n", opc);
		__kfree_skb(skb);
	}
	return 0;
nomem:
	log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
	return 1;
}

static int t4_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct cxgbi_device *cdev = handle;

	switch (state) {
	case CXGB4_STATE_UP:
		pr_info("cdev 0x%p, UP.\n", cdev);
		break;
	case CXGB4_STATE_START_RECOVERY:
		pr_info("cdev 0x%p, RECOVERY.\n", cdev);
		/* close all connections */
		break;
	case CXGB4_STATE_DOWN:
		pr_info("cdev 0x%p, DOWN.\n", cdev);
		break;
	case CXGB4_STATE_DETACH:
		pr_info("cdev 0x%p, DETACH.\n", cdev);
		cxgbi_device_unregister(cdev);
		break;
	default:
		pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
		break;
	}
	return 0;
}

static int __init cxgb4i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
	if (rc < 0)
		return rc;
	cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);

	return 0;
}

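/*
 * Module unload mirrors init in reverse: unregister the ULD from cxgb4
 * first, then release any remaining T4 devices and the iSCSI transport.
 */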
static void __exit cxgb4i_exit_module(void)
{
	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
}

module_init(cxgb4i_init_module);
module_exit(cxgb4i_exit_module);