/*
 * cxgb4i.c: Chelsio T4 iSCSI driver.
 *
 * Copyright (c) 2010-2015 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by:	Karen Xie (kxie@chelsio.com)
 *		Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4fw_api.h"
#include "l2t.h"
#include "cxgb4i.h"
#include "clip_tbl.h"

static unsigned int dbg_level;

#include "../libcxgbi.h"

#ifdef CONFIG_CHELSIO_T4_DCB
#include <net/dcbevent.h>
#include "cxgb4_dcb.h"
#endif

#define DRV_MODULE_NAME		"cxgb4i"
#define DRV_MODULE_DESC		"Chelsio T4-T6 iSCSI Driver"
#define DRV_MODULE_VERSION	"0.9.5-ko"
#define DRV_MODULE_RELDATE	"Apr. 2015"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");

#define CXGB4I_DEFAULT_10G_RCV_WIN (256 * 1024)
static int cxgb4i_rcv_win = -1;
module_param(cxgb4i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");

#define CXGB4I_DEFAULT_10G_SND_WIN (128 * 1024)
static int cxgb4i_snd_win = -1;
module_param(cxgb4i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");

static int cxgb4i_rx_credit_thres = 10 * 1024;
module_param(cxgb4i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
		 "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb4i_max_connect = (8 * 1024);
module_param(cxgb4i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");

static unsigned short cxgb4i_sport_base = 20000;
module_param(cxgb4i_sport_base, ushort, 0644);
MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");

typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);

static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);
static inline int send_tx_flowc_wr(struct cxgbi_sock *);

static const struct cxgb4_uld_info cxgb4i_uld_info = {
	.name = DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 1024,
	.lro = false,
	.add = t4_uld_add,
	.rx_handler = t4_uld_rx_handler,
	.state_change = t4_uld_state_change,
};

static struct scsi_host_template cxgb4i_host_template = {
	.module = THIS_MODULE,
	.name = DRV_MODULE_NAME,
	.proc_name = DRV_MODULE_NAME,
	.can_queue = CXGB4I_SCSI_HOST_QDEPTH,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.sg_tablesize = SG_ALL,
	.max_sectors = 0xFFFF,
	.cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
	.eh_timed_out = iscsi_eh_cmd_timed_out,
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc = iscsi_target_alloc,
	.dma_boundary = PAGE_SIZE - 1,
	.this_id = -1,
	.track_queue_depth = 1,
};

static struct iscsi_transport cxgb4i_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_MODULE_NAME,
	.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
		CAP_DATADGST | CAP_DIGEST_OFFLOAD |
		CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
	.attr_is_visible = cxgbi_attr_is_visible,
	.get_host_param = cxgbi_get_host_param,
	.set_host_param = cxgbi_set_host_param,
	/* session management */
	.create_session = cxgbi_create_session,
	.destroy_session = cxgbi_destroy_session,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn = cxgbi_create_conn,
	.bind_conn = cxgbi_bind_conn,
	.destroy_conn = iscsi_tcp_conn_teardown,
	.start_conn = iscsi_conn_start,
	.stop_conn = iscsi_conn_stop,
	.get_conn_param = iscsi_conn_get_param,
	.set_param = cxgbi_set_conn_param,
	.get_stats = cxgbi_get_conn_stats,
	/* pdu xmit req from user space */
	.send_pdu = iscsi_conn_send_pdu,
	/* task */
	.init_task = iscsi_tcp_task_init,
	.xmit_task = iscsi_tcp_task_xmit,
	.cleanup_task = cxgbi_cleanup_task,
	/* pdu */
	.alloc_pdu = cxgbi_conn_alloc_pdu,
	.init_pdu = cxgbi_conn_init_pdu,
	.xmit_pdu = cxgbi_conn_xmit_pdu,
	.parse_pdu_itt = cxgbi_parse_pdu_itt,
	/* TCP connect/disconnect */
	.get_ep_param = cxgbi_get_ep_param,
	.ep_connect = cxgbi_ep_connect,
	.ep_poll = cxgbi_ep_poll,
	.ep_disconnect = cxgbi_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

#ifdef CONFIG_CHELSIO_T4_DCB
static int
cxgb4_dcb_change_notify(struct notifier_block *, unsigned long, void *);

static struct notifier_block cxgb4_dcb_change = {
	.notifier_call = cxgb4_dcb_change_notify,
};
#endif

static struct scsi_transport_template *cxgb4i_stt;

/*
 * CPL (Chelsio Protocol Language) defines a message passing interface between
 * the host driver and the Chelsio ASIC.
 * The section below implements the CPLs related to iSCSI TCP connection
 * open/close/abort and data send/receive.
 */

#define RCV_BUFSIZ_MASK		0x3FFU
#define MAX_IMM_TX_PKT_LEN	256

static int push_tx_frames(struct cxgbi_sock *, int);

/*
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data.  We currently use the same limit as for Ethernet packets.
 */
static inline bool is_ofld_imm(const struct sk_buff *skb)
{
	int len = skb->len;

	if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
		len += sizeof(struct fw_ofld_tx_data_wr);

	return len <= MAX_IMM_TX_PKT_LEN;
}

static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
			      struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				(((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(csk->mss_idx) |
		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(csk->rcv_win >> 10);

	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F |
		RSS_QUEUE_V(csk->rss_qid);

	if (is_t4(lldi->adapter_type)) {
		struct cpl_act_open_req *req =
				(struct cpl_act_open_req *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be32(cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t));
		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	} else if (is_t5(lldi->adapter_type)) {
		struct cpl_t5_act_open_req *req =
				(struct cpl_t5_act_open_req *)skb->head;
		u32 isn = (prandom_u32() & ~7UL) - 1;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be64(FILTER_TUPLE_V(
				cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t)));
		req->rsvd = cpu_to_be32(isn);
		opt2 |= T5_ISS_VALID;
		opt2 |= T5_OPT_2_VALID_F;

		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	} else {
		struct cpl_t6_act_open_req *req =
				(struct cpl_t6_act_open_req *)skb->head;
		u32 isn = (prandom_u32() & ~7UL) - 1;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be64(FILTER_TUPLE_V(
				cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t)));
		req->rsvd = cpu_to_be32(isn);

		opt2 |= T5_ISS_VALID;
		opt2 |= RX_FC_DISABLE_F;
		opt2 |= T5_OPT_2_VALID_F;

		req->opt2 = cpu_to_be32(opt2);
		req->rsvd2 = cpu_to_be32(0);
		req->opt3 = cpu_to_be32(0);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       CHELSIO_CHIP_VERSION(lldi->adapter_type), csk,
		       csk->state, csk->flags, csk->atid, csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

#if IS_ENABLED(CONFIG_IPV6)
static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
			       struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				(((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(csk->mss_idx) |
		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(csk->rcv_win >> 10);

	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F |
		RSS_QUEUE_V(csk->rss_qid);

	if (is_t4(lldi->adapter_type)) {
		struct cpl_act_open_req6 *req =
			    (struct cpl_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;

		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
					       8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
					      8);

		req->opt0 = cpu_to_be64(opt0);

		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be32(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t));
	} else if (is_t5(lldi->adapter_type)) {
		struct cpl_t5_act_open_req6 *req =
				(struct cpl_t5_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;
		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
					       8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
					      8);
		req->opt0 = cpu_to_be64(opt0);

		opt2 |= T5_OPT_2_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t)));
	} else {
		struct cpl_t6_act_open_req6 *req =
				(struct cpl_t6_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;
		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
					       8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
					      8);
		req->opt0 = cpu_to_be64(opt0);

		opt2 |= RX_FC_DISABLE_F;
		opt2 |= T5_OPT_2_VALID_F;

		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t)));

		req->rsvd2 = cpu_to_be32(0);
		req->opt3 = cpu_to_be32(0);
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
		CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state,
		csk->flags, csk->atid,
		&csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
		csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
#endif

static void send_close_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = csk->tid;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, tid %u.\n",
		  csk, csk->state, csk->flags, csk->tid);
	csk->cpl_close = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	cxgbi_sock_skb_entail(csk, skb);
	if (csk->state >= CTP_ESTABLISHED)
		push_tx_frames(csk, 1);
}

static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
	struct cpl_abort_req *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, tid %u, abort.\n",
		  csk, csk->state, csk->flags, csk->tid);
	req = (struct cpl_abort_req *)skb->data;
	req->cmd = CPL_ABORT_NO_RST;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

static void send_abort_req(struct cxgbi_sock *csk)
{
	struct cpl_abort_req *req;
	struct sk_buff *skb = csk->cpl_abort_req;

	if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
		return;

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_state(csk, CTP_ABORTING);
	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
	cxgbi_sock_purge_write_queue(csk);

	csk->cpl_abort_req = NULL;
	req = (struct cpl_abort_req *)skb->head;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	req->cmd = CPL_ABORT_SEND_RST;
	t4_set_arp_err_handler(skb, csk, abort_arp_failure);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
	req->rsvd0 = htonl(csk->snd_nxt);
	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
		  req->rsvd1);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
	struct sk_buff *skb = csk->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u, status %d.\n",
		  csk, csk->state, csk->flags, csk->tid, rst_status);

	csk->cpl_abort_rpl = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(rpl, csk->tid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
	rpl->cmd = rst_status;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
		  csk, csk->state, csk->flags, csk->tid, credits);

	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
	if (!skb) {
		pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
		return 0;
	}
	req = (struct cpl_rx_data_ack *)skb->head;

	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    csk->tid));
	req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
				       | RX_FORCE_ACK_F);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return credits;
}

/*
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

/*
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);
	flits = skb_transport_offset(skb) / 8;
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}
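
/*
 * Illustrative note (not part of the original source): a "flit" here is an
 * 8-byte unit on the firmware interface.  For example, an SGL holding
 * n = 3 entries needs sgl_len(3) = (3 * 2) / 2 + (2 & 1) + 2 = 5 flits,
 * i.e. 40 bytes; calc_tx_flits_ofld() adds these SGL flits to the flits
 * consumed by the WR header bytes ahead of the transport header.
 */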

#define FLOWC_WR_NPARAMS_MIN	9
static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
{
	int nparams, flowclen16, flowclen;

	nparams = FLOWC_WR_NPARAMS_MIN;
#ifdef CONFIG_CHELSIO_T4_DCB
	nparams++;
#endif
	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;
	/*
	 * Return the number of 16-byte credits used by the FlowC request.
	 * Pass back the nparams and actual FlowC length if requested.
	 */
	if (nparamsp)
		*nparamsp = nparams;
	if (flowclenp)
		*flowclenp = flowclen;

	return flowclen16;
}

static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;
	struct fw_flowc_wr *flowc;
	int nparams, flowclen16, flowclen;

#ifdef CONFIG_CHELSIO_T4_DCB
	u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
#endif
	flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
	skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
	flowc = (struct fw_flowc_wr *)skb->head;
	flowc->op_to_nparams =
		htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 =
		htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htonl(csk->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htonl(csk->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htonl(csk->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = htonl(csk->snd_nxt);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = htonl(csk->rcv_nxt);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = htonl(csk->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = htonl(csk->advmss);
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
	flowc->mnemval[8].val = 16384;
#ifdef CONFIG_CHELSIO_T4_DCB
	flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
	if (vlan == CPL_L2T_VLAN_NONE) {
		pr_warn_ratelimited("csk %u without VLAN Tag on DCB Link\n",
				    csk->tid);
		flowc->mnemval[9].val = cpu_to_be32(0);
	} else {
		flowc->mnemval[9].val = cpu_to_be32((vlan & VLAN_PRIO_MASK) >>
						    VLAN_PRIO_SHIFT);
	}
#endif

	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
		  csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
		  csk->snd_nxt, csk->rcv_nxt, csk->snd_win,
		  csk->advmss);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);

	return flowclen16;
}

static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
				   int dlen, int len, u32 credits, int compl)
{
	struct fw_ofld_tx_data_wr *req;
	unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
	unsigned int wr_ulp_mode = 0, val;
	bool imm = is_ofld_imm(skb);

	req = __skb_push(skb, sizeof(*req));

	if (imm) {
		req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
					   FW_WR_COMPL_F |
					   FW_WR_IMMDLEN_V(dlen));
		req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
					  FW_WR_LEN16_V(credits));
	} else {
		req->op_to_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
				    FW_WR_COMPL_F |
				    FW_WR_IMMDLEN_V(0));
		req->flowid_len16 =
			cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
				    FW_WR_LEN16_V(credits));
	}
	if (submode)
		wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) |
			      FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
	val = skb_peek(&csk->write_queue) ? 0 : 1;
	req->tunnel_to_proxy = htonl(wr_ulp_mode |
				     FW_OFLD_TX_DATA_WR_SHOVE_V(val));
	req->plen = htonl(len);
	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
}

static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);
}

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;

	if (unlikely(csk->state < CTP_ESTABLISHED ||
		     csk->state == CTP_CLOSE_WAIT_1 ||
		     csk->state >= CTP_ABORTING)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
			  1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		return 0;
	}

	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
		int dlen = skb->len;
		int len = skb->len;
		unsigned int credits_needed;
		int flowclen16 = 0;

		skb_reset_transport_header(skb);
		if (is_ofld_imm(skb))
			credits_needed = DIV_ROUND_UP(dlen, 16);
		else
			credits_needed = DIV_ROUND_UP(
						8 * calc_tx_flits_ofld(skb),
						16);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
			credits_needed += DIV_ROUND_UP(
					sizeof(struct fw_ofld_tx_data_wr),
					16);

		/*
		 * Assumes the initial credits is large enough to support
		 * fw_flowc_wr plus largest possible first payload
		 */
		if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
			flowclen16 = send_tx_flowc_wr(csk);
			csk->wr_cred -= flowclen16;
			csk->wr_una_cred += flowclen16;
			cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
		}

		if (csk->wr_cred < credits_needed) {
			log_debug(1 << CXGBI_DBG_PDU_TX,
				  "csk 0x%p, skb %u/%u, wr %d < %u.\n",
				  csk, skb->len, skb->data_len,
				  credits_needed, csk->wr_cred);
			break;
		}
		__skb_unlink(skb, &csk->write_queue);
		set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
		skb->csum = credits_needed + flowclen16;
		csk->wr_cred -= credits_needed;
		csk->wr_una_cred += credits_needed;
		cxgbi_sock_enqueue_wr(csk, skb);

		log_debug(1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
			  csk, skb->len, skb->data_len, credits_needed,
			  csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
			len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
			make_tx_data_wr(csk, skb, dlen, len, credits_needed,
					req_completion);
			csk->snd_nxt += len;
			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
		} else if (cxgbi_skcb_test_flag(skb, SKCBF_TX_FLAG_COMPL) &&
			   (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
			struct cpl_close_con_req *req =
				(struct cpl_close_con_req *)skb->data;
			req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
		}
		total_size += skb->truesize;
		t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
			  csk, csk->state, csk->flags, csk->tid, skb, len);

		cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
	}
	return total_size;
}

static inline void free_atid(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);

	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
		cxgb4_free_atid(lldi->tids, csk->atid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
		cxgbi_sock_put(csk);
	}
}

static void
do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
	unsigned short tcp_opt = ntohs(req->tcp_opt);
	unsigned int tid = GET_TID(req);
	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 rcv_isn = be32_to_cpu(req->rcv_isn);

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
		goto rel_skb;
	}

	if (csk->atid != atid) {
		pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
		       atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
		goto rel_skb;
	}

	pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       atid, tid, csk, csk->state, csk->flags, rcv_isn);

	module_put(cdev->owner);

	cxgbi_sock_get(csk);
	csk->tid = tid;
	cxgb4_insert_tid(lldi->tids, csk, tid, csk->csk_family);
	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);

	free_atid(csk);

	spin_lock_bh(&csk->lock);
	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
			csk, csk->state, csk->flags, csk->tid);

	if (csk->retry_timer.function) {
		del_timer(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}

	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
	/*
	 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
	 * pass through opt0.
	 */
	if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10))
		csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10);

	csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
	if (TCPOPT_TSTAMP_G(tcp_opt))
		csk->advmss -= 12;
	if (csk->advmss < 128)
		csk->advmss = 128;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, mss_idx %u, advmss %u.\n",
		  csk, TCPOPT_MSS_G(tcp_opt), csk->advmss);

	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
		send_abort_req(csk);
	else {
		if (skb_queue_len(&csk->write_queue))
			push_tx_frames(csk, 0);
		cxgbi_conn_tx_open(csk);
	}
	spin_unlock_bh(&csk->lock);

rel_skb:
	__kfree_skb(skb);
}

static int act_open_rpl_status_to_errno(int status)
{
	switch (status) {
	case CPL_ERR_CONN_RESET:
		return -ECONNREFUSED;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

static void csk_act_open_retry_timer(struct timer_list *t)
{
	struct sk_buff *skb = NULL;
	struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
				   struct l2t_entry *);
	int t4 = is_t4(lldi->adapter_type), size, size6;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (t4) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	}

	if (csk->csk_family == AF_INET) {
		send_act_open_func = send_act_open_req;
		skb = alloc_wr(size, 0, GFP_ATOMIC);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		send_act_open_func = send_act_open_req6;
		skb = alloc_wr(size6, 0, GFP_ATOMIC);
#endif
	}

	if (!skb)
		cxgbi_sock_fail_act_open(csk, -ENOMEM);
	else {
		skb->sk = (struct sock *)csk;
		t4_set_arp_err_handler(skb, csk,
				       cxgbi_sock_act_open_req_arp_failure);
		send_act_open_func(csk, skb, csk->l2t);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);

}

static inline bool is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
		status == CPL_ERR_KEEPALV_NEG_ADVICE ||
		status == CPL_ERR_PERSIST_NEG_ADVICE;
}

static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	unsigned int atid =
		TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status)));
	unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
		goto rel_skb;
	}

	pr_info_ipaddr("tid %u/%u, status %u.\n"
		       "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
		       atid, tid, status, csk, csk->state, csk->flags);

	if (is_neg_adv(status))
		goto rel_skb;

	module_put(cdev->owner);

	if (status && status != CPL_ERR_TCAM_FULL &&
	    status != CPL_ERR_CONN_EXIST &&
	    status != CPL_ERR_ARP_MISS)
		cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl),
				 csk->csk_family);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (status == CPL_ERR_CONN_EXIST &&
	    csk->retry_timer.function != csk_act_open_retry_timer) {
		csk->retry_timer.function = csk_act_open_retry_timer;
		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
	} else
		cxgbi_sock_fail_act_open(csk,
					 act_open_rpl_status_to_errno(status));

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_peer_close(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
rel_skb:
	__kfree_skb(skb);
}

static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
				 int *need_rst)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN: /* fall through */
	case CPL_ERR_CONN_RESET:
		return csk->state > CTP_ESTABLISHED ?
			-EPIPE : -ECONNRESET;
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return -ETIMEDOUT;
	default:
		return -EIO;
	}
}

static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	int rst_status = CPL_ABORT_NO_RST;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid, req->status);

	if (is_neg_adv(req->status))
		goto rel_skb;

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
	cxgbi_sock_set_state(csk, CTP_ABORTING);

	send_abort_rpl(csk, rst_status);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk)
		goto rel_skb;

	if (csk)
		pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
			       (&csk->saddr), (&csk->daddr), csk,
			       csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status == CPL_ERR_ABORT_FAILED)
		goto rel_skb;

	cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data;
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk) {
		pr_err("can't find connection for tid %u.\n", tid);
	} else {
		/* not expecting this, reset the connection. */
		pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid);
		spin_lock_bh(&csk->lock);
		send_abort_req(csk);
		spin_unlock_bh(&csk->lock);
	}
	__kfree_skb(skb);
}

static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, skb, skb->len,
		  pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr) {
		unsigned char *bhs;
		unsigned int hlen, dlen, plen;

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
			  csk, csk->state, csk->flags, csk->tid, skb);
		csk->skb_ulp_lhdr = skb;
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);

		if ((CHELSIO_CHIP_VERSION(lldi->adapter_type) <= CHELSIO_T5) &&
		    (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt)) {
			pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
				csk->tid, cxgbi_skcb_tcp_seq(skb),
				csk->rcv_nxt);
			goto abort_conn;
		}

		bhs = skb->data;
		hlen = ntohs(cpl->len);
		dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;

		plen = ISCSI_PDU_LEN_G(pdu_len_ddp);
		if (is_t4(lldi->adapter_type))
			plen -= 40;

		if ((hlen + dlen) != plen) {
			pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
				"mismatch %u != %u + %u, seq 0x%x.\n",
				csk->tid, plen, hlen, dlen,
				cxgbi_skcb_tcp_seq(skb));
			goto abort_conn;
		}

		cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
		if (dlen)
			cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
		csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
			  csk, skb, *bhs, hlen, dlen,
			  ntohl(*((unsigned int *)(bhs + 16))),
			  ntohl(*((unsigned int *)(bhs + 24))));

	} else {
		struct sk_buff *lskb = csk->skb_ulp_lhdr;

		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
			  csk, csk->state, csk->flags, skb, lskb);
	}

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
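
/*
 * Illustrative note (not part of the original source): rx_pdulen computed in
 * do_rx_iscsi_hdr() is the header length reported by the CPL plus the data
 * segment length, rounded up to a 4-byte boundary, e.g. hlen = 48 and
 * dlen = 13 give (48 + 13 + 3) & ~0x3 = 64; the data-digest length is then
 * added whenever a data segment is present.
 */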

static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct sk_buff *lskb;
	u32 tid = GET_TID(cpl);
	u16 pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, skb,
		  skb->len, pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);

		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr)
		csk->skb_ulp_lhdr = skb;

	lskb = csk->skb_ulp_lhdr;
	cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
		  csk, csk->state, csk->flags, skb, lskb);

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

static void
cxgb4i_process_ddpvld(struct cxgbi_sock *csk,
		      struct sk_buff *skb, u32 ddpvld)
{
	if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
			csk, skb, ddpvld, cxgbi_skcb_flags(skb));
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
	}

	if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
			csk, skb, ddpvld, cxgbi_skcb_flags(skb));
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
	}

	if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
			  csk, skb, ddpvld);
		cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
	}

	if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
	    !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
			  csk, skb, ddpvld);
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
	}
}

static void do_rx_data_ddp(struct cxgbi_device *cdev,
			   struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct sk_buff *lskb;
	struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 ddpvld = be32_to_cpu(rpl->ddpvld);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
		  csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	if (!csk->skb_ulp_lhdr) {
		pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
		goto abort_conn;
	}

	lskb = csk->skb_ulp_lhdr;
	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);

	if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
		pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
			csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));

	cxgb4i_process_ddpvld(csk, lskb, ddpvld);

	log_debug(1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
		  csk, lskb, cxgbi_skcb_flags(lskb));

	cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);
	goto rel_skb;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

static void
do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct sk_buff *data_skb = NULL;
	u32 tid = GET_TID(rpl);
	u32 ddpvld = be32_to_cpu(rpl->ddpvld);
	u32 seq = be32_to_cpu(rpl->seq);
	u16 pdu_len_ddp = be16_to_cpu(rpl->pdu_len_ddp);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, "
		  "pdu_len_ddp %u, status %u.\n",
		  csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr,
		  ntohs(rpl->len), pdu_len_ddp, rpl->status);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);

		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = seq;
	cxgbi_skcb_flags(skb) = 0;
	cxgbi_skcb_rx_pdulen(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*rpl));
	__pskb_trim(skb, be16_to_cpu(rpl->len));

	csk->rcv_nxt = seq + pdu_len_ddp;

	if (csk->skb_ulp_lhdr) {
		data_skb = skb_peek(&csk->receive_queue);
		if (!data_skb ||
		    !cxgbi_skcb_test_flag(data_skb, SKCBF_RX_DATA)) {
			pr_err("Error! freelist data not found 0x%p, tid %u\n",
			       data_skb, tid);

			goto abort_conn;
		}
		__skb_unlink(data_skb, &csk->receive_queue);

		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA);

		__skb_queue_tail(&csk->receive_queue, skb);
		__skb_queue_tail(&csk->receive_queue, data_skb);
	} else {
		__skb_queue_tail(&csk->receive_queue, skb);
	}

	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
	cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
	cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL);
	cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc);

	cxgb4i_process_ddpvld(csk, skb, ddpvld);

	log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n",
		  csk, skb, cxgbi_skcb_flags(skb));

	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);

	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk))
		pr_err("can't find connection for tid %u.\n", tid);
	else {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u.\n",
			  csk, csk->state, csk->flags, csk->tid);
		cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
				      rpl->seq_vld);
	}
	__kfree_skb(skb);
}

static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct cxgbi_sock *csk;

	csk = lookup_tid(t, tid);
	if (!csk) {
		pr_err("can't find conn. for tid %u.\n", tid);
		return;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status != CPL_ERR_NONE) {
		pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
		       csk, tid, rpl->status);
		csk->err = -EINVAL;
	}

	complete(&csk->cmpl);

	__kfree_skb(skb);
}

static int alloc_cpls(struct cxgbi_sock *csk)
{
	csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
				  0, GFP_KERNEL);
	if (!csk->cpl_close)
		return -ENOMEM;

	csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
				      0, GFP_KERNEL);
	if (!csk->cpl_abort_req)
		goto free_cpls;

	csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
				      0, GFP_KERNEL);
	if (!csk->cpl_abort_rpl)
		goto free_cpls;
	return 0;

free_cpls:
	cxgbi_sock_free_cpl_skbs(csk);
	return -ENOMEM;
}

static inline void l2t_put(struct cxgbi_sock *csk)
{
	if (csk->l2t) {
		cxgb4_l2t_release(csk->l2t);
		csk->l2t = NULL;
		cxgbi_sock_put(csk);
	}
}

static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi;
#if IS_ENABLED(CONFIG_IPV6)
	struct net_device *ndev = csk->cdev->ports[csk->port_id];
#endif

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_free_cpl_skbs(csk);
	cxgbi_sock_purge_write_queue(csk);
	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}

	l2t_put(csk);
#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_release(ndev,
				   (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif

	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		lldi = cxgbi_cdev_priv(csk->cdev);
		cxgb4_remove_tid(lldi->tids, 0, csk->tid,
				 csk->csk_family);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
}

#ifdef CONFIG_CHELSIO_T4_DCB
static inline u8 get_iscsi_dcb_state(struct net_device *ndev)
{
	return ndev->dcbnl_ops->getstate(ndev);
}

static int select_priority(int pri_mask)
{
	if (!pri_mask)
		return 0;
	return (ffs(pri_mask) - 1);
}

static u8 get_iscsi_dcb_priority(struct net_device *ndev)
{
	int rv;
	u8 caps;

	struct dcb_app iscsi_dcb_app = {
		.protocol = 3260
	};

	rv = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
	if (rv)
		return 0;

	if (caps & DCB_CAP_DCBX_VER_IEEE) {
		iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
		rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
	} else if (caps & DCB_CAP_DCBX_VER_CEE) {
		iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
		rv = dcb_getapp(ndev, &iscsi_dcb_app);
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		  "iSCSI priority is set to %u\n", select_priority(rv));
	return select_priority(rv);
}
#endif

static int init_act_open(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct sk_buff *skb = NULL;
	struct neighbour *n = NULL;
	void *daddr;
	unsigned int step;
	unsigned int rxq_idx;
	unsigned int size, size6;
	unsigned int linkspeed;
	unsigned int rcv_winf, snd_winf;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 priority = 0;
#endif
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	if (csk->csk_family == AF_INET)
		daddr = &csk->daddr.sin_addr.s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	else if (csk->csk_family == AF_INET6)
		daddr = &csk->daddr6.sin6_addr;
#endif
	else {
		pr_err("address family 0x%x not supported\n", csk->csk_family);
		goto rel_resource;
	}

	n = dst_neigh_lookup(csk->dst, daddr);

	if (!n) {
		pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
		goto rel_resource;
	}

	if (!(n->nud_state & NUD_VALID))
		neigh_event_send(n, NULL);

	csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
	if (csk->atid < 0) {
		pr_err("%s, NO atid available.\n", ndev->name);
		goto rel_resource_without_clip;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);

#ifdef CONFIG_CHELSIO_T4_DCB
	if (get_iscsi_dcb_state(ndev))
		priority = get_iscsi_dcb_priority(ndev);

	csk->dcb_priority = priority;
	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, priority);
#else
	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
#endif
	if (!csk->l2t) {
		pr_err("%s, cannot alloc l2t.\n", ndev->name);
		goto rel_resource_without_clip;
	}
	cxgbi_sock_get(csk);

#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif

	if (is_t4(lldi->adapter_type)) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else if (is_t5(lldi->adapter_type)) {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	} else {
		size = sizeof(struct cpl_t6_act_open_req);
		size6 = sizeof(struct cpl_t6_act_open_req6);
	}

	if (csk->csk_family == AF_INET)
		skb = alloc_wr(size, 0, GFP_NOIO);
#if IS_ENABLED(CONFIG_IPV6)
	else
		skb = alloc_wr(size6, 0, GFP_NOIO);
#endif

	if (!skb)
		goto rel_resource;
	skb->sk = (struct sock *)csk;
	t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);

	if (!csk->mtu)
		csk->mtu = dst_mtu(csk->dst);
	cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
	csk->tx_chan = cxgb4_port_chan(ndev);
	csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
	step = lldi->ntxq / lldi->nchan;
	csk->txq_idx = cxgb4_port_idx(ndev) * step;
	step = lldi->nrxq / lldi->nchan;
	rxq_idx = (cxgb4_port_idx(ndev) * step) + (cdev->rxq_idx_cntr % step);
	cdev->rxq_idx_cntr++;
	csk->rss_qid = lldi->rxq_ids[rxq_idx];
	linkspeed = ((struct port_info *)netdev_priv(ndev))->link_cfg.speed;
	csk->snd_win = cxgb4i_snd_win;
	csk->rcv_win = cxgb4i_rcv_win;
	if (cxgb4i_rcv_win <= 0) {
		csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN;
		rcv_winf = linkspeed / SPEED_10000;
		if (rcv_winf)
			csk->rcv_win *= rcv_winf;
	}
	if (cxgb4i_snd_win <= 0) {
		csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN;
		snd_winf = linkspeed / SPEED_10000;
		if (snd_winf)
			csk->snd_win *= snd_winf;
	}
	csk->wr_cred = lldi->wr_cred -
		       DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
	csk->wr_max_cred = csk->wr_cred;
	csk->wr_una_cred = 0;
	cxgbi_sock_reset_wr_list(csk);
	csk->err = 0;

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
		       (&csk->saddr), (&csk->daddr), csk, csk->state,
		       csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
		       csk->mtu, csk->mss_idx, csk->smac_idx);

	/* must wait for either a act_open_rpl or act_open_establish */
	if (!try_module_get(cdev->owner)) {
		pr_err("%s, try_module_get failed.\n", ndev->name);
		goto rel_resource;
	}

	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	if (csk->csk_family == AF_INET)
		send_act_open_req(csk, skb, csk->l2t);
#if IS_ENABLED(CONFIG_IPV6)
	else
		send_act_open_req6(csk, skb, csk->l2t);
#endif
	neigh_release(n);

	return 0;

rel_resource:
#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_release(ndev,
				   (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif
rel_resource_without_clip:
	if (n)
		neigh_release(n);
	if (skb)
		__kfree_skb(skb);
	return -EINVAL;
}

static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req_rss,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_FW4_ACK] = do_fw4_ack,
	[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
	[CPL_ISCSI_DATA] = do_rx_iscsi_data,
	[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
	[CPL_RX_DATA_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_CMP] = do_rx_iscsi_cmp,
	[CPL_RX_DATA] = do_rx_data,
};

static int cxgb4i_ofld_init(struct cxgbi_device *cdev)
{
	int rc;

	if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
		cxgb4i_max_connect = CXGB4I_MAX_CONN;

	rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
					 cxgb4i_max_connect);
	if (rc < 0)
		return rc;

	cdev->csk_release_offload_resources = release_offload_resources;
	cdev->csk_push_tx_frames = push_tx_frames;
	cdev->csk_send_abort_req = send_abort_req;
	cdev->csk_send_close_req = send_close_req;
	cdev->csk_send_rx_credits = send_rx_credits;
	cdev->csk_alloc_cpls = alloc_cpls;
	cdev->csk_init_act_open = init_act_open;

	pr_info("cdev 0x%p, offload up, added.\n", cdev);
	return 0;
}

static inline void
ulp_mem_io_set_hdr(struct cxgbi_device *cdev,
		   struct ulp_mem_io *req,
		   unsigned int wr_len, unsigned int dlen,
		   unsigned int pm_addr,
		   int tid)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

	INIT_ULPTX_WR(req, wr_len, 0, tid);
	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
			      FW_WR_ATOMIC_V(0));
	req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
			 ULP_MEMIO_ORDER_V(is_t4(lldi->adapter_type)) |
			 T5_ULP_MEMIO_IMM_V(!is_t4(lldi->adapter_type)));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);
}
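
/*
 * Illustrative note (not part of the original source): the ULP_TX memory
 * write built above addresses pagepod memory in 32-byte units, which is why
 * both the data length and the address are shifted right by 5 before being
 * placed in the request.  Assuming PPOD_SIZE_SHIFT is 6 (a 64-byte pagepod,
 * as defined in the cxgbi headers), a single pagepod write corresponds to a
 * dlen field of 64 >> 5 = 2.
 */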

static inline void
ulp_mem_io_set_hdr(struct cxgbi_device *cdev,
		   struct ulp_mem_io *req,
		   unsigned int wr_len, unsigned int dlen,
		   unsigned int pm_addr,
		   int tid)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

	INIT_ULPTX_WR(req, wr_len, 0, tid);
	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
			      FW_WR_ATOMIC_V(0));
	req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
			 ULP_MEMIO_ORDER_V(is_t4(lldi->adapter_type)) |
			 T5_ULP_MEMIO_IMM_V(!is_t4(lldi->adapter_type)));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);
}

static struct sk_buff *
ddp_ppod_init_idata(struct cxgbi_device *cdev,
		    struct cxgbi_ppm *ppm,
		    unsigned int idx, unsigned int npods,
		    unsigned int tid)
{
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	unsigned int dlen = npods << PPOD_SIZE_SHIFT;
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				      sizeof(struct ulptx_idata) + dlen, 16);
	struct sk_buff *skb = alloc_wr(wr_len, 0, GFP_ATOMIC);

	if (!skb) {
		pr_err("%s: %s idx %u, npods %u, OOM.\n",
		       __func__, ppm->ndev->name, idx, npods);
		return NULL;
	}

	ulp_mem_io_set_hdr(cdev, (struct ulp_mem_io *)skb->head, wr_len, dlen,
			   pm_addr, tid);

	return skb;
}

static int ddp_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
				struct cxgbi_task_tag_info *ttinfo,
				unsigned int idx, unsigned int npods,
				struct scatterlist **sg_pp,
				unsigned int *sg_off)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct sk_buff *skb = ddp_ppod_init_idata(cdev, ppm, idx, npods,
						  csk->tid);
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct cxgbi_pagepod *ppod;
	int i;

	if (!skb)
		return -ENOMEM;

	req = (struct ulp_mem_io *)skb->head;
	idata = (struct ulptx_idata *)(req + 1);
	ppod = (struct cxgbi_pagepod *)(idata + 1);

	for (i = 0; i < npods; i++, ppod++)
		cxgbi_ddp_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);

	cxgbi_skcb_set_flag(skb, SKCBF_TX_MEM_WRITE);
	cxgbi_skcb_set_flag(skb, SKCBF_TX_FLAG_COMPL);
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);

	spin_lock_bh(&csk->lock);
	cxgbi_sock_skb_entail(csk, skb);
	spin_unlock_bh(&csk->lock);

	return 0;
}

static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
		       struct cxgbi_task_tag_info *ttinfo)
{
	unsigned int pidx = ttinfo->idx;
	unsigned int npods = ttinfo->npods;
	unsigned int i, cnt;
	int err = 0;
	struct scatterlist *sg = ttinfo->sgl;
	unsigned int offset = 0;

	ttinfo->cid = csk->port_id;

	for (i = 0; i < npods; i += cnt, pidx += cnt) {
		cnt = npods - i;

		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		err = ddp_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
					   &sg, &offset);
		if (err < 0)
			break;
	}

	return err;
}
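
/*
 * DDP page-pod programming path: ddp_set_map() walks the task's page-pod
 * range in chunks of at most ULPMEM_IDATA_MAX_NPPODS. For each chunk,
 * ddp_ppod_init_idata() allocates a work request whose ULP_TX_MEM_WRITE
 * header is filled in by ulp_mem_io_set_hdr(); ddp_ppod_write_idata() then
 * populates the page pods from the scatterlist via cxgbi_ddp_set_one_ppod()
 * and queues the skb on the connection's transmit list under csk->lock.
 */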

static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
				int pg_idx)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* set up ulp page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);

	reinit_completion(&csk->cmpl);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	wait_for_completion(&csk->cmpl);

	return csk->err;
}

static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				 int hcrc, int dcrc)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!hcrc && !dcrc)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	csk->hcrc_len = (hcrc ? 4 : 0);
	csk->dcrc_len = (dcrc ? 4 : 0);
	/* set up ulp submode */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);

	reinit_completion(&csk->cmpl);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	wait_for_completion(&csk->cmpl);

	return csk->err;
}
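
/*
 * Both helpers above program per-connection TCB fields with
 * CPL_SET_TCB_FIELD: ddp_setup_conn_pgidx() writes the DDP page-size index
 * (mask 0x3 << 8) and ddp_setup_conn_digest() the iSCSI header/data digest
 * submode bits (mask 0x3 << 4). Each sends the request on the control queue
 * and sleeps on csk->cmpl until the reply is processed (CPL_SET_TCB_RPL is
 * routed to do_set_tcb_rpl() in cxgb4i_cplhandlers[]), then returns csk->err.
 */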

static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
{
	return (struct cxgbi_ppm *)(*((struct cxgb4_lld_info *)
				      (cxgbi_cdev_priv(cdev)))->iscsi_ppm);
}

static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[0];
	struct cxgbi_tag_format tformat;
	unsigned int ppmax;
	int i;

	if (!lldi->vr->iscsi.size) {
		pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
		return -EACCES;
	}

	cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ;
	ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;

	memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
	for (i = 0; i < 4; i++)
		tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
					& 0xF;
	cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);

	cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat, ppmax,
			    lldi->iscsi_llimit, lldi->vr->iscsi.start, 2);

	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
	cdev->csk_ddp_set_map = ddp_set_map;
	cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->cdev2ppm = cdev2ppm;

	return 0;
}

static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct cxgbi_device *cdev;
	struct port_info *pi;
	int i, rc;

	cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
	if (!cdev) {
		pr_info("t4 device 0x%p, register failed.\n", lldi);
		return NULL;
	}
	pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
		cdev, lldi->adapter_type, lldi->nports,
		lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
		lldi->nrxq, lldi->wr_cred);
	for (i = 0; i < lldi->nrxq; i++)
		log_debug(1 << CXGBI_DBG_DEV,
			  "t4 0x%p, rxq id #%d: %u.\n",
			  cdev, i, lldi->rxq_ids[i]);

	memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
	cdev->flags = CXGBI_FLAG_DEV_T4;
	cdev->pdev = lldi->pdev;
	cdev->ports = lldi->ports;
	cdev->nports = lldi->nports;
	cdev->mtus = lldi->mtus;
	cdev->nmtus = NMTUS;
	cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <=
				 CHELSIO_T5) ? cxgb4i_rx_credit_thres : 0;
	cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
	cdev->itp = &cxgb4i_iscsi_transport;
	cdev->owner = THIS_MODULE;

	cdev->pfvf = FW_PFVF_CMD_PFN_V(lldi->pf);
	pr_info("cdev 0x%p,%s, pfvf %u.\n",
		cdev, lldi->ports[0]->name, cdev->pfvf);

	rc = cxgb4i_ddp_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ddp init failed.\n", cdev);
		goto err_out;
	}
	rc = cxgb4i_ofld_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ofld init failed.\n", cdev);
		goto err_out;
	}

	rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
			    &cxgb4i_host_template, cxgb4i_stt);
	if (rc)
		goto err_out;

	for (i = 0; i < cdev->nports; i++) {
		pi = netdev_priv(lldi->ports[i]);
		cdev->hbas[i]->port_id = pi->port_id;
	}
	return cdev;

err_out:
	cxgbi_device_unregister(cdev);
	return ERR_PTR(-ENOMEM);
}

#define RX_PULL_LEN 128
static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
			     const struct pkt_gl *pgl)
{
	const struct cpl_act_establish *rpl;
	struct sk_buff *skb;
	unsigned int opc;
	struct cxgbi_device *cdev = handle;

	if (pgl == NULL) {
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_wr(len, 0, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
		if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				pgl->va, be64_to_cpu(*rsp),
				be64_to_cpu(*(u64 *)pgl->va),
				pgl->tot_len);
			return 0;
		}
		skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = (struct cpl_act_establish *)skb->data;
	opc = rpl->ot.opcode;
	log_debug(1 << CXGBI_DBG_TOE,
		  "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		  cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
	if (opc >= ARRAY_SIZE(cxgb4i_cplhandlers) || !cxgb4i_cplhandlers[opc]) {
		pr_err("No handler for opcode 0x%x.\n", opc);
		__kfree_skb(skb);
	} else
		cxgb4i_cplhandlers[opc](cdev, skb);

	return 0;
nomem:
	log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
	return 1;
}

static int t4_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct cxgbi_device *cdev = handle;

	switch (state) {
	case CXGB4_STATE_UP:
		pr_info("cdev 0x%p, UP.\n", cdev);
		break;
	case CXGB4_STATE_START_RECOVERY:
		pr_info("cdev 0x%p, RECOVERY.\n", cdev);
		/* close all connections */
		break;
	case CXGB4_STATE_DOWN:
		pr_info("cdev 0x%p, DOWN.\n", cdev);
		break;
	case CXGB4_STATE_DETACH:
		pr_info("cdev 0x%p, DETACH.\n", cdev);
		cxgbi_device_unregister(cdev);
		break;
	default:
		pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
		break;
	}
	return 0;
}
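
/*
 * ULD callbacks: t4_uld_add() runs once per adapter, copies the lower-level
 * driver info into the cxgbi_device private area, initializes DDP and
 * offload support and registers the SCSI hosts. t4_uld_rx_handler() turns
 * each ingress CPL message (inline response or packet gather list) into an
 * sk_buff and dispatches it by opcode through cxgb4i_cplhandlers[].
 * t4_uld_state_change() unregisters the device on CXGB4_STATE_DETACH.
 */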

#ifdef CONFIG_CHELSIO_T4_DCB
static int
cxgb4_dcb_change_notify(struct notifier_block *self, unsigned long val,
			void *data)
{
	int i, port = 0xFF;
	struct net_device *ndev;
	struct cxgbi_device *cdev = NULL;
	struct dcb_app_type *iscsi_app = data;
	struct cxgbi_ports_map *pmap;
	u8 priority;

	if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
		if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)
			return NOTIFY_DONE;

		priority = iscsi_app->app.priority;
	} else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
		if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
			return NOTIFY_DONE;

		if (!iscsi_app->app.priority)
			return NOTIFY_DONE;

		priority = ffs(iscsi_app->app.priority) - 1;
	} else {
		return NOTIFY_DONE;
	}

	if (iscsi_app->app.protocol != 3260)
		return NOTIFY_DONE;

	log_debug(1 << CXGBI_DBG_ISCSI, "iSCSI priority for ifid %d is %u\n",
		  iscsi_app->ifindex, priority);

	ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
	if (!ndev)
		return NOTIFY_DONE;

	cdev = cxgbi_device_find_by_netdev_rcu(ndev, &port);

	dev_put(ndev);
	if (!cdev)
		return NOTIFY_DONE;

	pmap = &cdev->pmap;

	for (i = 0; i < pmap->used; i++) {
		if (pmap->port_csk[i]) {
			struct cxgbi_sock *csk = pmap->port_csk[i];

			if (csk->dcb_priority != priority) {
				iscsi_conn_failure(csk->user_data,
						   ISCSI_ERR_CONN_FAILED);
				pr_info("Restarting iSCSI connection %p with "
					"priority %u->%u.\n", csk,
					csk->dcb_priority, priority);
			}
		}
	}
	return NOTIFY_OK;
}
#endif

static int __init cxgb4i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
	if (rc < 0)
		return rc;
	cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);

#ifdef CONFIG_CHELSIO_T4_DCB
	pr_info("%s dcb enabled.\n", DRV_MODULE_NAME);
	register_dcbevent_notifier(&cxgb4_dcb_change);
#endif
	return 0;
}

static void __exit cxgb4i_exit_module(void)
{
#ifdef CONFIG_CHELSIO_T4_DCB
	unregister_dcbevent_notifier(&cxgb4_dcb_change);
#endif
	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
}

module_init(cxgb4i_init_module);
module_exit(cxgb4i_exit_module);