/*
 * cxgb4i.c: Chelsio T4 iSCSI driver.
 *
 * Copyright (c) 2010-2015 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by:	Karen Xie (kxie@chelsio.com)
 *		Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4fw_api.h"
#include "l2t.h"
#include "cxgb4i.h"
#include "clip_tbl.h"

static unsigned int dbg_level;

#include "../libcxgbi.h"

#ifdef CONFIG_CHELSIO_T4_DCB
#include <net/dcbevent.h>
#include "cxgb4_dcb.h"
#endif

#define	DRV_MODULE_NAME		"cxgb4i"
#define DRV_MODULE_DESC		"Chelsio T4-T6 iSCSI Driver"
#define	DRV_MODULE_VERSION	"0.9.5-ko"
#define DRV_MODULE_RELDATE	"Apr. 2015"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");

#define CXGB4I_DEFAULT_10G_RCV_WIN (256 * 1024)
static int cxgb4i_rcv_win = -1;
module_param(cxgb4i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");

#define CXGB4I_DEFAULT_10G_SND_WIN (128 * 1024)
static int cxgb4i_snd_win = -1;
module_param(cxgb4i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");

static int cxgb4i_rx_credit_thres = 10 * 1024;
module_param(cxgb4i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
		 "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb4i_max_connect = (8 * 1024);
module_param(cxgb4i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");

static unsigned short cxgb4i_sport_base = 20000;
module_param(cxgb4i_sport_base, ushort, 0644);
MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");

typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);

static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);
static inline int send_tx_flowc_wr(struct cxgbi_sock *);

static const struct cxgb4_uld_info cxgb4i_uld_info = {
	.name = DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 1024,
	.lro = false,
	.add = t4_uld_add,
	.rx_handler = t4_uld_rx_handler,
	.state_change = t4_uld_state_change,
};

static struct scsi_host_template cxgb4i_host_template = {
	.module		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.proc_name	= DRV_MODULE_NAME,
	.can_queue	= CXGB4I_SCSI_HOST_QDEPTH,
	.queuecommand	= iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.sg_tablesize	= SG_ALL,
	.max_sectors	= 0xFFFF,
	.cmd_per_lun	= ISCSI_DEF_CMD_PER_LUN,
	.eh_timed_out	= iscsi_eh_cmd_timed_out,
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc	= iscsi_target_alloc,
	.dma_boundary	= PAGE_SIZE - 1,
	.this_id	= -1,
	.track_queue_depth = 1,
};

static struct iscsi_transport cxgb4i_iscsi_transport = {
	.owner		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.caps		= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
			  CAP_DATADGST | CAP_DIGEST_OFFLOAD |
			  CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
	.attr_is_visible = cxgbi_attr_is_visible,
	.get_host_param	= cxgbi_get_host_param,
	.set_host_param	= cxgbi_set_host_param,
	/* session management */
	.create_session	= cxgbi_create_session,
	.destroy_session = cxgbi_destroy_session,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn	= cxgbi_create_conn,
	.bind_conn	= cxgbi_bind_conn,
	.destroy_conn	= iscsi_tcp_conn_teardown,
	.start_conn	= iscsi_conn_start,
	.stop_conn	= iscsi_conn_stop,
	.get_conn_param	= iscsi_conn_get_param,
	.set_param	= cxgbi_set_conn_param,
	.get_stats	= cxgbi_get_conn_stats,
	/* pdu xmit req from user space */
	.send_pdu	= iscsi_conn_send_pdu,
	/* task */
	.init_task	= iscsi_tcp_task_init,
	.xmit_task	= iscsi_tcp_task_xmit,
	.cleanup_task	= cxgbi_cleanup_task,
	/* pdu */
	.alloc_pdu	= cxgbi_conn_alloc_pdu,
	.init_pdu	= cxgbi_conn_init_pdu,
	.xmit_pdu	= cxgbi_conn_xmit_pdu,
	.parse_pdu_itt	= cxgbi_parse_pdu_itt,
	/* TCP connect/disconnect */
	.get_ep_param	= cxgbi_get_ep_param,
	.ep_connect	= cxgbi_ep_connect,
	.ep_poll	= cxgbi_ep_poll,
	.ep_disconnect	= cxgbi_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

#ifdef CONFIG_CHELSIO_T4_DCB
static int
cxgb4_dcb_change_notify(struct notifier_block *, unsigned long, void *);

static struct notifier_block cxgb4_dcb_change = {
	.notifier_call = cxgb4_dcb_change_notify,
};
#endif

static struct scsi_transport_template *cxgb4i_stt;

/*
 * CPL (Chelsio Protocol Language) defines a message passing interface between
 * the host driver and the Chelsio ASIC.
 * The section below implements the CPLs related to iSCSI TCP connection
 * open/close/abort and data send/receive.
 */

#define RCV_BUFSIZ_MASK		0x3FFU
#define MAX_IMM_TX_PKT_LEN	256

static int push_tx_frames(struct cxgbi_sock *, int);

/*
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data. We currently use the same limit as for Ethernet packets.
 */
static inline bool is_ofld_imm(const struct sk_buff *skb)
{
	int len = skb->len;

	if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
		len += sizeof(struct fw_ofld_tx_data_wr);

	return len <= MAX_IMM_TX_PKT_LEN;
}
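/*
 * Illustration (sizes assumed, not taken from the original source): with
 * MAX_IMM_TX_PKT_LEN at 256 bytes, a PDU that still needs its
 * fw_ofld_tx_data_wr header prepended qualifies for immediate data only if
 * skb->len plus the WR header size stays within 256 bytes. Immediate PDUs
 * are copied inline into the work request; larger PDUs are described by a
 * scatter/gather list and fetched by DMA (see calc_tx_flits_ofld() below).
 */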
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
			      struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				(((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(csk->mss_idx) |
		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(csk->rcv_win >> 10);

	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F |
		RSS_QUEUE_V(csk->rss_qid);

	if (is_t4(lldi->adapter_type)) {
		struct cpl_act_open_req *req =
				(struct cpl_act_open_req *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be32(cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t));
		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	} else if (is_t5(lldi->adapter_type)) {
		struct cpl_t5_act_open_req *req =
				(struct cpl_t5_act_open_req *)skb->head;
		u32 isn = (prandom_u32() & ~7UL) - 1;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be64(FILTER_TUPLE_V(
				cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t)));
		req->rsvd = cpu_to_be32(isn);
		opt2 |= T5_ISS_VALID;
		opt2 |= T5_OPT_2_VALID_F;

		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	} else {
		struct cpl_t6_act_open_req *req =
				(struct cpl_t6_act_open_req *)skb->head;
		u32 isn = (prandom_u32() & ~7UL) - 1;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be64(FILTER_TUPLE_V(
				cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t)));
		req->rsvd = cpu_to_be32(isn);

		opt2 |= T5_ISS_VALID;
		opt2 |= RX_FC_DISABLE_F;
		opt2 |= T5_OPT_2_VALID_F;

		req->opt2 = cpu_to_be32(opt2);
		req->rsvd2 = cpu_to_be32(0);
		req->opt3 = cpu_to_be32(0);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       CHELSIO_CHIP_VERSION(lldi->adapter_type), csk,
		       csk->state, csk->flags, csk->atid, csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
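/*
 * Note on the qid_atid encoding used above (worked example): the atid
 * occupies the low 14 bits and the RSS queue id is placed above it, so
 * atid 0x12 on rss_qid 5 is encoded as (5 << 14) | 0x12 = 0x14012. The
 * same packing is used for the IPv6 open request below.
 */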
#if IS_ENABLED(CONFIG_IPV6)
static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
			       struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				(((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(csk->mss_idx) |
		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(csk->rcv_win >> 10);

	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F |
		RSS_QUEUE_V(csk->rss_qid);

	if (is_t4(lldi->adapter_type)) {
		struct cpl_act_open_req6 *req =
			    (struct cpl_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;

		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
									8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
									8);

		req->opt0 = cpu_to_be64(opt0);

		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be32(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t));
	} else if (is_t5(lldi->adapter_type)) {
		struct cpl_t5_act_open_req6 *req =
				(struct cpl_t5_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;
		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
									8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
									8);
		req->opt0 = cpu_to_be64(opt0);

		opt2 |= T5_OPT_2_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t)));
	} else {
		struct cpl_t6_act_open_req6 *req =
				(struct cpl_t6_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;
		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
									8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
									8);
		req->opt0 = cpu_to_be64(opt0);

		opt2 |= RX_FC_DISABLE_F;
		opt2 |= T5_OPT_2_VALID_F;

		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t)));

		req->rsvd2 = cpu_to_be32(0);
		req->opt3 = cpu_to_be32(0);
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
		CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state,
		csk->flags, csk->atid,
		&csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
		csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
#endif
static void send_close_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = csk->tid;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, tid %u.\n",
		  csk, csk->state, csk->flags, csk->tid);
	csk->cpl_close = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	cxgbi_sock_skb_entail(csk, skb);
	if (csk->state >= CTP_ESTABLISHED)
		push_tx_frames(csk, 1);
}

static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
	struct cpl_abort_req *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, tid %u, abort.\n",
		  csk, csk->state, csk->flags, csk->tid);
	req = (struct cpl_abort_req *)skb->data;
	req->cmd = CPL_ABORT_NO_RST;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

static void send_abort_req(struct cxgbi_sock *csk)
{
	struct cpl_abort_req *req;
	struct sk_buff *skb = csk->cpl_abort_req;

	if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
		return;

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_state(csk, CTP_ABORTING);
	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
	cxgbi_sock_purge_write_queue(csk);

	csk->cpl_abort_req = NULL;
	req = (struct cpl_abort_req *)skb->head;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	req->cmd = CPL_ABORT_SEND_RST;
	t4_set_arp_err_handler(skb, csk, abort_arp_failure);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
	req->rsvd0 = htonl(csk->snd_nxt);
	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
		  req->rsvd1);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
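/*
 * Abort handshake summary (descriptive note): send_abort_req() posts a
 * CPL_ABORT_REQ with CPL_ABORT_SEND_RST so the hardware emits a TCP RST to
 * the peer; if ARP resolution for that request fails, abort_arp_failure()
 * above downgrades the command to CPL_ABORT_NO_RST so the tid is still torn
 * down without a RST going out on the wire. A peer-initiated abort is
 * acknowledged instead with the CPL_ABORT_RPL built below.
 */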
static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
	struct sk_buff *skb = csk->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u, status %d.\n",
		  csk, csk->state, csk->flags, csk->tid, rst_status);

	csk->cpl_abort_rpl = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(rpl, csk->tid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
	rpl->cmd = rst_status;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
		  csk, csk->state, csk->flags, csk->tid, credits);

	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
	if (!skb) {
		pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
		return 0;
	}
	req = (struct cpl_rx_data_ack *)skb->head;

	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
				      csk->tid));
	req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
				       | RX_FORCE_ACK_F);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return credits;
}

/*
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

/*
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);
	flits = skb_transport_offset(skb) / 8;
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}
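/*
 * Worked example for the flit math above (a flit is 8 bytes, as implied by
 * the DIV_ROUND_UP(skb->len, 8) in calc_tx_flits_ofld()): an SGL holding
 * n = 4 fragments costs sgl_len(4) = (3 * 3) / 2 + (3 & 1) + 2 = 7 flits,
 * i.e. 56 bytes of descriptor space in the work request.
 */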
#define FLOWC_WR_NPARAMS_MIN	9
static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
{
	int nparams, flowclen16, flowclen;

	nparams = FLOWC_WR_NPARAMS_MIN;
#ifdef CONFIG_CHELSIO_T4_DCB
	nparams++;
#endif
	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;
	/*
	 * Return the number of 16-byte credits used by the FlowC request.
	 * Pass back the nparams and actual FlowC length if requested.
	 */
	if (nparamsp)
		*nparamsp = nparams;
	if (flowclenp)
		*flowclenp = flowclen;

	return flowclen16;
}

static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;
	struct fw_flowc_wr *flowc;
	int nparams, flowclen16, flowclen;

#ifdef CONFIG_CHELSIO_T4_DCB
	u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
#endif
	flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
	skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
	flowc = (struct fw_flowc_wr *)skb->head;
	flowc->op_to_nparams =
		htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 =
		htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htonl(csk->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htonl(csk->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htonl(csk->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = htonl(csk->snd_nxt);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = htonl(csk->rcv_nxt);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = htonl(csk->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = htonl(csk->advmss);
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
	flowc->mnemval[8].val = 16384;
#ifdef CONFIG_CHELSIO_T4_DCB
	flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
	if (vlan == CPL_L2T_VLAN_NONE) {
		pr_warn_ratelimited("csk %u without VLAN Tag on DCB Link\n",
				    csk->tid);
		flowc->mnemval[9].val = cpu_to_be32(0);
	} else {
		flowc->mnemval[9].val = cpu_to_be32((vlan & VLAN_PRIO_MASK) >>
						    VLAN_PRIO_SHIFT);
	}
#endif

	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
		  csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
		  csk->snd_nxt, csk->rcv_nxt, csk->snd_win,
		  csk->advmss);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);

	return flowclen16;
}
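/*
 * The FlowC work request itself consumes TX credits: tx_flowc_wr_credits()
 * returns its cost in 16-byte units (flowclen16), and push_tx_frames()
 * below charges that amount against csk->wr_cred before the first payload
 * WR goes out. With FLOWC_WR_NPARAMS_MIN (9) mnemonic/value pairs the
 * request is rounded up to a whole number of 16-byte credits; the exact
 * byte count depends on the fw_flowc_wr layout in t4fw_api.h.
 */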
static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
				   int dlen, int len, u32 credits, int compl)
{
	struct fw_ofld_tx_data_wr *req;
	unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
	unsigned int wr_ulp_mode = 0, val;
	bool imm = is_ofld_imm(skb);

	req = __skb_push(skb, sizeof(*req));

	if (imm) {
		req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
					FW_WR_COMPL_F |
					FW_WR_IMMDLEN_V(dlen));
		req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
						FW_WR_LEN16_V(credits));
	} else {
		req->op_to_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
					FW_WR_COMPL_F |
					FW_WR_IMMDLEN_V(0));
		req->flowid_len16 =
			cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
					FW_WR_LEN16_V(credits));
	}
	if (submode)
		wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) |
				FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
	val = skb_peek(&csk->write_queue) ? 0 : 1;
	req->tunnel_to_proxy = htonl(wr_ulp_mode |
				     FW_OFLD_TX_DATA_WR_SHOVE_V(val));
	req->plen = htonl(len);
	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
}

static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);
}

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;

	if (unlikely(csk->state < CTP_ESTABLISHED ||
		csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
			  1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		return 0;
	}

	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
		int dlen = skb->len;
		int len = skb->len;
		unsigned int credits_needed;
		int flowclen16 = 0;

		skb_reset_transport_header(skb);
		if (is_ofld_imm(skb))
			credits_needed = DIV_ROUND_UP(dlen, 16);
		else
			credits_needed = DIV_ROUND_UP(
						8 * calc_tx_flits_ofld(skb),
						16);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
			credits_needed += DIV_ROUND_UP(
					sizeof(struct fw_ofld_tx_data_wr),
					16);

		/*
		 * Assumes the initial credits is large enough to support
		 * fw_flowc_wr plus largest possible first payload
		 */
		if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
			flowclen16 = send_tx_flowc_wr(csk);
			csk->wr_cred -= flowclen16;
			csk->wr_una_cred += flowclen16;
			cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
		}

		if (csk->wr_cred < credits_needed) {
			log_debug(1 << CXGBI_DBG_PDU_TX,
				  "csk 0x%p, skb %u/%u, wr %d < %u.\n",
				  csk, skb->len, skb->data_len,
				  credits_needed, csk->wr_cred);
			break;
		}
		__skb_unlink(skb, &csk->write_queue);
		set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
		skb->csum = credits_needed + flowclen16;
		csk->wr_cred -= credits_needed;
		csk->wr_una_cred += credits_needed;
		cxgbi_sock_enqueue_wr(csk, skb);

		log_debug(1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
			  csk, skb->len, skb->data_len, credits_needed,
			  csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
			len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
			make_tx_data_wr(csk, skb, dlen, len, credits_needed,
					req_completion);
			csk->snd_nxt += len;
			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
		} else if (cxgbi_skcb_test_flag(skb, SKCBF_TX_FLAG_COMPL) &&
			   (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
			struct cpl_close_con_req *req =
				(struct cpl_close_con_req *)skb->data;
			req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
		}
		total_size += skb->truesize;
		t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
			  csk, csk->state, csk->flags, csk->tid, skb, len);

		cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
	}
	return total_size;
}

static inline void free_atid(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);

	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
		cxgb4_free_atid(lldi->tids, csk->atid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
		cxgbi_sock_put(csk);
	}
}
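/*
 * TX credit accounting (descriptive note): push_tx_frames() stores each
 * WR's credit cost in skb->csum and moves the skb onto the pending-WR list
 * via cxgbi_sock_enqueue_wr(). When the firmware later returns credits in
 * a CPL_FW4_ACK (see do_fw4_ack() and cxgbi_sock_rcv_wr_ack() in libcxgbi),
 * the acked skbs are released and csk->wr_cred is replenished.
 */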
static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
	unsigned short tcp_opt = ntohs(req->tcp_opt);
	unsigned int tid = GET_TID(req);
	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 rcv_isn = be32_to_cpu(req->rcv_isn);

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
		goto rel_skb;
	}

	if (csk->atid != atid) {
		pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
		       atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
		goto rel_skb;
	}

	pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       atid, tid, csk, csk->state, csk->flags, rcv_isn);

	module_put(cdev->owner);

	cxgbi_sock_get(csk);
	csk->tid = tid;
	cxgb4_insert_tid(lldi->tids, csk, tid, csk->csk_family);
	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);

	free_atid(csk);

	spin_lock_bh(&csk->lock);
	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
			csk, csk->state, csk->flags, csk->tid);

	if (csk->retry_timer.function) {
		del_timer(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}

	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
	/*
	 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
	 * pass through opt0.
	 */
	if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10))
		csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10);

	csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
	if (TCPOPT_TSTAMP_G(tcp_opt))
		csk->advmss -= 12;
	if (csk->advmss < 128)
		csk->advmss = 128;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, mss_idx %u, advmss %u.\n",
		  csk, TCPOPT_MSS_G(tcp_opt), csk->advmss);

	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
		send_abort_req(csk);
	else {
		if (skb_queue_len(&csk->write_queue))
			push_tx_frames(csk, 0);
		cxgbi_conn_tx_open(csk);
	}
	spin_unlock_bh(&csk->lock);

rel_skb:
	__kfree_skb(skb);
}

static int act_open_rpl_status_to_errno(int status)
{
	switch (status) {
	case CPL_ERR_CONN_RESET:
		return -ECONNREFUSED;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

static void csk_act_open_retry_timer(struct timer_list *t)
{
	struct sk_buff *skb = NULL;
	struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
				   struct l2t_entry *);
	int t4 = is_t4(lldi->adapter_type), size, size6;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (t4) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	}

	if (csk->csk_family == AF_INET) {
		send_act_open_func = send_act_open_req;
		skb = alloc_wr(size, 0, GFP_ATOMIC);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		send_act_open_func = send_act_open_req6;
		skb = alloc_wr(size6, 0, GFP_ATOMIC);
#endif
	}

	if (!skb)
		cxgbi_sock_fail_act_open(csk, -ENOMEM);
	else {
		skb->sk = (struct sock *)csk;
		t4_set_arp_err_handler(skb, csk,
				       cxgbi_sock_act_open_req_arp_failure);
		send_act_open_func(csk, skb, csk->l2t);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);

}

static inline bool is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
		status == CPL_ERR_KEEPALV_NEG_ADVICE ||
		status == CPL_ERR_PERSIST_NEG_ADVICE;
}

static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	unsigned int atid =
		TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status)));
	unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
		goto rel_skb;
	}

	pr_info_ipaddr("tid %u/%u, status %u.\n"
		       "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
		       atid, tid, status, csk, csk->state, csk->flags);

	if (is_neg_adv(status))
		goto rel_skb;

	module_put(cdev->owner);

	if (status && status != CPL_ERR_TCAM_FULL &&
	    status != CPL_ERR_CONN_EXIST &&
	    status != CPL_ERR_ARP_MISS)
		cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl),
				 csk->csk_family);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (status == CPL_ERR_CONN_EXIST &&
	    csk->retry_timer.function != csk_act_open_retry_timer) {
		csk->retry_timer.function = csk_act_open_retry_timer;
		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
	} else
		cxgbi_sock_fail_act_open(csk,
					 act_open_rpl_status_to_errno(status));

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_peer_close(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
rel_skb:
	__kfree_skb(skb);
}

static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
				 int *need_rst)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN:	/* fall through */
	case CPL_ERR_CONN_RESET:
		return csk->state > CTP_ESTABLISHED ?
			-EPIPE : -ECONNRESET;
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return -ETIMEDOUT;
	default:
		return -EIO;
	}
}

static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	int rst_status = CPL_ABORT_NO_RST;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid, req->status);

	if (is_neg_adv(req->status))
		goto rel_skb;

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
	cxgbi_sock_set_state(csk, CTP_ABORTING);

	send_abort_rpl(csk, rst_status);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk)
		goto rel_skb;

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
		       (&csk->saddr), (&csk->daddr), csk,
		       csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status == CPL_ERR_ABORT_FAILED)
		goto rel_skb;

	cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data;
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk) {
		pr_err("can't find connection for tid %u.\n", tid);
	} else {
		/* not expecting this, reset the connection. */
		pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid);
		spin_lock_bh(&csk->lock);
		send_abort_req(csk);
		spin_unlock_bh(&csk->lock);
	}
	__kfree_skb(skb);
}

static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, skb, skb->len,
		  pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr) {
		unsigned char *bhs;
		unsigned int hlen, dlen, plen;

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
			  csk, csk->state, csk->flags, csk->tid, skb);
		csk->skb_ulp_lhdr = skb;
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);

		if ((CHELSIO_CHIP_VERSION(lldi->adapter_type) <= CHELSIO_T5) &&
		    (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt)) {
			pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
				csk->tid, cxgbi_skcb_tcp_seq(skb),
				csk->rcv_nxt);
			goto abort_conn;
		}

		bhs = skb->data;
		hlen = ntohs(cpl->len);
		dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;

		plen = ISCSI_PDU_LEN_G(pdu_len_ddp);
		if (is_t4(lldi->adapter_type))
			plen -= 40;

		if ((hlen + dlen) != plen) {
			pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
				"mismatch %u != %u + %u, seq 0x%x.\n",
				csk->tid, plen, hlen, dlen,
				cxgbi_skcb_tcp_seq(skb));
			goto abort_conn;
		}

		cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
		if (dlen)
			cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
		csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
			  csk, skb, *bhs, hlen, dlen,
			  ntohl(*((unsigned int *)(bhs + 16))),
			  ntohl(*((unsigned int *)(bhs + 24))));

	} else {
		struct sk_buff *lskb = csk->skb_ulp_lhdr;

		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
			  csk, csk->state, csk->flags, skb, lskb);
	}

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
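/*
 * RX reassembly overview (descriptive note): a received PDU arrives as a
 * short CPL sequence. CPL_ISCSI_HDR carries the BHS and is remembered in
 * csk->skb_ulp_lhdr; CPL_ISCSI_DATA (below) carries payload that was not
 * placed directly; CPL_RX_DATA_DDP or CPL_RX_ISCSI_CMP then delivers the
 * final status and digest results, at which point cxgbi_conn_pdu_ready()
 * hands the completed PDU to libcxgbi.
 */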
static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct sk_buff *lskb;
	u32 tid = GET_TID(cpl);
	u16 pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, skb,
		  skb->len, pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);

		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr)
		csk->skb_ulp_lhdr = skb;

	lskb = csk->skb_ulp_lhdr;
	cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
		  csk, csk->state, csk->flags, skb, lskb);

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

static void
cxgb4i_process_ddpvld(struct cxgbi_sock *csk,
		      struct sk_buff *skb, u32 ddpvld)
{
	if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
			csk, skb, ddpvld, cxgbi_skcb_flags(skb));
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
	}

	if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
			csk, skb, ddpvld, cxgbi_skcb_flags(skb));
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
	}

	if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
			  csk, skb, ddpvld);
		cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
	}

	if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
	    !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
			  csk, skb, ddpvld);
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
	}
}

static void do_rx_data_ddp(struct cxgbi_device *cdev,
			   struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct sk_buff *lskb;
	struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 ddpvld = be32_to_cpu(rpl->ddpvld);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
		  csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr);
	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	if (!csk->skb_ulp_lhdr) {
		pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
		goto abort_conn;
	}

	lskb = csk->skb_ulp_lhdr;
	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);

	if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
		pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
			csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));

	cxgb4i_process_ddpvld(csk, lskb, ddpvld);

	log_debug(1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
		  csk, lskb, cxgbi_skcb_flags(lskb));

	cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);
	goto rel_skb;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
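/*
 * do_rx_iscsi_cmp() below handles CPL_RX_ISCSI_CMP, which the hardware can
 * generate to combine the final PDU header and completion status in one
 * message; the handler stitches it together with any CPL_ISCSI_DATA skb
 * already queued so that libcxgbi sees the same HDR/DATA/STATUS flags as on
 * the CPL_RX_DATA_DDP path above.
 */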
static void
do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct sk_buff *data_skb = NULL;
	u32 tid = GET_TID(rpl);
	u32 ddpvld = be32_to_cpu(rpl->ddpvld);
	u32 seq = be32_to_cpu(rpl->seq);
	u16 pdu_len_ddp = be16_to_cpu(rpl->pdu_len_ddp);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, "
		  "pdu_len_ddp %u, status %u.\n",
		  csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr,
		  ntohs(rpl->len), pdu_len_ddp, rpl->status);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);

		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = seq;
	cxgbi_skcb_flags(skb) = 0;
	cxgbi_skcb_rx_pdulen(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*rpl));
	__pskb_trim(skb, be16_to_cpu(rpl->len));

	csk->rcv_nxt = seq + pdu_len_ddp;

	if (csk->skb_ulp_lhdr) {
		data_skb = skb_peek(&csk->receive_queue);
		if (!data_skb ||
		    !cxgbi_skcb_test_flag(data_skb, SKCBF_RX_DATA)) {
			pr_err("Error! freelist data not found 0x%p, tid %u\n",
			       data_skb, tid);

			goto abort_conn;
		}
		__skb_unlink(data_skb, &csk->receive_queue);

		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA);

		__skb_queue_tail(&csk->receive_queue, skb);
		__skb_queue_tail(&csk->receive_queue, data_skb);
	} else {
		__skb_queue_tail(&csk->receive_queue, skb);
	}

	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
	cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
	cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL);
	cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc);

	cxgb4i_process_ddpvld(csk, skb, ddpvld);

	log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n",
		  csk, skb, cxgbi_skcb_flags(skb));

	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);

	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk))
		pr_err("can't find connection for tid %u.\n", tid);
	else {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u.\n",
			  csk, csk->state, csk->flags, csk->tid);
		cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
				      rpl->seq_vld);
	}
	__kfree_skb(skb);
}

static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct cxgbi_sock *csk;

	csk = lookup_tid(t, tid);
	if (!csk) {
		pr_err("can't find conn. for tid %u.\n", tid);
		return;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status != CPL_ERR_NONE) {
		pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
		       csk, tid, rpl->status);
		csk->err = -EINVAL;
	}

	complete(&csk->cmpl);

	__kfree_skb(skb);
}

static int alloc_cpls(struct cxgbi_sock *csk)
{
	csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
					0, GFP_KERNEL);
	if (!csk->cpl_close)
		return -ENOMEM;

	csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
					0, GFP_KERNEL);
	if (!csk->cpl_abort_req)
		goto free_cpls;

	csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
					0, GFP_KERNEL);
	if (!csk->cpl_abort_rpl)
		goto free_cpls;
	return 0;

free_cpls:
	cxgbi_sock_free_cpl_skbs(csk);
	return -ENOMEM;
}

static inline void l2t_put(struct cxgbi_sock *csk)
{
	if (csk->l2t) {
		cxgb4_l2t_release(csk->l2t);
		csk->l2t = NULL;
		cxgbi_sock_put(csk);
	}
}

static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi;
#if IS_ENABLED(CONFIG_IPV6)
	struct net_device *ndev = csk->cdev->ports[csk->port_id];
#endif

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_free_cpl_skbs(csk);
	cxgbi_sock_purge_write_queue(csk);
	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}

	l2t_put(csk);
#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_release(ndev,
				   (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif

	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		lldi = cxgbi_cdev_priv(csk->cdev);
		cxgb4_remove_tid(lldi->tids, 0, csk->tid,
				 csk->csk_family);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
}

#ifdef CONFIG_CHELSIO_T4_DCB
static inline u8 get_iscsi_dcb_state(struct net_device *ndev)
{
	return ndev->dcbnl_ops->getstate(ndev);
}

static int select_priority(int pri_mask)
{
	if (!pri_mask)
		return 0;
	return (ffs(pri_mask) - 1);
}

static u8 get_iscsi_dcb_priority(struct net_device *ndev)
{
	int rv;
	u8 caps;

	struct dcb_app iscsi_dcb_app = {
		.protocol = 3260
	};

	rv = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
	if (rv)
		return 0;

	if (caps & DCB_CAP_DCBX_VER_IEEE) {
		iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM;
		rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
		if (!rv) {
			iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
			rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
		}
	} else if (caps & DCB_CAP_DCBX_VER_CEE) {
		iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
		rv = dcb_getapp(ndev, &iscsi_dcb_app);
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		  "iSCSI priority is set to %u\n", select_priority(rv));
	return select_priority(rv);
}
#endif
static int init_act_open(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct sk_buff *skb = NULL;
	struct neighbour *n = NULL;
	void *daddr;
	unsigned int step;
	unsigned int rxq_idx;
	unsigned int size, size6;
	unsigned int linkspeed;
	unsigned int rcv_winf, snd_winf;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 priority = 0;
#endif
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	if (csk->csk_family == AF_INET)
		daddr = &csk->daddr.sin_addr.s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	else if (csk->csk_family == AF_INET6)
		daddr = &csk->daddr6.sin6_addr;
#endif
	else {
		pr_err("address family 0x%x not supported\n", csk->csk_family);
		goto rel_resource;
	}

	n = dst_neigh_lookup(csk->dst, daddr);

	if (!n) {
		pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
		goto rel_resource;
	}

	if (!(n->nud_state & NUD_VALID))
		neigh_event_send(n, NULL);

	csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
	if (csk->atid < 0) {
		pr_err("%s, NO atid available.\n", ndev->name);
		goto rel_resource_without_clip;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);

#ifdef CONFIG_CHELSIO_T4_DCB
	if (get_iscsi_dcb_state(ndev))
		priority = get_iscsi_dcb_priority(ndev);

	csk->dcb_priority = priority;
	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, priority);
#else
	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
#endif
	if (!csk->l2t) {
		pr_err("%s, cannot alloc l2t.\n", ndev->name);
		goto rel_resource_without_clip;
	}
	cxgbi_sock_get(csk);

#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif

	if (is_t4(lldi->adapter_type)) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else if (is_t5(lldi->adapter_type)) {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	} else {
		size = sizeof(struct cpl_t6_act_open_req);
		size6 = sizeof(struct cpl_t6_act_open_req6);
	}

	if (csk->csk_family == AF_INET)
		skb = alloc_wr(size, 0, GFP_NOIO);
#if IS_ENABLED(CONFIG_IPV6)
	else
		skb = alloc_wr(size6, 0, GFP_NOIO);
#endif

	if (!skb)
		goto rel_resource;
	skb->sk = (struct sock *)csk;
	t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);

	if (!csk->mtu)
		csk->mtu = dst_mtu(csk->dst);
	cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
	csk->tx_chan = cxgb4_port_chan(ndev);
	csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
	step = lldi->ntxq / lldi->nchan;
	csk->txq_idx = cxgb4_port_idx(ndev) * step;
	step = lldi->nrxq / lldi->nchan;
	rxq_idx = (cxgb4_port_idx(ndev) * step) + (cdev->rxq_idx_cntr % step);
	cdev->rxq_idx_cntr++;
	csk->rss_qid = lldi->rxq_ids[rxq_idx];
	linkspeed = ((struct port_info *)netdev_priv(ndev))->link_cfg.speed;
	csk->snd_win = cxgb4i_snd_win;
	csk->rcv_win = cxgb4i_rcv_win;
	if (cxgb4i_rcv_win <= 0) {
		csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN;
		rcv_winf = linkspeed / SPEED_10000;
		if (rcv_winf)
			csk->rcv_win *= rcv_winf;
	}
	if (cxgb4i_snd_win <= 0) {
		csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN;
		snd_winf = linkspeed / SPEED_10000;
		if (snd_winf)
			csk->snd_win *= snd_winf;
	}
	csk->wr_cred = lldi->wr_cred -
		       DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
	csk->wr_max_cred = csk->wr_cred;
	csk->wr_una_cred = 0;
	cxgbi_sock_reset_wr_list(csk);
	csk->err = 0;

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
		       (&csk->saddr), (&csk->daddr), csk, csk->state,
		       csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
		       csk->mtu, csk->mss_idx, csk->smac_idx);

	/* must wait for either a act_open_rpl or act_open_establish */
	if (!try_module_get(cdev->owner)) {
		pr_err("%s, try_module_get failed.\n", ndev->name);
		goto rel_resource;
	}

	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	if (csk->csk_family == AF_INET)
		send_act_open_req(csk, skb, csk->l2t);
#if IS_ENABLED(CONFIG_IPV6)
	else
		send_act_open_req6(csk, skb, csk->l2t);
#endif
	neigh_release(n);

	return 0;

rel_resource:
#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_release(ndev,
				   (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif
rel_resource_without_clip:
	if (n)
		neigh_release(n);
	if (skb)
		__kfree_skb(skb);
	return -EINVAL;
}
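/*
 * Window sizing example (derived from the code above): when the module
 * parameters are left at their default of -1, the 10G defaults are scaled
 * by linkspeed / SPEED_10000. On a 40G port that gives a factor of 4,
 * i.e. rcv_win = 256KB * 4 = 1MB and snd_win = 128KB * 4 = 512KB; on a
 * 10G or slower link the factor is 1 (or 0, which leaves the default).
 */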
static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req_rss,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_FW4_ACK] = do_fw4_ack,
	[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
	[CPL_ISCSI_DATA] = do_rx_iscsi_data,
	[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
	[CPL_RX_DATA_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_CMP] = do_rx_iscsi_cmp,
	[CPL_RX_DATA] = do_rx_data,
};

static int cxgb4i_ofld_init(struct cxgbi_device *cdev)
{
	int rc;

	if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
		cxgb4i_max_connect = CXGB4I_MAX_CONN;

	rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
					 cxgb4i_max_connect);
	if (rc < 0)
		return rc;

	cdev->csk_release_offload_resources = release_offload_resources;
	cdev->csk_push_tx_frames = push_tx_frames;
	cdev->csk_send_abort_req = send_abort_req;
	cdev->csk_send_close_req = send_close_req;
	cdev->csk_send_rx_credits = send_rx_credits;
	cdev->csk_alloc_cpls = alloc_cpls;
	cdev->csk_init_act_open = init_act_open;

	pr_info("cdev 0x%p, offload up, added.\n", cdev);
	return 0;
}

static inline void
ulp_mem_io_set_hdr(struct cxgbi_device *cdev,
		   struct ulp_mem_io *req,
		   unsigned int wr_len, unsigned int dlen,
		   unsigned int pm_addr,
		   int tid)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

	INIT_ULPTX_WR(req, wr_len, 0, tid);
	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
		FW_WR_ATOMIC_V(0));
	req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
		ULP_MEMIO_ORDER_V(is_t4(lldi->adapter_type)) |
		T5_ULP_MEMIO_IMM_V(!is_t4(lldi->adapter_type)));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);
}

static struct sk_buff *
ddp_ppod_init_idata(struct cxgbi_device *cdev,
		    struct cxgbi_ppm *ppm,
		    unsigned int idx, unsigned int npods,
		    unsigned int tid)
{
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	unsigned int dlen = npods << PPOD_SIZE_SHIFT;
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				      sizeof(struct ulptx_idata) + dlen, 16);
	struct sk_buff *skb = alloc_wr(wr_len, 0, GFP_ATOMIC);

	if (!skb) {
		pr_err("%s: %s idx %u, npods %u, OOM.\n",
		       __func__, ppm->ndev->name, idx, npods);
		return NULL;
	}

	ulp_mem_io_set_hdr(cdev, (struct ulp_mem_io *)skb->head, wr_len, dlen,
			   pm_addr, tid);

	return skb;
}

static int ddp_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
				struct cxgbi_task_tag_info *ttinfo,
				unsigned int idx, unsigned int npods,
				struct scatterlist **sg_pp,
				unsigned int *sg_off)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct sk_buff *skb = ddp_ppod_init_idata(cdev, ppm, idx, npods,
						  csk->tid);
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct cxgbi_pagepod *ppod;
	int i;

	if (!skb)
		return -ENOMEM;

	req = (struct ulp_mem_io *)skb->head;
	idata = (struct ulptx_idata *)(req + 1);
	ppod = (struct cxgbi_pagepod *)(idata + 1);

	for (i = 0; i < npods; i++, ppod++)
		cxgbi_ddp_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);

	cxgbi_skcb_set_flag(skb, SKCBF_TX_MEM_WRITE);
	cxgbi_skcb_set_flag(skb, SKCBF_TX_FLAG_COMPL);
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);

	spin_lock_bh(&csk->lock);
	cxgbi_sock_skb_entail(csk, skb);
	spin_unlock_bh(&csk->lock);

	return 0;
}

static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
		       struct cxgbi_task_tag_info *ttinfo)
{
	unsigned int pidx = ttinfo->idx;
	unsigned int npods = ttinfo->npods;
	unsigned int i, cnt;
	int err = 0;
	struct scatterlist *sg = ttinfo->sgl;
	unsigned int offset = 0;

	ttinfo->cid = csk->port_id;

	for (i = 0; i < npods; i += cnt, pidx += cnt) {
		cnt = npods - i;

		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		err = ddp_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
					   &sg, &offset);
		if (err < 0)
			break;
	}

	return err;
}

static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
				int pg_idx)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* set up ulp page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
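	/*
	 * CPL_SET_TCB_FIELD is sent on the control queue and a reply is
	 * requested (NO_REPLY_V(0)); the reply handler (do_set_tcb_rpl in
	 * the CPL table above) completes csk->cmpl, and csk->err then
	 * carries the result of the TCB update.
	 */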
	reinit_completion(&csk->cmpl);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	wait_for_completion(&csk->cmpl);

	return csk->err;
}

static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				 int hcrc, int dcrc)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!hcrc && !dcrc)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	csk->hcrc_len = (hcrc ? 4 : 0);
	csk->dcrc_len = (dcrc ? 4 : 0);
	/* set up ulp submode */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);

	reinit_completion(&csk->cmpl);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	wait_for_completion(&csk->cmpl);

	return csk->err;
}

static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
{
	return (struct cxgbi_ppm *)(*((struct cxgb4_lld_info *)
				      (cxgbi_cdev_priv(cdev)))->iscsi_ppm);
}

static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[0];
	struct cxgbi_tag_format tformat;
	int i, err;

	if (!lldi->vr->iscsi.size) {
		pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
		return -EACCES;
	}

	cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ;

	memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
	for (i = 0; i < 4; i++)
		tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
					& 0xF;
	cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);

	pr_info("iscsi_edram.start 0x%x iscsi_edram.size 0x%x",
		lldi->vr->ppod_edram.start, lldi->vr->ppod_edram.size);

	err = cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat,
				  lldi->vr->iscsi.size, lldi->iscsi_llimit,
				  lldi->vr->iscsi.start, 2,
				  lldi->vr->ppod_edram.start,
				  lldi->vr->ppod_edram.size);

	if (err < 0)
		return err;

	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
	cdev->csk_ddp_set_map = ddp_set_map;
	cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->cdev2ppm = cdev2ppm;

	return 0;
}

static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct cxgbi_device *cdev;
	struct port_info *pi;
	int i, rc;

	cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
	if (!cdev) {
		pr_info("t4 device 0x%p, register failed.\n", lldi);
		return NULL;
	}
	pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
		cdev, lldi->adapter_type, lldi->nports,
		lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
		lldi->nrxq, lldi->wr_cred);
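	/*
	 * Log the ULD rx queue ids handed to us by cxgb4 (debug only), then
	 * take a private copy of the lld_info and cache the adapter
	 * parameters the iSCSI path needs on the cxgbi_device.
	 */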
	for (i = 0; i < lldi->nrxq; i++)
		log_debug(1 << CXGBI_DBG_DEV,
			  "t4 0x%p, rxq id #%d: %u.\n",
			  cdev, i, lldi->rxq_ids[i]);

	memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
	cdev->flags = CXGBI_FLAG_DEV_T4;
	cdev->pdev = lldi->pdev;
	cdev->ports = lldi->ports;
	cdev->nports = lldi->nports;
	cdev->mtus = lldi->mtus;
	cdev->nmtus = NMTUS;
	cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <=
				 CHELSIO_T5) ? cxgb4i_rx_credit_thres : 0;
	cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
	cdev->itp = &cxgb4i_iscsi_transport;
	cdev->owner = THIS_MODULE;

	cdev->pfvf = FW_PFVF_CMD_PFN_V(lldi->pf);
	pr_info("cdev 0x%p,%s, pfvf %u.\n",
		cdev, lldi->ports[0]->name, cdev->pfvf);

	rc = cxgb4i_ddp_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ddp init failed %d.\n", cdev, rc);
		goto err_out;
	}
	rc = cxgb4i_ofld_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ofld init failed.\n", cdev);
		goto err_out;
	}

	rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
			    &cxgb4i_host_template, cxgb4i_stt);
	if (rc)
		goto err_out;

	for (i = 0; i < cdev->nports; i++) {
		pi = netdev_priv(lldi->ports[i]);
		cdev->hbas[i]->port_id = pi->port_id;
	}
	return cdev;

err_out:
	cxgbi_device_unregister(cdev);
	return ERR_PTR(-ENOMEM);
}

#define RX_PULL_LEN 128
static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
			     const struct pkt_gl *pgl)
{
	const struct cpl_act_establish *rpl;
	struct sk_buff *skb;
	unsigned int opc;
	struct cxgbi_device *cdev = handle;

	if (pgl == NULL) {
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_wr(len, 0, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
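		/*
		 * Sanity check: the CPL opcode in the RSP descriptor must
		 * match the first byte of the packet in the free-list
		 * buffer; if they disagree the packet is logged and dropped.
		 */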
		if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				pgl->va, be64_to_cpu(*rsp),
				be64_to_cpu(*(u64 *)pgl->va),
				pgl->tot_len);
			return 0;
		}
		skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = (struct cpl_act_establish *)skb->data;
	opc = rpl->ot.opcode;
	log_debug(1 << CXGBI_DBG_TOE,
		  "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		  cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
	if (opc >= ARRAY_SIZE(cxgb4i_cplhandlers) || !cxgb4i_cplhandlers[opc]) {
		pr_err("No handler for opcode 0x%x.\n", opc);
		__kfree_skb(skb);
	} else
		cxgb4i_cplhandlers[opc](cdev, skb);

	return 0;
nomem:
	log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
	return 1;
}

static int t4_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct cxgbi_device *cdev = handle;

	switch (state) {
	case CXGB4_STATE_UP:
		pr_info("cdev 0x%p, UP.\n", cdev);
		break;
	case CXGB4_STATE_START_RECOVERY:
		pr_info("cdev 0x%p, RECOVERY.\n", cdev);
		/* close all connections */
		break;
	case CXGB4_STATE_DOWN:
		pr_info("cdev 0x%p, DOWN.\n", cdev);
		break;
	case CXGB4_STATE_DETACH:
		pr_info("cdev 0x%p, DETACH.\n", cdev);
		cxgbi_device_unregister(cdev);
		break;
	default:
		pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
		break;
	}
	return 0;
}

#ifdef CONFIG_CHELSIO_T4_DCB
static int
cxgb4_dcb_change_notify(struct notifier_block *self, unsigned long val,
			void *data)
{
	int i, port = 0xFF;
	struct net_device *ndev;
	struct cxgbi_device *cdev = NULL;
	struct dcb_app_type *iscsi_app = data;
	struct cxgbi_ports_map *pmap;
	u8 priority;

	if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
		if ((iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_STREAM) &&
		    (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY))
			return NOTIFY_DONE;

		priority = iscsi_app->app.priority;
	} else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
		if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
			return NOTIFY_DONE;

		if (!iscsi_app->app.priority)
			return NOTIFY_DONE;

		priority = ffs(iscsi_app->app.priority) - 1;
	} else {
		return NOTIFY_DONE;
	}

	if (iscsi_app->app.protocol != 3260)
		return NOTIFY_DONE;

	log_debug(1 << CXGBI_DBG_ISCSI, "iSCSI priority for ifid %d is %u\n",
		  iscsi_app->ifindex, priority);

	ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
	if (!ndev)
		return NOTIFY_DONE;

	cdev = cxgbi_device_find_by_netdev_rcu(ndev, &port);

	dev_put(ndev);
	if (!cdev)
		return NOTIFY_DONE;

	pmap = &cdev->pmap;

	for (i = 0; i < pmap->used; i++) {
		if (pmap->port_csk[i]) {
			struct cxgbi_sock *csk = pmap->port_csk[i];

			if (csk->dcb_priority != priority) {
				iscsi_conn_failure(csk->user_data,
						   ISCSI_ERR_CONN_FAILED);
				pr_info("Restarting iSCSI connection %p with "
					"priority %u->%u.\n", csk,
					csk->dcb_priority, priority);
			}
		}
	}
	return NOTIFY_OK;
}
#endif

static int __init cxgb4i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
	if (rc < 0)
		return rc;
	cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);

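	/*
	 * With DCB support built in, watch for iSCSI priority changes so
	 * that connections running at a stale priority can be torn down and
	 * re-established (see cxgb4_dcb_change_notify() above).
	 */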
#ifdef CONFIG_CHELSIO_T4_DCB
	pr_info("%s dcb enabled.\n", DRV_MODULE_NAME);
	register_dcbevent_notifier(&cxgb4_dcb_change);
#endif
	return 0;
}

static void __exit cxgb4i_exit_module(void)
{
#ifdef CONFIG_CHELSIO_T4_DCB
	unregister_dcbevent_notifier(&cxgb4_dcb_change);
#endif
	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
}

module_init(cxgb4i_init_module);
module_exit(cxgb4i_exit_module);
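
/*
 * Usage sketch (illustrative values only): the TCP window module
 * parameters can be overridden at load time, e.g.
 *
 *   modprobe cxgb4i cxgb4i_rcv_win=524288 cxgb4i_snd_win=262144
 *
 * When left at their default of -1, the per-connection windows are
 * derived from the 10Gbps defaults scaled by link speed (see
 * init_act_open()).
 */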