/*
 * cxgb4i.c: Chelsio T4 iSCSI driver.
 *
 * Copyright (c) 2010-2015 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by:	Karen Xie (kxie@chelsio.com)
 *		Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4fw_api.h"
#include "l2t.h"
#include "cxgb4i.h"
#include "clip_tbl.h"

static unsigned int dbg_level;

#include "../libcxgbi.h"

#ifdef CONFIG_CHELSIO_T4_DCB
#include <net/dcbevent.h>
#include "cxgb4_dcb.h"
#endif

#define DRV_MODULE_NAME		"cxgb4i"
#define DRV_MODULE_DESC		"Chelsio T4-T6 iSCSI Driver"
#define DRV_MODULE_VERSION	"0.9.5-ko"
#define DRV_MODULE_RELDATE	"Apr. 2015"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");

#define CXGB4I_DEFAULT_10G_RCV_WIN (256 * 1024)
static int cxgb4i_rcv_win = -1;
module_param(cxgb4i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");

#define CXGB4I_DEFAULT_10G_SND_WIN (128 * 1024)
static int cxgb4i_snd_win = -1;
module_param(cxgb4i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");

static int cxgb4i_rx_credit_thres = 10 * 1024;
module_param(cxgb4i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
		 "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb4i_max_connect = (8 * 1024);
module_param(cxgb4i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");

static unsigned short cxgb4i_sport_base = 20000;
module_param(cxgb4i_sport_base, ushort, 0644);
MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");

typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);

static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);
static inline int send_tx_flowc_wr(struct cxgbi_sock *);

static const struct cxgb4_uld_info cxgb4i_uld_info = {
	.name = DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 1024,
	.lro = false,
	.add = t4_uld_add,
	.rx_handler = t4_uld_rx_handler,
	.state_change = t4_uld_state_change,
};

static struct scsi_host_template cxgb4i_host_template = {
	.module		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.proc_name	= DRV_MODULE_NAME,
	.can_queue	= CXGB4I_SCSI_HOST_QDEPTH,
	.queuecommand	= iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.sg_tablesize	= SG_ALL,
	.max_sectors	= 0xFFFF,
	.cmd_per_lun	= ISCSI_DEF_CMD_PER_LUN,
	.eh_timed_out	= iscsi_eh_cmd_timed_out,
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc	= iscsi_target_alloc,
	.dma_boundary	= PAGE_SIZE - 1,
	.this_id	= -1,
	.track_queue_depth = 1,
};

static struct iscsi_transport cxgb4i_iscsi_transport = {
	.owner		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.caps		= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
			  CAP_DATADGST | CAP_DIGEST_OFFLOAD |
			  CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
	.attr_is_visible = cxgbi_attr_is_visible,
	.get_host_param	= cxgbi_get_host_param,
	.set_host_param	= cxgbi_set_host_param,
	/* session management */
	.create_session	= cxgbi_create_session,
	.destroy_session = cxgbi_destroy_session,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn	= cxgbi_create_conn,
	.bind_conn	= cxgbi_bind_conn,
	.destroy_conn	= iscsi_tcp_conn_teardown,
	.start_conn	= iscsi_conn_start,
	.stop_conn	= iscsi_conn_stop,
	.get_conn_param	= iscsi_conn_get_param,
	.set_param	= cxgbi_set_conn_param,
	.get_stats	= cxgbi_get_conn_stats,
	/* pdu xmit req from user space */
	.send_pdu	= iscsi_conn_send_pdu,
	/* task */
	.init_task	= iscsi_tcp_task_init,
	.xmit_task	= iscsi_tcp_task_xmit,
	.cleanup_task	= cxgbi_cleanup_task,
	/* pdu */
	.alloc_pdu	= cxgbi_conn_alloc_pdu,
	.init_pdu	= cxgbi_conn_init_pdu,
	.xmit_pdu	= cxgbi_conn_xmit_pdu,
	.parse_pdu_itt	= cxgbi_parse_pdu_itt,
	/* TCP connect/disconnect */
	.get_ep_param	= cxgbi_get_ep_param,
	.ep_connect	= cxgbi_ep_connect,
	.ep_poll	= cxgbi_ep_poll,
	.ep_disconnect	= cxgbi_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

#ifdef CONFIG_CHELSIO_T4_DCB
static int
cxgb4_dcb_change_notify(struct notifier_block *, unsigned long, void *);

static struct notifier_block cxgb4_dcb_change = {
	.notifier_call = cxgb4_dcb_change_notify,
};
#endif

static struct scsi_transport_template *cxgb4i_stt;

/*
 * CPL (Chelsio Protocol Language) defines a message passing interface between
 * the host driver and the Chelsio ASIC.
 * The section below implements the CPLs related to iSCSI TCP connection
 * open/close/abort and data send/receive.
 */

#define RCV_BUFSIZ_MASK		0x3FFU
#define MAX_IMM_TX_PKT_LEN	256

static int push_tx_frames(struct cxgbi_sock *, int);

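/*
 * Worked example (an illustration, assuming the usual 16-byte
 * fw_ofld_tx_data_wr header counted by is_ofld_imm() below): a PDU of up to
 * 240 bytes that still needs its WR header stays within the 256-byte
 * MAX_IMM_TX_PKT_LEN limit and is sent as immediate data.
 */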
/*
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data. We currently use the same limit as for Ethernet packets.
 */
static inline bool is_ofld_imm(const struct sk_buff *skb)
{
	int len = skb->len;

	if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
		len += sizeof(struct fw_ofld_tx_data_wr);

	return len <= MAX_IMM_TX_PKT_LEN;
}

static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
			      struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				(((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(csk->mss_idx) |
		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(csk->rcv_win >> 10);

	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F |
		RSS_QUEUE_V(csk->rss_qid);

	if (is_t4(lldi->adapter_type)) {
		struct cpl_act_open_req *req =
				(struct cpl_act_open_req *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be32(cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t));
		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	} else if (is_t5(lldi->adapter_type)) {
		struct cpl_t5_act_open_req *req =
				(struct cpl_t5_act_open_req *)skb->head;
		u32 isn = (prandom_u32() & ~7UL) - 1;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be64(FILTER_TUPLE_V(
				cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t)));
		req->rsvd = cpu_to_be32(isn);
		opt2 |= T5_ISS_VALID;
		opt2 |= T5_OPT_2_VALID_F;

		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	} else {
		struct cpl_t6_act_open_req *req =
				(struct cpl_t6_act_open_req *)skb->head;
		u32 isn = (prandom_u32() & ~7UL) - 1;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be64(FILTER_TUPLE_V(
				cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t)));
		req->rsvd = cpu_to_be32(isn);

		opt2 |= T5_ISS_VALID;
		opt2 |= RX_FC_DISABLE_F;
		opt2 |= T5_OPT_2_VALID_F;

		req->opt2 = cpu_to_be32(opt2);
		req->rsvd2 = cpu_to_be32(0);
		req->opt3 = cpu_to_be32(0);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       CHELSIO_CHIP_VERSION(lldi->adapter_type), csk,
		       csk->state, csk->flags, csk->atid, csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

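/*
 * IPv6 variant of the active open above: same opt0/opt2 setup, but the
 * 128-bit addresses are carried as hi/lo 64-bit halves of the request.
 */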
#if IS_ENABLED(CONFIG_IPV6)
static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
			       struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				(((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(csk->mss_idx) |
		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(csk->rcv_win >> 10);

	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F |
		RSS_QUEUE_V(csk->rss_qid);

	if (is_t4(lldi->adapter_type)) {
		struct cpl_act_open_req6 *req =
			    (struct cpl_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;

		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
									8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
									8);

		req->opt0 = cpu_to_be64(opt0);

		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be32(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t));
	} else if (is_t5(lldi->adapter_type)) {
		struct cpl_t5_act_open_req6 *req =
				(struct cpl_t5_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;
		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
									8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
									8);
		req->opt0 = cpu_to_be64(opt0);

		opt2 |= T5_OPT_2_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t)));
	} else {
		struct cpl_t6_act_open_req6 *req =
				(struct cpl_t6_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;
		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
									8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
									8);
		req->opt0 = cpu_to_be64(opt0);

		opt2 |= RX_FC_DISABLE_F;
		opt2 |= T5_OPT_2_VALID_F;

		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t)));

		req->rsvd2 = cpu_to_be32(0);
		req->opt3 = cpu_to_be32(0);
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
		CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state,
		csk->flags, csk->atid,
		&csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
		csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
#endif

static void send_close_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = csk->tid;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, tid %u.\n",
		  csk, csk->state, csk->flags, csk->tid);
	csk->cpl_close = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	cxgbi_sock_skb_entail(csk, skb);
	if (csk->state >= CTP_ESTABLISHED)
		push_tx_frames(csk, 1);
}

static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
	struct cpl_abort_req *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, tid %u, abort.\n",
		  csk, csk->state, csk->flags, csk->tid);
	req = (struct cpl_abort_req *)skb->data;
	req->cmd = CPL_ABORT_NO_RST;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

static void send_abort_req(struct cxgbi_sock *csk)
{
	struct cpl_abort_req *req;
	struct sk_buff *skb = csk->cpl_abort_req;

	if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
		return;

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_state(csk, CTP_ABORTING);
	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
	cxgbi_sock_purge_write_queue(csk);

	csk->cpl_abort_req = NULL;
	req = (struct cpl_abort_req *)skb->head;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	req->cmd = CPL_ABORT_SEND_RST;
	t4_set_arp_err_handler(skb, csk, abort_arp_failure);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
	req->rsvd0 = htonl(csk->snd_nxt);
	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
		  req->rsvd1);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
	struct sk_buff *skb = csk->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u, status %d.\n",
		  csk, csk->state, csk->flags, csk->tid, rst_status);

	csk->cpl_abort_rpl = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(rpl, csk->tid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
	rpl->cmd = rst_status;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
		  csk, csk->state, csk->flags, csk->tid, credits);

	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
	if (!skb) {
		pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
		return 0;
	}
	req = (struct cpl_rx_data_ack *)skb->head;

	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    csk->tid));
	req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
				       | RX_FORCE_ACK_F);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return credits;
}

/*
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

/*
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);
	flits = skb_transport_offset(skb) / 8;
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}

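/*
 * FlowC sizing, as a rough illustration (assuming the 8-byte fw_flowc_wr
 * header and 8-byte mnemval entries): 9 parameters take 8 + 9 * 8 = 80 bytes,
 * i.e. 5 16-byte credits; the extra DCB priority parameter grows this to
 * 88 bytes, rounded up to 96 bytes / 6 credits.
 */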
#define FLOWC_WR_NPARAMS_MIN	9
static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
{
	int nparams, flowclen16, flowclen;

	nparams = FLOWC_WR_NPARAMS_MIN;
#ifdef CONFIG_CHELSIO_T4_DCB
	nparams++;
#endif
	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;
	/*
	 * Return the number of 16-byte credits used by the FlowC request.
	 * Pass back the nparams and actual FlowC length if requested.
	 */
	if (nparamsp)
		*nparamsp = nparams;
	if (flowclenp)
		*flowclenp = flowclen;

	return flowclen16;
}

static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;
	struct fw_flowc_wr *flowc;
	int nparams, flowclen16, flowclen;

#ifdef CONFIG_CHELSIO_T4_DCB
	u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
#endif
	flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
	skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
	flowc = (struct fw_flowc_wr *)skb->head;
	flowc->op_to_nparams =
		htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 =
		htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htonl(csk->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htonl(csk->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htonl(csk->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = htonl(csk->snd_nxt);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = htonl(csk->rcv_nxt);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = htonl(csk->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = htonl(csk->advmss);
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
	flowc->mnemval[8].val = 16384;
#ifdef CONFIG_CHELSIO_T4_DCB
	flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
	if (vlan == CPL_L2T_VLAN_NONE) {
		pr_warn_ratelimited("csk %u without VLAN Tag on DCB Link\n",
				    csk->tid);
		flowc->mnemval[9].val = cpu_to_be32(0);
	} else {
		flowc->mnemval[9].val = cpu_to_be32((vlan & VLAN_PRIO_MASK) >>
						    VLAN_PRIO_SHIFT);
	}
#endif

	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
		  csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
		  csk->snd_nxt, csk->rcv_nxt, csk->snd_win,
		  csk->advmss);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);

	return flowclen16;
}

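/*
 * make_tx_data_wr - prepend a FW_OFLD_TX_DATA_WR header to an outgoing skb.
 * @dlen is the raw payload length, @len additionally covers any ULP
 * digest/padding bytes, and @credits is the WR size in 16-byte units already
 * charged against csk->wr_cred by the caller (push_tx_frames).
 */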
static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
				   int dlen, int len, u32 credits, int compl)
{
	struct fw_ofld_tx_data_wr *req;
	unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
	unsigned int wr_ulp_mode = 0, val;
	bool imm = is_ofld_imm(skb);

	req = __skb_push(skb, sizeof(*req));

	if (imm) {
		req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
					   FW_WR_COMPL_F |
					   FW_WR_IMMDLEN_V(dlen));
		req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
					  FW_WR_LEN16_V(credits));
	} else {
		req->op_to_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
				    FW_WR_COMPL_F |
				    FW_WR_IMMDLEN_V(0));
		req->flowid_len16 =
			cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
				    FW_WR_LEN16_V(credits));
	}
	if (submode)
		wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) |
			      FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
	val = skb_peek(&csk->write_queue) ? 0 : 1;
	req->tunnel_to_proxy = htonl(wr_ulp_mode |
				     FW_OFLD_TX_DATA_WR_SHOVE_V(val));
	req->plen = htonl(len);
	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
}

static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);
}

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;

	if (unlikely(csk->state < CTP_ESTABLISHED ||
		csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
			  1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		return 0;
	}

	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
		int dlen = skb->len;
		int len = skb->len;
		unsigned int credits_needed;
		int flowclen16 = 0;

		skb_reset_transport_header(skb);
		if (is_ofld_imm(skb))
			credits_needed = DIV_ROUND_UP(dlen, 16);
		else
			credits_needed = DIV_ROUND_UP(
						8 * calc_tx_flits_ofld(skb),
						16);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
			credits_needed += DIV_ROUND_UP(
					sizeof(struct fw_ofld_tx_data_wr),
					16);

		/*
		 * Assumes the initial credits is large enough to support
		 * fw_flowc_wr plus largest possible first payload
		 */
		if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
			flowclen16 = send_tx_flowc_wr(csk);
			csk->wr_cred -= flowclen16;
			csk->wr_una_cred += flowclen16;
			cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
		}

		if (csk->wr_cred < credits_needed) {
			log_debug(1 << CXGBI_DBG_PDU_TX,
				  "csk 0x%p, skb %u/%u, wr %d < %u.\n",
				  csk, skb->len, skb->data_len,
				  credits_needed, csk->wr_cred);
			break;
		}
		__skb_unlink(skb, &csk->write_queue);
		set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
		skb->csum = credits_needed + flowclen16;
		csk->wr_cred -= credits_needed;
		csk->wr_una_cred += credits_needed;
		cxgbi_sock_enqueue_wr(csk, skb);

		log_debug(1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
			  csk, skb->len, skb->data_len, credits_needed,
			  csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
			len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
			make_tx_data_wr(csk, skb, dlen, len, credits_needed,
					req_completion);
			csk->snd_nxt += len;
			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
		} else if (cxgbi_skcb_test_flag(skb, SKCBF_TX_FLAG_COMPL) &&
			   (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
			struct cpl_close_con_req *req =
				(struct cpl_close_con_req *)skb->data;
			req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
		}
		total_size += skb->truesize;
		t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
			  csk, csk->state, csk->flags, csk->tid, skb, len);

		cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
	}
	return total_size;
}

static inline void free_atid(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);

	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
		cxgb4_free_atid(lldi->tids, csk->atid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
		cxgbi_sock_put(csk);
	}
}

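/*
 * do_act_establish - active open completed: switch the connection from its
 * atid to the hardware tid, seed the receive sequence state from the peer's
 * ISN and push any PDUs already queued for transmit.
 */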
static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
	unsigned short tcp_opt = ntohs(req->tcp_opt);
	unsigned int tid = GET_TID(req);
	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 rcv_isn = be32_to_cpu(req->rcv_isn);

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
		goto rel_skb;
	}

	if (csk->atid != atid) {
		pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
		       atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
		goto rel_skb;
	}

	pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       atid, tid, csk, csk->state, csk->flags, rcv_isn);

	module_put(cdev->owner);

	cxgbi_sock_get(csk);
	csk->tid = tid;
	cxgb4_insert_tid(lldi->tids, csk, tid, csk->csk_family);
	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);

	free_atid(csk);

	spin_lock_bh(&csk->lock);
	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
			csk, csk->state, csk->flags, csk->tid);

	if (csk->retry_timer.function) {
		del_timer(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}

	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
	/*
	 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
	 * pass through opt0.
	 */
	if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10))
		csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10);

	csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
	if (TCPOPT_TSTAMP_G(tcp_opt))
		csk->advmss -= 12;
	if (csk->advmss < 128)
		csk->advmss = 128;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, mss_idx %u, advmss %u.\n",
		  csk, TCPOPT_MSS_G(tcp_opt), csk->advmss);

	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
		send_abort_req(csk);
	else {
		if (skb_queue_len(&csk->write_queue))
			push_tx_frames(csk, 0);
		cxgbi_conn_tx_open(csk);
	}
	spin_unlock_bh(&csk->lock);

rel_skb:
	__kfree_skb(skb);
}

static int act_open_rpl_status_to_errno(int status)
{
	switch (status) {
	case CPL_ERR_CONN_RESET:
		return -ECONNREFUSED;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

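/*
 * Retry timer armed by do_act_open_rpl() on CPL_ERR_CONN_EXIST: rebuild the
 * active open request and resend it on the same l2t entry.
 */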
static void csk_act_open_retry_timer(struct timer_list *t)
{
	struct sk_buff *skb = NULL;
	struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
				   struct l2t_entry *);
	int t4 = is_t4(lldi->adapter_type), size, size6;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (t4) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	}

	if (csk->csk_family == AF_INET) {
		send_act_open_func = send_act_open_req;
		skb = alloc_wr(size, 0, GFP_ATOMIC);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		send_act_open_func = send_act_open_req6;
		skb = alloc_wr(size6, 0, GFP_ATOMIC);
#endif
	}

	if (!skb)
		cxgbi_sock_fail_act_open(csk, -ENOMEM);
	else {
		skb->sk = (struct sock *)csk;
		t4_set_arp_err_handler(skb, csk,
				       cxgbi_sock_act_open_req_arp_failure);
		send_act_open_func(csk, skb, csk->l2t);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}

static inline bool is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
		status == CPL_ERR_KEEPALV_NEG_ADVICE ||
		status == CPL_ERR_PERSIST_NEG_ADVICE;
}

static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	unsigned int atid =
		TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status)));
	unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
		goto rel_skb;
	}

	pr_info_ipaddr("tid %u/%u, status %u.\n"
		       "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
		       atid, tid, status, csk, csk->state, csk->flags);

	if (is_neg_adv(status))
		goto rel_skb;

	module_put(cdev->owner);

	if (status && status != CPL_ERR_TCAM_FULL &&
	    status != CPL_ERR_CONN_EXIST &&
	    status != CPL_ERR_ARP_MISS)
		cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl),
				 csk->csk_family);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (status == CPL_ERR_CONN_EXIST &&
	    csk->retry_timer.function != csk_act_open_retry_timer) {
		csk->retry_timer.function = csk_act_open_retry_timer;
		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
	} else
		cxgbi_sock_fail_act_open(csk,
					 act_open_rpl_status_to_errno(status));

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_peer_close(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
rel_skb:
	__kfree_skb(skb);
}

static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
				 int *need_rst)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN: /* fall through */
	case CPL_ERR_CONN_RESET:
		return csk->state > CTP_ESTABLISHED ?
			-EPIPE : -ECONNRESET;
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return -ETIMEDOUT;
	default:
		return -EIO;
	}
}

static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	int rst_status = CPL_ABORT_NO_RST;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid, req->status);

	if (is_neg_adv(req->status))
		goto rel_skb;

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
	cxgbi_sock_set_state(csk, CTP_ABORTING);

	send_abort_rpl(csk, rst_status);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk)
		goto rel_skb;

	if (csk)
		pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
			       (&csk->saddr), (&csk->daddr), csk,
			       csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status == CPL_ERR_ABORT_FAILED)
		goto rel_skb;

	cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data;
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk) {
		pr_err("can't find connection for tid %u.\n", tid);
	} else {
		/* not expecting this, reset the connection. */
		pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid);
		spin_lock_bh(&csk->lock);
		send_abort_req(csk);
		spin_unlock_bh(&csk->lock);
	}
	__kfree_skb(skb);
}

static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, skb, skb->len,
		  pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr) {
		unsigned char *bhs;
		unsigned int hlen, dlen, plen;

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
			  csk, csk->state, csk->flags, csk->tid, skb);
		csk->skb_ulp_lhdr = skb;
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);

		if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
			pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
				csk->tid, cxgbi_skcb_tcp_seq(skb),
				csk->rcv_nxt);
			goto abort_conn;
		}

		bhs = skb->data;
		hlen = ntohs(cpl->len);
		dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;

		plen = ISCSI_PDU_LEN_G(pdu_len_ddp);
		if (is_t4(lldi->adapter_type))
			plen -= 40;

		if ((hlen + dlen) != plen) {
			pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
				"mismatch %u != %u + %u, seq 0x%x.\n",
				csk->tid, plen, hlen, dlen,
				cxgbi_skcb_tcp_seq(skb));
			goto abort_conn;
		}

		cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
		if (dlen)
			cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
		csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
			  csk, skb, *bhs, hlen, dlen,
			  ntohl(*((unsigned int *)(bhs + 16))),
			  ntohl(*((unsigned int *)(bhs + 24))));

	} else {
		struct sk_buff *lskb = csk->skb_ulp_lhdr;

		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
			  csk, csk->state, csk->flags, skb, lskb);
	}

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

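/*
 * CPL_ISCSI_DATA carries PDU payload separately from the header: the skb is
 * queued behind the pending header skb tracked in csk->skb_ulp_lhdr, which
 * is flagged as having its data received.
 */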
static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct sk_buff *lskb;
	u32 tid = GET_TID(cpl);
	u16 pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, skb,
		  skb->len, pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);

		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr)
		csk->skb_ulp_lhdr = skb;

	lskb = csk->skb_ulp_lhdr;
	cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
		  csk, csk->state, csk->flags, skb, lskb);

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

static void
cxgb4i_process_ddpvld(struct cxgbi_sock *csk,
		      struct sk_buff *skb, u32 ddpvld)
{
	if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
			csk, skb, ddpvld, cxgbi_skcb_flags(skb));
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
	}

	if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
			csk, skb, ddpvld, cxgbi_skcb_flags(skb));
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
	}

	if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
			  csk, skb, ddpvld);
		cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
	}

	if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
	    !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
			  csk, skb, ddpvld);
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
	}
}

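/*
 * CPL_RX_DATA_DDP / CPL_RX_ISCSI_DDP closes out the PDU started by the
 * preceding header CPL: it carries the DDP status bits and the data digest,
 * marks the stored header skb as complete and kicks PDU processing.
 */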
static void do_rx_data_ddp(struct cxgbi_device *cdev,
			   struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct sk_buff *lskb;
	struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 ddpvld = be32_to_cpu(rpl->ddpvld);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
		  csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	if (!csk->skb_ulp_lhdr) {
		pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
		goto abort_conn;
	}

	lskb = csk->skb_ulp_lhdr;
	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);

	if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
		pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
			csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));

	cxgb4i_process_ddpvld(csk, lskb, ddpvld);

	log_debug(1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
		  csk, lskb, cxgbi_skcb_flags(lskb));

	cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);
	goto rel_skb;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

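/*
 * CPL_RX_ISCSI_CMP: a combined completion that carries both the final PDU
 * header and the completion status in one message; any payload skb already
 * on the receive queue is re-queued behind it.
 */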
static void
do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct sk_buff *data_skb = NULL;
	u32 tid = GET_TID(rpl);
	u32 ddpvld = be32_to_cpu(rpl->ddpvld);
	u32 seq = be32_to_cpu(rpl->seq);
	u16 pdu_len_ddp = be16_to_cpu(rpl->pdu_len_ddp);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, "
		  "pdu_len_ddp %u, status %u.\n",
		  csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr,
		  ntohs(rpl->len), pdu_len_ddp, rpl->status);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);

		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = seq;
	cxgbi_skcb_flags(skb) = 0;
	cxgbi_skcb_rx_pdulen(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*rpl));
	__pskb_trim(skb, be16_to_cpu(rpl->len));

	csk->rcv_nxt = seq + pdu_len_ddp;

	if (csk->skb_ulp_lhdr) {
		data_skb = skb_peek(&csk->receive_queue);
		if (!data_skb ||
		    !cxgbi_skcb_test_flag(data_skb, SKCBF_RX_DATA)) {
			pr_err("Error! freelist data not found 0x%p, tid %u\n",
			       data_skb, tid);

			goto abort_conn;
		}
		__skb_unlink(data_skb, &csk->receive_queue);

		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA);

		__skb_queue_tail(&csk->receive_queue, skb);
		__skb_queue_tail(&csk->receive_queue, data_skb);
	} else {
		__skb_queue_tail(&csk->receive_queue, skb);
	}

	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
	cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
	cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL);
	cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc);

	cxgb4i_process_ddpvld(csk, skb, ddpvld);

	log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n",
		  csk, skb, cxgbi_skcb_flags(skb));

	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);

	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk))
		pr_err("can't find connection for tid %u.\n", tid);
	else {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u.\n",
			  csk, csk->state, csk->flags, csk->tid);
		cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
				      rpl->seq_vld);
	}
	__kfree_skb(skb);
}

static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct cxgbi_sock *csk;

	csk = lookup_tid(t, tid);
	if (!csk) {
		pr_err("can't find conn. for tid %u.\n", tid);
		__kfree_skb(skb);
		return;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status != CPL_ERR_NONE)
		pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
		       csk, tid, rpl->status);

	__kfree_skb(skb);
}

static int alloc_cpls(struct cxgbi_sock *csk)
{
	csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
				  0, GFP_KERNEL);
	if (!csk->cpl_close)
		return -ENOMEM;

	csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
				      0, GFP_KERNEL);
	if (!csk->cpl_abort_req)
		goto free_cpls;

	csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
				      0, GFP_KERNEL);
	if (!csk->cpl_abort_rpl)
		goto free_cpls;
	return 0;

free_cpls:
	cxgbi_sock_free_cpl_skbs(csk);
	return -ENOMEM;
}

static inline void l2t_put(struct cxgbi_sock *csk)
{
	if (csk->l2t) {
		cxgb4_l2t_release(csk->l2t);
		csk->l2t = NULL;
		cxgbi_sock_put(csk);
	}
}

static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi;
#if IS_ENABLED(CONFIG_IPV6)
	struct net_device *ndev = csk->cdev->ports[csk->port_id];
#endif

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_free_cpl_skbs(csk);
	cxgbi_sock_purge_write_queue(csk);
	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}

	l2t_put(csk);
#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_release(ndev,
				   (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif

	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		lldi = cxgbi_cdev_priv(csk->cdev);
		cxgb4_remove_tid(lldi->tids, 0, csk->tid,
				 csk->csk_family);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
}

#ifdef CONFIG_CHELSIO_T4_DCB
static inline u8 get_iscsi_dcb_state(struct net_device *ndev)
{
	return ndev->dcbnl_ops->getstate(ndev);
}

static int select_priority(int pri_mask)
{
	if (!pri_mask)
		return 0;
	return (ffs(pri_mask) - 1);
}

static u8 get_iscsi_dcb_priority(struct net_device *ndev)
{
	int rv;
	u8 caps;

	struct dcb_app iscsi_dcb_app = {
		.protocol = 3260
	};

	rv = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
	if (rv)
		return 0;

	if (caps & DCB_CAP_DCBX_VER_IEEE) {
		iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
		rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
	} else if (caps & DCB_CAP_DCBX_VER_CEE) {
		iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
		rv = dcb_getapp(ndev, &iscsi_dcb_app);
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		  "iSCSI priority is set to %u\n", select_priority(rv));
	return select_priority(rv);
}
#endif

static int init_act_open(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct sk_buff *skb = NULL;
	struct neighbour *n = NULL;
	void *daddr;
	unsigned int step;
	unsigned int rxq_idx;
	unsigned int size, size6;
	unsigned int linkspeed;
	unsigned int rcv_winf, snd_winf;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 priority = 0;
#endif
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	if (csk->csk_family == AF_INET)
		daddr = &csk->daddr.sin_addr.s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	else if (csk->csk_family == AF_INET6)
		daddr = &csk->daddr6.sin6_addr;
#endif
	else {
		pr_err("address family 0x%x not supported\n", csk->csk_family);
		goto rel_resource;
	}

	n = dst_neigh_lookup(csk->dst, daddr);

	if (!n) {
		pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
		goto rel_resource;
	}

	if (!(n->nud_state & NUD_VALID))
		neigh_event_send(n, NULL);

	csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
	if (csk->atid < 0) {
		pr_err("%s, NO atid available.\n", ndev->name);
		goto rel_resource_without_clip;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);

#ifdef CONFIG_CHELSIO_T4_DCB
	if (get_iscsi_dcb_state(ndev))
		priority = get_iscsi_dcb_priority(ndev);

	csk->dcb_priority = priority;
	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, priority);
#else
	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
#endif
	if (!csk->l2t) {
		pr_err("%s, cannot alloc l2t.\n", ndev->name);
		goto rel_resource_without_clip;
	}
	cxgbi_sock_get(csk);

#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif

	if (is_t4(lldi->adapter_type)) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else if (is_t5(lldi->adapter_type)) {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	} else {
		size = sizeof(struct cpl_t6_act_open_req);
		size6 = sizeof(struct cpl_t6_act_open_req6);
	}

	if (csk->csk_family == AF_INET)
		skb = alloc_wr(size, 0, GFP_NOIO);
#if IS_ENABLED(CONFIG_IPV6)
	else
		skb = alloc_wr(size6, 0, GFP_NOIO);
#endif

	if (!skb)
		goto rel_resource;
	skb->sk = (struct sock *)csk;
	t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);

	if (!csk->mtu)
		csk->mtu = dst_mtu(csk->dst);
	cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
	csk->tx_chan = cxgb4_port_chan(ndev);
	csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
	step = lldi->ntxq / lldi->nchan;
	csk->txq_idx = cxgb4_port_idx(ndev) * step;
	step = lldi->nrxq / lldi->nchan;
	rxq_idx = (cxgb4_port_idx(ndev) * step) + (cdev->rxq_idx_cntr % step);
	cdev->rxq_idx_cntr++;
	csk->rss_qid = lldi->rxq_ids[rxq_idx];
	linkspeed = ((struct port_info *)netdev_priv(ndev))->link_cfg.speed;
	csk->snd_win = cxgb4i_snd_win;
	csk->rcv_win = cxgb4i_rcv_win;
	if (cxgb4i_rcv_win <= 0) {
		csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN;
		rcv_winf = linkspeed / SPEED_10000;
		if (rcv_winf)
			csk->rcv_win *= rcv_winf;
	}
	if (cxgb4i_snd_win <= 0) {
		csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN;
		snd_winf = linkspeed / SPEED_10000;
		if (snd_winf)
			csk->snd_win *= snd_winf;
	}
	csk->wr_cred = lldi->wr_cred -
		       DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
	csk->wr_max_cred = csk->wr_cred;
	csk->wr_una_cred = 0;
	cxgbi_sock_reset_wr_list(csk);
	csk->err = 0;

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
		       (&csk->saddr), (&csk->daddr), csk, csk->state,
		       csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
		       csk->mtu, csk->mss_idx, csk->smac_idx);

	/* must wait for either a act_open_rpl or act_open_establish */
	if (!try_module_get(cdev->owner)) {
		pr_err("%s, try_module_get failed.\n", ndev->name);
		goto rel_resource;
	}

	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	if (csk->csk_family == AF_INET)
		send_act_open_req(csk, skb, csk->l2t);
#if IS_ENABLED(CONFIG_IPV6)
	else
		send_act_open_req6(csk, skb, csk->l2t);
#endif
	neigh_release(n);

	return 0;

rel_resource:
#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_release(ndev,
				   (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif
rel_resource_without_clip:
	if (n)
		neigh_release(n);
	if (skb)
		__kfree_skb(skb);
	return -EINVAL;
}

static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req_rss,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_FW4_ACK] = do_fw4_ack,
	[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
	[CPL_ISCSI_DATA] = do_rx_iscsi_data,
	[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
	[CPL_RX_DATA_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_CMP] = do_rx_iscsi_cmp,
	[CPL_RX_DATA] = do_rx_data,
};

static int cxgb4i_ofld_init(struct cxgbi_device *cdev)
{
	int rc;

	if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
		cxgb4i_max_connect = CXGB4I_MAX_CONN;

	rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
					 cxgb4i_max_connect);
	if (rc < 0)
		return rc;

	cdev->csk_release_offload_resources = release_offload_resources;
	cdev->csk_push_tx_frames = push_tx_frames;
	cdev->csk_send_abort_req = send_abort_req;
	cdev->csk_send_close_req = send_close_req;
	cdev->csk_send_rx_credits = send_rx_credits;
	cdev->csk_alloc_cpls = alloc_cpls;
	cdev->csk_init_act_open = init_act_open;

	pr_info("cdev 0x%p, offload up, added.\n", cdev);
	return 0;
}

static inline void
ulp_mem_io_set_hdr(struct cxgbi_device *cdev,
		   struct ulp_mem_io *req,
		   unsigned int wr_len, unsigned int dlen,
		   unsigned int pm_addr,
		   int tid)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

	INIT_ULPTX_WR(req, wr_len, 0, tid);
	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
		FW_WR_ATOMIC_V(0));
	req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
		ULP_MEMIO_ORDER_V(is_t4(lldi->adapter_type)) |
		T5_ULP_MEMIO_IMM_V(!is_t4(lldi->adapter_type)));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);
}

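/*
 * Build a ULP_TX memory-write work request covering @npods page pods starting
 * at pod index @idx. Each pod occupies 1 << PPOD_SIZE_SHIFT bytes (64 bytes
 * on these adapters, an assumption based on the shifts used below).
 */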
static struct sk_buff *
ddp_ppod_init_idata(struct cxgbi_device *cdev,
		    struct cxgbi_ppm *ppm,
		    unsigned int idx, unsigned int npods,
		    unsigned int tid)
{
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	unsigned int dlen = npods << PPOD_SIZE_SHIFT;
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				sizeof(struct ulptx_idata) + dlen, 16);
	struct sk_buff *skb = alloc_wr(wr_len, 0, GFP_ATOMIC);

	if (!skb) {
		pr_err("%s: %s idx %u, npods %u, OOM.\n",
		       __func__, ppm->ndev->name, idx, npods);
		return NULL;
	}

	ulp_mem_io_set_hdr(cdev, (struct ulp_mem_io *)skb->head, wr_len, dlen,
			   pm_addr, tid);

	return skb;
}

static int ddp_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
				struct cxgbi_task_tag_info *ttinfo,
				unsigned int idx, unsigned int npods,
				struct scatterlist **sg_pp,
				unsigned int *sg_off)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct sk_buff *skb = ddp_ppod_init_idata(cdev, ppm, idx, npods,
						  csk->tid);
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct cxgbi_pagepod *ppod;
	int i;

	if (!skb)
		return -ENOMEM;

	req = (struct ulp_mem_io *)skb->head;
	idata = (struct ulptx_idata *)(req + 1);
	ppod = (struct cxgbi_pagepod *)(idata + 1);

	for (i = 0; i < npods; i++, ppod++)
		cxgbi_ddp_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);

	cxgbi_skcb_set_flag(skb, SKCBF_TX_MEM_WRITE);
	cxgbi_skcb_set_flag(skb, SKCBF_TX_FLAG_COMPL);
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);

	spin_lock_bh(&csk->lock);
	cxgbi_sock_skb_entail(csk, skb);
	spin_unlock_bh(&csk->lock);

	return 0;
}

static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
		       struct cxgbi_task_tag_info *ttinfo)
{
	unsigned int pidx = ttinfo->idx;
	unsigned int npods = ttinfo->npods;
	unsigned int i, cnt;
	int err = 0;
	struct scatterlist *sg = ttinfo->sgl;
	unsigned int offset = 0;

	ttinfo->cid = csk->port_id;

	for (i = 0; i < npods; i += cnt, pidx += cnt) {
		cnt = npods - i;

		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		err = ddp_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
					   &sg, &offset);
		if (err < 0)
			break;
	}

	return err;
}

static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
				int pg_idx, bool reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* set up ulp page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}
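
/*
 * Enable or disable iSCSI header/data digests on an offloaded connection by
 * updating the ULP submode bits in the connection's hardware TCB.
 */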
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				 int hcrc, int dcrc, int reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!hcrc && !dcrc)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	csk->hcrc_len = (hcrc ? 4 : 0);
	csk->dcrc_len = (dcrc ? 4 : 0);
	/* set up ulp submode */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}

static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
{
	return (struct cxgbi_ppm *)(*((struct cxgb4_lld_info *)
				      (cxgbi_cdev_priv(cdev)))->iscsi_ppm);
}

static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[0];
	struct cxgbi_tag_format tformat;
	unsigned int ppmax;
	int i;

	if (!lldi->vr->iscsi.size) {
		pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
		return -EACCES;
	}

	cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ;
	ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;

	memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
	for (i = 0; i < 4; i++)
		tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
					& 0xF;
	cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);

	cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat, ppmax,
			    lldi->iscsi_llimit, lldi->vr->iscsi.start, 2);

	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
	cdev->csk_ddp_set_map = ddp_set_map;
	cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->cdev2ppm = cdev2ppm;

	return 0;
}

static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct cxgbi_device *cdev;
	struct port_info *pi;
	int i, rc;

	cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
	if (!cdev) {
		pr_info("t4 device 0x%p, register failed.\n", lldi);
		return NULL;
	}
	pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
		cdev, lldi->adapter_type, lldi->nports,
		lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
		lldi->nrxq, lldi->wr_cred);
	for (i = 0; i < lldi->nrxq; i++)
		log_debug(1 << CXGBI_DBG_DEV,
			  "t4 0x%p, rxq id #%d: %u.\n",
			  cdev, i, lldi->rxq_ids[i]);

	memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
	cdev->flags = CXGBI_FLAG_DEV_T4;
	cdev->pdev = lldi->pdev;
	cdev->ports = lldi->ports;
	cdev->nports = lldi->nports;
	cdev->mtus = lldi->mtus;
	cdev->nmtus = NMTUS;
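	/* a non-zero rx credit return threshold is only used on T4/T5 parts */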
	cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <=
				 CHELSIO_T5) ? cxgb4i_rx_credit_thres : 0;
	cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
	cdev->itp = &cxgb4i_iscsi_transport;
	cdev->owner = THIS_MODULE;

	cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
		     << FW_VIID_PFN_S;
	pr_info("cdev 0x%p,%s, pfvf %u.\n",
		cdev, lldi->ports[0]->name, cdev->pfvf);

	rc = cxgb4i_ddp_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ddp init failed.\n", cdev);
		goto err_out;
	}
	rc = cxgb4i_ofld_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ofld init failed.\n", cdev);
		goto err_out;
	}

	rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
			    &cxgb4i_host_template, cxgb4i_stt);
	if (rc)
		goto err_out;

	for (i = 0; i < cdev->nports; i++) {
		pi = netdev_priv(lldi->ports[i]);
		cdev->hbas[i]->port_id = pi->port_id;
	}
	return cdev;

err_out:
	cxgbi_device_unregister(cdev);
	return ERR_PTR(-ENOMEM);
}

#define RX_PULL_LEN 128
static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
			     const struct pkt_gl *pgl)
{
	const struct cpl_act_establish *rpl;
	struct sk_buff *skb;
	unsigned int opc;
	struct cxgbi_device *cdev = handle;

	if (pgl == NULL) {
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_wr(len, 0, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
		if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				pgl->va, be64_to_cpu(*rsp),
				be64_to_cpu(*(u64 *)pgl->va),
				pgl->tot_len);
			return 0;
		}
		skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = (struct cpl_act_establish *)skb->data;
	opc = rpl->ot.opcode;
	log_debug(1 << CXGBI_DBG_TOE,
		  "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		  cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
	if (opc >= ARRAY_SIZE(cxgb4i_cplhandlers) || !cxgb4i_cplhandlers[opc]) {
		pr_err("No handler for opcode 0x%x.\n", opc);
		__kfree_skb(skb);
	} else
		cxgb4i_cplhandlers[opc](cdev, skb);

	return 0;
nomem:
	log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
	return 1;
}

static int t4_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct cxgbi_device *cdev = handle;

	switch (state) {
	case CXGB4_STATE_UP:
		pr_info("cdev 0x%p, UP.\n", cdev);
		break;
	case CXGB4_STATE_START_RECOVERY:
		pr_info("cdev 0x%p, RECOVERY.\n", cdev);
		/* close all connections */
		break;
	case CXGB4_STATE_DOWN:
		pr_info("cdev 0x%p, DOWN.\n", cdev);
		break;
	case CXGB4_STATE_DETACH:
		pr_info("cdev 0x%p, DETACH.\n", cdev);
		cxgbi_device_unregister(cdev);
		break;
	default:
		pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
		break;
	}
	return 0;
}
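
/*
 * DCB notifier: when the switch advertises a new 802.1p priority for the
 * iSCSI application (TCP port 3260), restart any offloaded connection still
 * using a stale priority so that it reconnects with the new one.
 */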
#ifdef CONFIG_CHELSIO_T4_DCB
static int
cxgb4_dcb_change_notify(struct notifier_block *self, unsigned long val,
			void *data)
{
	int i, port = 0xFF;
	struct net_device *ndev;
	struct cxgbi_device *cdev = NULL;
	struct dcb_app_type *iscsi_app = data;
	struct cxgbi_ports_map *pmap;
	u8 priority;

	if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
		if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)
			return NOTIFY_DONE;

		priority = iscsi_app->app.priority;
	} else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
		if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
			return NOTIFY_DONE;

		if (!iscsi_app->app.priority)
			return NOTIFY_DONE;

		priority = ffs(iscsi_app->app.priority) - 1;
	} else {
		return NOTIFY_DONE;
	}

	if (iscsi_app->app.protocol != 3260)
		return NOTIFY_DONE;

	log_debug(1 << CXGBI_DBG_ISCSI, "iSCSI priority for ifid %d is %u\n",
		  iscsi_app->ifindex, priority);

	ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
	if (!ndev)
		return NOTIFY_DONE;

	cdev = cxgbi_device_find_by_netdev_rcu(ndev, &port);

	dev_put(ndev);
	if (!cdev)
		return NOTIFY_DONE;

	pmap = &cdev->pmap;

	for (i = 0; i < pmap->used; i++) {
		if (pmap->port_csk[i]) {
			struct cxgbi_sock *csk = pmap->port_csk[i];

			if (csk->dcb_priority != priority) {
				iscsi_conn_failure(csk->user_data,
						   ISCSI_ERR_CONN_FAILED);
				pr_info("Restarting iSCSI connection %p with "
					"priority %u->%u.\n", csk,
					csk->dcb_priority, priority);
			}
		}
	}
	return NOTIFY_OK;
}
#endif

static int __init cxgb4i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
	if (rc < 0)
		return rc;
	cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);

#ifdef CONFIG_CHELSIO_T4_DCB
	pr_info("%s dcb enabled.\n", DRV_MODULE_NAME);
	register_dcbevent_notifier(&cxgb4_dcb_change);
#endif
	return 0;
}

static void __exit cxgb4i_exit_module(void)
{
#ifdef CONFIG_CHELSIO_T4_DCB
	unregister_dcbevent_notifier(&cxgb4_dcb_change);
#endif
	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
}

module_init(cxgb4i_init_module);
module_exit(cxgb4i_exit_module);