/*
 * cxgb4i.c: Chelsio T4 iSCSI driver.
 *
 * Copyright (c) 2010-2015 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by:	Karen Xie (kxie@chelsio.com)
 *		Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4fw_api.h"
#include "l2t.h"
#include "cxgb4i.h"
#include "clip_tbl.h"

static unsigned int dbg_level;

#include "../libcxgbi.h"

#define	DRV_MODULE_NAME		"cxgb4i"
#define DRV_MODULE_DESC		"Chelsio T4/T5 iSCSI Driver"
#define	DRV_MODULE_VERSION	"0.9.5-ko"
#define DRV_MODULE_RELDATE	"Apr. 2015"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");

#define CXGB4I_DEFAULT_10G_RCV_WIN (256 * 1024)
static int cxgb4i_rcv_win = -1;
module_param(cxgb4i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");

#define CXGB4I_DEFAULT_10G_SND_WIN (128 * 1024)
static int cxgb4i_snd_win = -1;
module_param(cxgb4i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");

static int cxgb4i_rx_credit_thres = 10 * 1024;
module_param(cxgb4i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
		 "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb4i_max_connect = (8 * 1024);
module_param(cxgb4i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");

static unsigned short cxgb4i_sport_base = 20000;
module_param(cxgb4i_sport_base, ushort, 0644);
MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");

typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);

static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);
static inline int send_tx_flowc_wr(struct cxgbi_sock *);

static const struct cxgb4_uld_info cxgb4i_uld_info = {
	.name = DRV_MODULE_NAME,
	.add = t4_uld_add,
	.rx_handler = t4_uld_rx_handler,
	.state_change = t4_uld_state_change,
};

static struct scsi_host_template cxgb4i_host_template = {
	.module		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.proc_name	= DRV_MODULE_NAME,
	.can_queue	= CXGB4I_SCSI_HOST_QDEPTH,
	.queuecommand	= iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.sg_tablesize	= SG_ALL,
	.max_sectors	= 0xFFFF,
	.cmd_per_lun	= ISCSI_DEF_CMD_PER_LUN,
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc	= iscsi_target_alloc,
	.use_clustering	= DISABLE_CLUSTERING,
	.this_id	= -1,
	.track_queue_depth = 1,
};
static struct iscsi_transport cxgb4i_iscsi_transport = {
	.owner		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.caps		= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
				CAP_DATADGST | CAP_DIGEST_OFFLOAD |
				CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
	.attr_is_visible	= cxgbi_attr_is_visible,
	.get_host_param	= cxgbi_get_host_param,
	.set_host_param	= cxgbi_set_host_param,
	/* session management */
	.create_session	= cxgbi_create_session,
	.destroy_session	= cxgbi_destroy_session,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn	= cxgbi_create_conn,
	.bind_conn	= cxgbi_bind_conn,
	.destroy_conn	= iscsi_tcp_conn_teardown,
	.start_conn	= iscsi_conn_start,
	.stop_conn	= iscsi_conn_stop,
	.get_conn_param	= iscsi_conn_get_param,
	.set_param	= cxgbi_set_conn_param,
	.get_stats	= cxgbi_get_conn_stats,
	/* pdu xmit req from user space */
	.send_pdu	= iscsi_conn_send_pdu,
	/* task */
	.init_task	= iscsi_tcp_task_init,
	.xmit_task	= iscsi_tcp_task_xmit,
	.cleanup_task	= cxgbi_cleanup_task,
	/* pdu */
	.alloc_pdu	= cxgbi_conn_alloc_pdu,
	.init_pdu	= cxgbi_conn_init_pdu,
	.xmit_pdu	= cxgbi_conn_xmit_pdu,
	.parse_pdu_itt	= cxgbi_parse_pdu_itt,
	/* TCP connect/disconnect */
	.get_ep_param	= cxgbi_get_ep_param,
	.ep_connect	= cxgbi_ep_connect,
	.ep_poll	= cxgbi_ep_poll,
	.ep_disconnect	= cxgbi_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

static struct scsi_transport_template *cxgb4i_stt;

/*
 * CPL (Chelsio Protocol Language) defines a message passing interface between
 * the host driver and the Chelsio ASIC.
 * The section below implements the CPLs related to iSCSI TCP connection
 * open/close/abort and data send/receive.
 */

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define RCV_BUFSIZ_MASK		0x3FFU
#define MAX_IMM_TX_PKT_LEN	128

static int push_tx_frames(struct cxgbi_sock *, int);

/*
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data. We currently use the same limit as for Ethernet packets.
 */
static inline bool is_ofld_imm(const struct sk_buff *skb)
{
	int len = skb->len;

	if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
		len += sizeof(struct fw_ofld_tx_data_wr);

	return len <= MAX_IMM_TX_PKT_LEN;
}
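/*
 * Worked example (illustrative only): with MAX_IMM_TX_PKT_LEN at 128 bytes,
 * a 48-byte iSCSI PDU that still needs its fw_ofld_tx_data_wr header is
 * immediate only if 48 + sizeof(struct fw_ofld_tx_data_wr) <= 128; larger
 * payloads are instead handed to the hardware as a scatter/gather list and
 * DMAed from the original buffers (see calc_tx_flits_ofld() below).
 */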
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
			      struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int t4 = is_t4(lldi->adapter_type);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				 (((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(csk->mss_idx) |
		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(csk->rcv_win >> 10);
	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F |
		(RX_FC_DISABLE_F) |
		RSS_QUEUE_V(csk->rss_qid);

	if (is_t4(lldi->adapter_type)) {
		struct cpl_act_open_req *req =
				(struct cpl_act_open_req *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be32(cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t));
		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	} else {
		struct cpl_t5_act_open_req *req =
				(struct cpl_t5_act_open_req *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be64(FILTER_TUPLE_V(
				cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t)));
		opt2 |= 1 << 31;
		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
		       (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk,
		       csk->state, csk->flags, csk->atid, csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
#if IS_ENABLED(CONFIG_IPV6)
static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
			       struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int t4 = is_t4(lldi->adapter_type);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				 (((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(csk->mss_idx) |
		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(csk->rcv_win >> 10);

	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F |
		RX_FC_DISABLE_F |
		RSS_QUEUE_V(csk->rss_qid);

	if (t4) {
		struct cpl_act_open_req6 *req =
			    (struct cpl_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;

		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
									8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
									8);

		req->opt0 = cpu_to_be64(opt0);

		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be32(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t));
	} else {
		struct cpl_t5_act_open_req6 *req =
				(struct cpl_t5_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;
		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
									8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
									8);
		req->opt0 = cpu_to_be64(opt0);

		opt2 |= T5_OPT_2_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t)));
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
		t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid,
		&csk->saddr6.sin6_addr, ntohs(csk->saddr6.sin6_port),
		&csk->daddr6.sin6_addr, ntohs(csk->daddr6.sin6_port),
		csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
#endif
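/*
 * Note on the qid_atid encoding used by both active-open builders above:
 * the low 14 bits carry the driver's atid and the bits above them carry
 * the RSS queue to which replies should be steered. For example (numbers
 * purely illustrative), atid 0x123 with rss_qid 5 packs to
 * 0x123 | (5 << 14) == 0x14123, which MK_OPCODE_TID() then combines with
 * the CPL opcode.
 */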
static void send_close_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = csk->tid;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, tid %u.\n",
		  csk, csk->state, csk->flags, csk->tid);
	csk->cpl_close = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	cxgbi_sock_skb_entail(csk, skb);
	if (csk->state >= CTP_ESTABLISHED)
		push_tx_frames(csk, 1);
}

static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
	struct cpl_abort_req *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, tid %u, abort.\n",
		  csk, csk->state, csk->flags, csk->tid);
	req = (struct cpl_abort_req *)skb->data;
	req->cmd = CPL_ABORT_NO_RST;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

static void send_abort_req(struct cxgbi_sock *csk)
{
	struct cpl_abort_req *req;
	struct sk_buff *skb = csk->cpl_abort_req;

	if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
		return;

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_state(csk, CTP_ABORTING);
	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
	cxgbi_sock_purge_write_queue(csk);

	csk->cpl_abort_req = NULL;
	req = (struct cpl_abort_req *)skb->head;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	req->cmd = CPL_ABORT_SEND_RST;
	t4_set_arp_err_handler(skb, csk, abort_arp_failure);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
	req->rsvd0 = htonl(csk->snd_nxt);
	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
		  req->rsvd1);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
	struct sk_buff *skb = csk->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u, status %d.\n",
		  csk, csk->state, csk->flags, csk->tid, rst_status);

	csk->cpl_abort_rpl = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(rpl, csk->tid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
	rpl->cmd = rst_status;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}
/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
		  csk, csk->state, csk->flags, csk->tid, credits);

	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
	if (!skb) {
		pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
		return 0;
	}
	req = (struct cpl_rx_data_ack *)skb->head;

	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
				      csk->tid));
	req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
				       | RX_FORCE_ACK_F);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return credits;
}

/*
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

/*
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);
	flits = skb_transport_offset(skb) / 8;
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}
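/*
 * Worked example of the flit math above (a flit is 8 bytes): an SGL with
 * n = 8 entries needs sgl_len(8) = (3 * 7) / 2 + (7 & 1) + 2 = 13 flits,
 * i.e. 104 bytes. push_tx_frames() converts flits to the scheduler's
 * 16-byte credits with DIV_ROUND_UP(8 * flits, 16), so that same SGL
 * costs DIV_ROUND_UP(104, 16) = 7 credits.
 */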
#define FLOWC_WR_NPARAMS_MIN	9
static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
{
	int nparams, flowclen16, flowclen;

	nparams = FLOWC_WR_NPARAMS_MIN;
	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;
	/*
	 * Return the number of 16-byte credits used by the FlowC request.
	 * Pass back the nparams and actual FlowC length if requested.
	 */
	if (nparamsp)
		*nparamsp = nparams;
	if (flowclenp)
		*flowclenp = flowclen;

	return flowclen16;
}

static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;
	struct fw_flowc_wr *flowc;
	int nparams, flowclen16, flowclen;

	flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
	skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
	if (!skb)
		return 0;
	flowc = (struct fw_flowc_wr *)skb->head;
	flowc->op_to_nparams =
		htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 =
		htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htonl(csk->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htonl(csk->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htonl(csk->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = htonl(csk->snd_nxt);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = htonl(csk->rcv_nxt);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = htonl(csk->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = htonl(csk->advmss);
	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
	flowc->mnemval[8].val = htonl(16384);

	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
		  csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
		  csk->snd_nxt, csk->rcv_nxt, csk->snd_win,
		  csk->advmss);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);

	return flowclen16;
}
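/*
 * Sizing example for the FlowC WR above (assuming the t4fw_api.h layout of
 * an 8-byte fw_flowc_wr header followed by 8-byte fw_flowc_mnemval pairs):
 * nparams = 9 gives flowclen = 8 + 9 * 8 = 80 bytes, so
 * flowclen16 = DIV_ROUND_UP(80, 16) = 5 credits, and the WR is padded back
 * out to 5 * 16 = 80 bytes. push_tx_frames() charges these credits against
 * csk->wr_cred before the first data WR goes out.
 */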
static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
				   int dlen, int len, u32 credits, int compl)
{
	struct fw_ofld_tx_data_wr *req;
	unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
	unsigned int wr_ulp_mode = 0, val;
	bool imm = is_ofld_imm(skb);

	req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));

	if (imm) {
		req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
					FW_WR_COMPL_F |
					FW_WR_IMMDLEN_V(dlen));
		req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
					FW_WR_LEN16_V(credits));
	} else {
		req->op_to_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
					FW_WR_COMPL_F |
					FW_WR_IMMDLEN_V(0));
		req->flowid_len16 =
			cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
					FW_WR_LEN16_V(credits));
	}
	if (submode)
		wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) |
				FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
	val = skb_peek(&csk->write_queue) ? 0 : 1;
	req->tunnel_to_proxy = htonl(wr_ulp_mode |
				     FW_OFLD_TX_DATA_WR_SHOVE_V(val));
	req->plen = htonl(len);
	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
}

static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);
}

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;

	if (unlikely(csk->state < CTP_ESTABLISHED ||
		     csk->state == CTP_CLOSE_WAIT_1 ||
		     csk->state >= CTP_ABORTING)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
			  1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		return 0;
	}

	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
		int dlen = skb->len;
		int len = skb->len;
		unsigned int credits_needed;
		int flowclen16 = 0;

		skb_reset_transport_header(skb);
		if (is_ofld_imm(skb))
			credits_needed = DIV_ROUND_UP(dlen, 16);
		else
			credits_needed = DIV_ROUND_UP(
						8 * calc_tx_flits_ofld(skb),
						16);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
			credits_needed += DIV_ROUND_UP(
					sizeof(struct fw_ofld_tx_data_wr),
					16);

		/*
		 * Assumes the initial credits is large enough to support
		 * fw_flowc_wr plus largest possible first payload
		 */
		if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
			flowclen16 = send_tx_flowc_wr(csk);
			csk->wr_cred -= flowclen16;
			csk->wr_una_cred += flowclen16;
			cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
		}

		if (csk->wr_cred < credits_needed) {
			log_debug(1 << CXGBI_DBG_PDU_TX,
				  "csk 0x%p, skb %u/%u, wr %d < %u.\n",
				  csk, skb->len, skb->data_len,
				  credits_needed, csk->wr_cred);
			break;
		}
		__skb_unlink(skb, &csk->write_queue);
		set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
		skb->csum = credits_needed + flowclen16;
		csk->wr_cred -= credits_needed;
		csk->wr_una_cred += credits_needed;
		cxgbi_sock_enqueue_wr(csk, skb);

		log_debug(1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
			  csk, skb->len, skb->data_len, credits_needed,
			  csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
			len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
			make_tx_data_wr(csk, skb, dlen, len, credits_needed,
					req_completion);
			csk->snd_nxt += len;
			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
		}
		total_size += skb->truesize;
		t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
			  csk, csk->state, csk->flags, csk->tid, skb, len);

		cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
	}
	return total_size;
}
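/*
 * Credit-accounting example for the loop above (illustrative numbers): a
 * 100-byte PDU that qualifies as immediate data costs
 * DIV_ROUND_UP(100, 16) = 7 credits plus
 * DIV_ROUND_UP(sizeof(struct fw_ofld_tx_data_wr), 16) for its WR header.
 * The total is stashed in skb->csum so that do_fw4_ack() ->
 * cxgbi_sock_rcv_wr_ack() can return exactly that many credits to
 * csk->wr_cred once the firmware acknowledges the WR.
 */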
static inline void free_atid(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);

	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
		cxgb4_free_atid(lldi->tids, csk->atid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
		cxgbi_sock_put(csk);
	}
}

static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
	unsigned short tcp_opt = ntohs(req->tcp_opt);
	unsigned int tid = GET_TID(req);
	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 rcv_isn = be32_to_cpu(req->rcv_isn);

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
		goto rel_skb;
	}

	if (csk->atid != atid) {
		pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
		       atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
		goto rel_skb;
	}

	pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       atid, tid, csk, csk->state, csk->flags, rcv_isn);

	module_put(THIS_MODULE);

	cxgbi_sock_get(csk);
	csk->tid = tid;
	cxgb4_insert_tid(lldi->tids, csk, tid);
	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);

	free_atid(csk);

	spin_lock_bh(&csk->lock);
	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
			csk, csk->state, csk->flags, csk->tid);

	if (csk->retry_timer.function) {
		del_timer(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}

	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
	/*
	 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
	 * pass through opt0.
	 */
	if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10))
		csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10);

	csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
	if (TCPOPT_TSTAMP_G(tcp_opt))
		csk->advmss -= 12;
	if (csk->advmss < 128)
		csk->advmss = 128;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, mss_idx %u, advmss %u.\n",
		  csk, TCPOPT_MSS_G(tcp_opt), csk->advmss);

	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
		send_abort_req(csk);
	else {
		if (skb_queue_len(&csk->write_queue))
			push_tx_frames(csk, 0);
		cxgbi_conn_tx_open(csk);
	}
	spin_unlock_bh(&csk->lock);

rel_skb:
	__kfree_skb(skb);
}

static int act_open_rpl_status_to_errno(int status)
{
	switch (status) {
	case CPL_ERR_CONN_RESET:
		return -ECONNREFUSED;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}
static void csk_act_open_retry_timer(unsigned long data)
{
	struct sk_buff *skb = NULL;
	struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
				   struct l2t_entry *);
	int t4 = is_t4(lldi->adapter_type), size, size6;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (t4) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	}

	if (csk->csk_family == AF_INET) {
		send_act_open_func = send_act_open_req;
		skb = alloc_wr(size, 0, GFP_ATOMIC);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		send_act_open_func = send_act_open_req6;
		skb = alloc_wr(size6, 0, GFP_ATOMIC);
#endif
	}

	if (!skb)
		cxgbi_sock_fail_act_open(csk, -ENOMEM);
	else {
		skb->sk = (struct sock *)csk;
		t4_set_arp_err_handler(skb, csk,
				       cxgbi_sock_act_open_req_arp_failure);
		send_act_open_func(csk, skb, csk->l2t);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}

static inline bool is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
		status == CPL_ERR_KEEPALV_NEG_ADVICE ||
		status == CPL_ERR_PERSIST_NEG_ADVICE;
}

static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	unsigned int atid =
		TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status)));
	unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
		goto rel_skb;
	}

	pr_info_ipaddr("tid %u/%u, status %u.\n"
		       "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
		       atid, tid, status, csk, csk->state, csk->flags);

	if (is_neg_adv(status))
		goto rel_skb;

	module_put(THIS_MODULE);

	if (status && status != CPL_ERR_TCAM_FULL &&
	    status != CPL_ERR_CONN_EXIST &&
	    status != CPL_ERR_ARP_MISS)
		cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl));

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (status == CPL_ERR_CONN_EXIST &&
	    csk->retry_timer.function != csk_act_open_retry_timer) {
		csk->retry_timer.function = csk_act_open_retry_timer;
		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
	} else
		cxgbi_sock_fail_act_open(csk,
					 act_open_rpl_status_to_errno(status));

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_peer_close(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
rel_skb:
	__kfree_skb(skb);
}
static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
				 int *need_rst)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN:	/* fall through */
	case CPL_ERR_CONN_RESET:
		return csk->state > CTP_ESTABLISHED ?
			-EPIPE : -ECONNRESET;
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return -ETIMEDOUT;
	default:
		return -EIO;
	}
}

static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	int rst_status = CPL_ABORT_NO_RST;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid, req->status);

	if (is_neg_adv(req->status))
		goto rel_skb;

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
	cxgbi_sock_set_state(csk, CTP_ABORTING);

	send_abort_rpl(csk, rst_status);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}
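/*
 * Note on the abort handshake: an abort initiated locally by
 * send_abort_req() sets CTPF_ABORT_RPL_PENDING and is completed when the
 * peer's CPL_ABORT_RPL_RSS arrives in do_abort_rpl_rss() below. For a
 * peer-initiated abort handled here, that flag is clear, so the connection
 * is failed immediately with an errno derived from the CPL status; when
 * both sides abort at once, the pending-reply path wins and the teardown
 * is finished by the reply handler instead.
 */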
static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk)
		goto rel_skb;

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
		       (&csk->saddr), (&csk->daddr), csk,
		       csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status == CPL_ERR_ABORT_FAILED)
		goto rel_skb;

	cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data;
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk) {
		pr_err("can't find connection for tid %u.\n", tid);
	} else {
		/* not expecting this, reset the connection. */
		pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid);
		spin_lock_bh(&csk->lock);
		send_abort_req(csk);
		spin_unlock_bh(&csk->lock);
	}
	__kfree_skb(skb);
}
static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, skb, skb->len,
		  pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr) {
		unsigned char *bhs;
		unsigned int hlen, dlen, plen;

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
			  csk, csk->state, csk->flags, csk->tid, skb);
		csk->skb_ulp_lhdr = skb;
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);

		if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
			pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
				csk->tid, cxgbi_skcb_tcp_seq(skb),
				csk->rcv_nxt);
			goto abort_conn;
		}

		bhs = skb->data;
		hlen = ntohs(cpl->len);
		dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;

		plen = ISCSI_PDU_LEN_G(pdu_len_ddp);
		if (is_t4(lldi->adapter_type))
			plen -= 40;

		if ((hlen + dlen) != plen) {
			pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
				"mismatch %u != %u + %u, seq 0x%x.\n",
				csk->tid, plen, hlen, dlen,
				cxgbi_skcb_tcp_seq(skb));
			goto abort_conn;
		}

		cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
		if (dlen)
			cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
		csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
			  csk, skb, *bhs, hlen, dlen,
			  ntohl(*((unsigned int *)(bhs + 16))),
			  ntohl(*((unsigned int *)(bhs + 24))));

	} else {
		struct sk_buff *lskb = csk->skb_ulp_lhdr;

		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
			  csk, csk->state, csk->flags, skb, lskb);
	}

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

static void do_rx_data_ddp(struct cxgbi_device *cdev,
			   struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct sk_buff *lskb;
	struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	unsigned int status = ntohl(rpl->ddpvld);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
		  csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	if (!csk->skb_ulp_lhdr) {
		pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
		goto abort_conn;
	}

	lskb = csk->skb_ulp_lhdr;
	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);

	if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
		pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
			csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));

	if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
			csk, lskb, status, cxgbi_skcb_flags(lskb));
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
	}
	if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
			csk, lskb, status, cxgbi_skcb_flags(lskb));
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
	}
	if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
			  csk, lskb, status);
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
	}
	if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
	    !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
			  csk, lskb, status);
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
	}
	log_debug(1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
		  csk, lskb, cxgbi_skcb_flags(lskb));

	cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);
	goto rel_skb;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
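/*
 * Receive-path summary for the two handlers above: each iSCSI PDU arrives
 * as up to three messages. CPL_ISCSI_HDR carries the BHS and is remembered
 * in csk->skb_ulp_lhdr (SKCBF_RX_HDR); an optional second CPL carries
 * non-DDP'ed payload and is flagged SKCBF_RX_DATA against that header; the
 * closing CPL_RX_DATA_DDP/CPL_RX_ISCSI_DDP reports digest and DDP status,
 * sets SKCBF_RX_STATUS, and only then is the PDU handed to libcxgbi via
 * cxgbi_conn_pdu_ready().
 */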
static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk))
		pr_err("can't find connection for tid %u.\n", tid);
	else {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u.\n",
			  csk, csk->state, csk->flags, csk->tid);
		cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
				      rpl->seq_vld);
	}
	__kfree_skb(skb);
}

static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct cxgbi_sock *csk;

	csk = lookup_tid(t, tid);
	if (!csk) {
		pr_err("can't find conn. for tid %u.\n", tid);
		__kfree_skb(skb);
		return;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status != CPL_ERR_NONE)
		pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
		       csk, tid, rpl->status);

	__kfree_skb(skb);
}

static int alloc_cpls(struct cxgbi_sock *csk)
{
	csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
				  0, GFP_KERNEL);
	if (!csk->cpl_close)
		return -ENOMEM;

	csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
				      0, GFP_KERNEL);
	if (!csk->cpl_abort_req)
		goto free_cpls;

	csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
				      0, GFP_KERNEL);
	if (!csk->cpl_abort_rpl)
		goto free_cpls;
	return 0;

free_cpls:
	cxgbi_sock_free_cpl_skbs(csk);
	return -ENOMEM;
}

static inline void l2t_put(struct cxgbi_sock *csk)
{
	if (csk->l2t) {
		cxgb4_l2t_release(csk->l2t);
		csk->l2t = NULL;
		cxgbi_sock_put(csk);
	}
}

static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi;
#if IS_ENABLED(CONFIG_IPV6)
	struct net_device *ndev = csk->cdev->ports[csk->port_id];
#endif

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_free_cpl_skbs(csk);
	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}

	l2t_put(csk);
#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_release(ndev,
				   (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif

	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		lldi = cxgbi_cdev_priv(csk->cdev);
		cxgb4_remove_tid(lldi->tids, 0, csk->tid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
	csk->cdev = NULL;
}
static int init_act_open(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct sk_buff *skb = NULL;
	struct neighbour *n = NULL;
	void *daddr;
	unsigned int step;
	unsigned int size, size6;
	int t4 = is_t4(lldi->adapter_type);
	unsigned int linkspeed;
	unsigned int rcv_winf, snd_winf;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	if (csk->csk_family == AF_INET)
		daddr = &csk->daddr.sin_addr.s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	else if (csk->csk_family == AF_INET6)
		daddr = &csk->daddr6.sin6_addr;
#endif
	else {
		pr_err("address family 0x%x not supported\n", csk->csk_family);
		goto rel_resource;
	}

	n = dst_neigh_lookup(csk->dst, daddr);
	if (!n) {
		pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
		goto rel_resource;
	}

	csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
	if (csk->atid < 0) {
		pr_err("%s, NO atid available.\n", ndev->name);
		goto rel_resource_without_clip;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);

	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
	if (!csk->l2t) {
		pr_err("%s, cannot alloc l2t.\n", ndev->name);
		goto rel_resource_without_clip;
	}
	cxgbi_sock_get(csk);

#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif

	if (t4) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	}

	if (csk->csk_family == AF_INET)
		skb = alloc_wr(size, 0, GFP_NOIO);
#if IS_ENABLED(CONFIG_IPV6)
	else
		skb = alloc_wr(size6, 0, GFP_NOIO);
#endif

	if (!skb)
		goto rel_resource;
	skb->sk = (struct sock *)csk;
	t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);

	if (!csk->mtu)
		csk->mtu = dst_mtu(csk->dst);
	cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
	csk->tx_chan = cxgb4_port_chan(ndev);
	/* SMT two entries per row */
	csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1;
	step = lldi->ntxq / lldi->nchan;
	csk->txq_idx = cxgb4_port_idx(ndev) * step;
	step = lldi->nrxq / lldi->nchan;
	csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
	linkspeed = ((struct port_info *)netdev_priv(ndev))->link_cfg.speed;
	csk->snd_win = cxgb4i_snd_win;
	csk->rcv_win = cxgb4i_rcv_win;
	if (cxgb4i_rcv_win <= 0) {
		csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN;
		rcv_winf = linkspeed / SPEED_10000;
		if (rcv_winf)
			csk->rcv_win *= rcv_winf;
	}
	if (cxgb4i_snd_win <= 0) {
		csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN;
		snd_winf = linkspeed / SPEED_10000;
		if (snd_winf)
			csk->snd_win *= snd_winf;
	}
	csk->wr_cred = lldi->wr_cred -
		       DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
	csk->wr_max_cred = csk->wr_cred;
	csk->wr_una_cred = 0;
	cxgbi_sock_reset_wr_list(csk);
	csk->err = 0;

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
		       (&csk->saddr), (&csk->daddr), csk, csk->state,
		       csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
		       csk->mtu, csk->mss_idx, csk->smac_idx);

	/* must wait for either a act_open_rpl or act_open_establish */
	try_module_get(THIS_MODULE);
	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	if (csk->csk_family == AF_INET)
		send_act_open_req(csk, skb, csk->l2t);
#if IS_ENABLED(CONFIG_IPV6)
	else
		send_act_open_req6(csk, skb, csk->l2t);
#endif
	neigh_release(n);

	return 0;

rel_resource:
#if IS_ENABLED(CONFIG_IPV6)
	if (csk->csk_family == AF_INET6)
		cxgb4_clip_release(ndev,
				   (const u32 *)&csk->saddr6.sin6_addr, 1);
#endif
rel_resource_without_clip:
	if (n)
		neigh_release(n);
	if (skb)
		__kfree_skb(skb);
	return -EINVAL;
}
cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req_rss,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_FW4_ACK] = do_fw4_ack,
	[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
	[CPL_ISCSI_DATA] = do_rx_iscsi_hdr,
	[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
	[CPL_RX_DATA_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
	[CPL_RX_DATA] = do_rx_data,
};

int cxgb4i_ofld_init(struct cxgbi_device *cdev)
{
	int rc;

	if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
		cxgb4i_max_connect = CXGB4I_MAX_CONN;

	rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
					 cxgb4i_max_connect);
	if (rc < 0)
		return rc;

	cdev->csk_release_offload_resources = release_offload_resources;
	cdev->csk_push_tx_frames = push_tx_frames;
	cdev->csk_send_abort_req = send_abort_req;
	cdev->csk_send_close_req = send_close_req;
	cdev->csk_send_rx_credits = send_rx_credits;
	cdev->csk_alloc_cpls = alloc_cpls;
	cdev->csk_init_act_open = init_act_open;

	pr_info("cdev 0x%p, offload up, added.\n", cdev);
	return 0;
}

/*
 * functions to program the pagepod in h/w
 */
#define ULPMEM_IDATA_MAX_NPPODS	4 /* 256/PPOD_SIZE */
static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi,
				      struct ulp_mem_io *req,
				      unsigned int wr_len, unsigned int dlen,
				      unsigned int pm_addr)
{
	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

	INIT_ULPTX_WR(req, wr_len, 0, 0);
	if (is_t4(lldi->adapter_type))
		req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
					(ULP_MEMIO_ORDER_F));
	else
		req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
					(T5_ULP_MEMIO_IMM_F));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);
}

static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
				struct cxgbi_pagepod_hdr *hdr, unsigned int idx,
				unsigned int npods,
				struct cxgbi_gather_list *gl,
				unsigned int gl_pidx)
{
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct cxgbi_pagepod *ppod;
	unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit;
	unsigned int dlen = PPOD_SIZE * npods;
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				sizeof(struct ulptx_idata) + dlen, 16);
	unsigned int i;

	skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
	if (!skb) {
		pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n",
		       cdev, idx, npods);
		return -ENOMEM;
	}
	req = (struct ulp_mem_io *)skb->head;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr);
	idata = (struct ulptx_idata *)(req + 1);
	ppod = (struct cxgbi_pagepod *)(idata + 1);

	for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) {
		if (!hdr && !gl)
			cxgbi_ddp_ppod_clear(ppod);
		else
			cxgbi_ddp_ppod_set(ppod, hdr, gl, gl_pidx);
	}

	cxgb4_ofld_send(cdev->ports[port_id], skb);
	return 0;
}
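/*
 * Chunking example for ddp_set_map()/ddp_clear_map() below: with
 * ULPMEM_IDATA_MAX_NPPODS = 4, programming npods = 10 pagepods issues
 * three ULP_TX_MEM_WRITE WRs of 4, 4 and 2 pods. Each pod covers
 * PPOD_PAGES_MAX (4) pages of the gather list, so the per-write page
 * offsets passed as gl_pidx are 4 * i = 0, 16 and 32.
 */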
static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
		       unsigned int idx, unsigned int npods,
		       struct cxgbi_gather_list *gl)
{
	unsigned int i, cnt;
	int err = 0;

	for (i = 0; i < npods; i += cnt, idx += cnt) {
		cnt = npods - i;
		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr,
					   idx, cnt, gl, 4 * i);
		if (err < 0)
			break;
	}
	return err;
}

static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
			  unsigned int idx, unsigned int npods)
{
	unsigned int i, cnt;
	int err;

	for (i = 0; i < npods; i += cnt, idx += cnt) {
		cnt = npods - i;
		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		err = ddp_ppod_write_idata(chba->cdev, chba->port_id, NULL,
					   idx, cnt, NULL, 0);
		if (err < 0)
			break;
	}
}

static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
				int pg_idx, bool reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* set up ulp page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}
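/*
 * Both SET_TCB_FIELD users here follow the same mask/value pattern on TCB
 * word 0: ddp_setup_conn_pgidx() above writes the two page-size bits with
 * mask 0x3 << 8 and val pg_idx << 8, while ddp_setup_conn_digest() below
 * writes the two ULP-submode CRC bits with mask 0x3 << 4, e.g. enabling
 * both digests gives val (ULP_CRC_HEADER | ULP_CRC_DATA) << 4.
 */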
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				 int hcrc, int dcrc, int reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!hcrc && !dcrc)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	csk->hcrc_len = (hcrc ? 4 : 0);
	csk->dcrc_len = (dcrc ? 4 : 0);
	/* set up ulp submode */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}

static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	unsigned int tagmask, pgsz_factor[4];
	int err;

	if (ddp) {
		kref_get(&ddp->refcnt);
		pr_warn("cdev 0x%p, ddp 0x%p already set up.\n",
			cdev, cdev->ddp);
		return -EALREADY;
	}

	err = cxgbi_ddp_init(cdev, lldi->vr->iscsi.start,
			     lldi->vr->iscsi.start + lldi->vr->iscsi.size - 1,
			     lldi->iscsi_iolen, lldi->iscsi_iolen);
	if (err < 0)
		return err;

	ddp = cdev->ddp;

	tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
	cxgbi_ddp_page_size_factor(pgsz_factor);
	cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor);

	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
	cdev->csk_ddp_set = ddp_set_map;
	cdev->csk_ddp_clear = ddp_clear_map;

	pr_info("cxgb4i 0x%p tag: sw %u, rsvd %u,%u, mask 0x%x.\n",
		cdev, cdev->tag_format.sw_bits, cdev->tag_format.rsvd_bits,
		cdev->tag_format.rsvd_shift, cdev->tag_format.rsvd_mask);
	pr_info("cxgb4i 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, %u/%u.\n",
		cdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask,
		ddp->rsvd_tag_mask, ddp->max_txsz, lldi->iscsi_iolen,
		ddp->max_rxsz, lldi->iscsi_iolen);
	pr_info("cxgb4i 0x%p max payload size: %u/%u, %u/%u.\n",
		cdev, cdev->tx_max_size, ddp->max_txsz, cdev->rx_max_size,
		ddp->max_rxsz);
	return 0;
}
static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct cxgbi_device *cdev;
	struct port_info *pi;
	int i, rc;

	cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
	if (!cdev) {
		pr_info("t4 device 0x%p, register failed.\n", lldi);
		return NULL;
	}
	pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
		cdev, lldi->adapter_type, lldi->nports,
		lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
		lldi->nrxq, lldi->wr_cred);
	for (i = 0; i < lldi->nrxq; i++)
		log_debug(1 << CXGBI_DBG_DEV,
			  "t4 0x%p, rxq id #%d: %u.\n",
			  cdev, i, lldi->rxq_ids[i]);

	memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
	cdev->flags = CXGBI_FLAG_DEV_T4;
	cdev->pdev = lldi->pdev;
	cdev->ports = lldi->ports;
	cdev->nports = lldi->nports;
	cdev->mtus = lldi->mtus;
	cdev->nmtus = NMTUS;
	cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
	cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
	cdev->itp = &cxgb4i_iscsi_transport;

	cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
		     << FW_VIID_PFN_S;
	pr_info("cdev 0x%p,%s, pfvf %u.\n",
		cdev, lldi->ports[0]->name, cdev->pfvf);

	rc = cxgb4i_ddp_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ddp init failed.\n", cdev);
		goto err_out;
	}
	rc = cxgb4i_ofld_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ofld init failed.\n", cdev);
		goto err_out;
	}

	rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
			    &cxgb4i_host_template, cxgb4i_stt);
	if (rc)
		goto err_out;

	for (i = 0; i < cdev->nports; i++) {
		pi = netdev_priv(lldi->ports[i]);
		cdev->hbas[i]->port_id = pi->port_id;
	}
	return cdev;

err_out:
	cxgbi_device_unregister(cdev);
	return ERR_PTR(-ENOMEM);
}

#define RX_PULL_LEN	128
static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
			     const struct pkt_gl *pgl)
{
	const struct cpl_act_establish *rpl;
	struct sk_buff *skb;
	unsigned int opc;
	struct cxgbi_device *cdev = handle;

	if (pgl == NULL) {
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_wr(len, 0, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
		if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				pgl->va, be64_to_cpu(*rsp),
				be64_to_cpu(*(u64 *)pgl->va),
				pgl->tot_len);
			return 0;
		}
		skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = (struct cpl_act_establish *)skb->data;
	opc = rpl->ot.opcode;
	log_debug(1 << CXGBI_DBG_TOE,
		  "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		  cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
	if (cxgb4i_cplhandlers[opc])
		cxgb4i_cplhandlers[opc](cdev, skb);
	else {
		pr_err("No handler for opcode 0x%x.\n", opc);
		__kfree_skb(skb);
	}
	return 0;
nomem:
	log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
	return 1;
}

static int t4_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct cxgbi_device *cdev = handle;

	switch (state) {
	case CXGB4_STATE_UP:
		pr_info("cdev 0x%p, UP.\n", cdev);
		break;
	case CXGB4_STATE_START_RECOVERY:
		pr_info("cdev 0x%p, RECOVERY.\n", cdev);
		/* close all connections */
		break;
	case CXGB4_STATE_DOWN:
		pr_info("cdev 0x%p, DOWN.\n", cdev);
		break;
	case CXGB4_STATE_DETACH:
		pr_info("cdev 0x%p, DETACH.\n", cdev);
		cxgbi_device_unregister(cdev);
		break;
	default:
		pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
		break;
	}
	return 0;
}

static int __init cxgb4i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
	if (rc < 0)
		return rc;
	cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);

	return 0;
}

static void __exit cxgb4i_exit_module(void)
{
	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
}

module_init(cxgb4i_init_module);
module_exit(cxgb4i_exit_module);