1 /* 2 * cxgb4i.c: Chelsio T4 iSCSI driver. 3 * 4 * Copyright (c) 2010 Chelsio Communications, Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation. 9 * 10 * Written by: Karen Xie (kxie@chelsio.com) 11 * Rakesh Ranjan (rranjan@chelsio.com) 12 */ 13 14 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ 15 16 #include <linux/module.h> 17 #include <linux/moduleparam.h> 18 #include <scsi/scsi_host.h> 19 #include <net/tcp.h> 20 #include <net/dst.h> 21 #include <linux/netdevice.h> 22 #include <net/addrconf.h> 23 24 #include "t4_regs.h" 25 #include "t4_msg.h" 26 #include "cxgb4.h" 27 #include "cxgb4_uld.h" 28 #include "t4fw_api.h" 29 #include "l2t.h" 30 #include "cxgb4i.h" 31 32 static unsigned int dbg_level; 33 34 #include "../libcxgbi.h" 35 36 #define DRV_MODULE_NAME "cxgb4i" 37 #define DRV_MODULE_DESC "Chelsio T4/T5 iSCSI Driver" 38 #define DRV_MODULE_VERSION "0.9.4" 39 40 static char version[] = 41 DRV_MODULE_DESC " " DRV_MODULE_NAME 42 " v" DRV_MODULE_VERSION "\n"; 43 44 MODULE_AUTHOR("Chelsio Communications, Inc."); 45 MODULE_DESCRIPTION(DRV_MODULE_DESC); 46 MODULE_VERSION(DRV_MODULE_VERSION); 47 MODULE_LICENSE("GPL"); 48 49 module_param(dbg_level, uint, 0644); 50 MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)"); 51 52 static int cxgb4i_rcv_win = 256 * 1024; 53 module_param(cxgb4i_rcv_win, int, 0644); 54 MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP reveive window in bytes"); 55 56 static int cxgb4i_snd_win = 128 * 1024; 57 module_param(cxgb4i_snd_win, int, 0644); 58 MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes"); 59 60 static int cxgb4i_rx_credit_thres = 10 * 1024; 61 module_param(cxgb4i_rx_credit_thres, int, 0644); 62 MODULE_PARM_DESC(cxgb4i_rx_credit_thres, 63 "RX credits return threshold in bytes (default=10KB)"); 64 65 static unsigned int cxgb4i_max_connect = (8 * 1024); 66 module_param(cxgb4i_max_connect, uint, 0644); 67 MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections"); 68 69 static unsigned short cxgb4i_sport_base = 20000; 70 module_param(cxgb4i_sport_base, ushort, 0644); 71 MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)"); 72 73 typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *); 74 75 static void *t4_uld_add(const struct cxgb4_lld_info *); 76 static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *); 77 static int t4_uld_state_change(void *, enum cxgb4_state state); 78 static inline int send_tx_flowc_wr(struct cxgbi_sock *); 79 80 static const struct cxgb4_uld_info cxgb4i_uld_info = { 81 .name = DRV_MODULE_NAME, 82 .add = t4_uld_add, 83 .rx_handler = t4_uld_rx_handler, 84 .state_change = t4_uld_state_change, 85 }; 86 87 static struct scsi_host_template cxgb4i_host_template = { 88 .module = THIS_MODULE, 89 .name = DRV_MODULE_NAME, 90 .proc_name = DRV_MODULE_NAME, 91 .can_queue = CXGB4I_SCSI_HOST_QDEPTH, 92 .queuecommand = iscsi_queuecommand, 93 .change_queue_depth = scsi_change_queue_depth, 94 .sg_tablesize = SG_ALL, 95 .max_sectors = 0xFFFF, 96 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, 97 .eh_abort_handler = iscsi_eh_abort, 98 .eh_device_reset_handler = iscsi_eh_device_reset, 99 .eh_target_reset_handler = iscsi_eh_recover_target, 100 .target_alloc = iscsi_target_alloc, 101 .use_clustering = DISABLE_CLUSTERING, 102 .this_id = -1, 103 .track_queue_depth = 1, 104 }; 105 106 static struct iscsi_transport cxgb4i_iscsi_transport = { 107 .owner = THIS_MODULE, 108 .name = DRV_MODULE_NAME, 109 .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST | 110 CAP_DATADGST | CAP_DIGEST_OFFLOAD | 111 CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO, 112 .attr_is_visible = cxgbi_attr_is_visible, 113 .get_host_param = cxgbi_get_host_param, 114 .set_host_param = cxgbi_set_host_param, 115 /* session management */ 116 .create_session = cxgbi_create_session, 117 .destroy_session = cxgbi_destroy_session, 118 .get_session_param = iscsi_session_get_param, 119 /* connection management */ 120 .create_conn = cxgbi_create_conn, 121 .bind_conn = cxgbi_bind_conn, 122 .destroy_conn = iscsi_tcp_conn_teardown, 123 .start_conn = iscsi_conn_start, 124 .stop_conn = iscsi_conn_stop, 125 .get_conn_param = iscsi_conn_get_param, 126 .set_param = cxgbi_set_conn_param, 127 .get_stats = cxgbi_get_conn_stats, 128 /* pdu xmit req from user space */ 129 .send_pdu = iscsi_conn_send_pdu, 130 /* task */ 131 .init_task = iscsi_tcp_task_init, 132 .xmit_task = iscsi_tcp_task_xmit, 133 .cleanup_task = cxgbi_cleanup_task, 134 /* pdu */ 135 .alloc_pdu = cxgbi_conn_alloc_pdu, 136 .init_pdu = cxgbi_conn_init_pdu, 137 .xmit_pdu = cxgbi_conn_xmit_pdu, 138 .parse_pdu_itt = cxgbi_parse_pdu_itt, 139 /* TCP connect/disconnect */ 140 .get_ep_param = cxgbi_get_ep_param, 141 .ep_connect = cxgbi_ep_connect, 142 .ep_poll = cxgbi_ep_poll, 143 .ep_disconnect = cxgbi_ep_disconnect, 144 /* Error recovery timeout call */ 145 .session_recovery_timedout = iscsi_session_recovery_timedout, 146 }; 147 148 static struct scsi_transport_template *cxgb4i_stt; 149 150 /* 151 * CPL (Chelsio Protocol Language) defines a message passing interface between 152 * the host driver and Chelsio asic. 153 * The section below implments CPLs that related to iscsi tcp connection 154 * open/close/abort and data send/receive. 155 */ 156 157 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) 158 #define RCV_BUFSIZ_MASK 0x3FFU 159 #define MAX_IMM_TX_PKT_LEN 128 160 161 static int push_tx_frames(struct cxgbi_sock *, int); 162 163 /* 164 * is_ofld_imm - check whether a packet can be sent as immediate data 165 * @skb: the packet 166 * 167 * Returns true if a packet can be sent as an offload WR with immediate 168 * data. We currently use the same limit as for Ethernet packets. 169 */ 170 static inline bool is_ofld_imm(const struct sk_buff *skb) 171 { 172 int len = skb->len; 173 174 if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) 175 len += sizeof(struct fw_ofld_tx_data_wr); 176 177 return len <= MAX_IMM_TX_PKT_LEN; 178 } 179 180 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, 181 struct l2t_entry *e) 182 { 183 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); 184 int t4 = is_t4(lldi->adapter_type); 185 int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); 186 unsigned long long opt0; 187 unsigned int opt2; 188 unsigned int qid_atid = ((unsigned int)csk->atid) | 189 (((unsigned int)csk->rss_qid) << 14); 190 191 opt0 = KEEP_ALIVE_F | 192 WND_SCALE_V(wscale) | 193 MSS_IDX_V(csk->mss_idx) | 194 L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) | 195 TX_CHAN_V(csk->tx_chan) | 196 SMAC_SEL_V(csk->smac_idx) | 197 ULP_MODE_V(ULP_MODE_ISCSI) | 198 RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10); 199 opt2 = RX_CHANNEL_V(0) | 200 RSS_QUEUE_VALID_F | 201 (RX_FC_DISABLE_F) | 202 RSS_QUEUE_V(csk->rss_qid); 203 204 if (is_t4(lldi->adapter_type)) { 205 struct cpl_act_open_req *req = 206 (struct cpl_act_open_req *)skb->head; 207 208 INIT_TP_WR(req, 0); 209 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, 210 qid_atid)); 211 req->local_port = csk->saddr.sin_port; 212 req->peer_port = csk->daddr.sin_port; 213 req->local_ip = csk->saddr.sin_addr.s_addr; 214 req->peer_ip = csk->daddr.sin_addr.s_addr; 215 req->opt0 = cpu_to_be64(opt0); 216 req->params = cpu_to_be32(cxgb4_select_ntuple( 217 csk->cdev->ports[csk->port_id], 218 csk->l2t)); 219 opt2 |= RX_FC_VALID_F; 220 req->opt2 = cpu_to_be32(opt2); 221 222 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 223 "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n", 224 csk, &req->local_ip, ntohs(req->local_port), 225 &req->peer_ip, ntohs(req->peer_port), 226 csk->atid, csk->rss_qid); 227 } else { 228 struct cpl_t5_act_open_req *req = 229 (struct cpl_t5_act_open_req *)skb->head; 230 231 INIT_TP_WR(req, 0); 232 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, 233 qid_atid)); 234 req->local_port = csk->saddr.sin_port; 235 req->peer_port = csk->daddr.sin_port; 236 req->local_ip = csk->saddr.sin_addr.s_addr; 237 req->peer_ip = csk->daddr.sin_addr.s_addr; 238 req->opt0 = cpu_to_be64(opt0); 239 req->params = cpu_to_be64(FILTER_TUPLE_V( 240 cxgb4_select_ntuple( 241 csk->cdev->ports[csk->port_id], 242 csk->l2t))); 243 opt2 |= 1 << 31; 244 req->opt2 = cpu_to_be32(opt2); 245 246 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 247 "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n", 248 csk, &req->local_ip, ntohs(req->local_port), 249 &req->peer_ip, ntohs(req->peer_port), 250 csk->atid, csk->rss_qid); 251 } 252 253 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); 254 255 pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n", 256 (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk, 257 csk->state, csk->flags, csk->atid, csk->rss_qid); 258 259 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); 260 } 261 262 #if IS_ENABLED(CONFIG_IPV6) 263 static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb, 264 struct l2t_entry *e) 265 { 266 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); 267 int t4 = is_t4(lldi->adapter_type); 268 int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); 269 unsigned long long opt0; 270 unsigned int opt2; 271 unsigned int qid_atid = ((unsigned int)csk->atid) | 272 (((unsigned int)csk->rss_qid) << 14); 273 274 opt0 = KEEP_ALIVE_F | 275 WND_SCALE_V(wscale) | 276 MSS_IDX_V(csk->mss_idx) | 277 L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) | 278 TX_CHAN_V(csk->tx_chan) | 279 SMAC_SEL_V(csk->smac_idx) | 280 ULP_MODE_V(ULP_MODE_ISCSI) | 281 RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10); 282 283 opt2 = RX_CHANNEL_V(0) | 284 RSS_QUEUE_VALID_F | 285 RX_FC_DISABLE_F | 286 RSS_QUEUE_V(csk->rss_qid); 287 288 if (t4) { 289 struct cpl_act_open_req6 *req = 290 (struct cpl_act_open_req6 *)skb->head; 291 292 INIT_TP_WR(req, 0); 293 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, 294 qid_atid)); 295 req->local_port = csk->saddr6.sin6_port; 296 req->peer_port = csk->daddr6.sin6_port; 297 298 req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); 299 req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + 300 8); 301 req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); 302 req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + 303 8); 304 305 req->opt0 = cpu_to_be64(opt0); 306 307 opt2 |= RX_FC_VALID_F; 308 req->opt2 = cpu_to_be32(opt2); 309 310 req->params = cpu_to_be32(cxgb4_select_ntuple( 311 csk->cdev->ports[csk->port_id], 312 csk->l2t)); 313 } else { 314 struct cpl_t5_act_open_req6 *req = 315 (struct cpl_t5_act_open_req6 *)skb->head; 316 317 INIT_TP_WR(req, 0); 318 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, 319 qid_atid)); 320 req->local_port = csk->saddr6.sin6_port; 321 req->peer_port = csk->daddr6.sin6_port; 322 req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); 323 req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + 324 8); 325 req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); 326 req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + 327 8); 328 req->opt0 = cpu_to_be64(opt0); 329 330 opt2 |= T5_OPT_2_VALID_F; 331 req->opt2 = cpu_to_be32(opt2); 332 333 req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple( 334 csk->cdev->ports[csk->port_id], 335 csk->l2t))); 336 } 337 338 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); 339 340 pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n", 341 t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid, 342 &csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port), 343 &csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port), 344 csk->rss_qid); 345 346 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); 347 } 348 #endif 349 350 static void send_close_req(struct cxgbi_sock *csk) 351 { 352 struct sk_buff *skb = csk->cpl_close; 353 struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head; 354 unsigned int tid = csk->tid; 355 356 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 357 "csk 0x%p,%u,0x%lx, tid %u.\n", 358 csk, csk->state, csk->flags, csk->tid); 359 csk->cpl_close = NULL; 360 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); 361 INIT_TP_WR(req, tid); 362 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); 363 req->rsvd = 0; 364 365 cxgbi_sock_skb_entail(csk, skb); 366 if (csk->state >= CTP_ESTABLISHED) 367 push_tx_frames(csk, 1); 368 } 369 370 static void abort_arp_failure(void *handle, struct sk_buff *skb) 371 { 372 struct cxgbi_sock *csk = (struct cxgbi_sock *)handle; 373 struct cpl_abort_req *req; 374 375 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 376 "csk 0x%p,%u,0x%lx, tid %u, abort.\n", 377 csk, csk->state, csk->flags, csk->tid); 378 req = (struct cpl_abort_req *)skb->data; 379 req->cmd = CPL_ABORT_NO_RST; 380 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); 381 } 382 383 static void send_abort_req(struct cxgbi_sock *csk) 384 { 385 struct cpl_abort_req *req; 386 struct sk_buff *skb = csk->cpl_abort_req; 387 388 if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev) 389 return; 390 391 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { 392 send_tx_flowc_wr(csk); 393 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); 394 } 395 396 cxgbi_sock_set_state(csk, CTP_ABORTING); 397 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING); 398 cxgbi_sock_purge_write_queue(csk); 399 400 csk->cpl_abort_req = NULL; 401 req = (struct cpl_abort_req *)skb->head; 402 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); 403 req->cmd = CPL_ABORT_SEND_RST; 404 t4_set_arp_err_handler(skb, csk, abort_arp_failure); 405 INIT_TP_WR(req, csk->tid); 406 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid)); 407 req->rsvd0 = htonl(csk->snd_nxt); 408 req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT); 409 410 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 411 "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n", 412 csk, csk->state, csk->flags, csk->tid, csk->snd_nxt, 413 req->rsvd1); 414 415 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); 416 } 417 418 static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status) 419 { 420 struct sk_buff *skb = csk->cpl_abort_rpl; 421 struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head; 422 423 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 424 "csk 0x%p,%u,0x%lx,%u, status %d.\n", 425 csk, csk->state, csk->flags, csk->tid, rst_status); 426 427 csk->cpl_abort_rpl = NULL; 428 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); 429 INIT_TP_WR(rpl, csk->tid); 430 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid)); 431 rpl->cmd = rst_status; 432 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); 433 } 434 435 /* 436 * CPL connection rx data ack: host -> 437 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of 438 * credits sent. 439 */ 440 static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits) 441 { 442 struct sk_buff *skb; 443 struct cpl_rx_data_ack *req; 444 445 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, 446 "csk 0x%p,%u,0x%lx,%u, credit %u.\n", 447 csk, csk->state, csk->flags, csk->tid, credits); 448 449 skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC); 450 if (!skb) { 451 pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits); 452 return 0; 453 } 454 req = (struct cpl_rx_data_ack *)skb->head; 455 456 set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id); 457 INIT_TP_WR(req, csk->tid); 458 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, 459 csk->tid)); 460 req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits) 461 | RX_FORCE_ACK_F); 462 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); 463 return credits; 464 } 465 466 /* 467 * sgl_len - calculates the size of an SGL of the given capacity 468 * @n: the number of SGL entries 469 * Calculates the number of flits needed for a scatter/gather list that 470 * can hold the given number of entries. 471 */ 472 static inline unsigned int sgl_len(unsigned int n) 473 { 474 n--; 475 return (3 * n) / 2 + (n & 1) + 2; 476 } 477 478 /* 479 * calc_tx_flits_ofld - calculate # of flits for an offload packet 480 * @skb: the packet 481 * 482 * Returns the number of flits needed for the given offload packet. 483 * These packets are already fully constructed and no additional headers 484 * will be added. 485 */ 486 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) 487 { 488 unsigned int flits, cnt; 489 490 if (is_ofld_imm(skb)) 491 return DIV_ROUND_UP(skb->len, 8); 492 flits = skb_transport_offset(skb) / 8; 493 cnt = skb_shinfo(skb)->nr_frags; 494 if (skb_tail_pointer(skb) != skb_transport_header(skb)) 495 cnt++; 496 return flits + sgl_len(cnt); 497 } 498 499 #define FLOWC_WR_NPARAMS_MIN 9 500 static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp) 501 { 502 int nparams, flowclen16, flowclen; 503 504 nparams = FLOWC_WR_NPARAMS_MIN; 505 flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]); 506 flowclen16 = DIV_ROUND_UP(flowclen, 16); 507 flowclen = flowclen16 * 16; 508 /* 509 * Return the number of 16-byte credits used by the FlowC request. 510 * Pass back the nparams and actual FlowC length if requested. 511 */ 512 if (nparamsp) 513 *nparamsp = nparams; 514 if (flowclenp) 515 *flowclenp = flowclen; 516 517 return flowclen16; 518 } 519 520 static inline int send_tx_flowc_wr(struct cxgbi_sock *csk) 521 { 522 struct sk_buff *skb; 523 struct fw_flowc_wr *flowc; 524 int nparams, flowclen16, flowclen; 525 526 flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen); 527 skb = alloc_wr(flowclen, 0, GFP_ATOMIC); 528 flowc = (struct fw_flowc_wr *)skb->head; 529 flowc->op_to_nparams = 530 htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams)); 531 flowc->flowid_len16 = 532 htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid)); 533 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 534 flowc->mnemval[0].val = htonl(csk->cdev->pfvf); 535 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 536 flowc->mnemval[1].val = htonl(csk->tx_chan); 537 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 538 flowc->mnemval[2].val = htonl(csk->tx_chan); 539 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 540 flowc->mnemval[3].val = htonl(csk->rss_qid); 541 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; 542 flowc->mnemval[4].val = htonl(csk->snd_nxt); 543 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; 544 flowc->mnemval[5].val = htonl(csk->rcv_nxt); 545 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; 546 flowc->mnemval[6].val = htonl(cxgb4i_snd_win); 547 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; 548 flowc->mnemval[7].val = htonl(csk->advmss); 549 flowc->mnemval[8].mnemonic = 0; 550 flowc->mnemval[8].val = 0; 551 flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX; 552 flowc->mnemval[8].val = 16384; 553 554 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); 555 556 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 557 "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n", 558 csk, csk->tid, 0, csk->tx_chan, csk->rss_qid, 559 csk->snd_nxt, csk->rcv_nxt, cxgb4i_snd_win, 560 csk->advmss); 561 562 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); 563 564 return flowclen16; 565 } 566 567 static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, 568 int dlen, int len, u32 credits, int compl) 569 { 570 struct fw_ofld_tx_data_wr *req; 571 unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3; 572 unsigned int wr_ulp_mode = 0, val; 573 bool imm = is_ofld_imm(skb); 574 575 req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req)); 576 577 if (imm) { 578 req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | 579 FW_WR_COMPL_F | 580 FW_WR_IMMDLEN_V(dlen)); 581 req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) | 582 FW_WR_LEN16_V(credits)); 583 } else { 584 req->op_to_immdlen = 585 cpu_to_be32(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | 586 FW_WR_COMPL_F | 587 FW_WR_IMMDLEN_V(0)); 588 req->flowid_len16 = 589 cpu_to_be32(FW_WR_FLOWID_V(csk->tid) | 590 FW_WR_LEN16_V(credits)); 591 } 592 if (submode) 593 wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) | 594 FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode); 595 val = skb_peek(&csk->write_queue) ? 0 : 1; 596 req->tunnel_to_proxy = htonl(wr_ulp_mode | 597 FW_OFLD_TX_DATA_WR_SHOVE_V(val)); 598 req->plen = htonl(len); 599 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) 600 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); 601 } 602 603 static void arp_failure_skb_discard(void *handle, struct sk_buff *skb) 604 { 605 kfree_skb(skb); 606 } 607 608 static int push_tx_frames(struct cxgbi_sock *csk, int req_completion) 609 { 610 int total_size = 0; 611 struct sk_buff *skb; 612 613 if (unlikely(csk->state < CTP_ESTABLISHED || 614 csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) { 615 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK | 616 1 << CXGBI_DBG_PDU_TX, 617 "csk 0x%p,%u,0x%lx,%u, in closing state.\n", 618 csk, csk->state, csk->flags, csk->tid); 619 return 0; 620 } 621 622 while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) { 623 int dlen = skb->len; 624 int len = skb->len; 625 unsigned int credits_needed; 626 int flowclen16 = 0; 627 628 skb_reset_transport_header(skb); 629 if (is_ofld_imm(skb)) 630 credits_needed = DIV_ROUND_UP(dlen, 16); 631 else 632 credits_needed = DIV_ROUND_UP( 633 8 * calc_tx_flits_ofld(skb), 634 16); 635 636 if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) 637 credits_needed += DIV_ROUND_UP( 638 sizeof(struct fw_ofld_tx_data_wr), 639 16); 640 641 /* 642 * Assumes the initial credits is large enough to support 643 * fw_flowc_wr plus largest possible first payload 644 */ 645 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { 646 flowclen16 = send_tx_flowc_wr(csk); 647 csk->wr_cred -= flowclen16; 648 csk->wr_una_cred += flowclen16; 649 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); 650 } 651 652 if (csk->wr_cred < credits_needed) { 653 log_debug(1 << CXGBI_DBG_PDU_TX, 654 "csk 0x%p, skb %u/%u, wr %d < %u.\n", 655 csk, skb->len, skb->data_len, 656 credits_needed, csk->wr_cred); 657 break; 658 } 659 __skb_unlink(skb, &csk->write_queue); 660 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); 661 skb->csum = credits_needed + flowclen16; 662 csk->wr_cred -= credits_needed; 663 csk->wr_una_cred += credits_needed; 664 cxgbi_sock_enqueue_wr(csk, skb); 665 666 log_debug(1 << CXGBI_DBG_PDU_TX, 667 "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n", 668 csk, skb->len, skb->data_len, credits_needed, 669 csk->wr_cred, csk->wr_una_cred); 670 671 if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) { 672 len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb)); 673 make_tx_data_wr(csk, skb, dlen, len, credits_needed, 674 req_completion); 675 csk->snd_nxt += len; 676 cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR); 677 } 678 total_size += skb->truesize; 679 t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard); 680 681 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX, 682 "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n", 683 csk, csk->state, csk->flags, csk->tid, skb, len); 684 685 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); 686 } 687 return total_size; 688 } 689 690 static inline void free_atid(struct cxgbi_sock *csk) 691 { 692 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); 693 694 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) { 695 cxgb4_free_atid(lldi->tids, csk->atid); 696 cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID); 697 cxgbi_sock_put(csk); 698 } 699 } 700 701 static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb) 702 { 703 struct cxgbi_sock *csk; 704 struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data; 705 unsigned short tcp_opt = ntohs(req->tcp_opt); 706 unsigned int tid = GET_TID(req); 707 unsigned int atid = GET_TID_TID(ntohl(req->tos_atid)); 708 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 709 struct tid_info *t = lldi->tids; 710 u32 rcv_isn = be32_to_cpu(req->rcv_isn); 711 712 csk = lookup_atid(t, atid); 713 if (unlikely(!csk)) { 714 pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev); 715 goto rel_skb; 716 } 717 718 if (csk->atid != atid) { 719 pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n", 720 atid, csk, csk->state, csk->flags, csk->tid, csk->atid); 721 goto rel_skb; 722 } 723 724 pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n", 725 (&csk->saddr), (&csk->daddr), 726 atid, tid, csk, csk->state, csk->flags, rcv_isn); 727 728 module_put(THIS_MODULE); 729 730 cxgbi_sock_get(csk); 731 csk->tid = tid; 732 cxgb4_insert_tid(lldi->tids, csk, tid); 733 cxgbi_sock_set_flag(csk, CTPF_HAS_TID); 734 735 free_atid(csk); 736 737 spin_lock_bh(&csk->lock); 738 if (unlikely(csk->state != CTP_ACTIVE_OPEN)) 739 pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n", 740 csk, csk->state, csk->flags, csk->tid); 741 742 if (csk->retry_timer.function) { 743 del_timer(&csk->retry_timer); 744 csk->retry_timer.function = NULL; 745 } 746 747 csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn; 748 /* 749 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't 750 * pass through opt0. 751 */ 752 if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10)) 753 csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10); 754 755 csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40; 756 if (GET_TCPOPT_TSTAMP(tcp_opt)) 757 csk->advmss -= 12; 758 if (csk->advmss < 128) 759 csk->advmss = 128; 760 761 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 762 "csk 0x%p, mss_idx %u, advmss %u.\n", 763 csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss); 764 765 cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt)); 766 767 if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED))) 768 send_abort_req(csk); 769 else { 770 if (skb_queue_len(&csk->write_queue)) 771 push_tx_frames(csk, 0); 772 cxgbi_conn_tx_open(csk); 773 } 774 spin_unlock_bh(&csk->lock); 775 776 rel_skb: 777 __kfree_skb(skb); 778 } 779 780 static int act_open_rpl_status_to_errno(int status) 781 { 782 switch (status) { 783 case CPL_ERR_CONN_RESET: 784 return -ECONNREFUSED; 785 case CPL_ERR_ARP_MISS: 786 return -EHOSTUNREACH; 787 case CPL_ERR_CONN_TIMEDOUT: 788 return -ETIMEDOUT; 789 case CPL_ERR_TCAM_FULL: 790 return -ENOMEM; 791 case CPL_ERR_CONN_EXIST: 792 return -EADDRINUSE; 793 default: 794 return -EIO; 795 } 796 } 797 798 static void csk_act_open_retry_timer(unsigned long data) 799 { 800 struct sk_buff *skb = NULL; 801 struct cxgbi_sock *csk = (struct cxgbi_sock *)data; 802 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); 803 void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *, 804 struct l2t_entry *); 805 int t4 = is_t4(lldi->adapter_type), size, size6; 806 807 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 808 "csk 0x%p,%u,0x%lx,%u.\n", 809 csk, csk->state, csk->flags, csk->tid); 810 811 cxgbi_sock_get(csk); 812 spin_lock_bh(&csk->lock); 813 814 if (t4) { 815 size = sizeof(struct cpl_act_open_req); 816 size6 = sizeof(struct cpl_act_open_req6); 817 } else { 818 size = sizeof(struct cpl_t5_act_open_req); 819 size6 = sizeof(struct cpl_t5_act_open_req6); 820 } 821 822 if (csk->csk_family == AF_INET) { 823 send_act_open_func = send_act_open_req; 824 skb = alloc_wr(size, 0, GFP_ATOMIC); 825 #if IS_ENABLED(CONFIG_IPV6) 826 } else { 827 send_act_open_func = send_act_open_req6; 828 skb = alloc_wr(size6, 0, GFP_ATOMIC); 829 #endif 830 } 831 832 if (!skb) 833 cxgbi_sock_fail_act_open(csk, -ENOMEM); 834 else { 835 skb->sk = (struct sock *)csk; 836 t4_set_arp_err_handler(skb, csk, 837 cxgbi_sock_act_open_req_arp_failure); 838 send_act_open_func(csk, skb, csk->l2t); 839 } 840 841 spin_unlock_bh(&csk->lock); 842 cxgbi_sock_put(csk); 843 844 } 845 846 static inline bool is_neg_adv(unsigned int status) 847 { 848 return status == CPL_ERR_RTX_NEG_ADVICE || 849 status == CPL_ERR_KEEPALV_NEG_ADVICE || 850 status == CPL_ERR_PERSIST_NEG_ADVICE; 851 } 852 853 static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) 854 { 855 struct cxgbi_sock *csk; 856 struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data; 857 unsigned int tid = GET_TID(rpl); 858 unsigned int atid = 859 GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status))); 860 unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status)); 861 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 862 struct tid_info *t = lldi->tids; 863 864 csk = lookup_atid(t, atid); 865 if (unlikely(!csk)) { 866 pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid); 867 goto rel_skb; 868 } 869 870 pr_info_ipaddr("tid %u/%u, status %u.\n" 871 "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr), 872 atid, tid, status, csk, csk->state, csk->flags); 873 874 if (is_neg_adv(status)) 875 goto rel_skb; 876 877 module_put(THIS_MODULE); 878 879 if (status && status != CPL_ERR_TCAM_FULL && 880 status != CPL_ERR_CONN_EXIST && 881 status != CPL_ERR_ARP_MISS) 882 cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl)); 883 884 cxgbi_sock_get(csk); 885 spin_lock_bh(&csk->lock); 886 887 if (status == CPL_ERR_CONN_EXIST && 888 csk->retry_timer.function != csk_act_open_retry_timer) { 889 csk->retry_timer.function = csk_act_open_retry_timer; 890 mod_timer(&csk->retry_timer, jiffies + HZ / 2); 891 } else 892 cxgbi_sock_fail_act_open(csk, 893 act_open_rpl_status_to_errno(status)); 894 895 spin_unlock_bh(&csk->lock); 896 cxgbi_sock_put(csk); 897 rel_skb: 898 __kfree_skb(skb); 899 } 900 901 static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb) 902 { 903 struct cxgbi_sock *csk; 904 struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data; 905 unsigned int tid = GET_TID(req); 906 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 907 struct tid_info *t = lldi->tids; 908 909 csk = lookup_tid(t, tid); 910 if (unlikely(!csk)) { 911 pr_err("can't find connection for tid %u.\n", tid); 912 goto rel_skb; 913 } 914 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n", 915 (&csk->saddr), (&csk->daddr), 916 csk, csk->state, csk->flags, csk->tid); 917 cxgbi_sock_rcv_peer_close(csk); 918 rel_skb: 919 __kfree_skb(skb); 920 } 921 922 static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) 923 { 924 struct cxgbi_sock *csk; 925 struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data; 926 unsigned int tid = GET_TID(rpl); 927 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 928 struct tid_info *t = lldi->tids; 929 930 csk = lookup_tid(t, tid); 931 if (unlikely(!csk)) { 932 pr_err("can't find connection for tid %u.\n", tid); 933 goto rel_skb; 934 } 935 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n", 936 (&csk->saddr), (&csk->daddr), 937 csk, csk->state, csk->flags, csk->tid); 938 cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt)); 939 rel_skb: 940 __kfree_skb(skb); 941 } 942 943 static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason, 944 int *need_rst) 945 { 946 switch (abort_reason) { 947 case CPL_ERR_BAD_SYN: /* fall through */ 948 case CPL_ERR_CONN_RESET: 949 return csk->state > CTP_ESTABLISHED ? 950 -EPIPE : -ECONNRESET; 951 case CPL_ERR_XMIT_TIMEDOUT: 952 case CPL_ERR_PERSIST_TIMEDOUT: 953 case CPL_ERR_FINWAIT2_TIMEDOUT: 954 case CPL_ERR_KEEPALIVE_TIMEDOUT: 955 return -ETIMEDOUT; 956 default: 957 return -EIO; 958 } 959 } 960 961 static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb) 962 { 963 struct cxgbi_sock *csk; 964 struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data; 965 unsigned int tid = GET_TID(req); 966 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 967 struct tid_info *t = lldi->tids; 968 int rst_status = CPL_ABORT_NO_RST; 969 970 csk = lookup_tid(t, tid); 971 if (unlikely(!csk)) { 972 pr_err("can't find connection for tid %u.\n", tid); 973 goto rel_skb; 974 } 975 976 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n", 977 (&csk->saddr), (&csk->daddr), 978 csk, csk->state, csk->flags, csk->tid, req->status); 979 980 if (is_neg_adv(req->status)) 981 goto rel_skb; 982 983 cxgbi_sock_get(csk); 984 spin_lock_bh(&csk->lock); 985 986 cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD); 987 988 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { 989 send_tx_flowc_wr(csk); 990 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); 991 } 992 993 cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD); 994 cxgbi_sock_set_state(csk, CTP_ABORTING); 995 996 send_abort_rpl(csk, rst_status); 997 998 if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { 999 csk->err = abort_status_to_errno(csk, req->status, &rst_status); 1000 cxgbi_sock_closed(csk); 1001 } 1002 1003 spin_unlock_bh(&csk->lock); 1004 cxgbi_sock_put(csk); 1005 rel_skb: 1006 __kfree_skb(skb); 1007 } 1008 1009 static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb) 1010 { 1011 struct cxgbi_sock *csk; 1012 struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data; 1013 unsigned int tid = GET_TID(rpl); 1014 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 1015 struct tid_info *t = lldi->tids; 1016 1017 csk = lookup_tid(t, tid); 1018 if (!csk) 1019 goto rel_skb; 1020 1021 if (csk) 1022 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n", 1023 (&csk->saddr), (&csk->daddr), csk, 1024 csk->state, csk->flags, csk->tid, rpl->status); 1025 1026 if (rpl->status == CPL_ERR_ABORT_FAILED) 1027 goto rel_skb; 1028 1029 cxgbi_sock_rcv_abort_rpl(csk); 1030 rel_skb: 1031 __kfree_skb(skb); 1032 } 1033 1034 static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb) 1035 { 1036 struct cxgbi_sock *csk; 1037 struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data; 1038 unsigned int tid = GET_TID(cpl); 1039 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 1040 struct tid_info *t = lldi->tids; 1041 1042 csk = lookup_tid(t, tid); 1043 if (!csk) { 1044 pr_err("can't find connection for tid %u.\n", tid); 1045 } else { 1046 /* not expecting this, reset the connection. */ 1047 pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid); 1048 spin_lock_bh(&csk->lock); 1049 send_abort_req(csk); 1050 spin_unlock_bh(&csk->lock); 1051 } 1052 __kfree_skb(skb); 1053 } 1054 1055 static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb) 1056 { 1057 struct cxgbi_sock *csk; 1058 struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data; 1059 unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp); 1060 unsigned int tid = GET_TID(cpl); 1061 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 1062 struct tid_info *t = lldi->tids; 1063 1064 csk = lookup_tid(t, tid); 1065 if (unlikely(!csk)) { 1066 pr_err("can't find conn. for tid %u.\n", tid); 1067 goto rel_skb; 1068 } 1069 1070 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, 1071 "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n", 1072 csk, csk->state, csk->flags, csk->tid, skb, skb->len, 1073 pdu_len_ddp); 1074 1075 spin_lock_bh(&csk->lock); 1076 1077 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { 1078 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 1079 "csk 0x%p,%u,0x%lx,%u, bad state.\n", 1080 csk, csk->state, csk->flags, csk->tid); 1081 if (csk->state != CTP_ABORTING) 1082 goto abort_conn; 1083 else 1084 goto discard; 1085 } 1086 1087 cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq); 1088 cxgbi_skcb_flags(skb) = 0; 1089 1090 skb_reset_transport_header(skb); 1091 __skb_pull(skb, sizeof(*cpl)); 1092 __pskb_trim(skb, ntohs(cpl->len)); 1093 1094 if (!csk->skb_ulp_lhdr) { 1095 unsigned char *bhs; 1096 unsigned int hlen, dlen, plen; 1097 1098 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, 1099 "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n", 1100 csk, csk->state, csk->flags, csk->tid, skb); 1101 csk->skb_ulp_lhdr = skb; 1102 cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR); 1103 1104 if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) { 1105 pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n", 1106 csk->tid, cxgbi_skcb_tcp_seq(skb), 1107 csk->rcv_nxt); 1108 goto abort_conn; 1109 } 1110 1111 bhs = skb->data; 1112 hlen = ntohs(cpl->len); 1113 dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF; 1114 1115 plen = ISCSI_PDU_LEN(pdu_len_ddp); 1116 if (is_t4(lldi->adapter_type)) 1117 plen -= 40; 1118 1119 if ((hlen + dlen) != plen) { 1120 pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len " 1121 "mismatch %u != %u + %u, seq 0x%x.\n", 1122 csk->tid, plen, hlen, dlen, 1123 cxgbi_skcb_tcp_seq(skb)); 1124 goto abort_conn; 1125 } 1126 1127 cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3); 1128 if (dlen) 1129 cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len; 1130 csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb); 1131 1132 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, 1133 "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n", 1134 csk, skb, *bhs, hlen, dlen, 1135 ntohl(*((unsigned int *)(bhs + 16))), 1136 ntohl(*((unsigned int *)(bhs + 24)))); 1137 1138 } else { 1139 struct sk_buff *lskb = csk->skb_ulp_lhdr; 1140 1141 cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA); 1142 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, 1143 "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n", 1144 csk, csk->state, csk->flags, skb, lskb); 1145 } 1146 1147 __skb_queue_tail(&csk->receive_queue, skb); 1148 spin_unlock_bh(&csk->lock); 1149 return; 1150 1151 abort_conn: 1152 send_abort_req(csk); 1153 discard: 1154 spin_unlock_bh(&csk->lock); 1155 rel_skb: 1156 __kfree_skb(skb); 1157 } 1158 1159 static void do_rx_data_ddp(struct cxgbi_device *cdev, 1160 struct sk_buff *skb) 1161 { 1162 struct cxgbi_sock *csk; 1163 struct sk_buff *lskb; 1164 struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data; 1165 unsigned int tid = GET_TID(rpl); 1166 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 1167 struct tid_info *t = lldi->tids; 1168 unsigned int status = ntohl(rpl->ddpvld); 1169 1170 csk = lookup_tid(t, tid); 1171 if (unlikely(!csk)) { 1172 pr_err("can't find connection for tid %u.\n", tid); 1173 goto rel_skb; 1174 } 1175 1176 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, 1177 "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n", 1178 csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr); 1179 1180 spin_lock_bh(&csk->lock); 1181 1182 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { 1183 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 1184 "csk 0x%p,%u,0x%lx,%u, bad state.\n", 1185 csk, csk->state, csk->flags, csk->tid); 1186 if (csk->state != CTP_ABORTING) 1187 goto abort_conn; 1188 else 1189 goto discard; 1190 } 1191 1192 if (!csk->skb_ulp_lhdr) { 1193 pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid); 1194 goto abort_conn; 1195 } 1196 1197 lskb = csk->skb_ulp_lhdr; 1198 csk->skb_ulp_lhdr = NULL; 1199 1200 cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc); 1201 1202 if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb)) 1203 pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n", 1204 csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb)); 1205 1206 if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) { 1207 pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n", 1208 csk, lskb, status, cxgbi_skcb_flags(lskb)); 1209 cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR); 1210 } 1211 if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) { 1212 pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n", 1213 csk, lskb, status, cxgbi_skcb_flags(lskb)); 1214 cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR); 1215 } 1216 if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) { 1217 log_debug(1 << CXGBI_DBG_PDU_RX, 1218 "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n", 1219 csk, lskb, status); 1220 cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR); 1221 } 1222 if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) && 1223 !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) { 1224 log_debug(1 << CXGBI_DBG_PDU_RX, 1225 "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n", 1226 csk, lskb, status); 1227 cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD); 1228 } 1229 log_debug(1 << CXGBI_DBG_PDU_RX, 1230 "csk 0x%p, lskb 0x%p, f 0x%lx.\n", 1231 csk, lskb, cxgbi_skcb_flags(lskb)); 1232 1233 cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS); 1234 cxgbi_conn_pdu_ready(csk); 1235 spin_unlock_bh(&csk->lock); 1236 goto rel_skb; 1237 1238 abort_conn: 1239 send_abort_req(csk); 1240 discard: 1241 spin_unlock_bh(&csk->lock); 1242 rel_skb: 1243 __kfree_skb(skb); 1244 } 1245 1246 static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb) 1247 { 1248 struct cxgbi_sock *csk; 1249 struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data; 1250 unsigned int tid = GET_TID(rpl); 1251 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 1252 struct tid_info *t = lldi->tids; 1253 1254 csk = lookup_tid(t, tid); 1255 if (unlikely(!csk)) 1256 pr_err("can't find connection for tid %u.\n", tid); 1257 else { 1258 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 1259 "csk 0x%p,%u,0x%lx,%u.\n", 1260 csk, csk->state, csk->flags, csk->tid); 1261 cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una), 1262 rpl->seq_vld); 1263 } 1264 __kfree_skb(skb); 1265 } 1266 1267 static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) 1268 { 1269 struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data; 1270 unsigned int tid = GET_TID(rpl); 1271 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 1272 struct tid_info *t = lldi->tids; 1273 struct cxgbi_sock *csk; 1274 1275 csk = lookup_tid(t, tid); 1276 if (!csk) 1277 pr_err("can't find conn. for tid %u.\n", tid); 1278 1279 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 1280 "csk 0x%p,%u,%lx,%u, status 0x%x.\n", 1281 csk, csk->state, csk->flags, csk->tid, rpl->status); 1282 1283 if (rpl->status != CPL_ERR_NONE) 1284 pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n", 1285 csk, tid, rpl->status); 1286 1287 __kfree_skb(skb); 1288 } 1289 1290 static int alloc_cpls(struct cxgbi_sock *csk) 1291 { 1292 csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), 1293 0, GFP_KERNEL); 1294 if (!csk->cpl_close) 1295 return -ENOMEM; 1296 1297 csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), 1298 0, GFP_KERNEL); 1299 if (!csk->cpl_abort_req) 1300 goto free_cpls; 1301 1302 csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), 1303 0, GFP_KERNEL); 1304 if (!csk->cpl_abort_rpl) 1305 goto free_cpls; 1306 return 0; 1307 1308 free_cpls: 1309 cxgbi_sock_free_cpl_skbs(csk); 1310 return -ENOMEM; 1311 } 1312 1313 static inline void l2t_put(struct cxgbi_sock *csk) 1314 { 1315 if (csk->l2t) { 1316 cxgb4_l2t_release(csk->l2t); 1317 csk->l2t = NULL; 1318 cxgbi_sock_put(csk); 1319 } 1320 } 1321 1322 static void release_offload_resources(struct cxgbi_sock *csk) 1323 { 1324 struct cxgb4_lld_info *lldi; 1325 1326 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 1327 "csk 0x%p,%u,0x%lx,%u.\n", 1328 csk, csk->state, csk->flags, csk->tid); 1329 1330 cxgbi_sock_free_cpl_skbs(csk); 1331 if (csk->wr_cred != csk->wr_max_cred) { 1332 cxgbi_sock_purge_wr_queue(csk); 1333 cxgbi_sock_reset_wr_list(csk); 1334 } 1335 1336 l2t_put(csk); 1337 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) 1338 free_atid(csk); 1339 else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) { 1340 lldi = cxgbi_cdev_priv(csk->cdev); 1341 cxgb4_remove_tid(lldi->tids, 0, csk->tid); 1342 cxgbi_sock_clear_flag(csk, CTPF_HAS_TID); 1343 cxgbi_sock_put(csk); 1344 } 1345 csk->dst = NULL; 1346 csk->cdev = NULL; 1347 } 1348 1349 static int init_act_open(struct cxgbi_sock *csk) 1350 { 1351 struct cxgbi_device *cdev = csk->cdev; 1352 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 1353 struct net_device *ndev = cdev->ports[csk->port_id]; 1354 struct sk_buff *skb = NULL; 1355 struct neighbour *n = NULL; 1356 void *daddr; 1357 unsigned int step; 1358 unsigned int size, size6; 1359 int t4 = is_t4(lldi->adapter_type); 1360 1361 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 1362 "csk 0x%p,%u,0x%lx,%u.\n", 1363 csk, csk->state, csk->flags, csk->tid); 1364 1365 if (csk->csk_family == AF_INET) 1366 daddr = &csk->daddr.sin_addr.s_addr; 1367 #if IS_ENABLED(CONFIG_IPV6) 1368 else if (csk->csk_family == AF_INET6) 1369 daddr = &csk->daddr6.sin6_addr; 1370 #endif 1371 else { 1372 pr_err("address family 0x%x not supported\n", csk->csk_family); 1373 goto rel_resource; 1374 } 1375 1376 n = dst_neigh_lookup(csk->dst, daddr); 1377 1378 if (!n) { 1379 pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name); 1380 goto rel_resource; 1381 } 1382 1383 csk->atid = cxgb4_alloc_atid(lldi->tids, csk); 1384 if (csk->atid < 0) { 1385 pr_err("%s, NO atid available.\n", ndev->name); 1386 return -EINVAL; 1387 } 1388 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); 1389 cxgbi_sock_get(csk); 1390 1391 csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0); 1392 if (!csk->l2t) { 1393 pr_err("%s, cannot alloc l2t.\n", ndev->name); 1394 goto rel_resource; 1395 } 1396 cxgbi_sock_get(csk); 1397 1398 if (t4) { 1399 size = sizeof(struct cpl_act_open_req); 1400 size6 = sizeof(struct cpl_act_open_req6); 1401 } else { 1402 size = sizeof(struct cpl_t5_act_open_req); 1403 size6 = sizeof(struct cpl_t5_act_open_req6); 1404 } 1405 1406 if (csk->csk_family == AF_INET) 1407 skb = alloc_wr(size, 0, GFP_NOIO); 1408 #if IS_ENABLED(CONFIG_IPV6) 1409 else 1410 skb = alloc_wr(size6, 0, GFP_NOIO); 1411 #endif 1412 1413 if (!skb) 1414 goto rel_resource; 1415 skb->sk = (struct sock *)csk; 1416 t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure); 1417 1418 if (!csk->mtu) 1419 csk->mtu = dst_mtu(csk->dst); 1420 cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx); 1421 csk->tx_chan = cxgb4_port_chan(ndev); 1422 /* SMT two entries per row */ 1423 csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1; 1424 step = lldi->ntxq / lldi->nchan; 1425 csk->txq_idx = cxgb4_port_idx(ndev) * step; 1426 step = lldi->nrxq / lldi->nchan; 1427 csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step]; 1428 csk->wr_cred = lldi->wr_cred - 1429 DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16); 1430 csk->wr_max_cred = csk->wr_cred; 1431 csk->wr_una_cred = 0; 1432 cxgbi_sock_reset_wr_list(csk); 1433 csk->err = 0; 1434 1435 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n", 1436 (&csk->saddr), (&csk->daddr), csk, csk->state, 1437 csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid, 1438 csk->mtu, csk->mss_idx, csk->smac_idx); 1439 1440 /* must wait for either a act_open_rpl or act_open_establish */ 1441 try_module_get(THIS_MODULE); 1442 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); 1443 if (csk->csk_family == AF_INET) 1444 send_act_open_req(csk, skb, csk->l2t); 1445 #if IS_ENABLED(CONFIG_IPV6) 1446 else 1447 send_act_open_req6(csk, skb, csk->l2t); 1448 #endif 1449 neigh_release(n); 1450 1451 return 0; 1452 1453 rel_resource: 1454 if (n) 1455 neigh_release(n); 1456 if (skb) 1457 __kfree_skb(skb); 1458 return -EINVAL; 1459 } 1460 1461 cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = { 1462 [CPL_ACT_ESTABLISH] = do_act_establish, 1463 [CPL_ACT_OPEN_RPL] = do_act_open_rpl, 1464 [CPL_PEER_CLOSE] = do_peer_close, 1465 [CPL_ABORT_REQ_RSS] = do_abort_req_rss, 1466 [CPL_ABORT_RPL_RSS] = do_abort_rpl_rss, 1467 [CPL_CLOSE_CON_RPL] = do_close_con_rpl, 1468 [CPL_FW4_ACK] = do_fw4_ack, 1469 [CPL_ISCSI_HDR] = do_rx_iscsi_hdr, 1470 [CPL_ISCSI_DATA] = do_rx_iscsi_hdr, 1471 [CPL_SET_TCB_RPL] = do_set_tcb_rpl, 1472 [CPL_RX_DATA_DDP] = do_rx_data_ddp, 1473 [CPL_RX_ISCSI_DDP] = do_rx_data_ddp, 1474 [CPL_RX_DATA] = do_rx_data, 1475 }; 1476 1477 int cxgb4i_ofld_init(struct cxgbi_device *cdev) 1478 { 1479 int rc; 1480 1481 if (cxgb4i_max_connect > CXGB4I_MAX_CONN) 1482 cxgb4i_max_connect = CXGB4I_MAX_CONN; 1483 1484 rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base, 1485 cxgb4i_max_connect); 1486 if (rc < 0) 1487 return rc; 1488 1489 cdev->csk_release_offload_resources = release_offload_resources; 1490 cdev->csk_push_tx_frames = push_tx_frames; 1491 cdev->csk_send_abort_req = send_abort_req; 1492 cdev->csk_send_close_req = send_close_req; 1493 cdev->csk_send_rx_credits = send_rx_credits; 1494 cdev->csk_alloc_cpls = alloc_cpls; 1495 cdev->csk_init_act_open = init_act_open; 1496 1497 pr_info("cdev 0x%p, offload up, added.\n", cdev); 1498 return 0; 1499 } 1500 1501 /* 1502 * functions to program the pagepod in h/w 1503 */ 1504 #define ULPMEM_IDATA_MAX_NPPODS 4 /* 256/PPOD_SIZE */ 1505 static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi, 1506 struct ulp_mem_io *req, 1507 unsigned int wr_len, unsigned int dlen, 1508 unsigned int pm_addr) 1509 { 1510 struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1); 1511 1512 INIT_ULPTX_WR(req, wr_len, 0, 0); 1513 if (is_t4(lldi->adapter_type)) 1514 req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) | 1515 (ULP_MEMIO_ORDER_F)); 1516 else 1517 req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) | 1518 (T5_ULP_MEMIO_IMM_F)); 1519 req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5)); 1520 req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5)); 1521 req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16)); 1522 1523 idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM)); 1524 idata->len = htonl(dlen); 1525 } 1526 1527 static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id, 1528 struct cxgbi_pagepod_hdr *hdr, unsigned int idx, 1529 unsigned int npods, 1530 struct cxgbi_gather_list *gl, 1531 unsigned int gl_pidx) 1532 { 1533 struct cxgbi_ddp_info *ddp = cdev->ddp; 1534 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 1535 struct sk_buff *skb; 1536 struct ulp_mem_io *req; 1537 struct ulptx_idata *idata; 1538 struct cxgbi_pagepod *ppod; 1539 unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit; 1540 unsigned int dlen = PPOD_SIZE * npods; 1541 unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) + 1542 sizeof(struct ulptx_idata) + dlen, 16); 1543 unsigned int i; 1544 1545 skb = alloc_wr(wr_len, 0, GFP_ATOMIC); 1546 if (!skb) { 1547 pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n", 1548 cdev, idx, npods); 1549 return -ENOMEM; 1550 } 1551 req = (struct ulp_mem_io *)skb->head; 1552 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); 1553 1554 ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr); 1555 idata = (struct ulptx_idata *)(req + 1); 1556 ppod = (struct cxgbi_pagepod *)(idata + 1); 1557 1558 for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) { 1559 if (!hdr && !gl) 1560 cxgbi_ddp_ppod_clear(ppod); 1561 else 1562 cxgbi_ddp_ppod_set(ppod, hdr, gl, gl_pidx); 1563 } 1564 1565 cxgb4_ofld_send(cdev->ports[port_id], skb); 1566 return 0; 1567 } 1568 1569 static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr, 1570 unsigned int idx, unsigned int npods, 1571 struct cxgbi_gather_list *gl) 1572 { 1573 unsigned int i, cnt; 1574 int err = 0; 1575 1576 for (i = 0; i < npods; i += cnt, idx += cnt) { 1577 cnt = npods - i; 1578 if (cnt > ULPMEM_IDATA_MAX_NPPODS) 1579 cnt = ULPMEM_IDATA_MAX_NPPODS; 1580 err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr, 1581 idx, cnt, gl, 4 * i); 1582 if (err < 0) 1583 break; 1584 } 1585 return err; 1586 } 1587 1588 static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag, 1589 unsigned int idx, unsigned int npods) 1590 { 1591 unsigned int i, cnt; 1592 int err; 1593 1594 for (i = 0; i < npods; i += cnt, idx += cnt) { 1595 cnt = npods - i; 1596 if (cnt > ULPMEM_IDATA_MAX_NPPODS) 1597 cnt = ULPMEM_IDATA_MAX_NPPODS; 1598 err = ddp_ppod_write_idata(chba->cdev, chba->port_id, NULL, 1599 idx, cnt, NULL, 0); 1600 if (err < 0) 1601 break; 1602 } 1603 } 1604 1605 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, 1606 int pg_idx, bool reply) 1607 { 1608 struct sk_buff *skb; 1609 struct cpl_set_tcb_field *req; 1610 1611 if (!pg_idx || pg_idx >= DDP_PGIDX_MAX) 1612 return 0; 1613 1614 skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL); 1615 if (!skb) 1616 return -ENOMEM; 1617 1618 /* set up ulp page size */ 1619 req = (struct cpl_set_tcb_field *)skb->head; 1620 INIT_TP_WR(req, csk->tid); 1621 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); 1622 req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid)); 1623 req->word_cookie = htons(0); 1624 req->mask = cpu_to_be64(0x3 << 8); 1625 req->val = cpu_to_be64(pg_idx << 8); 1626 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id); 1627 1628 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 1629 "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx); 1630 1631 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); 1632 return 0; 1633 } 1634 1635 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, 1636 int hcrc, int dcrc, int reply) 1637 { 1638 struct sk_buff *skb; 1639 struct cpl_set_tcb_field *req; 1640 1641 if (!hcrc && !dcrc) 1642 return 0; 1643 1644 skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL); 1645 if (!skb) 1646 return -ENOMEM; 1647 1648 csk->hcrc_len = (hcrc ? 4 : 0); 1649 csk->dcrc_len = (dcrc ? 4 : 0); 1650 /* set up ulp submode */ 1651 req = (struct cpl_set_tcb_field *)skb->head; 1652 INIT_TP_WR(req, tid); 1653 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); 1654 req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid)); 1655 req->word_cookie = htons(0); 1656 req->mask = cpu_to_be64(0x3 << 4); 1657 req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) | 1658 (dcrc ? ULP_CRC_DATA : 0)) << 4); 1659 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id); 1660 1661 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 1662 "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc); 1663 1664 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); 1665 return 0; 1666 } 1667 1668 static int cxgb4i_ddp_init(struct cxgbi_device *cdev) 1669 { 1670 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 1671 struct cxgbi_ddp_info *ddp = cdev->ddp; 1672 unsigned int tagmask, pgsz_factor[4]; 1673 int err; 1674 1675 if (ddp) { 1676 kref_get(&ddp->refcnt); 1677 pr_warn("cdev 0x%p, ddp 0x%p already set up.\n", 1678 cdev, cdev->ddp); 1679 return -EALREADY; 1680 } 1681 1682 err = cxgbi_ddp_init(cdev, lldi->vr->iscsi.start, 1683 lldi->vr->iscsi.start + lldi->vr->iscsi.size - 1, 1684 lldi->iscsi_iolen, lldi->iscsi_iolen); 1685 if (err < 0) 1686 return err; 1687 1688 ddp = cdev->ddp; 1689 1690 tagmask = ddp->idx_mask << PPOD_IDX_SHIFT; 1691 cxgbi_ddp_page_size_factor(pgsz_factor); 1692 cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor); 1693 1694 cdev->csk_ddp_setup_digest = ddp_setup_conn_digest; 1695 cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx; 1696 cdev->csk_ddp_set = ddp_set_map; 1697 cdev->csk_ddp_clear = ddp_clear_map; 1698 1699 pr_info("cxgb4i 0x%p tag: sw %u, rsvd %u,%u, mask 0x%x.\n", 1700 cdev, cdev->tag_format.sw_bits, cdev->tag_format.rsvd_bits, 1701 cdev->tag_format.rsvd_shift, cdev->tag_format.rsvd_mask); 1702 pr_info("cxgb4i 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, " 1703 " %u/%u.\n", 1704 cdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask, 1705 ddp->rsvd_tag_mask, ddp->max_txsz, lldi->iscsi_iolen, 1706 ddp->max_rxsz, lldi->iscsi_iolen); 1707 pr_info("cxgb4i 0x%p max payload size: %u/%u, %u/%u.\n", 1708 cdev, cdev->tx_max_size, ddp->max_txsz, cdev->rx_max_size, 1709 ddp->max_rxsz); 1710 return 0; 1711 } 1712 1713 static void *t4_uld_add(const struct cxgb4_lld_info *lldi) 1714 { 1715 struct cxgbi_device *cdev; 1716 struct port_info *pi; 1717 int i, rc; 1718 1719 cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports); 1720 if (!cdev) { 1721 pr_info("t4 device 0x%p, register failed.\n", lldi); 1722 return NULL; 1723 } 1724 pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n", 1725 cdev, lldi->adapter_type, lldi->nports, 1726 lldi->ports[0]->name, lldi->nchan, lldi->ntxq, 1727 lldi->nrxq, lldi->wr_cred); 1728 for (i = 0; i < lldi->nrxq; i++) 1729 log_debug(1 << CXGBI_DBG_DEV, 1730 "t4 0x%p, rxq id #%d: %u.\n", 1731 cdev, i, lldi->rxq_ids[i]); 1732 1733 memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi)); 1734 cdev->flags = CXGBI_FLAG_DEV_T4; 1735 cdev->pdev = lldi->pdev; 1736 cdev->ports = lldi->ports; 1737 cdev->nports = lldi->nports; 1738 cdev->mtus = lldi->mtus; 1739 cdev->nmtus = NMTUS; 1740 cdev->snd_win = cxgb4i_snd_win; 1741 cdev->rcv_win = cxgb4i_rcv_win; 1742 cdev->rx_credit_thres = cxgb4i_rx_credit_thres; 1743 cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN; 1744 cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr); 1745 cdev->itp = &cxgb4i_iscsi_transport; 1746 1747 cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0])) 1748 << FW_VIID_PFN_S; 1749 pr_info("cdev 0x%p,%s, pfvf %u.\n", 1750 cdev, lldi->ports[0]->name, cdev->pfvf); 1751 1752 rc = cxgb4i_ddp_init(cdev); 1753 if (rc) { 1754 pr_info("t4 0x%p ddp init failed.\n", cdev); 1755 goto err_out; 1756 } 1757 rc = cxgb4i_ofld_init(cdev); 1758 if (rc) { 1759 pr_info("t4 0x%p ofld init failed.\n", cdev); 1760 goto err_out; 1761 } 1762 1763 rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN, 1764 &cxgb4i_host_template, cxgb4i_stt); 1765 if (rc) 1766 goto err_out; 1767 1768 for (i = 0; i < cdev->nports; i++) { 1769 pi = netdev_priv(lldi->ports[i]); 1770 cdev->hbas[i]->port_id = pi->port_id; 1771 } 1772 return cdev; 1773 1774 err_out: 1775 cxgbi_device_unregister(cdev); 1776 return ERR_PTR(-ENOMEM); 1777 } 1778 1779 #define RX_PULL_LEN 128 1780 static int t4_uld_rx_handler(void *handle, const __be64 *rsp, 1781 const struct pkt_gl *pgl) 1782 { 1783 const struct cpl_act_establish *rpl; 1784 struct sk_buff *skb; 1785 unsigned int opc; 1786 struct cxgbi_device *cdev = handle; 1787 1788 if (pgl == NULL) { 1789 unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8; 1790 1791 skb = alloc_wr(len, 0, GFP_ATOMIC); 1792 if (!skb) 1793 goto nomem; 1794 skb_copy_to_linear_data(skb, &rsp[1], len); 1795 } else { 1796 if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) { 1797 pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n", 1798 pgl->va, be64_to_cpu(*rsp), 1799 be64_to_cpu(*(u64 *)pgl->va), 1800 pgl->tot_len); 1801 return 0; 1802 } 1803 skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN); 1804 if (unlikely(!skb)) 1805 goto nomem; 1806 } 1807 1808 rpl = (struct cpl_act_establish *)skb->data; 1809 opc = rpl->ot.opcode; 1810 log_debug(1 << CXGBI_DBG_TOE, 1811 "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n", 1812 cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb); 1813 if (cxgb4i_cplhandlers[opc]) 1814 cxgb4i_cplhandlers[opc](cdev, skb); 1815 else { 1816 pr_err("No handler for opcode 0x%x.\n", opc); 1817 __kfree_skb(skb); 1818 } 1819 return 0; 1820 nomem: 1821 log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n"); 1822 return 1; 1823 } 1824 1825 static int t4_uld_state_change(void *handle, enum cxgb4_state state) 1826 { 1827 struct cxgbi_device *cdev = handle; 1828 1829 switch (state) { 1830 case CXGB4_STATE_UP: 1831 pr_info("cdev 0x%p, UP.\n", cdev); 1832 break; 1833 case CXGB4_STATE_START_RECOVERY: 1834 pr_info("cdev 0x%p, RECOVERY.\n", cdev); 1835 /* close all connections */ 1836 break; 1837 case CXGB4_STATE_DOWN: 1838 pr_info("cdev 0x%p, DOWN.\n", cdev); 1839 break; 1840 case CXGB4_STATE_DETACH: 1841 pr_info("cdev 0x%p, DETACH.\n", cdev); 1842 cxgbi_device_unregister(cdev); 1843 break; 1844 default: 1845 pr_info("cdev 0x%p, unknown state %d.\n", cdev, state); 1846 break; 1847 } 1848 return 0; 1849 } 1850 1851 static int __init cxgb4i_init_module(void) 1852 { 1853 int rc; 1854 1855 printk(KERN_INFO "%s", version); 1856 1857 rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt); 1858 if (rc < 0) 1859 return rc; 1860 cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info); 1861 1862 return 0; 1863 } 1864 1865 static void __exit cxgb4i_exit_module(void) 1866 { 1867 cxgb4_unregister_uld(CXGB4_ULD_ISCSI); 1868 cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4); 1869 cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt); 1870 } 1871 1872 module_init(cxgb4i_init_module); 1873 module_exit(cxgb4i_exit_module); 1874