/*
 * cxgb4i.c: Chelsio T4 iSCSI driver.
 *
 * Copyright (c) 2010 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by:	Karen Xie (kxie@chelsio.com)
 *		Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4fw_api.h"
#include "l2t.h"
#include "cxgb4i.h"

static unsigned int dbg_level;

#include "../libcxgbi.h"

#define	DRV_MODULE_NAME		"cxgb4i"
#define DRV_MODULE_DESC		"Chelsio T4/T5 iSCSI Driver"
#define	DRV_MODULE_VERSION	"0.9.4"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");

static int cxgb4i_rcv_win = 256 * 1024;
module_param(cxgb4i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");

static int cxgb4i_snd_win = 128 * 1024;
module_param(cxgb4i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");

static int cxgb4i_rx_credit_thres = 10 * 1024;
module_param(cxgb4i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
		 "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb4i_max_connect = (8 * 1024);
module_param(cxgb4i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");

static unsigned short cxgb4i_sport_base = 20000;
module_param(cxgb4i_sport_base, ushort, 0644);
MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");

typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);

static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);
static inline int send_tx_flowc_wr(struct cxgbi_sock *);

static const struct cxgb4_uld_info cxgb4i_uld_info = {
	.name = DRV_MODULE_NAME,
	.add = t4_uld_add,
	.rx_handler = t4_uld_rx_handler,
	.state_change = t4_uld_state_change,
};

static struct scsi_host_template cxgb4i_host_template = {
	.module		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.proc_name	= DRV_MODULE_NAME,
	.can_queue	= CXGB4I_SCSI_HOST_QDEPTH,
	.queuecommand	= iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.sg_tablesize	= SG_ALL,
	.max_sectors	= 0xFFFF,
	.cmd_per_lun	= ISCSI_DEF_CMD_PER_LUN,
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc	= iscsi_target_alloc,
	.use_clustering	= DISABLE_CLUSTERING,
	.this_id	= -1,
	.track_queue_depth = 1,
};
static struct iscsi_transport cxgb4i_iscsi_transport = {
	.owner		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.caps		= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
				CAP_DATADGST | CAP_DIGEST_OFFLOAD |
				CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
	.attr_is_visible	= cxgbi_attr_is_visible,
	.get_host_param	= cxgbi_get_host_param,
	.set_host_param	= cxgbi_set_host_param,
	/* session management */
	.create_session	= cxgbi_create_session,
	.destroy_session	= cxgbi_destroy_session,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn	= cxgbi_create_conn,
	.bind_conn	= cxgbi_bind_conn,
	.destroy_conn	= iscsi_tcp_conn_teardown,
	.start_conn	= iscsi_conn_start,
	.stop_conn	= iscsi_conn_stop,
	.get_conn_param	= iscsi_conn_get_param,
	.set_param	= cxgbi_set_conn_param,
	.get_stats	= cxgbi_get_conn_stats,
	/* pdu xmit req from user space */
	.send_pdu	= iscsi_conn_send_pdu,
	/* task */
	.init_task	= iscsi_tcp_task_init,
	.xmit_task	= iscsi_tcp_task_xmit,
	.cleanup_task	= cxgbi_cleanup_task,
	/* pdu */
	.alloc_pdu	= cxgbi_conn_alloc_pdu,
	.init_pdu	= cxgbi_conn_init_pdu,
	.xmit_pdu	= cxgbi_conn_xmit_pdu,
	.parse_pdu_itt	= cxgbi_parse_pdu_itt,
	/* TCP connect/disconnect */
	.get_ep_param	= cxgbi_get_ep_param,
	.ep_connect	= cxgbi_ep_connect,
	.ep_poll	= cxgbi_ep_poll,
	.ep_disconnect	= cxgbi_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

static struct scsi_transport_template *cxgb4i_stt;

/*
 * CPL (Chelsio Protocol Language) defines a message passing interface between
 * the host driver and the Chelsio ASIC.
 * The section below implements the CPLs related to iSCSI TCP connection
 * open/close/abort and data send/receive.
 */

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define RCV_BUFSIZ_MASK		0x3FFU
#define MAX_IMM_TX_PKT_LEN	128

static inline void set_queue(struct sk_buff *skb, unsigned int queue,
			     const struct cxgbi_sock *csk)
{
	skb->queue_mapping = queue;
}

static int push_tx_frames(struct cxgbi_sock *, int);
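/*
 * Note on immediate data: a work request whose total length (payload
 * plus, if still needed, the 16-byte fw_ofld_tx_data_wr header) fits in
 * MAX_IMM_TX_PKT_LEN (128) bytes is carried inline in the WR itself;
 * anything larger is handed to the hardware as a scatter/gather list.
 * E.g. a 48-byte BHS-only PDU plus the WR header totals 64 bytes and
 * goes out as immediate data.
 */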
/*
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data.  We currently use the same limit as for Ethernet packets.
 */
static inline bool is_ofld_imm(const struct sk_buff *skb)
{
	int len = skb->len;

	if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
		len += sizeof(struct fw_ofld_tx_data_wr);

	return len <= MAX_IMM_TX_PKT_LEN;
}

static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
			      struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int t4 = is_t4(lldi->adapter_type);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				(((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(csk->mss_idx) |
	       L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
	       TX_CHAN_V(csk->tx_chan) |
	       SMAC_SEL_V(csk->smac_idx) |
	       ULP_MODE_V(ULP_MODE_ISCSI) |
	       RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);
	opt2 = RX_CHANNEL_V(0) |
	       RSS_QUEUE_VALID_F |
	       RX_FC_DISABLE_F |
	       RSS_QUEUE_V(csk->rss_qid);

	if (is_t4(lldi->adapter_type)) {
		struct cpl_act_open_req *req =
				(struct cpl_act_open_req *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be32(cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t));
		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	} else {
		struct cpl_t5_act_open_req *req =
				(struct cpl_t5_act_open_req *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be64(FILTER_TUPLE_V(
					cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t)));
		opt2 |= T5_OPT_2_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
		       (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk,
		       csk->state, csk->flags, csk->atid, csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
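/*
 * The IPv6 variant below mirrors send_act_open_req(): same opt0/opt2
 * composition, but the 128-bit addresses are carried as two __be64
 * halves and, on T5, the n-tuple goes through FILTER_TUPLE_V().  Either
 * request is answered by CPL_ACT_ESTABLISH on success or
 * CPL_ACT_OPEN_RPL on failure (handled further below).
 */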
#if IS_ENABLED(CONFIG_IPV6)
static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
			       struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int t4 = is_t4(lldi->adapter_type);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				(((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(csk->mss_idx) |
	       L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
	       TX_CHAN_V(csk->tx_chan) |
	       SMAC_SEL_V(csk->smac_idx) |
	       ULP_MODE_V(ULP_MODE_ISCSI) |
	       RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);

	opt2 = RX_CHANNEL_V(0) |
	       RSS_QUEUE_VALID_F |
	       RX_FC_DISABLE_F |
	       RSS_QUEUE_V(csk->rss_qid);

	if (t4) {
		struct cpl_act_open_req6 *req =
			    (struct cpl_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;

		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
									8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
									8);

		req->opt0 = cpu_to_be64(opt0);

		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be32(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t));
	} else {
		struct cpl_t5_act_open_req6 *req =
				(struct cpl_t5_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;
		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
									8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
									8);
		req->opt0 = cpu_to_be64(opt0);

		opt2 |= T5_OPT_2_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
					  csk->cdev->ports[csk->port_id],
					  csk->l2t)));
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
		t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid,
		&csk->saddr6.sin6_addr, ntohs(csk->saddr6.sin6_port),
		&csk->daddr6.sin6_addr, ntohs(csk->daddr6.sin6_port),
		csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
#endif
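/*
 * Connection teardown: a graceful close sends CPL_CLOSE_CON_REQ on the
 * data queue (so it is ordered behind any pending payload), while an
 * abort sends CPL_ABORT_REQ and purges the write queue.  Both paths use
 * CPL skbs pre-allocated in alloc_cpls(), so teardown cannot fail on
 * memory allocation.
 */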
static void send_close_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = csk->tid;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, tid %u.\n",
		  csk, csk->state, csk->flags, csk->tid);
	csk->cpl_close = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	cxgbi_sock_skb_entail(csk, skb);
	if (csk->state >= CTP_ESTABLISHED)
		push_tx_frames(csk, 1);
}

static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
	struct cpl_abort_req *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, tid %u, abort.\n",
		  csk, csk->state, csk->flags, csk->tid);
	req = (struct cpl_abort_req *)skb->data;
	req->cmd = CPL_ABORT_NO_RST;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

static void send_abort_req(struct cxgbi_sock *csk)
{
	struct cpl_abort_req *req;
	struct sk_buff *skb = csk->cpl_abort_req;

	if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
		return;

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_state(csk, CTP_ABORTING);
	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
	cxgbi_sock_purge_write_queue(csk);

	csk->cpl_abort_req = NULL;
	req = (struct cpl_abort_req *)skb->head;
	set_queue(skb, CPL_PRIORITY_DATA, csk);
	req->cmd = CPL_ABORT_SEND_RST;
	t4_set_arp_err_handler(skb, csk, abort_arp_failure);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
	req->rsvd0 = htonl(csk->snd_nxt);
	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
		  req->rsvd1);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
	struct sk_buff *skb = csk->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u, status %d.\n",
		  csk, csk->state, csk->flags, csk->tid, rst_status);

	csk->cpl_abort_rpl = NULL;
	set_queue(skb, CPL_PRIORITY_DATA, csk);
	INIT_TP_WR(rpl, csk->tid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
	rpl->cmd = rst_status;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}
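/*
 * RX flow control: opt0 advertises the receive buffer in 1KB units
 * (RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10), capped at RCV_BUFSIZ_MASK = 1023),
 * so the default 256KB window fits directly.  As received bytes are
 * consumed by the ULP, the core library hands credit back to the
 * hardware through send_rx_credits() below, typically once the backlog
 * crosses cxgb4i_rx_credit_thres (10KB by default).
 */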
/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message.  Returns the number
 * of credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
		  csk, csk->state, csk->flags, csk->tid, credits);

	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
	if (!skb) {
		pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
		return 0;
	}
	req = (struct cpl_rx_data_ack *)skb->head;

	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
				      csk->tid));
	req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
				       | RX_FORCE_ACK_F);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return credits;
}

/*
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

/*
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);
	flits = skb_transport_offset(skb) / 8;
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}
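/*
 * Worked example (a flit is 8 bytes): the ULP_TX SGL spends 2 flits on
 * the command word and first entry, then roughly 3 flits per further
 * pair of entries, so sgl_len(1) = 2, sgl_len(2) = 4, sgl_len(3) = 5.
 * A non-immediate skb with 3 frags and no linear tail thus costs its
 * transport-header offset / 8 plus 5 flits, which push_tx_frames()
 * converts into 16-byte work-request credits.
 */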
#define FLOWC_WR_NPARAMS_MIN	9
static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
{
	int nparams, flowclen16, flowclen;

	nparams = FLOWC_WR_NPARAMS_MIN;
	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;
	/*
	 * Return the number of 16-byte credits used by the FlowC request.
	 * Pass back the nparams and actual FlowC length if requested.
	 */
	if (nparamsp)
		*nparamsp = nparams;
	if (flowclenp)
		*flowclenp = flowclen;

	return flowclen16;
}

static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;
	struct fw_flowc_wr *flowc;
	int nparams, flowclen16, flowclen;

	flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
	skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
	flowc = (struct fw_flowc_wr *)skb->head;
	flowc->op_to_nparams =
		htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 =
		htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htonl(csk->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htonl(csk->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htonl(csk->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = htonl(csk->snd_nxt);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = htonl(csk->rcv_nxt);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = htonl(cxgb4i_snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = htonl(csk->advmss);
	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
	flowc->mnemval[8].val = htonl(16384);

	set_queue(skb, CPL_PRIORITY_DATA, csk);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
		  csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
		  csk->snd_nxt, csk->rcv_nxt, cxgb4i_snd_win,
		  csk->advmss);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);

	return flowclen16;
}
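/*
 * Worked example: struct fw_flowc_wr is 8 bytes of header plus 8 bytes
 * per mnemonic/value pair, so 9 parameters give
 * offsetof(struct fw_flowc_wr, mnemval[9]) = 80 bytes, i.e.
 * DIV_ROUND_UP(80, 16) = 5 WR credits.  push_tx_frames() subtracts this
 * from wr_cred when the FlowC WR precedes the first payload.
 */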
static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
				   int dlen, int len, u32 credits, int compl)
{
	struct fw_ofld_tx_data_wr *req;
	unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
	unsigned int wr_ulp_mode = 0, val;
	bool imm = is_ofld_imm(skb);

	req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));

	if (imm) {
		req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
					   FW_WR_COMPL_F |
					   FW_WR_IMMDLEN_V(dlen));
		req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
					  FW_WR_LEN16_V(credits));
	} else {
		req->op_to_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
					FW_WR_COMPL_F |
					FW_WR_IMMDLEN_V(0));
		req->flowid_len16 =
			cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
					FW_WR_LEN16_V(credits));
	}
	if (submode)
		wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) |
				FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
	val = skb_peek(&csk->write_queue) ? 0 : 1;
	req->tunnel_to_proxy = htonl(wr_ulp_mode |
				     FW_OFLD_TX_DATA_WR_SHOVE_V(val));
	req->plen = htonl(len);
	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
}

static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);
}

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;

	if (unlikely(csk->state < CTP_ESTABLISHED ||
		     csk->state == CTP_CLOSE_WAIT_1 ||
		     csk->state >= CTP_ABORTING)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
			  1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		return 0;
	}

	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
		int dlen = skb->len;
		int len = skb->len;
		unsigned int credits_needed;
		int flowclen16 = 0;

		skb_reset_transport_header(skb);
		if (is_ofld_imm(skb))
			credits_needed = DIV_ROUND_UP(dlen, 16);
		else
			credits_needed = DIV_ROUND_UP(
						8 * calc_tx_flits_ofld(skb),
						16);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
			credits_needed += DIV_ROUND_UP(
					sizeof(struct fw_ofld_tx_data_wr),
					16);

		/*
		 * Assumes the initial credits is large enough to support
		 * fw_flowc_wr plus largest possible first payload
		 */
		if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
			flowclen16 = send_tx_flowc_wr(csk);
			csk->wr_cred -= flowclen16;
			csk->wr_una_cred += flowclen16;
			cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
		}

		if (csk->wr_cred < credits_needed) {
			log_debug(1 << CXGBI_DBG_PDU_TX,
				  "csk 0x%p, skb %u/%u, wr %d < %u.\n",
				  csk, skb->len, skb->data_len,
				  credits_needed, csk->wr_cred);
			break;
		}
		__skb_unlink(skb, &csk->write_queue);
		set_queue(skb, CPL_PRIORITY_DATA, csk);
		skb->csum = credits_needed + flowclen16;
		csk->wr_cred -= credits_needed;
		csk->wr_una_cred += credits_needed;
		cxgbi_sock_enqueue_wr(csk, skb);

		log_debug(1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
			  csk, skb->len, skb->data_len, credits_needed,
			  csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
			len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
			make_tx_data_wr(csk, skb, dlen, len, credits_needed,
					req_completion);
			csk->snd_nxt += len;
			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
		}
		total_size += skb->truesize;
		t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
			  csk, csk->state, csk->flags, csk->tid, skb, len);

		cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
	}
	return total_size;
}

static inline void free_atid(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);

	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
		cxgb4_free_atid(lldi->tids, csk->atid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
		cxgbi_sock_put(csk);
	}
}
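/*
 * Active-open bookkeeping: while a connect is in flight the csk is
 * indexed by its atid; once CPL_ACT_ESTABLISH arrives, the hardware tid
 * is inserted (cxgb4_insert_tid()), the atid is released, and all later
 * CPLs for the connection are looked up by tid.
 */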
static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
	unsigned short tcp_opt = ntohs(req->tcp_opt);
	unsigned int tid = GET_TID(req);
	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 rcv_isn = be32_to_cpu(req->rcv_isn);

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
		goto rel_skb;
	}

	if (csk->atid != atid) {
		pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
		       atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
		goto rel_skb;
	}

	pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       atid, tid, csk, csk->state, csk->flags, rcv_isn);

	module_put(THIS_MODULE);

	cxgbi_sock_get(csk);
	csk->tid = tid;
	cxgb4_insert_tid(lldi->tids, csk, tid);
	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);

	free_atid(csk);

	spin_lock_bh(&csk->lock);
	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
			csk, csk->state, csk->flags, csk->tid);

	if (csk->retry_timer.function) {
		del_timer(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}

	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
	/*
	 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
	 * pass through opt0.
	 */
	if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
		csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);

	csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40;
	if (GET_TCPOPT_TSTAMP(tcp_opt))
		csk->advmss -= 12;
	if (csk->advmss < 128)
		csk->advmss = 128;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, mss_idx %u, advmss %u.\n",
		  csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss);

	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
		send_abort_req(csk);
	else {
		if (skb_queue_len(&csk->write_queue))
			push_tx_frames(csk, 0);
		cxgbi_conn_tx_open(csk);
	}
	spin_unlock_bh(&csk->lock);

rel_skb:
	__kfree_skb(skb);
}

static int act_open_rpl_status_to_errno(int status)
{
	switch (status) {
	case CPL_ERR_CONN_RESET:
		return -ECONNREFUSED;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}
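/*
 * If the adapter reports CPL_ERR_CONN_EXIST, the four-tuple is still
 * known to the hardware (e.g. a previous connection lingering in
 * TIME_WAIT), so do_act_open_rpl() below arms this timer to re-issue
 * the active open after half a second rather than failing the connect
 * outright.
 */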
static void csk_act_open_retry_timer(unsigned long data)
{
	struct sk_buff *skb = NULL;
	struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
				   struct l2t_entry *);
	int t4 = is_t4(lldi->adapter_type), size, size6;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (t4) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	}

	if (csk->csk_family == AF_INET) {
		send_act_open_func = send_act_open_req;
		skb = alloc_wr(size, 0, GFP_ATOMIC);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		send_act_open_func = send_act_open_req6;
		skb = alloc_wr(size6, 0, GFP_ATOMIC);
#endif
	}

	if (!skb)
		cxgbi_sock_fail_act_open(csk, -ENOMEM);
	else {
		skb->sk = (struct sock *)csk;
		t4_set_arp_err_handler(skb, csk,
				       cxgbi_sock_act_open_req_arp_failure);
		send_act_open_func(csk, skb, csk->l2t);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}

static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	unsigned int atid =
		GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status)));
	unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
		goto rel_skb;
	}

	pr_info_ipaddr("tid %u/%u, status %u.\n"
		       "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
		       atid, tid, status, csk, csk->state, csk->flags);

	if (status == CPL_ERR_RTX_NEG_ADVICE)
		goto rel_skb;

	module_put(THIS_MODULE);

	if (status && status != CPL_ERR_TCAM_FULL &&
	    status != CPL_ERR_CONN_EXIST &&
	    status != CPL_ERR_ARP_MISS)
		cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl));

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (status == CPL_ERR_CONN_EXIST &&
	    csk->retry_timer.function != csk_act_open_retry_timer) {
		csk->retry_timer.function = csk_act_open_retry_timer;
		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
	} else
		cxgbi_sock_fail_act_open(csk,
					 act_open_rpl_status_to_errno(status));

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_peer_close(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
rel_skb:
	__kfree_skb(skb);
}
static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
				 int *need_rst)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN: /* fall through */
	case CPL_ERR_CONN_RESET:
		return csk->state > CTP_ESTABLISHED ?
			-EPIPE : -ECONNRESET;
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return -ETIMEDOUT;
	default:
		return -EIO;
	}
}

static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	int rst_status = CPL_ABORT_NO_RST;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid, req->status);

	if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
	    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
		goto rel_skb;

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
	cxgbi_sock_set_state(csk, CTP_ABORTING);

	send_abort_rpl(csk, rst_status);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk)
		goto rel_skb;

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
		       (&csk->saddr), (&csk->daddr), csk,
		       csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status == CPL_ERR_ABORT_FAILED)
		goto rel_skb;

	cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
	__kfree_skb(skb);
}
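/*
 * RX PDU reassembly: the first CPL_ISCSI_HDR of a PDU carries the BHS
 * and becomes skb_ulp_lhdr; any CPL_ISCSI_DATA skbs that follow are
 * queued behind it, and CPL_RX_DATA_DDP/CPL_RX_ISCSI_DDP finally
 * delivers the digest/DDP status.  The PDU advances rcv_nxt in 4-byte
 * padded units, e.g. a 48-byte BHS with 100 bytes of data gives
 * (48 + 100 + 3) & ~0x3 = 148 bytes, plus the data-digest length when a
 * digest is negotiated.
 */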
static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, skb, skb->len,
		  pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr) {
		unsigned char *bhs;
		unsigned int hlen, dlen, plen;

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
			  csk, csk->state, csk->flags, csk->tid, skb);
		csk->skb_ulp_lhdr = skb;
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);

		if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
			pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
				csk->tid, cxgbi_skcb_tcp_seq(skb),
				csk->rcv_nxt);
			goto abort_conn;
		}

		bhs = skb->data;
		hlen = ntohs(cpl->len);
		dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;

		plen = ISCSI_PDU_LEN(pdu_len_ddp);
		if (is_t4(lldi->adapter_type))
			plen -= 40;

		if ((hlen + dlen) != plen) {
			pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
				"mismatch %u != %u + %u, seq 0x%x.\n",
				csk->tid, plen, hlen, dlen,
				cxgbi_skcb_tcp_seq(skb));
			goto abort_conn;
		}

		cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
		if (dlen)
			cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
		csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
			  csk, skb, *bhs, hlen, dlen,
			  ntohl(*((unsigned int *)(bhs + 16))),
			  ntohl(*((unsigned int *)(bhs + 24))));

	} else {
		struct sk_buff *lskb = csk->skb_ulp_lhdr;

		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
			  csk, csk->state, csk->flags, skb, lskb);
	}

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
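/*
 * The ddpvld word reported in CPL_RX_DATA_DDP is a bit vector: the HCRC
 * and DCRC bits mark digest failures, the PAD bit marks a padding
 * violation, and the DDP bit indicates the payload was placed directly
 * into the pre-posted buffer (so no data skb follows the header skb).
 */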
static void do_rx_data_ddp(struct cxgbi_device *cdev,
			   struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct sk_buff *lskb;
	struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	unsigned int status = ntohl(rpl->ddpvld);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
		  csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	if (!csk->skb_ulp_lhdr) {
		pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
		goto abort_conn;
	}

	lskb = csk->skb_ulp_lhdr;
	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);

	if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
		pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
			csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));

	if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
			csk, lskb, status, cxgbi_skcb_flags(lskb));
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
	}
	if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
			csk, lskb, status, cxgbi_skcb_flags(lskb));
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
	}
	if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
			  csk, lskb, status);
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
	}
	if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
	    !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
			  csk, lskb, status);
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
	}
	log_debug(1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
		  csk, lskb, cxgbi_skcb_flags(lskb));

	cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);
	goto rel_skb;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk))
		pr_err("can't find connection for tid %u.\n", tid);
	else {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u.\n",
			  csk, csk->state, csk->flags, csk->tid);
		cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
				      rpl->seq_vld);
	}
	__kfree_skb(skb);
}
static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct cxgbi_sock *csk;

	csk = lookup_tid(t, tid);
	if (!csk) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status != CPL_ERR_NONE)
		pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
		       csk, tid, rpl->status);

rel_skb:
	__kfree_skb(skb);
}

static int alloc_cpls(struct cxgbi_sock *csk)
{
	csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
				  0, GFP_KERNEL);
	if (!csk->cpl_close)
		return -ENOMEM;

	csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
				      0, GFP_KERNEL);
	if (!csk->cpl_abort_req)
		goto free_cpls;

	csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
				      0, GFP_KERNEL);
	if (!csk->cpl_abort_rpl)
		goto free_cpls;
	return 0;

free_cpls:
	cxgbi_sock_free_cpl_skbs(csk);
	return -ENOMEM;
}

static inline void l2t_put(struct cxgbi_sock *csk)
{
	if (csk->l2t) {
		cxgb4_l2t_release(csk->l2t);
		csk->l2t = NULL;
		cxgbi_sock_put(csk);
	}
}

static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_free_cpl_skbs(csk);
	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}

	l2t_put(csk);
	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		lldi = cxgbi_cdev_priv(csk->cdev);
		cxgb4_remove_tid(lldi->tids, 0, csk->tid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
	csk->cdev = NULL;
}
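/*
 * init_act_open() charges each connection an initial WR credit budget of
 * lldi->wr_cred minus the credits reserved for a final CPL_ABORT_REQ
 * (DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16)), so an abort can
 * always be issued even when the send window is exhausted.
 */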
static int init_act_open(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct sk_buff *skb = NULL;
	struct neighbour *n = NULL;
	void *daddr;
	unsigned int step;
	unsigned int size, size6;
	int t4 = is_t4(lldi->adapter_type);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	if (csk->csk_family == AF_INET)
		daddr = &csk->daddr.sin_addr.s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	else if (csk->csk_family == AF_INET6)
		daddr = &csk->daddr6.sin6_addr;
#endif
	else {
		pr_err("address family 0x%x not supported\n", csk->csk_family);
		goto rel_resource;
	}

	n = dst_neigh_lookup(csk->dst, daddr);
	if (!n) {
		pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
		goto rel_resource;
	}

	csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
	if (csk->atid < 0) {
		pr_err("%s, NO atid available.\n", ndev->name);
		goto rel_resource;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);

	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
	if (!csk->l2t) {
		pr_err("%s, cannot alloc l2t.\n", ndev->name);
		goto rel_resource;
	}
	cxgbi_sock_get(csk);

	if (t4) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	}

	if (csk->csk_family == AF_INET)
		skb = alloc_wr(size, 0, GFP_NOIO);
#if IS_ENABLED(CONFIG_IPV6)
	else
		skb = alloc_wr(size6, 0, GFP_NOIO);
#endif

	if (!skb)
		goto rel_resource;
	skb->sk = (struct sock *)csk;
	t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);

	if (!csk->mtu)
		csk->mtu = dst_mtu(csk->dst);
	cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
	csk->tx_chan = cxgb4_port_chan(ndev);
	/* SMT two entries per row */
	csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1;
	step = lldi->ntxq / lldi->nchan;
	csk->txq_idx = cxgb4_port_idx(ndev) * step;
	step = lldi->nrxq / lldi->nchan;
	csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
	csk->wr_cred = lldi->wr_cred -
		       DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
	csk->wr_max_cred = csk->wr_cred;
	csk->wr_una_cred = 0;
	cxgbi_sock_reset_wr_list(csk);
	csk->err = 0;

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
		       (&csk->saddr), (&csk->daddr), csk, csk->state,
		       csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
		       csk->mtu, csk->mss_idx, csk->smac_idx);

	/* must wait for either a act_open_rpl or act_open_establish */
	try_module_get(THIS_MODULE);
	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	if (csk->csk_family == AF_INET)
		send_act_open_req(csk, skb, csk->l2t);
#if IS_ENABLED(CONFIG_IPV6)
	else
		send_act_open_req6(csk, skb, csk->l2t);
#endif
	neigh_release(n);

	return 0;

rel_resource:
	if (n)
		neigh_release(n);
	if (skb)
		__kfree_skb(skb);
	return -EINVAL;
}

cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req_rss,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_FW4_ACK] = do_fw4_ack,
	[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
	[CPL_ISCSI_DATA] = do_rx_iscsi_hdr,
	[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
	[CPL_RX_DATA_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
};

int cxgb4i_ofld_init(struct cxgbi_device *cdev)
{
	int rc;

	if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
		cxgb4i_max_connect = CXGB4I_MAX_CONN;

	rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
					 cxgb4i_max_connect);
	if (rc < 0)
		return rc;

	cdev->csk_release_offload_resources = release_offload_resources;
	cdev->csk_push_tx_frames = push_tx_frames;
	cdev->csk_send_abort_req = send_abort_req;
	cdev->csk_send_close_req = send_close_req;
	cdev->csk_send_rx_credits = send_rx_credits;
	cdev->csk_alloc_cpls = alloc_cpls;
	cdev->csk_init_act_open = init_act_open;

	pr_info("cdev 0x%p, offload up, added.\n", cdev);
	return 0;
}
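/*
 * Pagepods are written to adapter memory with ULP_TX_MEM_WRITE work
 * requests carrying the pods as immediate data.  With PPOD_SIZE = 64,
 * ULPMEM_IDATA_MAX_NPPODS = 4 keeps each write at 256 bytes of pod data
 * (plus the ulp_mem_io and ulptx_idata headers, rounded up to 16 bytes).
 */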
/*
 * functions to program the pagepod in h/w
 */
#define ULPMEM_IDATA_MAX_NPPODS	4 /* 256/PPOD_SIZE */
static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi,
				      struct ulp_mem_io *req,
				      unsigned int wr_len, unsigned int dlen,
				      unsigned int pm_addr)
{
	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

	INIT_ULPTX_WR(req, wr_len, 0, 0);
	if (is_t4(lldi->adapter_type))
		req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
				 (ULP_MEMIO_ORDER_F));
	else
		req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
				 (T5_ULP_MEMIO_IMM_F));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);
}

static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
				struct cxgbi_pagepod_hdr *hdr, unsigned int idx,
				unsigned int npods,
				struct cxgbi_gather_list *gl,
				unsigned int gl_pidx)
{
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct cxgbi_pagepod *ppod;
	unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit;
	unsigned int dlen = PPOD_SIZE * npods;
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				      sizeof(struct ulptx_idata) + dlen, 16);
	unsigned int i;

	skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
	if (!skb) {
		pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n",
		       cdev, idx, npods);
		return -ENOMEM;
	}
	req = (struct ulp_mem_io *)skb->head;
	set_queue(skb, CPL_PRIORITY_CONTROL, NULL);

	ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr);
	idata = (struct ulptx_idata *)(req + 1);
	ppod = (struct cxgbi_pagepod *)(idata + 1);

	for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) {
		if (!hdr && !gl)
			cxgbi_ddp_ppod_clear(ppod);
		else
			cxgbi_ddp_ppod_set(ppod, hdr, gl, gl_pidx);
	}

	cxgb4_ofld_send(cdev->ports[port_id], skb);
	return 0;
}

static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
		       unsigned int idx, unsigned int npods,
		       struct cxgbi_gather_list *gl)
{
	unsigned int i, cnt;
	int err = 0;

	for (i = 0; i < npods; i += cnt, idx += cnt) {
		cnt = npods - i;
		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr,
					   idx, cnt, gl, 4 * i);
		if (err < 0)
			break;
	}
	return err;
}

static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
			  unsigned int idx, unsigned int npods)
{
	unsigned int i, cnt;
	int err;

	for (i = 0; i < npods; i += cnt, idx += cnt) {
		cnt = npods - i;
		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		err = ddp_ppod_write_idata(chba->cdev, chba->port_id, NULL,
					   idx, cnt, NULL, 0);
		if (err < 0)
			break;
	}
}
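/*
 * The two helpers below tune per-connection TCB fields through
 * CPL_SET_TCB_FIELD: bits 9:8 of the ULP word select the DDP page-size
 * index, and bits 5:4 enable header/data CRC offload, which is why the
 * masks are (0x3 << 8) and (0x3 << 4) respectively.
 */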
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
				int pg_idx, bool reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* set up ulp page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}

static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				 int hcrc, int dcrc, int reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!hcrc && !dcrc)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	csk->hcrc_len = (hcrc ? 4 : 0);
	csk->dcrc_len = (dcrc ? 4 : 0);
	/* set up ulp submode */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}

static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	unsigned int tagmask, pgsz_factor[4];
	int err;

	if (ddp) {
		kref_get(&ddp->refcnt);
		pr_warn("cdev 0x%p, ddp 0x%p already set up.\n",
			cdev, cdev->ddp);
		return -EALREADY;
	}

	err = cxgbi_ddp_init(cdev, lldi->vr->iscsi.start,
			     lldi->vr->iscsi.start + lldi->vr->iscsi.size - 1,
			     lldi->iscsi_iolen, lldi->iscsi_iolen);
	if (err < 0)
		return err;

	ddp = cdev->ddp;

	tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
	cxgbi_ddp_page_size_factor(pgsz_factor);
	cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor);

	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
	cdev->csk_ddp_set = ddp_set_map;
	cdev->csk_ddp_clear = ddp_clear_map;

	pr_info("cxgb4i 0x%p tag: sw %u, rsvd %u,%u, mask 0x%x.\n",
		cdev, cdev->tag_format.sw_bits, cdev->tag_format.rsvd_bits,
		cdev->tag_format.rsvd_shift, cdev->tag_format.rsvd_mask);
	pr_info("cxgb4i 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, %u/%u.\n",
		cdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask,
		ddp->rsvd_tag_mask, ddp->max_txsz, lldi->iscsi_iolen,
		ddp->max_rxsz, lldi->iscsi_iolen);
	pr_info("cxgb4i 0x%p max payload size: %u/%u, %u/%u.\n",
		cdev, cdev->tx_max_size, ddp->max_txsz, cdev->rx_max_size,
		ddp->max_rxsz);
	return 0;
}
static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct cxgbi_device *cdev;
	struct port_info *pi;
	int i, rc;

	cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
	if (!cdev) {
		pr_info("t4 device 0x%p, register failed.\n", lldi);
		return NULL;
	}
	pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
		cdev, lldi->adapter_type, lldi->nports,
		lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
		lldi->nrxq, lldi->wr_cred);
	for (i = 0; i < lldi->nrxq; i++)
		log_debug(1 << CXGBI_DBG_DEV,
			  "t4 0x%p, rxq id #%d: %u.\n",
			  cdev, i, lldi->rxq_ids[i]);

	memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
	cdev->flags = CXGBI_FLAG_DEV_T4;
	cdev->pdev = lldi->pdev;
	cdev->ports = lldi->ports;
	cdev->nports = lldi->nports;
	cdev->mtus = lldi->mtus;
	cdev->nmtus = NMTUS;
	cdev->snd_win = cxgb4i_snd_win;
	cdev->rcv_win = cxgb4i_rcv_win;
	cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
	cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
	cdev->itp = &cxgb4i_iscsi_transport;

	cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
		     << FW_VIID_PFN_S;
	pr_info("cdev 0x%p,%s, pfvf %u.\n",
		cdev, lldi->ports[0]->name, cdev->pfvf);

	rc = cxgb4i_ddp_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ddp init failed.\n", cdev);
		goto err_out;
	}
	rc = cxgb4i_ofld_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ofld init failed.\n", cdev);
		goto err_out;
	}

	rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
			    &cxgb4i_host_template, cxgb4i_stt);
	if (rc)
		goto err_out;

	for (i = 0; i < cdev->nports; i++) {
		pi = netdev_priv(lldi->ports[i]);
		cdev->hbas[i]->port_id = pi->port_id;
	}
	return cdev;

err_out:
	cxgbi_device_unregister(cdev);
	return ERR_PTR(-ENOMEM);
}
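/*
 * ULD receive path: the firmware either delivers a CPL directly in the
 * response descriptor (pgl == NULL, copied into a fresh skb) or points
 * at a free-list gather list that is converted with
 * cxgb4_pktgl_to_skb().  The CPL opcode then dispatches through the
 * cxgb4i_cplhandlers[] table above.
 */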
#define RX_PULL_LEN	128
static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
			     const struct pkt_gl *pgl)
{
	const struct cpl_act_establish *rpl;
	struct sk_buff *skb;
	unsigned int opc;
	struct cxgbi_device *cdev = handle;

	if (pgl == NULL) {
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_wr(len, 0, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
		if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				pgl->va, be64_to_cpu(*rsp),
				be64_to_cpu(*(u64 *)pgl->va),
				pgl->tot_len);
			return 0;
		}
		skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = (struct cpl_act_establish *)skb->data;
	opc = rpl->ot.opcode;
	log_debug(1 << CXGBI_DBG_TOE,
		  "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		  cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
	if (cxgb4i_cplhandlers[opc])
		cxgb4i_cplhandlers[opc](cdev, skb);
	else {
		pr_err("No handler for opcode 0x%x.\n", opc);
		__kfree_skb(skb);
	}
	return 0;
nomem:
	log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
	return 1;
}

static int t4_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct cxgbi_device *cdev = handle;

	switch (state) {
	case CXGB4_STATE_UP:
		pr_info("cdev 0x%p, UP.\n", cdev);
		break;
	case CXGB4_STATE_START_RECOVERY:
		pr_info("cdev 0x%p, RECOVERY.\n", cdev);
		/* close all connections */
		break;
	case CXGB4_STATE_DOWN:
		pr_info("cdev 0x%p, DOWN.\n", cdev);
		break;
	case CXGB4_STATE_DETACH:
		pr_info("cdev 0x%p, DETACH.\n", cdev);
		cxgbi_device_unregister(cdev);
		break;
	default:
		pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
		break;
	}
	return 0;
}

static int __init cxgb4i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
	if (rc < 0)
		return rc;
	cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);

	return 0;
}

static void __exit cxgb4i_exit_module(void)
{
	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
}

module_init(cxgb4i_init_module);
module_exit(cxgb4i_exit_module);