1 /* 2 * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver. 3 * 4 * Copyright (c) 2010-2015 Chelsio Communications, Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation. 9 * 10 * Written by: Karen Xie (kxie@chelsio.com) 11 * Written by: Rakesh Ranjan (rranjan@chelsio.com) 12 */ 13 14 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ 15 16 #include <linux/skbuff.h> 17 #include <linux/crypto.h> 18 #include <linux/scatterlist.h> 19 #include <linux/pci.h> 20 #include <scsi/scsi.h> 21 #include <scsi/scsi_cmnd.h> 22 #include <scsi/scsi_host.h> 23 #include <linux/if_vlan.h> 24 #include <linux/inet.h> 25 #include <net/dst.h> 26 #include <net/route.h> 27 #include <net/ipv6.h> 28 #include <net/ip6_route.h> 29 #include <net/addrconf.h> 30 31 #include <linux/inetdevice.h> /* ip_dev_find */ 32 #include <linux/module.h> 33 #include <net/tcp.h> 34 35 static unsigned int dbg_level; 36 37 #include "libcxgbi.h" 38 39 #define DRV_MODULE_NAME "libcxgbi" 40 #define DRV_MODULE_DESC "Chelsio iSCSI driver library" 41 #define DRV_MODULE_VERSION "0.9.1-ko" 42 #define DRV_MODULE_RELDATE "Apr. 2015" 43 44 static char version[] = 45 DRV_MODULE_DESC " " DRV_MODULE_NAME 46 " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 47 48 MODULE_AUTHOR("Chelsio Communications, Inc."); 49 MODULE_DESCRIPTION(DRV_MODULE_DESC); 50 MODULE_VERSION(DRV_MODULE_VERSION); 51 MODULE_LICENSE("GPL"); 52 53 module_param(dbg_level, uint, 0644); 54 MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)"); 55 56 57 /* 58 * cxgbi device management 59 * maintains a list of the cxgbi devices 60 */ 61 static LIST_HEAD(cdev_list); 62 static DEFINE_MUTEX(cdev_mutex); 63 64 static LIST_HEAD(cdev_rcu_list); 65 static DEFINE_SPINLOCK(cdev_rcu_lock); 66 67 static inline void cxgbi_decode_sw_tag(u32 sw_tag, int *idx, int *age) 68 { 69 if (age) 70 *age = sw_tag & 0x7FFF; 71 if (idx) 72 *idx = (sw_tag >> 16) & 0x7FFF; 73 } 74 75 int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base, 76 unsigned int max_conn) 77 { 78 struct cxgbi_ports_map *pmap = &cdev->pmap; 79 80 pmap->port_csk = cxgbi_alloc_big_mem(max_conn * 81 sizeof(struct cxgbi_sock *), 82 GFP_KERNEL); 83 if (!pmap->port_csk) { 84 pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn); 85 return -ENOMEM; 86 } 87 88 pmap->max_connect = max_conn; 89 pmap->sport_base = base; 90 spin_lock_init(&pmap->lock); 91 return 0; 92 } 93 EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create); 94 95 void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev) 96 { 97 struct cxgbi_ports_map *pmap = &cdev->pmap; 98 struct cxgbi_sock *csk; 99 int i; 100 101 for (i = 0; i < pmap->max_connect; i++) { 102 if (pmap->port_csk[i]) { 103 csk = pmap->port_csk[i]; 104 pmap->port_csk[i] = NULL; 105 log_debug(1 << CXGBI_DBG_SOCK, 106 "csk 0x%p, cdev 0x%p, offload down.\n", 107 csk, cdev); 108 spin_lock_bh(&csk->lock); 109 cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN); 110 cxgbi_sock_closed(csk); 111 spin_unlock_bh(&csk->lock); 112 cxgbi_sock_put(csk); 113 } 114 } 115 } 116 EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup); 117 118 static inline void cxgbi_device_destroy(struct cxgbi_device *cdev) 119 { 120 log_debug(1 << CXGBI_DBG_DEV, 121 "cdev 0x%p, p# %u.\n", cdev, cdev->nports); 122 cxgbi_hbas_remove(cdev); 123 cxgbi_device_portmap_cleanup(cdev); 124 cxgbi_ppm_release(cdev->cdev2ppm(cdev)); 125 if (cdev->pmap.max_connect) 126 cxgbi_free_big_mem(cdev->pmap.port_csk); 127 kfree(cdev); 128 } 129 130 struct cxgbi_device *cxgbi_device_register(unsigned int extra, 131 unsigned int nports) 132 { 133 struct cxgbi_device *cdev; 134 135 cdev = kzalloc(sizeof(*cdev) + extra + nports * 136 (sizeof(struct cxgbi_hba *) + 137 sizeof(struct net_device *)), 138 GFP_KERNEL); 139 if (!cdev) { 140 pr_warn("nport %d, OOM.\n", nports); 141 return NULL; 142 } 143 cdev->ports = (struct net_device **)(cdev + 1); 144 cdev->hbas = (struct cxgbi_hba **)(((char*)cdev->ports) + nports * 145 sizeof(struct net_device *)); 146 if (extra) 147 cdev->dd_data = ((char *)cdev->hbas) + 148 nports * sizeof(struct cxgbi_hba *); 149 spin_lock_init(&cdev->pmap.lock); 150 151 mutex_lock(&cdev_mutex); 152 list_add_tail(&cdev->list_head, &cdev_list); 153 mutex_unlock(&cdev_mutex); 154 155 spin_lock(&cdev_rcu_lock); 156 list_add_tail_rcu(&cdev->rcu_node, &cdev_rcu_list); 157 spin_unlock(&cdev_rcu_lock); 158 159 log_debug(1 << CXGBI_DBG_DEV, 160 "cdev 0x%p, p# %u.\n", cdev, nports); 161 return cdev; 162 } 163 EXPORT_SYMBOL_GPL(cxgbi_device_register); 164 165 void cxgbi_device_unregister(struct cxgbi_device *cdev) 166 { 167 log_debug(1 << CXGBI_DBG_DEV, 168 "cdev 0x%p, p# %u,%s.\n", 169 cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : ""); 170 171 mutex_lock(&cdev_mutex); 172 list_del(&cdev->list_head); 173 mutex_unlock(&cdev_mutex); 174 175 spin_lock(&cdev_rcu_lock); 176 list_del_rcu(&cdev->rcu_node); 177 spin_unlock(&cdev_rcu_lock); 178 synchronize_rcu(); 179 180 cxgbi_device_destroy(cdev); 181 } 182 EXPORT_SYMBOL_GPL(cxgbi_device_unregister); 183 184 void cxgbi_device_unregister_all(unsigned int flag) 185 { 186 struct cxgbi_device *cdev, *tmp; 187 188 mutex_lock(&cdev_mutex); 189 list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { 190 if ((cdev->flags & flag) == flag) { 191 mutex_unlock(&cdev_mutex); 192 cxgbi_device_unregister(cdev); 193 mutex_lock(&cdev_mutex); 194 } 195 } 196 mutex_unlock(&cdev_mutex); 197 } 198 EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all); 199 200 struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev) 201 { 202 struct cxgbi_device *cdev, *tmp; 203 204 mutex_lock(&cdev_mutex); 205 list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { 206 if (cdev->lldev == lldev) { 207 mutex_unlock(&cdev_mutex); 208 return cdev; 209 } 210 } 211 mutex_unlock(&cdev_mutex); 212 213 log_debug(1 << CXGBI_DBG_DEV, 214 "lldev 0x%p, NO match found.\n", lldev); 215 return NULL; 216 } 217 EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev); 218 219 struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev, 220 int *port) 221 { 222 struct net_device *vdev = NULL; 223 struct cxgbi_device *cdev, *tmp; 224 int i; 225 226 if (is_vlan_dev(ndev)) { 227 vdev = ndev; 228 ndev = vlan_dev_real_dev(ndev); 229 log_debug(1 << CXGBI_DBG_DEV, 230 "vlan dev %s -> %s.\n", vdev->name, ndev->name); 231 } 232 233 mutex_lock(&cdev_mutex); 234 list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { 235 for (i = 0; i < cdev->nports; i++) { 236 if (ndev == cdev->ports[i]) { 237 cdev->hbas[i]->vdev = vdev; 238 mutex_unlock(&cdev_mutex); 239 if (port) 240 *port = i; 241 return cdev; 242 } 243 } 244 } 245 mutex_unlock(&cdev_mutex); 246 log_debug(1 << CXGBI_DBG_DEV, 247 "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name); 248 return NULL; 249 } 250 EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev); 251 252 struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev, 253 int *port) 254 { 255 struct net_device *vdev = NULL; 256 struct cxgbi_device *cdev; 257 int i; 258 259 if (is_vlan_dev(ndev)) { 260 vdev = ndev; 261 ndev = vlan_dev_real_dev(ndev); 262 pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name); 263 } 264 265 rcu_read_lock(); 266 list_for_each_entry_rcu(cdev, &cdev_rcu_list, rcu_node) { 267 for (i = 0; i < cdev->nports; i++) { 268 if (ndev == cdev->ports[i]) { 269 cdev->hbas[i]->vdev = vdev; 270 rcu_read_unlock(); 271 if (port) 272 *port = i; 273 return cdev; 274 } 275 } 276 } 277 rcu_read_unlock(); 278 279 log_debug(1 << CXGBI_DBG_DEV, 280 "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name); 281 return NULL; 282 } 283 EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu); 284 285 #if IS_ENABLED(CONFIG_IPV6) 286 static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev, 287 int *port) 288 { 289 struct net_device *vdev = NULL; 290 struct cxgbi_device *cdev, *tmp; 291 int i; 292 293 if (is_vlan_dev(ndev)) { 294 vdev = ndev; 295 ndev = vlan_dev_real_dev(ndev); 296 pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name); 297 } 298 299 mutex_lock(&cdev_mutex); 300 list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { 301 for (i = 0; i < cdev->nports; i++) { 302 if (!memcmp(ndev->dev_addr, cdev->ports[i]->dev_addr, 303 MAX_ADDR_LEN)) { 304 cdev->hbas[i]->vdev = vdev; 305 mutex_unlock(&cdev_mutex); 306 if (port) 307 *port = i; 308 return cdev; 309 } 310 } 311 } 312 mutex_unlock(&cdev_mutex); 313 log_debug(1 << CXGBI_DBG_DEV, 314 "ndev 0x%p, %s, NO match mac found.\n", 315 ndev, ndev->name); 316 return NULL; 317 } 318 #endif 319 320 void cxgbi_hbas_remove(struct cxgbi_device *cdev) 321 { 322 int i; 323 struct cxgbi_hba *chba; 324 325 log_debug(1 << CXGBI_DBG_DEV, 326 "cdev 0x%p, p#%u.\n", cdev, cdev->nports); 327 328 for (i = 0; i < cdev->nports; i++) { 329 chba = cdev->hbas[i]; 330 if (chba) { 331 cdev->hbas[i] = NULL; 332 iscsi_host_remove(chba->shost); 333 pci_dev_put(cdev->pdev); 334 iscsi_host_free(chba->shost); 335 } 336 } 337 } 338 EXPORT_SYMBOL_GPL(cxgbi_hbas_remove); 339 340 int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun, 341 unsigned int max_id, struct scsi_host_template *sht, 342 struct scsi_transport_template *stt) 343 { 344 struct cxgbi_hba *chba; 345 struct Scsi_Host *shost; 346 int i, err; 347 348 log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports); 349 350 for (i = 0; i < cdev->nports; i++) { 351 shost = iscsi_host_alloc(sht, sizeof(*chba), 1); 352 if (!shost) { 353 pr_info("0x%p, p%d, %s, host alloc failed.\n", 354 cdev, i, cdev->ports[i]->name); 355 err = -ENOMEM; 356 goto err_out; 357 } 358 359 shost->transportt = stt; 360 shost->max_lun = max_lun; 361 shost->max_id = max_id; 362 shost->max_channel = 0; 363 shost->max_cmd_len = 16; 364 365 chba = iscsi_host_priv(shost); 366 chba->cdev = cdev; 367 chba->ndev = cdev->ports[i]; 368 chba->shost = shost; 369 370 log_debug(1 << CXGBI_DBG_DEV, 371 "cdev 0x%p, p#%d %s: chba 0x%p.\n", 372 cdev, i, cdev->ports[i]->name, chba); 373 374 pci_dev_get(cdev->pdev); 375 err = iscsi_host_add(shost, &cdev->pdev->dev); 376 if (err) { 377 pr_info("cdev 0x%p, p#%d %s, host add failed.\n", 378 cdev, i, cdev->ports[i]->name); 379 pci_dev_put(cdev->pdev); 380 scsi_host_put(shost); 381 goto err_out; 382 } 383 384 cdev->hbas[i] = chba; 385 } 386 387 return 0; 388 389 err_out: 390 cxgbi_hbas_remove(cdev); 391 return err; 392 } 393 EXPORT_SYMBOL_GPL(cxgbi_hbas_add); 394 395 /* 396 * iSCSI offload 397 * 398 * - source port management 399 * To find a free source port in the port allocation map we use a very simple 400 * rotor scheme to look for the next free port. 401 * 402 * If a source port has been specified make sure that it doesn't collide with 403 * our normal source port allocation map. If it's outside the range of our 404 * allocation/deallocation scheme just let them use it. 405 * 406 * If the source port is outside our allocation range, the caller is 407 * responsible for keeping track of their port usage. 408 */ 409 410 static struct cxgbi_sock *find_sock_on_port(struct cxgbi_device *cdev, 411 unsigned char port_id) 412 { 413 struct cxgbi_ports_map *pmap = &cdev->pmap; 414 unsigned int i; 415 unsigned int used; 416 417 if (!pmap->max_connect || !pmap->used) 418 return NULL; 419 420 spin_lock_bh(&pmap->lock); 421 used = pmap->used; 422 for (i = 0; used && i < pmap->max_connect; i++) { 423 struct cxgbi_sock *csk = pmap->port_csk[i]; 424 425 if (csk) { 426 if (csk->port_id == port_id) { 427 spin_unlock_bh(&pmap->lock); 428 return csk; 429 } 430 used--; 431 } 432 } 433 spin_unlock_bh(&pmap->lock); 434 435 return NULL; 436 } 437 438 static int sock_get_port(struct cxgbi_sock *csk) 439 { 440 struct cxgbi_device *cdev = csk->cdev; 441 struct cxgbi_ports_map *pmap = &cdev->pmap; 442 unsigned int start; 443 int idx; 444 __be16 *port; 445 446 if (!pmap->max_connect) { 447 pr_err("cdev 0x%p, p#%u %s, NO port map.\n", 448 cdev, csk->port_id, cdev->ports[csk->port_id]->name); 449 return -EADDRNOTAVAIL; 450 } 451 452 if (csk->csk_family == AF_INET) 453 port = &csk->saddr.sin_port; 454 else /* ipv6 */ 455 port = &csk->saddr6.sin6_port; 456 457 if (*port) { 458 pr_err("source port NON-ZERO %u.\n", 459 ntohs(*port)); 460 return -EADDRINUSE; 461 } 462 463 spin_lock_bh(&pmap->lock); 464 if (pmap->used >= pmap->max_connect) { 465 spin_unlock_bh(&pmap->lock); 466 pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n", 467 cdev, csk->port_id, cdev->ports[csk->port_id]->name); 468 return -EADDRNOTAVAIL; 469 } 470 471 start = idx = pmap->next; 472 do { 473 if (++idx >= pmap->max_connect) 474 idx = 0; 475 if (!pmap->port_csk[idx]) { 476 pmap->used++; 477 *port = htons(pmap->sport_base + idx); 478 pmap->next = idx; 479 pmap->port_csk[idx] = csk; 480 spin_unlock_bh(&pmap->lock); 481 cxgbi_sock_get(csk); 482 log_debug(1 << CXGBI_DBG_SOCK, 483 "cdev 0x%p, p#%u %s, p %u, %u.\n", 484 cdev, csk->port_id, 485 cdev->ports[csk->port_id]->name, 486 pmap->sport_base + idx, pmap->next); 487 return 0; 488 } 489 } while (idx != start); 490 spin_unlock_bh(&pmap->lock); 491 492 /* should not happen */ 493 pr_warn("cdev 0x%p, p#%u %s, next %u?\n", 494 cdev, csk->port_id, cdev->ports[csk->port_id]->name, 495 pmap->next); 496 return -EADDRNOTAVAIL; 497 } 498 499 static void sock_put_port(struct cxgbi_sock *csk) 500 { 501 struct cxgbi_device *cdev = csk->cdev; 502 struct cxgbi_ports_map *pmap = &cdev->pmap; 503 __be16 *port; 504 505 if (csk->csk_family == AF_INET) 506 port = &csk->saddr.sin_port; 507 else /* ipv6 */ 508 port = &csk->saddr6.sin6_port; 509 510 if (*port) { 511 int idx = ntohs(*port) - pmap->sport_base; 512 513 *port = 0; 514 if (idx < 0 || idx >= pmap->max_connect) { 515 pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n", 516 cdev, csk->port_id, 517 cdev->ports[csk->port_id]->name, 518 ntohs(*port)); 519 return; 520 } 521 522 spin_lock_bh(&pmap->lock); 523 pmap->port_csk[idx] = NULL; 524 pmap->used--; 525 spin_unlock_bh(&pmap->lock); 526 527 log_debug(1 << CXGBI_DBG_SOCK, 528 "cdev 0x%p, p#%u %s, release %u.\n", 529 cdev, csk->port_id, cdev->ports[csk->port_id]->name, 530 pmap->sport_base + idx); 531 532 cxgbi_sock_put(csk); 533 } 534 } 535 536 /* 537 * iscsi tcp connection 538 */ 539 void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk) 540 { 541 if (csk->cpl_close) { 542 kfree_skb(csk->cpl_close); 543 csk->cpl_close = NULL; 544 } 545 if (csk->cpl_abort_req) { 546 kfree_skb(csk->cpl_abort_req); 547 csk->cpl_abort_req = NULL; 548 } 549 if (csk->cpl_abort_rpl) { 550 kfree_skb(csk->cpl_abort_rpl); 551 csk->cpl_abort_rpl = NULL; 552 } 553 } 554 EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs); 555 556 static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev) 557 { 558 struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO); 559 560 if (!csk) { 561 pr_info("alloc csk %zu failed.\n", sizeof(*csk)); 562 return NULL; 563 } 564 565 if (cdev->csk_alloc_cpls(csk) < 0) { 566 pr_info("csk 0x%p, alloc cpls failed.\n", csk); 567 kfree(csk); 568 return NULL; 569 } 570 571 spin_lock_init(&csk->lock); 572 kref_init(&csk->refcnt); 573 skb_queue_head_init(&csk->receive_queue); 574 skb_queue_head_init(&csk->write_queue); 575 setup_timer(&csk->retry_timer, NULL, (unsigned long)csk); 576 rwlock_init(&csk->callback_lock); 577 csk->cdev = cdev; 578 csk->flags = 0; 579 cxgbi_sock_set_state(csk, CTP_CLOSED); 580 581 log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk); 582 583 return csk; 584 } 585 586 static struct rtable *find_route_ipv4(struct flowi4 *fl4, 587 __be32 saddr, __be32 daddr, 588 __be16 sport, __be16 dport, u8 tos) 589 { 590 struct rtable *rt; 591 592 rt = ip_route_output_ports(&init_net, fl4, NULL, daddr, saddr, 593 dport, sport, IPPROTO_TCP, tos, 0); 594 if (IS_ERR(rt)) 595 return NULL; 596 597 return rt; 598 } 599 600 static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) 601 { 602 struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr; 603 struct dst_entry *dst; 604 struct net_device *ndev; 605 struct cxgbi_device *cdev; 606 struct rtable *rt = NULL; 607 struct neighbour *n; 608 struct flowi4 fl4; 609 struct cxgbi_sock *csk = NULL; 610 unsigned int mtu = 0; 611 int port = 0xFFFF; 612 int err = 0; 613 614 rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0); 615 if (!rt) { 616 pr_info("no route to ipv4 0x%x, port %u.\n", 617 be32_to_cpu(daddr->sin_addr.s_addr), 618 be16_to_cpu(daddr->sin_port)); 619 err = -ENETUNREACH; 620 goto err_out; 621 } 622 dst = &rt->dst; 623 n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr); 624 if (!n) { 625 err = -ENODEV; 626 goto rel_rt; 627 } 628 ndev = n->dev; 629 630 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { 631 pr_info("multi-cast route %pI4, port %u, dev %s.\n", 632 &daddr->sin_addr.s_addr, ntohs(daddr->sin_port), 633 ndev->name); 634 err = -ENETUNREACH; 635 goto rel_neigh; 636 } 637 638 if (ndev->flags & IFF_LOOPBACK) { 639 ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr); 640 mtu = ndev->mtu; 641 pr_info("rt dev %s, loopback -> %s, mtu %u.\n", 642 n->dev->name, ndev->name, mtu); 643 } 644 645 if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) { 646 pr_info("%s interface not up.\n", ndev->name); 647 err = -ENETDOWN; 648 goto rel_neigh; 649 } 650 651 cdev = cxgbi_device_find_by_netdev(ndev, &port); 652 if (!cdev) { 653 pr_info("dst %pI4, %s, NOT cxgbi device.\n", 654 &daddr->sin_addr.s_addr, ndev->name); 655 err = -ENETUNREACH; 656 goto rel_neigh; 657 } 658 log_debug(1 << CXGBI_DBG_SOCK, 659 "route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n", 660 &daddr->sin_addr.s_addr, ntohs(daddr->sin_port), 661 port, ndev->name, cdev); 662 663 csk = cxgbi_sock_create(cdev); 664 if (!csk) { 665 err = -ENOMEM; 666 goto rel_neigh; 667 } 668 csk->cdev = cdev; 669 csk->port_id = port; 670 csk->mtu = mtu; 671 csk->dst = dst; 672 673 csk->csk_family = AF_INET; 674 csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr; 675 csk->daddr.sin_port = daddr->sin_port; 676 csk->daddr.sin_family = daddr->sin_family; 677 csk->saddr.sin_family = daddr->sin_family; 678 csk->saddr.sin_addr.s_addr = fl4.saddr; 679 neigh_release(n); 680 681 return csk; 682 683 rel_neigh: 684 neigh_release(n); 685 686 rel_rt: 687 ip_rt_put(rt); 688 if (csk) 689 cxgbi_sock_closed(csk); 690 err_out: 691 return ERR_PTR(err); 692 } 693 694 #if IS_ENABLED(CONFIG_IPV6) 695 static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr, 696 const struct in6_addr *daddr) 697 { 698 struct flowi6 fl; 699 700 memset(&fl, 0, sizeof(fl)); 701 if (saddr) 702 memcpy(&fl.saddr, saddr, sizeof(struct in6_addr)); 703 if (daddr) 704 memcpy(&fl.daddr, daddr, sizeof(struct in6_addr)); 705 return (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl); 706 } 707 708 static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr) 709 { 710 struct sockaddr_in6 *daddr6 = (struct sockaddr_in6 *)dst_addr; 711 struct dst_entry *dst; 712 struct net_device *ndev; 713 struct cxgbi_device *cdev; 714 struct rt6_info *rt = NULL; 715 struct neighbour *n; 716 struct in6_addr pref_saddr; 717 struct cxgbi_sock *csk = NULL; 718 unsigned int mtu = 0; 719 int port = 0xFFFF; 720 int err = 0; 721 722 rt = find_route_ipv6(NULL, &daddr6->sin6_addr); 723 724 if (!rt) { 725 pr_info("no route to ipv6 %pI6 port %u\n", 726 daddr6->sin6_addr.s6_addr, 727 be16_to_cpu(daddr6->sin6_port)); 728 err = -ENETUNREACH; 729 goto err_out; 730 } 731 732 dst = &rt->dst; 733 734 n = dst_neigh_lookup(dst, &daddr6->sin6_addr); 735 736 if (!n) { 737 pr_info("%pI6, port %u, dst no neighbour.\n", 738 daddr6->sin6_addr.s6_addr, 739 be16_to_cpu(daddr6->sin6_port)); 740 err = -ENETUNREACH; 741 goto rel_rt; 742 } 743 ndev = n->dev; 744 745 if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) { 746 pr_info("%s interface not up.\n", ndev->name); 747 err = -ENETDOWN; 748 goto rel_rt; 749 } 750 751 if (ipv6_addr_is_multicast(&daddr6->sin6_addr)) { 752 pr_info("multi-cast route %pI6 port %u, dev %s.\n", 753 daddr6->sin6_addr.s6_addr, 754 ntohs(daddr6->sin6_port), ndev->name); 755 err = -ENETUNREACH; 756 goto rel_rt; 757 } 758 759 cdev = cxgbi_device_find_by_netdev(ndev, &port); 760 if (!cdev) 761 cdev = cxgbi_device_find_by_mac(ndev, &port); 762 if (!cdev) { 763 pr_info("dst %pI6 %s, NOT cxgbi device.\n", 764 daddr6->sin6_addr.s6_addr, ndev->name); 765 err = -ENETUNREACH; 766 goto rel_rt; 767 } 768 log_debug(1 << CXGBI_DBG_SOCK, 769 "route to %pI6 :%u, ndev p#%d,%s, cdev 0x%p.\n", 770 daddr6->sin6_addr.s6_addr, ntohs(daddr6->sin6_port), port, 771 ndev->name, cdev); 772 773 csk = cxgbi_sock_create(cdev); 774 if (!csk) { 775 err = -ENOMEM; 776 goto rel_rt; 777 } 778 csk->cdev = cdev; 779 csk->port_id = port; 780 csk->mtu = mtu; 781 csk->dst = dst; 782 783 if (ipv6_addr_any(&rt->rt6i_prefsrc.addr)) { 784 struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt); 785 786 err = ipv6_dev_get_saddr(&init_net, idev ? idev->dev : NULL, 787 &daddr6->sin6_addr, 0, &pref_saddr); 788 if (err) { 789 pr_info("failed to get source address to reach %pI6\n", 790 &daddr6->sin6_addr); 791 goto rel_rt; 792 } 793 } else { 794 pref_saddr = rt->rt6i_prefsrc.addr; 795 } 796 797 csk->csk_family = AF_INET6; 798 csk->daddr6.sin6_addr = daddr6->sin6_addr; 799 csk->daddr6.sin6_port = daddr6->sin6_port; 800 csk->daddr6.sin6_family = daddr6->sin6_family; 801 csk->saddr6.sin6_family = daddr6->sin6_family; 802 csk->saddr6.sin6_addr = pref_saddr; 803 804 neigh_release(n); 805 return csk; 806 807 rel_rt: 808 if (n) 809 neigh_release(n); 810 811 ip6_rt_put(rt); 812 if (csk) 813 cxgbi_sock_closed(csk); 814 err_out: 815 return ERR_PTR(err); 816 } 817 #endif /* IS_ENABLED(CONFIG_IPV6) */ 818 819 void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn, 820 unsigned int opt) 821 { 822 csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn; 823 dst_confirm(csk->dst); 824 smp_mb(); 825 cxgbi_sock_set_state(csk, CTP_ESTABLISHED); 826 } 827 EXPORT_SYMBOL_GPL(cxgbi_sock_established); 828 829 static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk) 830 { 831 log_debug(1 << CXGBI_DBG_SOCK, 832 "csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n", 833 csk, csk->state, csk->flags, csk->user_data); 834 835 if (csk->state != CTP_ESTABLISHED) { 836 read_lock_bh(&csk->callback_lock); 837 if (csk->user_data) 838 iscsi_conn_failure(csk->user_data, 839 ISCSI_ERR_TCP_CONN_CLOSE); 840 read_unlock_bh(&csk->callback_lock); 841 } 842 } 843 844 void cxgbi_sock_closed(struct cxgbi_sock *csk) 845 { 846 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 847 csk, (csk)->state, (csk)->flags, (csk)->tid); 848 cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED); 849 if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED) 850 return; 851 if (csk->saddr.sin_port) 852 sock_put_port(csk); 853 if (csk->dst) 854 dst_release(csk->dst); 855 csk->cdev->csk_release_offload_resources(csk); 856 cxgbi_sock_set_state(csk, CTP_CLOSED); 857 cxgbi_inform_iscsi_conn_closing(csk); 858 cxgbi_sock_put(csk); 859 } 860 EXPORT_SYMBOL_GPL(cxgbi_sock_closed); 861 862 static void need_active_close(struct cxgbi_sock *csk) 863 { 864 int data_lost; 865 int close_req = 0; 866 867 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 868 csk, (csk)->state, (csk)->flags, (csk)->tid); 869 spin_lock_bh(&csk->lock); 870 if (csk->dst) 871 dst_confirm(csk->dst); 872 data_lost = skb_queue_len(&csk->receive_queue); 873 __skb_queue_purge(&csk->receive_queue); 874 875 if (csk->state == CTP_ACTIVE_OPEN) 876 cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED); 877 else if (csk->state == CTP_ESTABLISHED) { 878 close_req = 1; 879 cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE); 880 } else if (csk->state == CTP_PASSIVE_CLOSE) { 881 close_req = 1; 882 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2); 883 } 884 885 if (close_req) { 886 if (!cxgbi_sock_flag(csk, CTPF_LOGOUT_RSP_RCVD) || 887 data_lost) 888 csk->cdev->csk_send_abort_req(csk); 889 else 890 csk->cdev->csk_send_close_req(csk); 891 } 892 893 spin_unlock_bh(&csk->lock); 894 } 895 896 void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno) 897 { 898 pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n", 899 csk, csk->state, csk->flags, 900 &csk->saddr.sin_addr.s_addr, csk->saddr.sin_port, 901 &csk->daddr.sin_addr.s_addr, csk->daddr.sin_port, 902 errno); 903 904 cxgbi_sock_set_state(csk, CTP_CONNECTING); 905 csk->err = errno; 906 cxgbi_sock_closed(csk); 907 } 908 EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open); 909 910 void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb) 911 { 912 struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk; 913 struct module *owner = csk->cdev->owner; 914 915 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 916 csk, (csk)->state, (csk)->flags, (csk)->tid); 917 cxgbi_sock_get(csk); 918 spin_lock_bh(&csk->lock); 919 if (csk->state == CTP_ACTIVE_OPEN) 920 cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH); 921 spin_unlock_bh(&csk->lock); 922 cxgbi_sock_put(csk); 923 __kfree_skb(skb); 924 925 module_put(owner); 926 } 927 EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure); 928 929 void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk) 930 { 931 cxgbi_sock_get(csk); 932 spin_lock_bh(&csk->lock); 933 934 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD); 935 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { 936 cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING); 937 if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) 938 pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n", 939 csk, csk->state, csk->flags, csk->tid); 940 cxgbi_sock_closed(csk); 941 } 942 943 spin_unlock_bh(&csk->lock); 944 cxgbi_sock_put(csk); 945 } 946 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl); 947 948 void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk) 949 { 950 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 951 csk, (csk)->state, (csk)->flags, (csk)->tid); 952 cxgbi_sock_get(csk); 953 spin_lock_bh(&csk->lock); 954 955 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) 956 goto done; 957 958 switch (csk->state) { 959 case CTP_ESTABLISHED: 960 cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE); 961 break; 962 case CTP_ACTIVE_CLOSE: 963 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2); 964 break; 965 case CTP_CLOSE_WAIT_1: 966 cxgbi_sock_closed(csk); 967 break; 968 case CTP_ABORTING: 969 break; 970 default: 971 pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n", 972 csk, csk->state, csk->flags, csk->tid); 973 } 974 cxgbi_inform_iscsi_conn_closing(csk); 975 done: 976 spin_unlock_bh(&csk->lock); 977 cxgbi_sock_put(csk); 978 } 979 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close); 980 981 void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt) 982 { 983 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 984 csk, (csk)->state, (csk)->flags, (csk)->tid); 985 cxgbi_sock_get(csk); 986 spin_lock_bh(&csk->lock); 987 988 csk->snd_una = snd_nxt - 1; 989 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) 990 goto done; 991 992 switch (csk->state) { 993 case CTP_ACTIVE_CLOSE: 994 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1); 995 break; 996 case CTP_CLOSE_WAIT_1: 997 case CTP_CLOSE_WAIT_2: 998 cxgbi_sock_closed(csk); 999 break; 1000 case CTP_ABORTING: 1001 break; 1002 default: 1003 pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n", 1004 csk, csk->state, csk->flags, csk->tid); 1005 } 1006 done: 1007 spin_unlock_bh(&csk->lock); 1008 cxgbi_sock_put(csk); 1009 } 1010 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl); 1011 1012 void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits, 1013 unsigned int snd_una, int seq_chk) 1014 { 1015 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 1016 "csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n", 1017 csk, csk->state, csk->flags, csk->tid, credits, 1018 csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk); 1019 1020 spin_lock_bh(&csk->lock); 1021 1022 csk->wr_cred += credits; 1023 if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred) 1024 csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred; 1025 1026 while (credits) { 1027 struct sk_buff *p = cxgbi_sock_peek_wr(csk); 1028 1029 if (unlikely(!p)) { 1030 pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n", 1031 csk, csk->state, csk->flags, csk->tid, credits, 1032 csk->wr_cred, csk->wr_una_cred); 1033 break; 1034 } 1035 1036 if (unlikely(credits < p->csum)) { 1037 pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n", 1038 csk, csk->state, csk->flags, csk->tid, 1039 credits, csk->wr_cred, csk->wr_una_cred, 1040 p->csum); 1041 p->csum -= credits; 1042 break; 1043 } else { 1044 cxgbi_sock_dequeue_wr(csk); 1045 credits -= p->csum; 1046 kfree_skb(p); 1047 } 1048 } 1049 1050 cxgbi_sock_check_wr_invariants(csk); 1051 1052 if (seq_chk) { 1053 if (unlikely(before(snd_una, csk->snd_una))) { 1054 pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.", 1055 csk, csk->state, csk->flags, csk->tid, snd_una, 1056 csk->snd_una); 1057 goto done; 1058 } 1059 1060 if (csk->snd_una != snd_una) { 1061 csk->snd_una = snd_una; 1062 dst_confirm(csk->dst); 1063 } 1064 } 1065 1066 if (skb_queue_len(&csk->write_queue)) { 1067 if (csk->cdev->csk_push_tx_frames(csk, 0)) 1068 cxgbi_conn_tx_open(csk); 1069 } else 1070 cxgbi_conn_tx_open(csk); 1071 done: 1072 spin_unlock_bh(&csk->lock); 1073 } 1074 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack); 1075 1076 static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk, 1077 unsigned short mtu) 1078 { 1079 int i = 0; 1080 1081 while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu) 1082 ++i; 1083 1084 return i; 1085 } 1086 1087 unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu) 1088 { 1089 unsigned int idx; 1090 struct dst_entry *dst = csk->dst; 1091 1092 csk->advmss = dst_metric_advmss(dst); 1093 1094 if (csk->advmss > pmtu - 40) 1095 csk->advmss = pmtu - 40; 1096 if (csk->advmss < csk->cdev->mtus[0] - 40) 1097 csk->advmss = csk->cdev->mtus[0] - 40; 1098 idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40); 1099 1100 return idx; 1101 } 1102 EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss); 1103 1104 void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb) 1105 { 1106 cxgbi_skcb_tcp_seq(skb) = csk->write_seq; 1107 __skb_queue_tail(&csk->write_queue, skb); 1108 } 1109 EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail); 1110 1111 void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk) 1112 { 1113 struct sk_buff *skb; 1114 1115 while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL) 1116 kfree_skb(skb); 1117 } 1118 EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue); 1119 1120 void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk) 1121 { 1122 int pending = cxgbi_sock_count_pending_wrs(csk); 1123 1124 if (unlikely(csk->wr_cred + pending != csk->wr_max_cred)) 1125 pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n", 1126 csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred); 1127 } 1128 EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants); 1129 1130 static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb) 1131 { 1132 struct cxgbi_device *cdev = csk->cdev; 1133 struct sk_buff *next; 1134 int err, copied = 0; 1135 1136 spin_lock_bh(&csk->lock); 1137 1138 if (csk->state != CTP_ESTABLISHED) { 1139 log_debug(1 << CXGBI_DBG_PDU_TX, 1140 "csk 0x%p,%u,0x%lx,%u, EAGAIN.\n", 1141 csk, csk->state, csk->flags, csk->tid); 1142 err = -EAGAIN; 1143 goto out_err; 1144 } 1145 1146 if (csk->err) { 1147 log_debug(1 << CXGBI_DBG_PDU_TX, 1148 "csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n", 1149 csk, csk->state, csk->flags, csk->tid, csk->err); 1150 err = -EPIPE; 1151 goto out_err; 1152 } 1153 1154 if (csk->write_seq - csk->snd_una >= csk->snd_win) { 1155 log_debug(1 << CXGBI_DBG_PDU_TX, 1156 "csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n", 1157 csk, csk->state, csk->flags, csk->tid, csk->write_seq, 1158 csk->snd_una, csk->snd_win); 1159 err = -ENOBUFS; 1160 goto out_err; 1161 } 1162 1163 while (skb) { 1164 int frags = skb_shinfo(skb)->nr_frags + 1165 (skb->len != skb->data_len); 1166 1167 if (unlikely(skb_headroom(skb) < cdev->skb_tx_rsvd)) { 1168 pr_err("csk 0x%p, skb head %u < %u.\n", 1169 csk, skb_headroom(skb), cdev->skb_tx_rsvd); 1170 err = -EINVAL; 1171 goto out_err; 1172 } 1173 1174 if (frags >= SKB_WR_LIST_SIZE) { 1175 pr_err("csk 0x%p, frags %d, %u,%u >%u.\n", 1176 csk, skb_shinfo(skb)->nr_frags, skb->len, 1177 skb->data_len, (uint)(SKB_WR_LIST_SIZE)); 1178 err = -EINVAL; 1179 goto out_err; 1180 } 1181 1182 next = skb->next; 1183 skb->next = NULL; 1184 cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR); 1185 cxgbi_sock_skb_entail(csk, skb); 1186 copied += skb->len; 1187 csk->write_seq += skb->len + 1188 cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb)); 1189 skb = next; 1190 } 1191 1192 if (likely(skb_queue_len(&csk->write_queue))) 1193 cdev->csk_push_tx_frames(csk, 1); 1194 done: 1195 spin_unlock_bh(&csk->lock); 1196 return copied; 1197 1198 out_err: 1199 if (copied == 0 && err == -EPIPE) 1200 copied = csk->err ? csk->err : -EPIPE; 1201 else 1202 copied = err; 1203 goto done; 1204 } 1205 1206 static inline void 1207 scmd_get_params(struct scsi_cmnd *sc, struct scatterlist **sgl, 1208 unsigned int *sgcnt, unsigned int *dlen, 1209 unsigned int prot) 1210 { 1211 struct scsi_data_buffer *sdb = prot ? scsi_prot(sc) : scsi_out(sc); 1212 1213 *sgl = sdb->table.sgl; 1214 *sgcnt = sdb->table.nents; 1215 *dlen = sdb->length; 1216 /* Caution: for protection sdb, sdb->length is invalid */ 1217 } 1218 1219 void cxgbi_ddp_set_one_ppod(struct cxgbi_pagepod *ppod, 1220 struct cxgbi_task_tag_info *ttinfo, 1221 struct scatterlist **sg_pp, unsigned int *sg_off) 1222 { 1223 struct scatterlist *sg = sg_pp ? *sg_pp : NULL; 1224 unsigned int offset = sg_off ? *sg_off : 0; 1225 dma_addr_t addr = 0UL; 1226 unsigned int len = 0; 1227 int i; 1228 1229 memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr)); 1230 1231 if (sg) { 1232 addr = sg_dma_address(sg); 1233 len = sg_dma_len(sg); 1234 } 1235 1236 for (i = 0; i < PPOD_PAGES_MAX; i++) { 1237 if (sg) { 1238 ppod->addr[i] = cpu_to_be64(addr + offset); 1239 offset += PAGE_SIZE; 1240 if (offset == (len + sg->offset)) { 1241 offset = 0; 1242 sg = sg_next(sg); 1243 if (sg) { 1244 addr = sg_dma_address(sg); 1245 len = sg_dma_len(sg); 1246 } 1247 } 1248 } else { 1249 ppod->addr[i] = 0ULL; 1250 } 1251 } 1252 1253 /* 1254 * the fifth address needs to be repeated in the next ppod, so do 1255 * not move sg 1256 */ 1257 if (sg_pp) { 1258 *sg_pp = sg; 1259 *sg_off = offset; 1260 } 1261 1262 if (offset == len) { 1263 offset = 0; 1264 sg = sg_next(sg); 1265 if (sg) { 1266 addr = sg_dma_address(sg); 1267 len = sg_dma_len(sg); 1268 } 1269 } 1270 ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL; 1271 } 1272 EXPORT_SYMBOL_GPL(cxgbi_ddp_set_one_ppod); 1273 1274 /* 1275 * APIs interacting with open-iscsi libraries 1276 */ 1277 1278 static unsigned char padding[4]; 1279 1280 void cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev, 1281 struct cxgbi_tag_format *tformat, unsigned int ppmax, 1282 unsigned int llimit, unsigned int start, 1283 unsigned int rsvd_factor) 1284 { 1285 int err = cxgbi_ppm_init(ppm_pp, cdev->ports[0], cdev->pdev, 1286 cdev->lldev, tformat, ppmax, llimit, start, 1287 rsvd_factor); 1288 1289 if (err >= 0) { 1290 struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp); 1291 1292 if (ppm->ppmax < 1024 || 1293 ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX) 1294 cdev->flags |= CXGBI_FLAG_DDP_OFF; 1295 err = 0; 1296 } else { 1297 cdev->flags |= CXGBI_FLAG_DDP_OFF; 1298 } 1299 } 1300 EXPORT_SYMBOL_GPL(cxgbi_ddp_ppm_setup); 1301 1302 static int cxgbi_ddp_sgl_check(struct scatterlist *sgl, int nents) 1303 { 1304 int i; 1305 int last_sgidx = nents - 1; 1306 struct scatterlist *sg = sgl; 1307 1308 for (i = 0; i < nents; i++, sg = sg_next(sg)) { 1309 unsigned int len = sg->length + sg->offset; 1310 1311 if ((sg->offset & 0x3) || (i && sg->offset) || 1312 ((i != last_sgidx) && len != PAGE_SIZE)) { 1313 log_debug(1 << CXGBI_DBG_DDP, 1314 "sg %u/%u, %u,%u, not aligned.\n", 1315 i, nents, sg->offset, sg->length); 1316 goto err_out; 1317 } 1318 } 1319 return 0; 1320 err_out: 1321 return -EINVAL; 1322 } 1323 1324 static int cxgbi_ddp_reserve(struct cxgbi_conn *cconn, 1325 struct cxgbi_task_data *tdata, u32 sw_tag, 1326 unsigned int xferlen) 1327 { 1328 struct cxgbi_sock *csk = cconn->cep->csk; 1329 struct cxgbi_device *cdev = csk->cdev; 1330 struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); 1331 struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; 1332 struct scatterlist *sgl = ttinfo->sgl; 1333 unsigned int sgcnt = ttinfo->nents; 1334 unsigned int sg_offset = sgl->offset; 1335 int err; 1336 1337 if (cdev->flags & CXGBI_FLAG_DDP_OFF) { 1338 log_debug(1 << CXGBI_DBG_DDP, 1339 "cdev 0x%p DDP off.\n", cdev); 1340 return -EINVAL; 1341 } 1342 1343 if (!ppm || xferlen < DDP_THRESHOLD || !sgcnt || 1344 ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX) { 1345 log_debug(1 << CXGBI_DBG_DDP, 1346 "ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n", 1347 ppm, ppm ? ppm->tformat.pgsz_idx_dflt : DDP_PGIDX_MAX, 1348 xferlen, ttinfo->nents); 1349 return -EINVAL; 1350 } 1351 1352 /* make sure the buffer is suitable for ddp */ 1353 if (cxgbi_ddp_sgl_check(sgl, sgcnt) < 0) 1354 return -EINVAL; 1355 1356 ttinfo->nr_pages = (xferlen + sgl->offset + (1 << PAGE_SHIFT) - 1) >> 1357 PAGE_SHIFT; 1358 1359 /* 1360 * the ddp tag will be used for the itt in the outgoing pdu, 1361 * the itt genrated by libiscsi is saved in the ppm and can be 1362 * retrieved via the ddp tag 1363 */ 1364 err = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx, 1365 &ttinfo->tag, (unsigned long)sw_tag); 1366 if (err < 0) { 1367 cconn->ddp_full++; 1368 return err; 1369 } 1370 ttinfo->npods = err; 1371 1372 /* setup dma from scsi command sgl */ 1373 sgl->offset = 0; 1374 err = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); 1375 sgl->offset = sg_offset; 1376 if (err == 0) { 1377 pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n", 1378 __func__, sw_tag, xferlen, sgcnt); 1379 goto rel_ppods; 1380 } 1381 if (err != ttinfo->nr_pages) { 1382 log_debug(1 << CXGBI_DBG_DDP, 1383 "%s: sw tag 0x%x, xfer %u, sgl %u, dma count %d.\n", 1384 __func__, sw_tag, xferlen, sgcnt, err); 1385 } 1386 1387 ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_MAPPED; 1388 ttinfo->cid = csk->port_id; 1389 1390 cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset, 1391 xferlen, &ttinfo->hdr); 1392 1393 if (cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ) { 1394 /* write ppod from xmit_pdu (of iscsi_scsi_command pdu) */ 1395 ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_VALID; 1396 } else { 1397 /* write ppod from control queue now */ 1398 err = cdev->csk_ddp_set_map(ppm, csk, ttinfo); 1399 if (err < 0) 1400 goto rel_ppods; 1401 } 1402 1403 return 0; 1404 1405 rel_ppods: 1406 cxgbi_ppm_ppod_release(ppm, ttinfo->idx); 1407 1408 if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_MAPPED) { 1409 ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_MAPPED; 1410 dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); 1411 } 1412 return -EINVAL; 1413 } 1414 1415 static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt) 1416 { 1417 struct scsi_cmnd *sc = task->sc; 1418 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; 1419 struct cxgbi_conn *cconn = tcp_conn->dd_data; 1420 struct cxgbi_device *cdev = cconn->chba->cdev; 1421 struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); 1422 u32 tag = ntohl((__force u32)hdr_itt); 1423 1424 log_debug(1 << CXGBI_DBG_DDP, 1425 "cdev 0x%p, task 0x%p, release tag 0x%x.\n", 1426 cdev, task, tag); 1427 if (sc && 1428 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) && 1429 cxgbi_ppm_is_ddp_tag(ppm, tag)) { 1430 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); 1431 struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; 1432 1433 if (!(cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ)) 1434 cdev->csk_ddp_clear_map(cdev, ppm, ttinfo); 1435 cxgbi_ppm_ppod_release(ppm, ttinfo->idx); 1436 dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents, 1437 DMA_FROM_DEVICE); 1438 } 1439 } 1440 1441 static inline u32 cxgbi_build_sw_tag(u32 idx, u32 age) 1442 { 1443 /* assume idx and age both are < 0x7FFF (32767) */ 1444 return (idx << 16) | age; 1445 } 1446 1447 static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt) 1448 { 1449 struct scsi_cmnd *sc = task->sc; 1450 struct iscsi_conn *conn = task->conn; 1451 struct iscsi_session *sess = conn->session; 1452 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1453 struct cxgbi_conn *cconn = tcp_conn->dd_data; 1454 struct cxgbi_device *cdev = cconn->chba->cdev; 1455 struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); 1456 u32 sw_tag = cxgbi_build_sw_tag(task->itt, sess->age); 1457 u32 tag = 0; 1458 int err = -EINVAL; 1459 1460 if (sc && 1461 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) 1462 ) { 1463 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); 1464 struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; 1465 1466 scmd_get_params(sc, &ttinfo->sgl, &ttinfo->nents, 1467 &tdata->dlen, 0); 1468 err = cxgbi_ddp_reserve(cconn, tdata, sw_tag, tdata->dlen); 1469 if (!err) 1470 tag = ttinfo->tag; 1471 else 1472 log_debug(1 << CXGBI_DBG_DDP, 1473 "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n", 1474 cconn->cep->csk, task, tdata->dlen, 1475 ttinfo->nents); 1476 } 1477 1478 if (err < 0) { 1479 err = cxgbi_ppm_make_non_ddp_tag(ppm, sw_tag, &tag); 1480 if (err < 0) 1481 return err; 1482 } 1483 /* the itt need to sent in big-endian order */ 1484 *hdr_itt = (__force itt_t)htonl(tag); 1485 1486 log_debug(1 << CXGBI_DBG_DDP, 1487 "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n", 1488 cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt); 1489 return 0; 1490 } 1491 1492 void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age) 1493 { 1494 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1495 struct cxgbi_conn *cconn = tcp_conn->dd_data; 1496 struct cxgbi_device *cdev = cconn->chba->cdev; 1497 struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); 1498 u32 tag = ntohl((__force u32)itt); 1499 u32 sw_bits; 1500 1501 if (ppm) { 1502 if (cxgbi_ppm_is_ddp_tag(ppm, tag)) 1503 sw_bits = cxgbi_ppm_get_tag_caller_data(ppm, tag); 1504 else 1505 sw_bits = cxgbi_ppm_decode_non_ddp_tag(ppm, tag); 1506 } else { 1507 sw_bits = tag; 1508 } 1509 1510 cxgbi_decode_sw_tag(sw_bits, idx, age); 1511 log_debug(1 << CXGBI_DBG_DDP, 1512 "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n", 1513 cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF, 1514 age ? *age : 0xFF); 1515 } 1516 EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt); 1517 1518 void cxgbi_conn_tx_open(struct cxgbi_sock *csk) 1519 { 1520 struct iscsi_conn *conn = csk->user_data; 1521 1522 if (conn) { 1523 log_debug(1 << CXGBI_DBG_SOCK, 1524 "csk 0x%p, cid %d.\n", csk, conn->id); 1525 iscsi_conn_queue_work(conn); 1526 } 1527 } 1528 EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open); 1529 1530 /* 1531 * pdu receive, interact with libiscsi_tcp 1532 */ 1533 static inline int read_pdu_skb(struct iscsi_conn *conn, 1534 struct sk_buff *skb, 1535 unsigned int offset, 1536 int offloaded) 1537 { 1538 int status = 0; 1539 int bytes_read; 1540 1541 bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status); 1542 switch (status) { 1543 case ISCSI_TCP_CONN_ERR: 1544 pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n", 1545 skb, offset, offloaded); 1546 return -EIO; 1547 case ISCSI_TCP_SUSPENDED: 1548 log_debug(1 << CXGBI_DBG_PDU_RX, 1549 "skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n", 1550 skb, offset, offloaded, bytes_read); 1551 /* no transfer - just have caller flush queue */ 1552 return bytes_read; 1553 case ISCSI_TCP_SKB_DONE: 1554 pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n", 1555 skb, offset, offloaded); 1556 /* 1557 * pdus should always fit in the skb and we should get 1558 * segment done notifcation. 1559 */ 1560 iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb."); 1561 return -EFAULT; 1562 case ISCSI_TCP_SEGMENT_DONE: 1563 log_debug(1 << CXGBI_DBG_PDU_RX, 1564 "skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n", 1565 skb, offset, offloaded, bytes_read); 1566 return bytes_read; 1567 default: 1568 pr_info("skb 0x%p, off %u, %d, invalid status %d.\n", 1569 skb, offset, offloaded, status); 1570 return -EINVAL; 1571 } 1572 } 1573 1574 static int 1575 skb_read_pdu_bhs(struct cxgbi_sock *csk, struct iscsi_conn *conn, 1576 struct sk_buff *skb) 1577 { 1578 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1579 int err; 1580 1581 log_debug(1 << CXGBI_DBG_PDU_RX, 1582 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", 1583 conn, skb, skb->len, cxgbi_skcb_flags(skb)); 1584 1585 if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) { 1586 pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb); 1587 iscsi_conn_failure(conn, ISCSI_ERR_PROTO); 1588 return -EIO; 1589 } 1590 1591 if (conn->hdrdgst_en && 1592 cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) { 1593 pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb); 1594 iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST); 1595 return -EIO; 1596 } 1597 1598 if (cxgbi_skcb_test_flag(skb, SKCBF_RX_ISCSI_COMPL) && 1599 cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA_DDPD)) { 1600 /* If completion flag is set and data is directly 1601 * placed in to the host memory then update 1602 * task->exp_datasn to the datasn in completion 1603 * iSCSI hdr as T6 adapter generates completion only 1604 * for the last pdu of a sequence. 1605 */ 1606 itt_t itt = ((struct iscsi_data *)skb->data)->itt; 1607 struct iscsi_task *task = iscsi_itt_to_ctask(conn, itt); 1608 u32 data_sn = be32_to_cpu(((struct iscsi_data *) 1609 skb->data)->datasn); 1610 if (task && task->sc) { 1611 struct iscsi_tcp_task *tcp_task = task->dd_data; 1612 1613 tcp_task->exp_datasn = data_sn; 1614 } 1615 } 1616 1617 err = read_pdu_skb(conn, skb, 0, 0); 1618 if (likely(err >= 0)) { 1619 struct iscsi_hdr *hdr = (struct iscsi_hdr *)skb->data; 1620 u8 opcode = hdr->opcode & ISCSI_OPCODE_MASK; 1621 1622 if (unlikely(opcode == ISCSI_OP_LOGOUT_RSP)) 1623 cxgbi_sock_set_flag(csk, CTPF_LOGOUT_RSP_RCVD); 1624 } 1625 1626 return err; 1627 } 1628 1629 static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb, 1630 struct sk_buff *skb, unsigned int offset) 1631 { 1632 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1633 bool offloaded = 0; 1634 int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK; 1635 1636 log_debug(1 << CXGBI_DBG_PDU_RX, 1637 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", 1638 conn, skb, skb->len, cxgbi_skcb_flags(skb)); 1639 1640 if (conn->datadgst_en && 1641 cxgbi_skcb_test_flag(lskb, SKCBF_RX_DCRC_ERR)) { 1642 pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n", 1643 conn, lskb, cxgbi_skcb_flags(lskb)); 1644 iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST); 1645 return -EIO; 1646 } 1647 1648 if (iscsi_tcp_recv_segment_is_hdr(tcp_conn)) 1649 return 0; 1650 1651 /* coalesced, add header digest length */ 1652 if (lskb == skb && conn->hdrdgst_en) 1653 offset += ISCSI_DIGEST_SIZE; 1654 1655 if (cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA_DDPD)) 1656 offloaded = 1; 1657 1658 if (opcode == ISCSI_OP_SCSI_DATA_IN) 1659 log_debug(1 << CXGBI_DBG_PDU_RX, 1660 "skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n", 1661 skb, opcode, ntohl(tcp_conn->in.hdr->itt), 1662 tcp_conn->in.datalen, offloaded ? "is" : "not"); 1663 1664 return read_pdu_skb(conn, skb, offset, offloaded); 1665 } 1666 1667 static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied) 1668 { 1669 struct cxgbi_device *cdev = csk->cdev; 1670 int must_send; 1671 u32 credits; 1672 1673 log_debug(1 << CXGBI_DBG_PDU_RX, 1674 "csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n", 1675 csk, csk->state, csk->flags, csk->tid, csk->copied_seq, 1676 csk->rcv_wup, cdev->rx_credit_thres, 1677 csk->rcv_win); 1678 1679 if (!cdev->rx_credit_thres) 1680 return; 1681 1682 if (csk->state != CTP_ESTABLISHED) 1683 return; 1684 1685 credits = csk->copied_seq - csk->rcv_wup; 1686 if (unlikely(!credits)) 1687 return; 1688 must_send = credits + 16384 >= csk->rcv_win; 1689 if (must_send || credits >= cdev->rx_credit_thres) 1690 csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits); 1691 } 1692 1693 void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk) 1694 { 1695 struct cxgbi_device *cdev = csk->cdev; 1696 struct iscsi_conn *conn = csk->user_data; 1697 struct sk_buff *skb; 1698 unsigned int read = 0; 1699 int err = 0; 1700 1701 log_debug(1 << CXGBI_DBG_PDU_RX, 1702 "csk 0x%p, conn 0x%p.\n", csk, conn); 1703 1704 if (unlikely(!conn || conn->suspend_rx)) { 1705 log_debug(1 << CXGBI_DBG_PDU_RX, 1706 "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n", 1707 csk, conn, conn ? conn->id : 0xFF, 1708 conn ? conn->suspend_rx : 0xFF); 1709 return; 1710 } 1711 1712 while (!err) { 1713 skb = skb_peek(&csk->receive_queue); 1714 if (!skb || 1715 !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) { 1716 if (skb) 1717 log_debug(1 << CXGBI_DBG_PDU_RX, 1718 "skb 0x%p, NOT ready 0x%lx.\n", 1719 skb, cxgbi_skcb_flags(skb)); 1720 break; 1721 } 1722 __skb_unlink(skb, &csk->receive_queue); 1723 1724 read += cxgbi_skcb_rx_pdulen(skb); 1725 log_debug(1 << CXGBI_DBG_PDU_RX, 1726 "csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n", 1727 csk, skb, skb->len, cxgbi_skcb_flags(skb), 1728 cxgbi_skcb_rx_pdulen(skb)); 1729 1730 if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) { 1731 err = skb_read_pdu_bhs(csk, conn, skb); 1732 if (err < 0) { 1733 pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, " 1734 "f 0x%lx, plen %u.\n", 1735 csk, skb, skb->len, 1736 cxgbi_skcb_flags(skb), 1737 cxgbi_skcb_rx_pdulen(skb)); 1738 goto skb_done; 1739 } 1740 err = skb_read_pdu_data(conn, skb, skb, 1741 err + cdev->skb_rx_extra); 1742 if (err < 0) 1743 pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, " 1744 "f 0x%lx, plen %u.\n", 1745 csk, skb, skb->len, 1746 cxgbi_skcb_flags(skb), 1747 cxgbi_skcb_rx_pdulen(skb)); 1748 } else { 1749 err = skb_read_pdu_bhs(csk, conn, skb); 1750 if (err < 0) { 1751 pr_err("bhs, csk 0x%p, skb 0x%p,%u, " 1752 "f 0x%lx, plen %u.\n", 1753 csk, skb, skb->len, 1754 cxgbi_skcb_flags(skb), 1755 cxgbi_skcb_rx_pdulen(skb)); 1756 goto skb_done; 1757 } 1758 1759 if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) { 1760 struct sk_buff *dskb; 1761 1762 dskb = skb_peek(&csk->receive_queue); 1763 if (!dskb) { 1764 pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx," 1765 " plen %u, NO data.\n", 1766 csk, skb, skb->len, 1767 cxgbi_skcb_flags(skb), 1768 cxgbi_skcb_rx_pdulen(skb)); 1769 err = -EIO; 1770 goto skb_done; 1771 } 1772 __skb_unlink(dskb, &csk->receive_queue); 1773 1774 err = skb_read_pdu_data(conn, skb, dskb, 0); 1775 if (err < 0) 1776 pr_err("data, csk 0x%p, skb 0x%p,%u, " 1777 "f 0x%lx, plen %u, dskb 0x%p," 1778 "%u.\n", 1779 csk, skb, skb->len, 1780 cxgbi_skcb_flags(skb), 1781 cxgbi_skcb_rx_pdulen(skb), 1782 dskb, dskb->len); 1783 __kfree_skb(dskb); 1784 } else 1785 err = skb_read_pdu_data(conn, skb, skb, 0); 1786 } 1787 skb_done: 1788 __kfree_skb(skb); 1789 1790 if (err < 0) 1791 break; 1792 } 1793 1794 log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read); 1795 if (read) { 1796 csk->copied_seq += read; 1797 csk_return_rx_credits(csk, read); 1798 conn->rxdata_octets += read; 1799 } 1800 1801 if (err < 0) { 1802 pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n", 1803 csk, conn, err, read); 1804 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1805 } 1806 } 1807 EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready); 1808 1809 static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt, 1810 unsigned int offset, unsigned int *off, 1811 struct scatterlist **sgp) 1812 { 1813 int i; 1814 struct scatterlist *sg; 1815 1816 for_each_sg(sgl, sg, sgcnt, i) { 1817 if (offset < sg->length) { 1818 *off = offset; 1819 *sgp = sg; 1820 return 0; 1821 } 1822 offset -= sg->length; 1823 } 1824 return -EFAULT; 1825 } 1826 1827 static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset, 1828 unsigned int dlen, struct page_frag *frags, 1829 int frag_max) 1830 { 1831 unsigned int datalen = dlen; 1832 unsigned int sglen = sg->length - sgoffset; 1833 struct page *page = sg_page(sg); 1834 int i; 1835 1836 i = 0; 1837 do { 1838 unsigned int copy; 1839 1840 if (!sglen) { 1841 sg = sg_next(sg); 1842 if (!sg) { 1843 pr_warn("sg %d NULL, len %u/%u.\n", 1844 i, datalen, dlen); 1845 return -EINVAL; 1846 } 1847 sgoffset = 0; 1848 sglen = sg->length; 1849 page = sg_page(sg); 1850 1851 } 1852 copy = min(datalen, sglen); 1853 if (i && page == frags[i - 1].page && 1854 sgoffset + sg->offset == 1855 frags[i - 1].offset + frags[i - 1].size) { 1856 frags[i - 1].size += copy; 1857 } else { 1858 if (i >= frag_max) { 1859 pr_warn("too many pages %u, dlen %u.\n", 1860 frag_max, dlen); 1861 return -EINVAL; 1862 } 1863 1864 frags[i].page = page; 1865 frags[i].offset = sg->offset + sgoffset; 1866 frags[i].size = copy; 1867 i++; 1868 } 1869 datalen -= copy; 1870 sgoffset += copy; 1871 sglen -= copy; 1872 } while (datalen); 1873 1874 return i; 1875 } 1876 1877 int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode) 1878 { 1879 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; 1880 struct cxgbi_conn *cconn = tcp_conn->dd_data; 1881 struct cxgbi_device *cdev = cconn->chba->cdev; 1882 struct iscsi_conn *conn = task->conn; 1883 struct iscsi_tcp_task *tcp_task = task->dd_data; 1884 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); 1885 struct scsi_cmnd *sc = task->sc; 1886 int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX; 1887 1888 tcp_task->dd_data = tdata; 1889 task->hdr = NULL; 1890 1891 if (tdata->skb) { 1892 kfree_skb(tdata->skb); 1893 tdata->skb = NULL; 1894 } 1895 1896 if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) && 1897 (opcode == ISCSI_OP_SCSI_DATA_OUT || 1898 (opcode == ISCSI_OP_SCSI_CMD && 1899 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE)))) 1900 /* data could goes into skb head */ 1901 headroom += min_t(unsigned int, 1902 SKB_MAX_HEAD(cdev->skb_tx_rsvd), 1903 conn->max_xmit_dlength); 1904 1905 tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC); 1906 if (!tdata->skb) { 1907 struct cxgbi_sock *csk = cconn->cep->csk; 1908 struct net_device *ndev = cdev->ports[csk->port_id]; 1909 ndev->stats.tx_dropped++; 1910 return -ENOMEM; 1911 } 1912 1913 skb_get(tdata->skb); 1914 skb_reserve(tdata->skb, cdev->skb_tx_rsvd); 1915 task->hdr = (struct iscsi_hdr *)tdata->skb->data; 1916 task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */ 1917 1918 /* data_out uses scsi_cmd's itt */ 1919 if (opcode != ISCSI_OP_SCSI_DATA_OUT) 1920 task_reserve_itt(task, &task->hdr->itt); 1921 1922 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 1923 "task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n", 1924 task, opcode, tdata->skb, cdev->skb_tx_rsvd, headroom, 1925 conn->max_xmit_dlength, ntohl(task->hdr->itt)); 1926 1927 return 0; 1928 } 1929 EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu); 1930 1931 static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc) 1932 { 1933 if (hcrc || dcrc) { 1934 u8 submode = 0; 1935 1936 if (hcrc) 1937 submode |= 1; 1938 if (dcrc) 1939 submode |= 2; 1940 cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode; 1941 } else 1942 cxgbi_skcb_ulp_mode(skb) = 0; 1943 } 1944 1945 int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset, 1946 unsigned int count) 1947 { 1948 struct iscsi_conn *conn = task->conn; 1949 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); 1950 struct sk_buff *skb = tdata->skb; 1951 unsigned int datalen = count; 1952 int i, padlen = iscsi_padding(count); 1953 struct page *pg; 1954 1955 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 1956 "task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n", 1957 task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK, 1958 ntohl(task->cmdsn), ntohl(task->hdr->itt), offset, count); 1959 1960 skb_put(skb, task->hdr_len); 1961 tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0); 1962 if (!count) 1963 return 0; 1964 1965 if (task->sc) { 1966 struct scsi_data_buffer *sdb = scsi_out(task->sc); 1967 struct scatterlist *sg = NULL; 1968 int err; 1969 1970 tdata->offset = offset; 1971 tdata->count = count; 1972 err = sgl_seek_offset( 1973 sdb->table.sgl, sdb->table.nents, 1974 tdata->offset, &tdata->sgoffset, &sg); 1975 if (err < 0) { 1976 pr_warn("tpdu, sgl %u, bad offset %u/%u.\n", 1977 sdb->table.nents, tdata->offset, sdb->length); 1978 return err; 1979 } 1980 err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count, 1981 tdata->frags, MAX_PDU_FRAGS); 1982 if (err < 0) { 1983 pr_warn("tpdu, sgl %u, bad offset %u + %u.\n", 1984 sdb->table.nents, tdata->offset, tdata->count); 1985 return err; 1986 } 1987 tdata->nr_frags = err; 1988 1989 if (tdata->nr_frags > MAX_SKB_FRAGS || 1990 (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) { 1991 char *dst = skb->data + task->hdr_len; 1992 struct page_frag *frag = tdata->frags; 1993 1994 /* data fits in the skb's headroom */ 1995 for (i = 0; i < tdata->nr_frags; i++, frag++) { 1996 char *src = kmap_atomic(frag->page); 1997 1998 memcpy(dst, src+frag->offset, frag->size); 1999 dst += frag->size; 2000 kunmap_atomic(src); 2001 } 2002 if (padlen) { 2003 memset(dst, 0, padlen); 2004 padlen = 0; 2005 } 2006 skb_put(skb, count + padlen); 2007 } else { 2008 /* data fit into frag_list */ 2009 for (i = 0; i < tdata->nr_frags; i++) { 2010 __skb_fill_page_desc(skb, i, 2011 tdata->frags[i].page, 2012 tdata->frags[i].offset, 2013 tdata->frags[i].size); 2014 skb_frag_ref(skb, i); 2015 } 2016 skb_shinfo(skb)->nr_frags = tdata->nr_frags; 2017 skb->len += count; 2018 skb->data_len += count; 2019 skb->truesize += count; 2020 } 2021 2022 } else { 2023 pg = virt_to_page(task->data); 2024 2025 get_page(pg); 2026 skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data), 2027 count); 2028 skb->len += count; 2029 skb->data_len += count; 2030 skb->truesize += count; 2031 } 2032 2033 if (padlen) { 2034 i = skb_shinfo(skb)->nr_frags; 2035 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 2036 virt_to_page(padding), offset_in_page(padding), 2037 padlen); 2038 2039 skb->data_len += padlen; 2040 skb->truesize += padlen; 2041 skb->len += padlen; 2042 } 2043 2044 return 0; 2045 } 2046 EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu); 2047 2048 int cxgbi_conn_xmit_pdu(struct iscsi_task *task) 2049 { 2050 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; 2051 struct cxgbi_conn *cconn = tcp_conn->dd_data; 2052 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); 2053 struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; 2054 struct sk_buff *skb = tdata->skb; 2055 struct cxgbi_sock *csk = NULL; 2056 unsigned int datalen; 2057 int err; 2058 2059 if (!skb || cxgbi_skcb_test_flag(skb, SKCBF_TX_DONE)) { 2060 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 2061 "task 0x%p, skb 0x%p\n", task, skb); 2062 return 0; 2063 } 2064 2065 if (cconn && cconn->cep) 2066 csk = cconn->cep->csk; 2067 if (!csk) { 2068 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 2069 "task 0x%p, csk gone.\n", task); 2070 return -EPIPE; 2071 } 2072 2073 datalen = skb->data_len; 2074 2075 /* write ppod first if using ofldq to write ppod */ 2076 if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) { 2077 struct cxgbi_ppm *ppm = csk->cdev->cdev2ppm(csk->cdev); 2078 2079 ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_VALID; 2080 if (csk->cdev->csk_ddp_set_map(ppm, csk, ttinfo) < 0) 2081 pr_err("task 0x%p, ppod writing using ofldq failed.\n", 2082 task); 2083 /* continue. Let fl get the data */ 2084 } 2085 2086 err = cxgbi_sock_send_pdus(cconn->cep->csk, skb); 2087 if (err > 0) { 2088 int pdulen = err; 2089 2090 log_debug(1 << CXGBI_DBG_PDU_TX, 2091 "task 0x%p,0x%p, skb 0x%p, len %u/%u, rv %d.\n", 2092 task, task->sc, skb, skb->len, skb->data_len, err); 2093 2094 if (task->conn->hdrdgst_en) 2095 pdulen += ISCSI_DIGEST_SIZE; 2096 2097 if (datalen && task->conn->datadgst_en) 2098 pdulen += ISCSI_DIGEST_SIZE; 2099 2100 task->conn->txdata_octets += pdulen; 2101 cxgbi_skcb_set_flag(skb, SKCBF_TX_DONE); 2102 return 0; 2103 } 2104 2105 if (err == -EAGAIN || err == -ENOBUFS) { 2106 log_debug(1 << CXGBI_DBG_PDU_TX, 2107 "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n", 2108 task, skb, skb->len, skb->data_len, err); 2109 /* reset skb to send when we are called again */ 2110 return err; 2111 } 2112 2113 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 2114 "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n", 2115 task->itt, skb, skb->len, skb->data_len, err); 2116 2117 __kfree_skb(tdata->skb); 2118 tdata->skb = NULL; 2119 2120 iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); 2121 iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED); 2122 return err; 2123 } 2124 EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu); 2125 2126 void cxgbi_cleanup_task(struct iscsi_task *task) 2127 { 2128 struct iscsi_tcp_task *tcp_task = task->dd_data; 2129 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); 2130 2131 log_debug(1 << CXGBI_DBG_ISCSI, 2132 "task 0x%p, skb 0x%p, itt 0x%x.\n", 2133 task, tdata->skb, task->hdr_itt); 2134 2135 tcp_task->dd_data = NULL; 2136 /* never reached the xmit task callout */ 2137 if (tdata->skb) { 2138 kfree_skb(tdata->skb); 2139 tdata->skb = NULL; 2140 } 2141 2142 task_release_itt(task, task->hdr_itt); 2143 memset(tdata, 0, sizeof(*tdata)); 2144 2145 iscsi_tcp_cleanup_task(task); 2146 } 2147 EXPORT_SYMBOL_GPL(cxgbi_cleanup_task); 2148 2149 void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn, 2150 struct iscsi_stats *stats) 2151 { 2152 struct iscsi_conn *conn = cls_conn->dd_data; 2153 2154 stats->txdata_octets = conn->txdata_octets; 2155 stats->rxdata_octets = conn->rxdata_octets; 2156 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; 2157 stats->dataout_pdus = conn->dataout_pdus_cnt; 2158 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; 2159 stats->datain_pdus = conn->datain_pdus_cnt; 2160 stats->r2t_pdus = conn->r2t_pdus_cnt; 2161 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; 2162 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; 2163 stats->digest_err = 0; 2164 stats->timeout_err = 0; 2165 stats->custom_length = 1; 2166 strcpy(stats->custom[0].desc, "eh_abort_cnt"); 2167 stats->custom[0].value = conn->eh_abort_cnt; 2168 } 2169 EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats); 2170 2171 static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn) 2172 { 2173 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 2174 struct cxgbi_conn *cconn = tcp_conn->dd_data; 2175 struct cxgbi_device *cdev = cconn->chba->cdev; 2176 unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd); 2177 unsigned int max_def = 512 * MAX_SKB_FRAGS; 2178 unsigned int max = max(max_def, headroom); 2179 2180 max = min(cconn->chba->cdev->tx_max_size, max); 2181 if (conn->max_xmit_dlength) 2182 conn->max_xmit_dlength = min(conn->max_xmit_dlength, max); 2183 else 2184 conn->max_xmit_dlength = max; 2185 cxgbi_align_pdu_size(conn->max_xmit_dlength); 2186 2187 return 0; 2188 } 2189 2190 static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn) 2191 { 2192 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 2193 struct cxgbi_conn *cconn = tcp_conn->dd_data; 2194 unsigned int max = cconn->chba->cdev->rx_max_size; 2195 2196 cxgbi_align_pdu_size(max); 2197 2198 if (conn->max_recv_dlength) { 2199 if (conn->max_recv_dlength > max) { 2200 pr_err("MaxRecvDataSegmentLength %u > %u.\n", 2201 conn->max_recv_dlength, max); 2202 return -EINVAL; 2203 } 2204 conn->max_recv_dlength = min(conn->max_recv_dlength, max); 2205 cxgbi_align_pdu_size(conn->max_recv_dlength); 2206 } else 2207 conn->max_recv_dlength = max; 2208 2209 return 0; 2210 } 2211 2212 int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn, 2213 enum iscsi_param param, char *buf, int buflen) 2214 { 2215 struct iscsi_conn *conn = cls_conn->dd_data; 2216 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 2217 struct cxgbi_conn *cconn = tcp_conn->dd_data; 2218 struct cxgbi_sock *csk = cconn->cep->csk; 2219 int err; 2220 2221 log_debug(1 << CXGBI_DBG_ISCSI, 2222 "cls_conn 0x%p, param %d, buf(%d) %s.\n", 2223 cls_conn, param, buflen, buf); 2224 2225 switch (param) { 2226 case ISCSI_PARAM_HDRDGST_EN: 2227 err = iscsi_set_param(cls_conn, param, buf, buflen); 2228 if (!err && conn->hdrdgst_en) 2229 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, 2230 conn->hdrdgst_en, 2231 conn->datadgst_en, 0); 2232 break; 2233 case ISCSI_PARAM_DATADGST_EN: 2234 err = iscsi_set_param(cls_conn, param, buf, buflen); 2235 if (!err && conn->datadgst_en) 2236 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, 2237 conn->hdrdgst_en, 2238 conn->datadgst_en, 0); 2239 break; 2240 case ISCSI_PARAM_MAX_R2T: 2241 return iscsi_tcp_set_max_r2t(conn, buf); 2242 case ISCSI_PARAM_MAX_RECV_DLENGTH: 2243 err = iscsi_set_param(cls_conn, param, buf, buflen); 2244 if (!err) 2245 err = cxgbi_conn_max_recv_dlength(conn); 2246 break; 2247 case ISCSI_PARAM_MAX_XMIT_DLENGTH: 2248 err = iscsi_set_param(cls_conn, param, buf, buflen); 2249 if (!err) 2250 err = cxgbi_conn_max_xmit_dlength(conn); 2251 break; 2252 default: 2253 return iscsi_set_param(cls_conn, param, buf, buflen); 2254 } 2255 return err; 2256 } 2257 EXPORT_SYMBOL_GPL(cxgbi_set_conn_param); 2258 2259 static inline int csk_print_port(struct cxgbi_sock *csk, char *buf) 2260 { 2261 int len; 2262 2263 cxgbi_sock_get(csk); 2264 len = sprintf(buf, "%hu\n", ntohs(csk->daddr.sin_port)); 2265 cxgbi_sock_put(csk); 2266 2267 return len; 2268 } 2269 2270 static inline int csk_print_ip(struct cxgbi_sock *csk, char *buf) 2271 { 2272 int len; 2273 2274 cxgbi_sock_get(csk); 2275 if (csk->csk_family == AF_INET) 2276 len = sprintf(buf, "%pI4", 2277 &csk->daddr.sin_addr.s_addr); 2278 else 2279 len = sprintf(buf, "%pI6", 2280 &csk->daddr6.sin6_addr); 2281 2282 cxgbi_sock_put(csk); 2283 2284 return len; 2285 } 2286 2287 int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param, 2288 char *buf) 2289 { 2290 struct cxgbi_endpoint *cep = ep->dd_data; 2291 struct cxgbi_sock *csk; 2292 int len; 2293 2294 log_debug(1 << CXGBI_DBG_ISCSI, 2295 "cls_conn 0x%p, param %d.\n", ep, param); 2296 2297 switch (param) { 2298 case ISCSI_PARAM_CONN_PORT: 2299 case ISCSI_PARAM_CONN_ADDRESS: 2300 if (!cep) 2301 return -ENOTCONN; 2302 2303 csk = cep->csk; 2304 if (!csk) 2305 return -ENOTCONN; 2306 2307 return iscsi_conn_get_addr_param((struct sockaddr_storage *) 2308 &csk->daddr, param, buf); 2309 default: 2310 return -ENOSYS; 2311 } 2312 return len; 2313 } 2314 EXPORT_SYMBOL_GPL(cxgbi_get_ep_param); 2315 2316 struct iscsi_cls_conn * 2317 cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid) 2318 { 2319 struct iscsi_cls_conn *cls_conn; 2320 struct iscsi_conn *conn; 2321 struct iscsi_tcp_conn *tcp_conn; 2322 struct cxgbi_conn *cconn; 2323 2324 cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid); 2325 if (!cls_conn) 2326 return NULL; 2327 2328 conn = cls_conn->dd_data; 2329 tcp_conn = conn->dd_data; 2330 cconn = tcp_conn->dd_data; 2331 cconn->iconn = conn; 2332 2333 log_debug(1 << CXGBI_DBG_ISCSI, 2334 "cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n", 2335 cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn); 2336 2337 return cls_conn; 2338 } 2339 EXPORT_SYMBOL_GPL(cxgbi_create_conn); 2340 2341 int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, 2342 struct iscsi_cls_conn *cls_conn, 2343 u64 transport_eph, int is_leading) 2344 { 2345 struct iscsi_conn *conn = cls_conn->dd_data; 2346 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 2347 struct cxgbi_conn *cconn = tcp_conn->dd_data; 2348 struct cxgbi_ppm *ppm; 2349 struct iscsi_endpoint *ep; 2350 struct cxgbi_endpoint *cep; 2351 struct cxgbi_sock *csk; 2352 int err; 2353 2354 ep = iscsi_lookup_endpoint(transport_eph); 2355 if (!ep) 2356 return -EINVAL; 2357 2358 /* setup ddp pagesize */ 2359 cep = ep->dd_data; 2360 csk = cep->csk; 2361 2362 ppm = csk->cdev->cdev2ppm(csk->cdev); 2363 err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, 2364 ppm->tformat.pgsz_idx_dflt, 0); 2365 if (err < 0) 2366 return err; 2367 2368 err = iscsi_conn_bind(cls_session, cls_conn, is_leading); 2369 if (err) 2370 return -EINVAL; 2371 2372 /* calculate the tag idx bits needed for this conn based on cmds_max */ 2373 cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1; 2374 2375 write_lock_bh(&csk->callback_lock); 2376 csk->user_data = conn; 2377 cconn->chba = cep->chba; 2378 cconn->cep = cep; 2379 cep->cconn = cconn; 2380 write_unlock_bh(&csk->callback_lock); 2381 2382 cxgbi_conn_max_xmit_dlength(conn); 2383 cxgbi_conn_max_recv_dlength(conn); 2384 2385 log_debug(1 << CXGBI_DBG_ISCSI, 2386 "cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n", 2387 cls_session, cls_conn, ep, cconn, csk); 2388 /* init recv engine */ 2389 iscsi_tcp_hdr_recv_prep(tcp_conn); 2390 2391 return 0; 2392 } 2393 EXPORT_SYMBOL_GPL(cxgbi_bind_conn); 2394 2395 struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep, 2396 u16 cmds_max, u16 qdepth, 2397 u32 initial_cmdsn) 2398 { 2399 struct cxgbi_endpoint *cep; 2400 struct cxgbi_hba *chba; 2401 struct Scsi_Host *shost; 2402 struct iscsi_cls_session *cls_session; 2403 struct iscsi_session *session; 2404 2405 if (!ep) { 2406 pr_err("missing endpoint.\n"); 2407 return NULL; 2408 } 2409 2410 cep = ep->dd_data; 2411 chba = cep->chba; 2412 shost = chba->shost; 2413 2414 BUG_ON(chba != iscsi_host_priv(shost)); 2415 2416 cls_session = iscsi_session_setup(chba->cdev->itp, shost, 2417 cmds_max, 0, 2418 sizeof(struct iscsi_tcp_task) + 2419 sizeof(struct cxgbi_task_data), 2420 initial_cmdsn, ISCSI_MAX_TARGET); 2421 if (!cls_session) 2422 return NULL; 2423 2424 session = cls_session->dd_data; 2425 if (iscsi_tcp_r2tpool_alloc(session)) 2426 goto remove_session; 2427 2428 log_debug(1 << CXGBI_DBG_ISCSI, 2429 "ep 0x%p, cls sess 0x%p.\n", ep, cls_session); 2430 return cls_session; 2431 2432 remove_session: 2433 iscsi_session_teardown(cls_session); 2434 return NULL; 2435 } 2436 EXPORT_SYMBOL_GPL(cxgbi_create_session); 2437 2438 void cxgbi_destroy_session(struct iscsi_cls_session *cls_session) 2439 { 2440 log_debug(1 << CXGBI_DBG_ISCSI, 2441 "cls sess 0x%p.\n", cls_session); 2442 2443 iscsi_tcp_r2tpool_free(cls_session->dd_data); 2444 iscsi_session_teardown(cls_session); 2445 } 2446 EXPORT_SYMBOL_GPL(cxgbi_destroy_session); 2447 2448 int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, 2449 char *buf, int buflen) 2450 { 2451 struct cxgbi_hba *chba = iscsi_host_priv(shost); 2452 2453 if (!chba->ndev) { 2454 shost_printk(KERN_ERR, shost, "Could not get host param. " 2455 "netdev for host not set.\n"); 2456 return -ENODEV; 2457 } 2458 2459 log_debug(1 << CXGBI_DBG_ISCSI, 2460 "shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n", 2461 shost, chba, chba->ndev->name, param, buflen, buf); 2462 2463 switch (param) { 2464 case ISCSI_HOST_PARAM_IPADDRESS: 2465 { 2466 __be32 addr = in_aton(buf); 2467 log_debug(1 << CXGBI_DBG_ISCSI, 2468 "hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr); 2469 cxgbi_set_iscsi_ipv4(chba, addr); 2470 return 0; 2471 } 2472 case ISCSI_HOST_PARAM_HWADDRESS: 2473 case ISCSI_HOST_PARAM_NETDEV_NAME: 2474 return 0; 2475 default: 2476 return iscsi_host_set_param(shost, param, buf, buflen); 2477 } 2478 } 2479 EXPORT_SYMBOL_GPL(cxgbi_set_host_param); 2480 2481 int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, 2482 char *buf) 2483 { 2484 struct cxgbi_hba *chba = iscsi_host_priv(shost); 2485 int len = 0; 2486 2487 if (!chba->ndev) { 2488 shost_printk(KERN_ERR, shost, "Could not get host param. " 2489 "netdev for host not set.\n"); 2490 return -ENODEV; 2491 } 2492 2493 log_debug(1 << CXGBI_DBG_ISCSI, 2494 "shost 0x%p, hba 0x%p,%s, param %d.\n", 2495 shost, chba, chba->ndev->name, param); 2496 2497 switch (param) { 2498 case ISCSI_HOST_PARAM_HWADDRESS: 2499 len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6); 2500 break; 2501 case ISCSI_HOST_PARAM_NETDEV_NAME: 2502 len = sprintf(buf, "%s\n", chba->ndev->name); 2503 break; 2504 case ISCSI_HOST_PARAM_IPADDRESS: 2505 { 2506 struct cxgbi_sock *csk = find_sock_on_port(chba->cdev, 2507 chba->port_id); 2508 if (csk) { 2509 len = sprintf(buf, "%pIS", 2510 (struct sockaddr *)&csk->saddr); 2511 } 2512 log_debug(1 << CXGBI_DBG_ISCSI, 2513 "hba %s, addr %s.\n", chba->ndev->name, buf); 2514 break; 2515 } 2516 default: 2517 return iscsi_host_get_param(shost, param, buf); 2518 } 2519 2520 return len; 2521 } 2522 EXPORT_SYMBOL_GPL(cxgbi_get_host_param); 2523 2524 struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost, 2525 struct sockaddr *dst_addr, 2526 int non_blocking) 2527 { 2528 struct iscsi_endpoint *ep; 2529 struct cxgbi_endpoint *cep; 2530 struct cxgbi_hba *hba = NULL; 2531 struct cxgbi_sock *csk; 2532 int err = -EINVAL; 2533 2534 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, 2535 "shost 0x%p, non_blocking %d, dst_addr 0x%p.\n", 2536 shost, non_blocking, dst_addr); 2537 2538 if (shost) { 2539 hba = iscsi_host_priv(shost); 2540 if (!hba) { 2541 pr_info("shost 0x%p, priv NULL.\n", shost); 2542 goto err_out; 2543 } 2544 } 2545 2546 if (dst_addr->sa_family == AF_INET) { 2547 csk = cxgbi_check_route(dst_addr); 2548 #if IS_ENABLED(CONFIG_IPV6) 2549 } else if (dst_addr->sa_family == AF_INET6) { 2550 csk = cxgbi_check_route6(dst_addr); 2551 #endif 2552 } else { 2553 pr_info("address family 0x%x NOT supported.\n", 2554 dst_addr->sa_family); 2555 err = -EAFNOSUPPORT; 2556 return (struct iscsi_endpoint *)ERR_PTR(err); 2557 } 2558 2559 if (IS_ERR(csk)) 2560 return (struct iscsi_endpoint *)csk; 2561 cxgbi_sock_get(csk); 2562 2563 if (!hba) 2564 hba = csk->cdev->hbas[csk->port_id]; 2565 else if (hba != csk->cdev->hbas[csk->port_id]) { 2566 pr_info("Could not connect through requested host %u" 2567 "hba 0x%p != 0x%p (%u).\n", 2568 shost->host_no, hba, 2569 csk->cdev->hbas[csk->port_id], csk->port_id); 2570 err = -ENOSPC; 2571 goto release_conn; 2572 } 2573 2574 err = sock_get_port(csk); 2575 if (err) 2576 goto release_conn; 2577 2578 cxgbi_sock_set_state(csk, CTP_CONNECTING); 2579 err = csk->cdev->csk_init_act_open(csk); 2580 if (err) 2581 goto release_conn; 2582 2583 if (cxgbi_sock_is_closing(csk)) { 2584 err = -ENOSPC; 2585 pr_info("csk 0x%p is closing.\n", csk); 2586 goto release_conn; 2587 } 2588 2589 ep = iscsi_create_endpoint(sizeof(*cep)); 2590 if (!ep) { 2591 err = -ENOMEM; 2592 pr_info("iscsi alloc ep, OOM.\n"); 2593 goto release_conn; 2594 } 2595 2596 cep = ep->dd_data; 2597 cep->csk = csk; 2598 cep->chba = hba; 2599 2600 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, 2601 "ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n", 2602 ep, cep, csk, hba, hba->ndev->name); 2603 return ep; 2604 2605 release_conn: 2606 cxgbi_sock_put(csk); 2607 cxgbi_sock_closed(csk); 2608 err_out: 2609 return ERR_PTR(err); 2610 } 2611 EXPORT_SYMBOL_GPL(cxgbi_ep_connect); 2612 2613 int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) 2614 { 2615 struct cxgbi_endpoint *cep = ep->dd_data; 2616 struct cxgbi_sock *csk = cep->csk; 2617 2618 if (!cxgbi_sock_is_established(csk)) 2619 return 0; 2620 return 1; 2621 } 2622 EXPORT_SYMBOL_GPL(cxgbi_ep_poll); 2623 2624 void cxgbi_ep_disconnect(struct iscsi_endpoint *ep) 2625 { 2626 struct cxgbi_endpoint *cep = ep->dd_data; 2627 struct cxgbi_conn *cconn = cep->cconn; 2628 struct cxgbi_sock *csk = cep->csk; 2629 2630 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, 2631 "ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n", 2632 ep, cep, cconn, csk, csk->state, csk->flags); 2633 2634 if (cconn && cconn->iconn) { 2635 iscsi_suspend_tx(cconn->iconn); 2636 write_lock_bh(&csk->callback_lock); 2637 cep->csk->user_data = NULL; 2638 cconn->cep = NULL; 2639 write_unlock_bh(&csk->callback_lock); 2640 } 2641 iscsi_destroy_endpoint(ep); 2642 2643 if (likely(csk->state >= CTP_ESTABLISHED)) 2644 need_active_close(csk); 2645 else 2646 cxgbi_sock_closed(csk); 2647 2648 cxgbi_sock_put(csk); 2649 } 2650 EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect); 2651 2652 int cxgbi_iscsi_init(struct iscsi_transport *itp, 2653 struct scsi_transport_template **stt) 2654 { 2655 *stt = iscsi_register_transport(itp); 2656 if (*stt == NULL) { 2657 pr_err("unable to register %s transport 0x%p.\n", 2658 itp->name, itp); 2659 return -ENODEV; 2660 } 2661 log_debug(1 << CXGBI_DBG_ISCSI, 2662 "%s, registered iscsi transport 0x%p.\n", 2663 itp->name, stt); 2664 return 0; 2665 } 2666 EXPORT_SYMBOL_GPL(cxgbi_iscsi_init); 2667 2668 void cxgbi_iscsi_cleanup(struct iscsi_transport *itp, 2669 struct scsi_transport_template **stt) 2670 { 2671 if (*stt) { 2672 log_debug(1 << CXGBI_DBG_ISCSI, 2673 "de-register transport 0x%p, %s, stt 0x%p.\n", 2674 itp, itp->name, *stt); 2675 *stt = NULL; 2676 iscsi_unregister_transport(itp); 2677 } 2678 } 2679 EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup); 2680 2681 umode_t cxgbi_attr_is_visible(int param_type, int param) 2682 { 2683 switch (param_type) { 2684 case ISCSI_HOST_PARAM: 2685 switch (param) { 2686 case ISCSI_HOST_PARAM_NETDEV_NAME: 2687 case ISCSI_HOST_PARAM_HWADDRESS: 2688 case ISCSI_HOST_PARAM_IPADDRESS: 2689 case ISCSI_HOST_PARAM_INITIATOR_NAME: 2690 return S_IRUGO; 2691 default: 2692 return 0; 2693 } 2694 case ISCSI_PARAM: 2695 switch (param) { 2696 case ISCSI_PARAM_MAX_RECV_DLENGTH: 2697 case ISCSI_PARAM_MAX_XMIT_DLENGTH: 2698 case ISCSI_PARAM_HDRDGST_EN: 2699 case ISCSI_PARAM_DATADGST_EN: 2700 case ISCSI_PARAM_CONN_ADDRESS: 2701 case ISCSI_PARAM_CONN_PORT: 2702 case ISCSI_PARAM_EXP_STATSN: 2703 case ISCSI_PARAM_PERSISTENT_ADDRESS: 2704 case ISCSI_PARAM_PERSISTENT_PORT: 2705 case ISCSI_PARAM_PING_TMO: 2706 case ISCSI_PARAM_RECV_TMO: 2707 case ISCSI_PARAM_INITIAL_R2T_EN: 2708 case ISCSI_PARAM_MAX_R2T: 2709 case ISCSI_PARAM_IMM_DATA_EN: 2710 case ISCSI_PARAM_FIRST_BURST: 2711 case ISCSI_PARAM_MAX_BURST: 2712 case ISCSI_PARAM_PDU_INORDER_EN: 2713 case ISCSI_PARAM_DATASEQ_INORDER_EN: 2714 case ISCSI_PARAM_ERL: 2715 case ISCSI_PARAM_TARGET_NAME: 2716 case ISCSI_PARAM_TPGT: 2717 case ISCSI_PARAM_USERNAME: 2718 case ISCSI_PARAM_PASSWORD: 2719 case ISCSI_PARAM_USERNAME_IN: 2720 case ISCSI_PARAM_PASSWORD_IN: 2721 case ISCSI_PARAM_FAST_ABORT: 2722 case ISCSI_PARAM_ABORT_TMO: 2723 case ISCSI_PARAM_LU_RESET_TMO: 2724 case ISCSI_PARAM_TGT_RESET_TMO: 2725 case ISCSI_PARAM_IFACE_NAME: 2726 case ISCSI_PARAM_INITIATOR_NAME: 2727 return S_IRUGO; 2728 default: 2729 return 0; 2730 } 2731 } 2732 2733 return 0; 2734 } 2735 EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible); 2736 2737 static int __init libcxgbi_init_module(void) 2738 { 2739 pr_info("%s", version); 2740 2741 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) < 2742 sizeof(struct cxgbi_skb_cb)); 2743 return 0; 2744 } 2745 2746 static void __exit libcxgbi_exit_module(void) 2747 { 2748 cxgbi_device_unregister_all(0xFF); 2749 return; 2750 } 2751 2752 module_init(libcxgbi_init_module); 2753 module_exit(libcxgbi_exit_module); 2754