1 /* 2 * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver. 3 * 4 * Copyright (c) 2010-2015 Chelsio Communications, Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation. 9 * 10 * Written by: Karen Xie (kxie@chelsio.com) 11 * Written by: Rakesh Ranjan (rranjan@chelsio.com) 12 */ 13 14 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ 15 16 #include <linux/skbuff.h> 17 #include <linux/crypto.h> 18 #include <linux/scatterlist.h> 19 #include <linux/pci.h> 20 #include <scsi/scsi.h> 21 #include <scsi/scsi_cmnd.h> 22 #include <scsi/scsi_host.h> 23 #include <linux/if_vlan.h> 24 #include <linux/inet.h> 25 #include <net/dst.h> 26 #include <net/route.h> 27 #include <net/ipv6.h> 28 #include <net/ip6_route.h> 29 #include <net/addrconf.h> 30 31 #include <linux/inetdevice.h> /* ip_dev_find */ 32 #include <linux/module.h> 33 #include <net/tcp.h> 34 35 static unsigned int dbg_level; 36 37 #include "libcxgbi.h" 38 39 #define DRV_MODULE_NAME "libcxgbi" 40 #define DRV_MODULE_DESC "Chelsio iSCSI driver library" 41 #define DRV_MODULE_VERSION "0.9.1-ko" 42 #define DRV_MODULE_RELDATE "Apr. 2015" 43 44 static char version[] = 45 DRV_MODULE_DESC " " DRV_MODULE_NAME 46 " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 47 48 MODULE_AUTHOR("Chelsio Communications, Inc."); 49 MODULE_DESCRIPTION(DRV_MODULE_DESC); 50 MODULE_VERSION(DRV_MODULE_VERSION); 51 MODULE_LICENSE("GPL"); 52 53 module_param(dbg_level, uint, 0644); 54 MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)"); 55 56 57 /* 58 * cxgbi device management 59 * maintains a list of the cxgbi devices 60 */ 61 static LIST_HEAD(cdev_list); 62 static DEFINE_MUTEX(cdev_mutex); 63 64 static LIST_HEAD(cdev_rcu_list); 65 static DEFINE_SPINLOCK(cdev_rcu_lock); 66 67 static inline void cxgbi_decode_sw_tag(u32 sw_tag, int *idx, int *age) 68 { 69 if (age) 70 *age = sw_tag & 0x7FFF; 71 if (idx) 72 *idx = (sw_tag >> 16) & 0x7FFF; 73 } 74 75 int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base, 76 unsigned int max_conn) 77 { 78 struct cxgbi_ports_map *pmap = &cdev->pmap; 79 80 pmap->port_csk = cxgbi_alloc_big_mem(max_conn * 81 sizeof(struct cxgbi_sock *), 82 GFP_KERNEL); 83 if (!pmap->port_csk) { 84 pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn); 85 return -ENOMEM; 86 } 87 88 pmap->max_connect = max_conn; 89 pmap->sport_base = base; 90 spin_lock_init(&pmap->lock); 91 return 0; 92 } 93 EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create); 94 95 void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev) 96 { 97 struct cxgbi_ports_map *pmap = &cdev->pmap; 98 struct cxgbi_sock *csk; 99 int i; 100 101 for (i = 0; i < pmap->max_connect; i++) { 102 if (pmap->port_csk[i]) { 103 csk = pmap->port_csk[i]; 104 pmap->port_csk[i] = NULL; 105 log_debug(1 << CXGBI_DBG_SOCK, 106 "csk 0x%p, cdev 0x%p, offload down.\n", 107 csk, cdev); 108 spin_lock_bh(&csk->lock); 109 cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN); 110 cxgbi_sock_closed(csk); 111 spin_unlock_bh(&csk->lock); 112 cxgbi_sock_put(csk); 113 } 114 } 115 } 116 EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup); 117 118 static inline void cxgbi_device_destroy(struct cxgbi_device *cdev) 119 { 120 log_debug(1 << CXGBI_DBG_DEV, 121 "cdev 0x%p, p# %u.\n", cdev, cdev->nports); 122 cxgbi_hbas_remove(cdev); 123 cxgbi_device_portmap_cleanup(cdev); 124 cxgbi_ppm_release(cdev->cdev2ppm(cdev)); 125 if (cdev->pmap.max_connect) 126 cxgbi_free_big_mem(cdev->pmap.port_csk); 127 kfree(cdev); 128 } 129 130 struct cxgbi_device *cxgbi_device_register(unsigned int extra, 131 unsigned int nports) 132 { 133 struct cxgbi_device *cdev; 134 135 cdev = kzalloc(sizeof(*cdev) + extra + nports * 136 (sizeof(struct cxgbi_hba *) + 137 sizeof(struct net_device *)), 138 GFP_KERNEL); 139 if (!cdev) { 140 pr_warn("nport %d, OOM.\n", nports); 141 return NULL; 142 } 143 cdev->ports = (struct net_device **)(cdev + 1); 144 cdev->hbas = (struct cxgbi_hba **)(((char*)cdev->ports) + nports * 145 sizeof(struct net_device *)); 146 if (extra) 147 cdev->dd_data = ((char *)cdev->hbas) + 148 nports * sizeof(struct cxgbi_hba *); 149 spin_lock_init(&cdev->pmap.lock); 150 151 mutex_lock(&cdev_mutex); 152 list_add_tail(&cdev->list_head, &cdev_list); 153 mutex_unlock(&cdev_mutex); 154 155 spin_lock(&cdev_rcu_lock); 156 list_add_tail_rcu(&cdev->rcu_node, &cdev_rcu_list); 157 spin_unlock(&cdev_rcu_lock); 158 159 log_debug(1 << CXGBI_DBG_DEV, 160 "cdev 0x%p, p# %u.\n", cdev, nports); 161 return cdev; 162 } 163 EXPORT_SYMBOL_GPL(cxgbi_device_register); 164 165 void cxgbi_device_unregister(struct cxgbi_device *cdev) 166 { 167 log_debug(1 << CXGBI_DBG_DEV, 168 "cdev 0x%p, p# %u,%s.\n", 169 cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : ""); 170 171 mutex_lock(&cdev_mutex); 172 list_del(&cdev->list_head); 173 mutex_unlock(&cdev_mutex); 174 175 spin_lock(&cdev_rcu_lock); 176 list_del_rcu(&cdev->rcu_node); 177 spin_unlock(&cdev_rcu_lock); 178 synchronize_rcu(); 179 180 cxgbi_device_destroy(cdev); 181 } 182 EXPORT_SYMBOL_GPL(cxgbi_device_unregister); 183 184 void cxgbi_device_unregister_all(unsigned int flag) 185 { 186 struct cxgbi_device *cdev, *tmp; 187 188 mutex_lock(&cdev_mutex); 189 list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { 190 if ((cdev->flags & flag) == flag) { 191 mutex_unlock(&cdev_mutex); 192 cxgbi_device_unregister(cdev); 193 mutex_lock(&cdev_mutex); 194 } 195 } 196 mutex_unlock(&cdev_mutex); 197 } 198 EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all); 199 200 struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev) 201 { 202 struct cxgbi_device *cdev, *tmp; 203 204 mutex_lock(&cdev_mutex); 205 list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { 206 if (cdev->lldev == lldev) { 207 mutex_unlock(&cdev_mutex); 208 return cdev; 209 } 210 } 211 mutex_unlock(&cdev_mutex); 212 213 log_debug(1 << CXGBI_DBG_DEV, 214 "lldev 0x%p, NO match found.\n", lldev); 215 return NULL; 216 } 217 EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev); 218 219 struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev, 220 int *port) 221 { 222 struct net_device *vdev = NULL; 223 struct cxgbi_device *cdev, *tmp; 224 int i; 225 226 if (is_vlan_dev(ndev)) { 227 vdev = ndev; 228 ndev = vlan_dev_real_dev(ndev); 229 log_debug(1 << CXGBI_DBG_DEV, 230 "vlan dev %s -> %s.\n", vdev->name, ndev->name); 231 } 232 233 mutex_lock(&cdev_mutex); 234 list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { 235 for (i = 0; i < cdev->nports; i++) { 236 if (ndev == cdev->ports[i]) { 237 cdev->hbas[i]->vdev = vdev; 238 mutex_unlock(&cdev_mutex); 239 if (port) 240 *port = i; 241 return cdev; 242 } 243 } 244 } 245 mutex_unlock(&cdev_mutex); 246 log_debug(1 << CXGBI_DBG_DEV, 247 "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name); 248 return NULL; 249 } 250 EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev); 251 252 struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev, 253 int *port) 254 { 255 struct net_device *vdev = NULL; 256 struct cxgbi_device *cdev; 257 int i; 258 259 if (is_vlan_dev(ndev)) { 260 vdev = ndev; 261 ndev = vlan_dev_real_dev(ndev); 262 pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name); 263 } 264 265 rcu_read_lock(); 266 list_for_each_entry_rcu(cdev, &cdev_rcu_list, rcu_node) { 267 for (i = 0; i < cdev->nports; i++) { 268 if (ndev == cdev->ports[i]) { 269 cdev->hbas[i]->vdev = vdev; 270 rcu_read_unlock(); 271 if (port) 272 *port = i; 273 return cdev; 274 } 275 } 276 } 277 rcu_read_unlock(); 278 279 log_debug(1 << CXGBI_DBG_DEV, 280 "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name); 281 return NULL; 282 } 283 EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu); 284 285 #if IS_ENABLED(CONFIG_IPV6) 286 static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev, 287 int *port) 288 { 289 struct net_device *vdev = NULL; 290 struct cxgbi_device *cdev, *tmp; 291 int i; 292 293 if (is_vlan_dev(ndev)) { 294 vdev = ndev; 295 ndev = vlan_dev_real_dev(ndev); 296 pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name); 297 } 298 299 mutex_lock(&cdev_mutex); 300 list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { 301 for (i = 0; i < cdev->nports; i++) { 302 if (!memcmp(ndev->dev_addr, cdev->ports[i]->dev_addr, 303 MAX_ADDR_LEN)) { 304 cdev->hbas[i]->vdev = vdev; 305 mutex_unlock(&cdev_mutex); 306 if (port) 307 *port = i; 308 return cdev; 309 } 310 } 311 } 312 mutex_unlock(&cdev_mutex); 313 log_debug(1 << CXGBI_DBG_DEV, 314 "ndev 0x%p, %s, NO match mac found.\n", 315 ndev, ndev->name); 316 return NULL; 317 } 318 #endif 319 320 void cxgbi_hbas_remove(struct cxgbi_device *cdev) 321 { 322 int i; 323 struct cxgbi_hba *chba; 324 325 log_debug(1 << CXGBI_DBG_DEV, 326 "cdev 0x%p, p#%u.\n", cdev, cdev->nports); 327 328 for (i = 0; i < cdev->nports; i++) { 329 chba = cdev->hbas[i]; 330 if (chba) { 331 cdev->hbas[i] = NULL; 332 iscsi_host_remove(chba->shost); 333 pci_dev_put(cdev->pdev); 334 iscsi_host_free(chba->shost); 335 } 336 } 337 } 338 EXPORT_SYMBOL_GPL(cxgbi_hbas_remove); 339 340 int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun, 341 unsigned int max_id, struct scsi_host_template *sht, 342 struct scsi_transport_template *stt) 343 { 344 struct cxgbi_hba *chba; 345 struct Scsi_Host *shost; 346 int i, err; 347 348 log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports); 349 350 for (i = 0; i < cdev->nports; i++) { 351 shost = iscsi_host_alloc(sht, sizeof(*chba), 1); 352 if (!shost) { 353 pr_info("0x%p, p%d, %s, host alloc failed.\n", 354 cdev, i, cdev->ports[i]->name); 355 err = -ENOMEM; 356 goto err_out; 357 } 358 359 shost->transportt = stt; 360 shost->max_lun = max_lun; 361 shost->max_id = max_id; 362 shost->max_channel = 0; 363 shost->max_cmd_len = 16; 364 365 chba = iscsi_host_priv(shost); 366 chba->cdev = cdev; 367 chba->ndev = cdev->ports[i]; 368 chba->shost = shost; 369 370 log_debug(1 << CXGBI_DBG_DEV, 371 "cdev 0x%p, p#%d %s: chba 0x%p.\n", 372 cdev, i, cdev->ports[i]->name, chba); 373 374 pci_dev_get(cdev->pdev); 375 err = iscsi_host_add(shost, &cdev->pdev->dev); 376 if (err) { 377 pr_info("cdev 0x%p, p#%d %s, host add failed.\n", 378 cdev, i, cdev->ports[i]->name); 379 pci_dev_put(cdev->pdev); 380 scsi_host_put(shost); 381 goto err_out; 382 } 383 384 cdev->hbas[i] = chba; 385 } 386 387 return 0; 388 389 err_out: 390 cxgbi_hbas_remove(cdev); 391 return err; 392 } 393 EXPORT_SYMBOL_GPL(cxgbi_hbas_add); 394 395 /* 396 * iSCSI offload 397 * 398 * - source port management 399 * To find a free source port in the port allocation map we use a very simple 400 * rotor scheme to look for the next free port. 401 * 402 * If a source port has been specified make sure that it doesn't collide with 403 * our normal source port allocation map. If it's outside the range of our 404 * allocation/deallocation scheme just let them use it. 405 * 406 * If the source port is outside our allocation range, the caller is 407 * responsible for keeping track of their port usage. 408 */ 409 410 static struct cxgbi_sock *find_sock_on_port(struct cxgbi_device *cdev, 411 unsigned char port_id) 412 { 413 struct cxgbi_ports_map *pmap = &cdev->pmap; 414 unsigned int i; 415 unsigned int used; 416 417 if (!pmap->max_connect || !pmap->used) 418 return NULL; 419 420 spin_lock_bh(&pmap->lock); 421 used = pmap->used; 422 for (i = 0; used && i < pmap->max_connect; i++) { 423 struct cxgbi_sock *csk = pmap->port_csk[i]; 424 425 if (csk) { 426 if (csk->port_id == port_id) { 427 spin_unlock_bh(&pmap->lock); 428 return csk; 429 } 430 used--; 431 } 432 } 433 spin_unlock_bh(&pmap->lock); 434 435 return NULL; 436 } 437 438 static int sock_get_port(struct cxgbi_sock *csk) 439 { 440 struct cxgbi_device *cdev = csk->cdev; 441 struct cxgbi_ports_map *pmap = &cdev->pmap; 442 unsigned int start; 443 int idx; 444 __be16 *port; 445 446 if (!pmap->max_connect) { 447 pr_err("cdev 0x%p, p#%u %s, NO port map.\n", 448 cdev, csk->port_id, cdev->ports[csk->port_id]->name); 449 return -EADDRNOTAVAIL; 450 } 451 452 if (csk->csk_family == AF_INET) 453 port = &csk->saddr.sin_port; 454 else /* ipv6 */ 455 port = &csk->saddr6.sin6_port; 456 457 if (*port) { 458 pr_err("source port NON-ZERO %u.\n", 459 ntohs(*port)); 460 return -EADDRINUSE; 461 } 462 463 spin_lock_bh(&pmap->lock); 464 if (pmap->used >= pmap->max_connect) { 465 spin_unlock_bh(&pmap->lock); 466 pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n", 467 cdev, csk->port_id, cdev->ports[csk->port_id]->name); 468 return -EADDRNOTAVAIL; 469 } 470 471 start = idx = pmap->next; 472 do { 473 if (++idx >= pmap->max_connect) 474 idx = 0; 475 if (!pmap->port_csk[idx]) { 476 pmap->used++; 477 *port = htons(pmap->sport_base + idx); 478 pmap->next = idx; 479 pmap->port_csk[idx] = csk; 480 spin_unlock_bh(&pmap->lock); 481 cxgbi_sock_get(csk); 482 log_debug(1 << CXGBI_DBG_SOCK, 483 "cdev 0x%p, p#%u %s, p %u, %u.\n", 484 cdev, csk->port_id, 485 cdev->ports[csk->port_id]->name, 486 pmap->sport_base + idx, pmap->next); 487 return 0; 488 } 489 } while (idx != start); 490 spin_unlock_bh(&pmap->lock); 491 492 /* should not happen */ 493 pr_warn("cdev 0x%p, p#%u %s, next %u?\n", 494 cdev, csk->port_id, cdev->ports[csk->port_id]->name, 495 pmap->next); 496 return -EADDRNOTAVAIL; 497 } 498 499 static void sock_put_port(struct cxgbi_sock *csk) 500 { 501 struct cxgbi_device *cdev = csk->cdev; 502 struct cxgbi_ports_map *pmap = &cdev->pmap; 503 __be16 *port; 504 505 if (csk->csk_family == AF_INET) 506 port = &csk->saddr.sin_port; 507 else /* ipv6 */ 508 port = &csk->saddr6.sin6_port; 509 510 if (*port) { 511 int idx = ntohs(*port) - pmap->sport_base; 512 513 *port = 0; 514 if (idx < 0 || idx >= pmap->max_connect) { 515 pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n", 516 cdev, csk->port_id, 517 cdev->ports[csk->port_id]->name, 518 ntohs(*port)); 519 return; 520 } 521 522 spin_lock_bh(&pmap->lock); 523 pmap->port_csk[idx] = NULL; 524 pmap->used--; 525 spin_unlock_bh(&pmap->lock); 526 527 log_debug(1 << CXGBI_DBG_SOCK, 528 "cdev 0x%p, p#%u %s, release %u.\n", 529 cdev, csk->port_id, cdev->ports[csk->port_id]->name, 530 pmap->sport_base + idx); 531 532 cxgbi_sock_put(csk); 533 } 534 } 535 536 /* 537 * iscsi tcp connection 538 */ 539 void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk) 540 { 541 if (csk->cpl_close) { 542 kfree_skb(csk->cpl_close); 543 csk->cpl_close = NULL; 544 } 545 if (csk->cpl_abort_req) { 546 kfree_skb(csk->cpl_abort_req); 547 csk->cpl_abort_req = NULL; 548 } 549 if (csk->cpl_abort_rpl) { 550 kfree_skb(csk->cpl_abort_rpl); 551 csk->cpl_abort_rpl = NULL; 552 } 553 } 554 EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs); 555 556 static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev) 557 { 558 struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO); 559 560 if (!csk) { 561 pr_info("alloc csk %zu failed.\n", sizeof(*csk)); 562 return NULL; 563 } 564 565 if (cdev->csk_alloc_cpls(csk) < 0) { 566 pr_info("csk 0x%p, alloc cpls failed.\n", csk); 567 kfree(csk); 568 return NULL; 569 } 570 571 spin_lock_init(&csk->lock); 572 kref_init(&csk->refcnt); 573 skb_queue_head_init(&csk->receive_queue); 574 skb_queue_head_init(&csk->write_queue); 575 setup_timer(&csk->retry_timer, NULL, (unsigned long)csk); 576 rwlock_init(&csk->callback_lock); 577 csk->cdev = cdev; 578 csk->flags = 0; 579 cxgbi_sock_set_state(csk, CTP_CLOSED); 580 581 log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk); 582 583 return csk; 584 } 585 586 static struct rtable *find_route_ipv4(struct flowi4 *fl4, 587 __be32 saddr, __be32 daddr, 588 __be16 sport, __be16 dport, u8 tos) 589 { 590 struct rtable *rt; 591 592 rt = ip_route_output_ports(&init_net, fl4, NULL, daddr, saddr, 593 dport, sport, IPPROTO_TCP, tos, 0); 594 if (IS_ERR(rt)) 595 return NULL; 596 597 return rt; 598 } 599 600 static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) 601 { 602 struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr; 603 struct dst_entry *dst; 604 struct net_device *ndev; 605 struct cxgbi_device *cdev; 606 struct rtable *rt = NULL; 607 struct neighbour *n; 608 struct flowi4 fl4; 609 struct cxgbi_sock *csk = NULL; 610 unsigned int mtu = 0; 611 int port = 0xFFFF; 612 int err = 0; 613 614 rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0); 615 if (!rt) { 616 pr_info("no route to ipv4 0x%x, port %u.\n", 617 be32_to_cpu(daddr->sin_addr.s_addr), 618 be16_to_cpu(daddr->sin_port)); 619 err = -ENETUNREACH; 620 goto err_out; 621 } 622 dst = &rt->dst; 623 n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr); 624 if (!n) { 625 err = -ENODEV; 626 goto rel_rt; 627 } 628 ndev = n->dev; 629 630 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { 631 pr_info("multi-cast route %pI4, port %u, dev %s.\n", 632 &daddr->sin_addr.s_addr, ntohs(daddr->sin_port), 633 ndev->name); 634 err = -ENETUNREACH; 635 goto rel_neigh; 636 } 637 638 if (ndev->flags & IFF_LOOPBACK) { 639 ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr); 640 mtu = ndev->mtu; 641 pr_info("rt dev %s, loopback -> %s, mtu %u.\n", 642 n->dev->name, ndev->name, mtu); 643 } 644 645 if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) { 646 pr_info("%s interface not up.\n", ndev->name); 647 err = -ENETDOWN; 648 goto rel_neigh; 649 } 650 651 cdev = cxgbi_device_find_by_netdev(ndev, &port); 652 if (!cdev) { 653 pr_info("dst %pI4, %s, NOT cxgbi device.\n", 654 &daddr->sin_addr.s_addr, ndev->name); 655 err = -ENETUNREACH; 656 goto rel_neigh; 657 } 658 log_debug(1 << CXGBI_DBG_SOCK, 659 "route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n", 660 &daddr->sin_addr.s_addr, ntohs(daddr->sin_port), 661 port, ndev->name, cdev); 662 663 csk = cxgbi_sock_create(cdev); 664 if (!csk) { 665 err = -ENOMEM; 666 goto rel_neigh; 667 } 668 csk->cdev = cdev; 669 csk->port_id = port; 670 csk->mtu = mtu; 671 csk->dst = dst; 672 673 csk->csk_family = AF_INET; 674 csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr; 675 csk->daddr.sin_port = daddr->sin_port; 676 csk->daddr.sin_family = daddr->sin_family; 677 csk->saddr.sin_family = daddr->sin_family; 678 csk->saddr.sin_addr.s_addr = fl4.saddr; 679 neigh_release(n); 680 681 return csk; 682 683 rel_neigh: 684 neigh_release(n); 685 686 rel_rt: 687 ip_rt_put(rt); 688 if (csk) 689 cxgbi_sock_closed(csk); 690 err_out: 691 return ERR_PTR(err); 692 } 693 694 #if IS_ENABLED(CONFIG_IPV6) 695 static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr, 696 const struct in6_addr *daddr) 697 { 698 struct flowi6 fl; 699 700 memset(&fl, 0, sizeof(fl)); 701 if (saddr) 702 memcpy(&fl.saddr, saddr, sizeof(struct in6_addr)); 703 if (daddr) 704 memcpy(&fl.daddr, daddr, sizeof(struct in6_addr)); 705 return (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl); 706 } 707 708 static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr) 709 { 710 struct sockaddr_in6 *daddr6 = (struct sockaddr_in6 *)dst_addr; 711 struct dst_entry *dst; 712 struct net_device *ndev; 713 struct cxgbi_device *cdev; 714 struct rt6_info *rt = NULL; 715 struct neighbour *n; 716 struct in6_addr pref_saddr; 717 struct cxgbi_sock *csk = NULL; 718 unsigned int mtu = 0; 719 int port = 0xFFFF; 720 int err = 0; 721 722 rt = find_route_ipv6(NULL, &daddr6->sin6_addr); 723 724 if (!rt) { 725 pr_info("no route to ipv6 %pI6 port %u\n", 726 daddr6->sin6_addr.s6_addr, 727 be16_to_cpu(daddr6->sin6_port)); 728 err = -ENETUNREACH; 729 goto err_out; 730 } 731 732 dst = &rt->dst; 733 734 n = dst_neigh_lookup(dst, &daddr6->sin6_addr); 735 736 if (!n) { 737 pr_info("%pI6, port %u, dst no neighbour.\n", 738 daddr6->sin6_addr.s6_addr, 739 be16_to_cpu(daddr6->sin6_port)); 740 err = -ENETUNREACH; 741 goto rel_rt; 742 } 743 ndev = n->dev; 744 745 if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) { 746 pr_info("%s interface not up.\n", ndev->name); 747 err = -ENETDOWN; 748 goto rel_rt; 749 } 750 751 if (ipv6_addr_is_multicast(&daddr6->sin6_addr)) { 752 pr_info("multi-cast route %pI6 port %u, dev %s.\n", 753 daddr6->sin6_addr.s6_addr, 754 ntohs(daddr6->sin6_port), ndev->name); 755 err = -ENETUNREACH; 756 goto rel_rt; 757 } 758 759 cdev = cxgbi_device_find_by_netdev(ndev, &port); 760 if (!cdev) 761 cdev = cxgbi_device_find_by_mac(ndev, &port); 762 if (!cdev) { 763 pr_info("dst %pI6 %s, NOT cxgbi device.\n", 764 daddr6->sin6_addr.s6_addr, ndev->name); 765 err = -ENETUNREACH; 766 goto rel_rt; 767 } 768 log_debug(1 << CXGBI_DBG_SOCK, 769 "route to %pI6 :%u, ndev p#%d,%s, cdev 0x%p.\n", 770 daddr6->sin6_addr.s6_addr, ntohs(daddr6->sin6_port), port, 771 ndev->name, cdev); 772 773 csk = cxgbi_sock_create(cdev); 774 if (!csk) { 775 err = -ENOMEM; 776 goto rel_rt; 777 } 778 csk->cdev = cdev; 779 csk->port_id = port; 780 csk->mtu = mtu; 781 csk->dst = dst; 782 783 if (ipv6_addr_any(&rt->rt6i_prefsrc.addr)) { 784 struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt); 785 786 err = ipv6_dev_get_saddr(&init_net, idev ? idev->dev : NULL, 787 &daddr6->sin6_addr, 0, &pref_saddr); 788 if (err) { 789 pr_info("failed to get source address to reach %pI6\n", 790 &daddr6->sin6_addr); 791 goto rel_rt; 792 } 793 } else { 794 pref_saddr = rt->rt6i_prefsrc.addr; 795 } 796 797 csk->csk_family = AF_INET6; 798 csk->daddr6.sin6_addr = daddr6->sin6_addr; 799 csk->daddr6.sin6_port = daddr6->sin6_port; 800 csk->daddr6.sin6_family = daddr6->sin6_family; 801 csk->saddr6.sin6_family = daddr6->sin6_family; 802 csk->saddr6.sin6_addr = pref_saddr; 803 804 neigh_release(n); 805 return csk; 806 807 rel_rt: 808 if (n) 809 neigh_release(n); 810 811 ip6_rt_put(rt); 812 if (csk) 813 cxgbi_sock_closed(csk); 814 err_out: 815 return ERR_PTR(err); 816 } 817 #endif /* IS_ENABLED(CONFIG_IPV6) */ 818 819 void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn, 820 unsigned int opt) 821 { 822 csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn; 823 dst_confirm(csk->dst); 824 smp_mb(); 825 cxgbi_sock_set_state(csk, CTP_ESTABLISHED); 826 } 827 EXPORT_SYMBOL_GPL(cxgbi_sock_established); 828 829 static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk) 830 { 831 log_debug(1 << CXGBI_DBG_SOCK, 832 "csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n", 833 csk, csk->state, csk->flags, csk->user_data); 834 835 if (csk->state != CTP_ESTABLISHED) { 836 read_lock_bh(&csk->callback_lock); 837 if (csk->user_data) 838 iscsi_conn_failure(csk->user_data, 839 ISCSI_ERR_TCP_CONN_CLOSE); 840 read_unlock_bh(&csk->callback_lock); 841 } 842 } 843 844 void cxgbi_sock_closed(struct cxgbi_sock *csk) 845 { 846 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 847 csk, (csk)->state, (csk)->flags, (csk)->tid); 848 cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED); 849 if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED) 850 return; 851 if (csk->saddr.sin_port) 852 sock_put_port(csk); 853 if (csk->dst) 854 dst_release(csk->dst); 855 csk->cdev->csk_release_offload_resources(csk); 856 cxgbi_sock_set_state(csk, CTP_CLOSED); 857 cxgbi_inform_iscsi_conn_closing(csk); 858 cxgbi_sock_put(csk); 859 } 860 EXPORT_SYMBOL_GPL(cxgbi_sock_closed); 861 862 static void need_active_close(struct cxgbi_sock *csk) 863 { 864 int data_lost; 865 int close_req = 0; 866 867 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 868 csk, (csk)->state, (csk)->flags, (csk)->tid); 869 spin_lock_bh(&csk->lock); 870 dst_confirm(csk->dst); 871 data_lost = skb_queue_len(&csk->receive_queue); 872 __skb_queue_purge(&csk->receive_queue); 873 874 if (csk->state == CTP_ACTIVE_OPEN) 875 cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED); 876 else if (csk->state == CTP_ESTABLISHED) { 877 close_req = 1; 878 cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE); 879 } else if (csk->state == CTP_PASSIVE_CLOSE) { 880 close_req = 1; 881 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2); 882 } 883 884 if (close_req) { 885 if (data_lost) 886 csk->cdev->csk_send_abort_req(csk); 887 else 888 csk->cdev->csk_send_close_req(csk); 889 } 890 891 spin_unlock_bh(&csk->lock); 892 } 893 894 void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno) 895 { 896 pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n", 897 csk, csk->state, csk->flags, 898 &csk->saddr.sin_addr.s_addr, csk->saddr.sin_port, 899 &csk->daddr.sin_addr.s_addr, csk->daddr.sin_port, 900 errno); 901 902 cxgbi_sock_set_state(csk, CTP_CONNECTING); 903 csk->err = errno; 904 cxgbi_sock_closed(csk); 905 } 906 EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open); 907 908 void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb) 909 { 910 struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk; 911 struct module *owner = csk->cdev->owner; 912 913 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 914 csk, (csk)->state, (csk)->flags, (csk)->tid); 915 cxgbi_sock_get(csk); 916 spin_lock_bh(&csk->lock); 917 if (csk->state == CTP_ACTIVE_OPEN) 918 cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH); 919 spin_unlock_bh(&csk->lock); 920 cxgbi_sock_put(csk); 921 __kfree_skb(skb); 922 923 module_put(owner); 924 } 925 EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure); 926 927 void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk) 928 { 929 cxgbi_sock_get(csk); 930 spin_lock_bh(&csk->lock); 931 932 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD); 933 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { 934 cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING); 935 if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) 936 pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n", 937 csk, csk->state, csk->flags, csk->tid); 938 cxgbi_sock_closed(csk); 939 } 940 941 spin_unlock_bh(&csk->lock); 942 cxgbi_sock_put(csk); 943 } 944 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl); 945 946 void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk) 947 { 948 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 949 csk, (csk)->state, (csk)->flags, (csk)->tid); 950 cxgbi_sock_get(csk); 951 spin_lock_bh(&csk->lock); 952 953 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) 954 goto done; 955 956 switch (csk->state) { 957 case CTP_ESTABLISHED: 958 cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE); 959 break; 960 case CTP_ACTIVE_CLOSE: 961 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2); 962 break; 963 case CTP_CLOSE_WAIT_1: 964 cxgbi_sock_closed(csk); 965 break; 966 case CTP_ABORTING: 967 break; 968 default: 969 pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n", 970 csk, csk->state, csk->flags, csk->tid); 971 } 972 cxgbi_inform_iscsi_conn_closing(csk); 973 done: 974 spin_unlock_bh(&csk->lock); 975 cxgbi_sock_put(csk); 976 } 977 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close); 978 979 void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt) 980 { 981 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 982 csk, (csk)->state, (csk)->flags, (csk)->tid); 983 cxgbi_sock_get(csk); 984 spin_lock_bh(&csk->lock); 985 986 csk->snd_una = snd_nxt - 1; 987 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) 988 goto done; 989 990 switch (csk->state) { 991 case CTP_ACTIVE_CLOSE: 992 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1); 993 break; 994 case CTP_CLOSE_WAIT_1: 995 case CTP_CLOSE_WAIT_2: 996 cxgbi_sock_closed(csk); 997 break; 998 case CTP_ABORTING: 999 break; 1000 default: 1001 pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n", 1002 csk, csk->state, csk->flags, csk->tid); 1003 } 1004 done: 1005 spin_unlock_bh(&csk->lock); 1006 cxgbi_sock_put(csk); 1007 } 1008 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl); 1009 1010 void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits, 1011 unsigned int snd_una, int seq_chk) 1012 { 1013 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 1014 "csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n", 1015 csk, csk->state, csk->flags, csk->tid, credits, 1016 csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk); 1017 1018 spin_lock_bh(&csk->lock); 1019 1020 csk->wr_cred += credits; 1021 if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred) 1022 csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred; 1023 1024 while (credits) { 1025 struct sk_buff *p = cxgbi_sock_peek_wr(csk); 1026 1027 if (unlikely(!p)) { 1028 pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n", 1029 csk, csk->state, csk->flags, csk->tid, credits, 1030 csk->wr_cred, csk->wr_una_cred); 1031 break; 1032 } 1033 1034 if (unlikely(credits < p->csum)) { 1035 pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n", 1036 csk, csk->state, csk->flags, csk->tid, 1037 credits, csk->wr_cred, csk->wr_una_cred, 1038 p->csum); 1039 p->csum -= credits; 1040 break; 1041 } else { 1042 cxgbi_sock_dequeue_wr(csk); 1043 credits -= p->csum; 1044 kfree_skb(p); 1045 } 1046 } 1047 1048 cxgbi_sock_check_wr_invariants(csk); 1049 1050 if (seq_chk) { 1051 if (unlikely(before(snd_una, csk->snd_una))) { 1052 pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.", 1053 csk, csk->state, csk->flags, csk->tid, snd_una, 1054 csk->snd_una); 1055 goto done; 1056 } 1057 1058 if (csk->snd_una != snd_una) { 1059 csk->snd_una = snd_una; 1060 dst_confirm(csk->dst); 1061 } 1062 } 1063 1064 if (skb_queue_len(&csk->write_queue)) { 1065 if (csk->cdev->csk_push_tx_frames(csk, 0)) 1066 cxgbi_conn_tx_open(csk); 1067 } else 1068 cxgbi_conn_tx_open(csk); 1069 done: 1070 spin_unlock_bh(&csk->lock); 1071 } 1072 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack); 1073 1074 static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk, 1075 unsigned short mtu) 1076 { 1077 int i = 0; 1078 1079 while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu) 1080 ++i; 1081 1082 return i; 1083 } 1084 1085 unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu) 1086 { 1087 unsigned int idx; 1088 struct dst_entry *dst = csk->dst; 1089 1090 csk->advmss = dst_metric_advmss(dst); 1091 1092 if (csk->advmss > pmtu - 40) 1093 csk->advmss = pmtu - 40; 1094 if (csk->advmss < csk->cdev->mtus[0] - 40) 1095 csk->advmss = csk->cdev->mtus[0] - 40; 1096 idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40); 1097 1098 return idx; 1099 } 1100 EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss); 1101 1102 void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb) 1103 { 1104 cxgbi_skcb_tcp_seq(skb) = csk->write_seq; 1105 __skb_queue_tail(&csk->write_queue, skb); 1106 } 1107 EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail); 1108 1109 void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk) 1110 { 1111 struct sk_buff *skb; 1112 1113 while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL) 1114 kfree_skb(skb); 1115 } 1116 EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue); 1117 1118 void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk) 1119 { 1120 int pending = cxgbi_sock_count_pending_wrs(csk); 1121 1122 if (unlikely(csk->wr_cred + pending != csk->wr_max_cred)) 1123 pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n", 1124 csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred); 1125 } 1126 EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants); 1127 1128 static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb) 1129 { 1130 struct cxgbi_device *cdev = csk->cdev; 1131 struct sk_buff *next; 1132 int err, copied = 0; 1133 1134 spin_lock_bh(&csk->lock); 1135 1136 if (csk->state != CTP_ESTABLISHED) { 1137 log_debug(1 << CXGBI_DBG_PDU_TX, 1138 "csk 0x%p,%u,0x%lx,%u, EAGAIN.\n", 1139 csk, csk->state, csk->flags, csk->tid); 1140 err = -EAGAIN; 1141 goto out_err; 1142 } 1143 1144 if (csk->err) { 1145 log_debug(1 << CXGBI_DBG_PDU_TX, 1146 "csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n", 1147 csk, csk->state, csk->flags, csk->tid, csk->err); 1148 err = -EPIPE; 1149 goto out_err; 1150 } 1151 1152 if (csk->write_seq - csk->snd_una >= csk->snd_win) { 1153 log_debug(1 << CXGBI_DBG_PDU_TX, 1154 "csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n", 1155 csk, csk->state, csk->flags, csk->tid, csk->write_seq, 1156 csk->snd_una, csk->snd_win); 1157 err = -ENOBUFS; 1158 goto out_err; 1159 } 1160 1161 while (skb) { 1162 int frags = skb_shinfo(skb)->nr_frags + 1163 (skb->len != skb->data_len); 1164 1165 if (unlikely(skb_headroom(skb) < cdev->skb_tx_rsvd)) { 1166 pr_err("csk 0x%p, skb head %u < %u.\n", 1167 csk, skb_headroom(skb), cdev->skb_tx_rsvd); 1168 err = -EINVAL; 1169 goto out_err; 1170 } 1171 1172 if (frags >= SKB_WR_LIST_SIZE) { 1173 pr_err("csk 0x%p, frags %d, %u,%u >%u.\n", 1174 csk, skb_shinfo(skb)->nr_frags, skb->len, 1175 skb->data_len, (uint)(SKB_WR_LIST_SIZE)); 1176 err = -EINVAL; 1177 goto out_err; 1178 } 1179 1180 next = skb->next; 1181 skb->next = NULL; 1182 cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR); 1183 cxgbi_sock_skb_entail(csk, skb); 1184 copied += skb->len; 1185 csk->write_seq += skb->len + 1186 cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb)); 1187 skb = next; 1188 } 1189 done: 1190 if (likely(skb_queue_len(&csk->write_queue))) 1191 cdev->csk_push_tx_frames(csk, 1); 1192 spin_unlock_bh(&csk->lock); 1193 return copied; 1194 1195 out_err: 1196 if (copied == 0 && err == -EPIPE) 1197 copied = csk->err ? csk->err : -EPIPE; 1198 else 1199 copied = err; 1200 goto done; 1201 } 1202 1203 static inline void 1204 scmd_get_params(struct scsi_cmnd *sc, struct scatterlist **sgl, 1205 unsigned int *sgcnt, unsigned int *dlen, 1206 unsigned int prot) 1207 { 1208 struct scsi_data_buffer *sdb = prot ? scsi_prot(sc) : scsi_out(sc); 1209 1210 *sgl = sdb->table.sgl; 1211 *sgcnt = sdb->table.nents; 1212 *dlen = sdb->length; 1213 /* Caution: for protection sdb, sdb->length is invalid */ 1214 } 1215 1216 void cxgbi_ddp_set_one_ppod(struct cxgbi_pagepod *ppod, 1217 struct cxgbi_task_tag_info *ttinfo, 1218 struct scatterlist **sg_pp, unsigned int *sg_off) 1219 { 1220 struct scatterlist *sg = sg_pp ? *sg_pp : NULL; 1221 unsigned int offset = sg_off ? *sg_off : 0; 1222 dma_addr_t addr = 0UL; 1223 unsigned int len = 0; 1224 int i; 1225 1226 memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr)); 1227 1228 if (sg) { 1229 addr = sg_dma_address(sg); 1230 len = sg_dma_len(sg); 1231 } 1232 1233 for (i = 0; i < PPOD_PAGES_MAX; i++) { 1234 if (sg) { 1235 ppod->addr[i] = cpu_to_be64(addr + offset); 1236 offset += PAGE_SIZE; 1237 if (offset == (len + sg->offset)) { 1238 offset = 0; 1239 sg = sg_next(sg); 1240 if (sg) { 1241 addr = sg_dma_address(sg); 1242 len = sg_dma_len(sg); 1243 } 1244 } 1245 } else { 1246 ppod->addr[i] = 0ULL; 1247 } 1248 } 1249 1250 /* 1251 * the fifth address needs to be repeated in the next ppod, so do 1252 * not move sg 1253 */ 1254 if (sg_pp) { 1255 *sg_pp = sg; 1256 *sg_off = offset; 1257 } 1258 1259 if (offset == len) { 1260 offset = 0; 1261 sg = sg_next(sg); 1262 if (sg) { 1263 addr = sg_dma_address(sg); 1264 len = sg_dma_len(sg); 1265 } 1266 } 1267 ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL; 1268 } 1269 EXPORT_SYMBOL_GPL(cxgbi_ddp_set_one_ppod); 1270 1271 /* 1272 * APIs interacting with open-iscsi libraries 1273 */ 1274 1275 static unsigned char padding[4]; 1276 1277 void cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev, 1278 struct cxgbi_tag_format *tformat, unsigned int ppmax, 1279 unsigned int llimit, unsigned int start, 1280 unsigned int rsvd_factor) 1281 { 1282 int err = cxgbi_ppm_init(ppm_pp, cdev->ports[0], cdev->pdev, 1283 cdev->lldev, tformat, ppmax, llimit, start, 1284 rsvd_factor); 1285 1286 if (err >= 0) { 1287 struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp); 1288 1289 if (ppm->ppmax < 1024 || 1290 ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX) 1291 cdev->flags |= CXGBI_FLAG_DDP_OFF; 1292 err = 0; 1293 } else { 1294 cdev->flags |= CXGBI_FLAG_DDP_OFF; 1295 } 1296 } 1297 EXPORT_SYMBOL_GPL(cxgbi_ddp_ppm_setup); 1298 1299 static int cxgbi_ddp_sgl_check(struct scatterlist *sgl, int nents) 1300 { 1301 int i; 1302 int last_sgidx = nents - 1; 1303 struct scatterlist *sg = sgl; 1304 1305 for (i = 0; i < nents; i++, sg = sg_next(sg)) { 1306 unsigned int len = sg->length + sg->offset; 1307 1308 if ((sg->offset & 0x3) || (i && sg->offset) || 1309 ((i != last_sgidx) && len != PAGE_SIZE)) { 1310 log_debug(1 << CXGBI_DBG_DDP, 1311 "sg %u/%u, %u,%u, not aligned.\n", 1312 i, nents, sg->offset, sg->length); 1313 goto err_out; 1314 } 1315 } 1316 return 0; 1317 err_out: 1318 return -EINVAL; 1319 } 1320 1321 static int cxgbi_ddp_reserve(struct cxgbi_conn *cconn, 1322 struct cxgbi_task_data *tdata, u32 sw_tag, 1323 unsigned int xferlen) 1324 { 1325 struct cxgbi_sock *csk = cconn->cep->csk; 1326 struct cxgbi_device *cdev = csk->cdev; 1327 struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); 1328 struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; 1329 struct scatterlist *sgl = ttinfo->sgl; 1330 unsigned int sgcnt = ttinfo->nents; 1331 unsigned int sg_offset = sgl->offset; 1332 int err; 1333 1334 if (cdev->flags & CXGBI_FLAG_DDP_OFF) { 1335 log_debug(1 << CXGBI_DBG_DDP, 1336 "cdev 0x%p DDP off.\n", cdev); 1337 return -EINVAL; 1338 } 1339 1340 if (!ppm || xferlen < DDP_THRESHOLD || !sgcnt || 1341 ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX) { 1342 log_debug(1 << CXGBI_DBG_DDP, 1343 "ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n", 1344 ppm, ppm ? ppm->tformat.pgsz_idx_dflt : DDP_PGIDX_MAX, 1345 xferlen, ttinfo->nents); 1346 return -EINVAL; 1347 } 1348 1349 /* make sure the buffer is suitable for ddp */ 1350 if (cxgbi_ddp_sgl_check(sgl, sgcnt) < 0) 1351 return -EINVAL; 1352 1353 ttinfo->nr_pages = (xferlen + sgl->offset + (1 << PAGE_SHIFT) - 1) >> 1354 PAGE_SHIFT; 1355 1356 /* 1357 * the ddp tag will be used for the itt in the outgoing pdu, 1358 * the itt genrated by libiscsi is saved in the ppm and can be 1359 * retrieved via the ddp tag 1360 */ 1361 err = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx, 1362 &ttinfo->tag, (unsigned long)sw_tag); 1363 if (err < 0) { 1364 cconn->ddp_full++; 1365 return err; 1366 } 1367 ttinfo->npods = err; 1368 1369 /* setup dma from scsi command sgl */ 1370 sgl->offset = 0; 1371 err = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); 1372 sgl->offset = sg_offset; 1373 if (err == 0) { 1374 pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n", 1375 __func__, sw_tag, xferlen, sgcnt); 1376 goto rel_ppods; 1377 } 1378 if (err != ttinfo->nr_pages) { 1379 log_debug(1 << CXGBI_DBG_DDP, 1380 "%s: sw tag 0x%x, xfer %u, sgl %u, dma count %d.\n", 1381 __func__, sw_tag, xferlen, sgcnt, err); 1382 } 1383 1384 ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_MAPPED; 1385 ttinfo->cid = csk->port_id; 1386 1387 cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset, 1388 xferlen, &ttinfo->hdr); 1389 1390 if (cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ) { 1391 /* write ppod from xmit_pdu (of iscsi_scsi_command pdu) */ 1392 ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_VALID; 1393 } else { 1394 /* write ppod from control queue now */ 1395 err = cdev->csk_ddp_set_map(ppm, csk, ttinfo); 1396 if (err < 0) 1397 goto rel_ppods; 1398 } 1399 1400 return 0; 1401 1402 rel_ppods: 1403 cxgbi_ppm_ppod_release(ppm, ttinfo->idx); 1404 1405 if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_MAPPED) { 1406 ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_MAPPED; 1407 dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); 1408 } 1409 return -EINVAL; 1410 } 1411 1412 static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt) 1413 { 1414 struct scsi_cmnd *sc = task->sc; 1415 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; 1416 struct cxgbi_conn *cconn = tcp_conn->dd_data; 1417 struct cxgbi_device *cdev = cconn->chba->cdev; 1418 struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); 1419 u32 tag = ntohl((__force u32)hdr_itt); 1420 1421 log_debug(1 << CXGBI_DBG_DDP, 1422 "cdev 0x%p, task 0x%p, release tag 0x%x.\n", 1423 cdev, task, tag); 1424 if (sc && 1425 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) && 1426 cxgbi_ppm_is_ddp_tag(ppm, tag)) { 1427 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); 1428 struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; 1429 1430 if (!(cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ)) 1431 cdev->csk_ddp_clear_map(cdev, ppm, ttinfo); 1432 cxgbi_ppm_ppod_release(ppm, ttinfo->idx); 1433 dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents, 1434 DMA_FROM_DEVICE); 1435 } 1436 } 1437 1438 static inline u32 cxgbi_build_sw_tag(u32 idx, u32 age) 1439 { 1440 /* assume idx and age both are < 0x7FFF (32767) */ 1441 return (idx << 16) | age; 1442 } 1443 1444 static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt) 1445 { 1446 struct scsi_cmnd *sc = task->sc; 1447 struct iscsi_conn *conn = task->conn; 1448 struct iscsi_session *sess = conn->session; 1449 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1450 struct cxgbi_conn *cconn = tcp_conn->dd_data; 1451 struct cxgbi_device *cdev = cconn->chba->cdev; 1452 struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); 1453 u32 sw_tag = cxgbi_build_sw_tag(task->itt, sess->age); 1454 u32 tag = 0; 1455 int err = -EINVAL; 1456 1457 if (sc && 1458 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) 1459 ) { 1460 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); 1461 struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; 1462 1463 scmd_get_params(sc, &ttinfo->sgl, &ttinfo->nents, 1464 &tdata->dlen, 0); 1465 err = cxgbi_ddp_reserve(cconn, tdata, sw_tag, tdata->dlen); 1466 if (!err) 1467 tag = ttinfo->tag; 1468 else 1469 log_debug(1 << CXGBI_DBG_DDP, 1470 "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n", 1471 cconn->cep->csk, task, tdata->dlen, 1472 ttinfo->nents); 1473 } 1474 1475 if (err < 0) { 1476 err = cxgbi_ppm_make_non_ddp_tag(ppm, sw_tag, &tag); 1477 if (err < 0) 1478 return err; 1479 } 1480 /* the itt need to sent in big-endian order */ 1481 *hdr_itt = (__force itt_t)htonl(tag); 1482 1483 log_debug(1 << CXGBI_DBG_DDP, 1484 "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n", 1485 cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt); 1486 return 0; 1487 } 1488 1489 void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age) 1490 { 1491 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1492 struct cxgbi_conn *cconn = tcp_conn->dd_data; 1493 struct cxgbi_device *cdev = cconn->chba->cdev; 1494 struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev); 1495 u32 tag = ntohl((__force u32)itt); 1496 u32 sw_bits; 1497 1498 if (ppm) { 1499 if (cxgbi_ppm_is_ddp_tag(ppm, tag)) 1500 sw_bits = cxgbi_ppm_get_tag_caller_data(ppm, tag); 1501 else 1502 sw_bits = cxgbi_ppm_decode_non_ddp_tag(ppm, tag); 1503 } else { 1504 sw_bits = tag; 1505 } 1506 1507 cxgbi_decode_sw_tag(sw_bits, idx, age); 1508 log_debug(1 << CXGBI_DBG_DDP, 1509 "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n", 1510 cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF, 1511 age ? *age : 0xFF); 1512 } 1513 EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt); 1514 1515 void cxgbi_conn_tx_open(struct cxgbi_sock *csk) 1516 { 1517 struct iscsi_conn *conn = csk->user_data; 1518 1519 if (conn) { 1520 log_debug(1 << CXGBI_DBG_SOCK, 1521 "csk 0x%p, cid %d.\n", csk, conn->id); 1522 iscsi_conn_queue_work(conn); 1523 } 1524 } 1525 EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open); 1526 1527 /* 1528 * pdu receive, interact with libiscsi_tcp 1529 */ 1530 static inline int read_pdu_skb(struct iscsi_conn *conn, 1531 struct sk_buff *skb, 1532 unsigned int offset, 1533 int offloaded) 1534 { 1535 int status = 0; 1536 int bytes_read; 1537 1538 bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status); 1539 switch (status) { 1540 case ISCSI_TCP_CONN_ERR: 1541 pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n", 1542 skb, offset, offloaded); 1543 return -EIO; 1544 case ISCSI_TCP_SUSPENDED: 1545 log_debug(1 << CXGBI_DBG_PDU_RX, 1546 "skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n", 1547 skb, offset, offloaded, bytes_read); 1548 /* no transfer - just have caller flush queue */ 1549 return bytes_read; 1550 case ISCSI_TCP_SKB_DONE: 1551 pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n", 1552 skb, offset, offloaded); 1553 /* 1554 * pdus should always fit in the skb and we should get 1555 * segment done notifcation. 1556 */ 1557 iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb."); 1558 return -EFAULT; 1559 case ISCSI_TCP_SEGMENT_DONE: 1560 log_debug(1 << CXGBI_DBG_PDU_RX, 1561 "skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n", 1562 skb, offset, offloaded, bytes_read); 1563 return bytes_read; 1564 default: 1565 pr_info("skb 0x%p, off %u, %d, invalid status %d.\n", 1566 skb, offset, offloaded, status); 1567 return -EINVAL; 1568 } 1569 } 1570 1571 static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb) 1572 { 1573 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1574 1575 log_debug(1 << CXGBI_DBG_PDU_RX, 1576 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", 1577 conn, skb, skb->len, cxgbi_skcb_flags(skb)); 1578 1579 if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) { 1580 pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb); 1581 iscsi_conn_failure(conn, ISCSI_ERR_PROTO); 1582 return -EIO; 1583 } 1584 1585 if (conn->hdrdgst_en && 1586 cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) { 1587 pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb); 1588 iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST); 1589 return -EIO; 1590 } 1591 1592 if (cxgbi_skcb_test_flag(skb, SKCBF_RX_ISCSI_COMPL) && 1593 cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA_DDPD)) { 1594 /* If completion flag is set and data is directly 1595 * placed in to the host memory then update 1596 * task->exp_datasn to the datasn in completion 1597 * iSCSI hdr as T6 adapter generates completion only 1598 * for the last pdu of a sequence. 1599 */ 1600 itt_t itt = ((struct iscsi_data *)skb->data)->itt; 1601 struct iscsi_task *task = iscsi_itt_to_ctask(conn, itt); 1602 u32 data_sn = be32_to_cpu(((struct iscsi_data *) 1603 skb->data)->datasn); 1604 if (task && task->sc) { 1605 struct iscsi_tcp_task *tcp_task = task->dd_data; 1606 1607 tcp_task->exp_datasn = data_sn; 1608 } 1609 } 1610 1611 return read_pdu_skb(conn, skb, 0, 0); 1612 } 1613 1614 static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb, 1615 struct sk_buff *skb, unsigned int offset) 1616 { 1617 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1618 bool offloaded = 0; 1619 int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK; 1620 1621 log_debug(1 << CXGBI_DBG_PDU_RX, 1622 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", 1623 conn, skb, skb->len, cxgbi_skcb_flags(skb)); 1624 1625 if (conn->datadgst_en && 1626 cxgbi_skcb_test_flag(lskb, SKCBF_RX_DCRC_ERR)) { 1627 pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n", 1628 conn, lskb, cxgbi_skcb_flags(lskb)); 1629 iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST); 1630 return -EIO; 1631 } 1632 1633 if (iscsi_tcp_recv_segment_is_hdr(tcp_conn)) 1634 return 0; 1635 1636 /* coalesced, add header digest length */ 1637 if (lskb == skb && conn->hdrdgst_en) 1638 offset += ISCSI_DIGEST_SIZE; 1639 1640 if (cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA_DDPD)) 1641 offloaded = 1; 1642 1643 if (opcode == ISCSI_OP_SCSI_DATA_IN) 1644 log_debug(1 << CXGBI_DBG_PDU_RX, 1645 "skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n", 1646 skb, opcode, ntohl(tcp_conn->in.hdr->itt), 1647 tcp_conn->in.datalen, offloaded ? "is" : "not"); 1648 1649 return read_pdu_skb(conn, skb, offset, offloaded); 1650 } 1651 1652 static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied) 1653 { 1654 struct cxgbi_device *cdev = csk->cdev; 1655 int must_send; 1656 u32 credits; 1657 1658 log_debug(1 << CXGBI_DBG_PDU_RX, 1659 "csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n", 1660 csk, csk->state, csk->flags, csk->tid, csk->copied_seq, 1661 csk->rcv_wup, cdev->rx_credit_thres, 1662 csk->rcv_win); 1663 1664 if (!cdev->rx_credit_thres) 1665 return; 1666 1667 if (csk->state != CTP_ESTABLISHED) 1668 return; 1669 1670 credits = csk->copied_seq - csk->rcv_wup; 1671 if (unlikely(!credits)) 1672 return; 1673 must_send = credits + 16384 >= csk->rcv_win; 1674 if (must_send || credits >= cdev->rx_credit_thres) 1675 csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits); 1676 } 1677 1678 void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk) 1679 { 1680 struct cxgbi_device *cdev = csk->cdev; 1681 struct iscsi_conn *conn = csk->user_data; 1682 struct sk_buff *skb; 1683 unsigned int read = 0; 1684 int err = 0; 1685 1686 log_debug(1 << CXGBI_DBG_PDU_RX, 1687 "csk 0x%p, conn 0x%p.\n", csk, conn); 1688 1689 if (unlikely(!conn || conn->suspend_rx)) { 1690 log_debug(1 << CXGBI_DBG_PDU_RX, 1691 "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n", 1692 csk, conn, conn ? conn->id : 0xFF, 1693 conn ? conn->suspend_rx : 0xFF); 1694 return; 1695 } 1696 1697 while (!err) { 1698 skb = skb_peek(&csk->receive_queue); 1699 if (!skb || 1700 !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) { 1701 if (skb) 1702 log_debug(1 << CXGBI_DBG_PDU_RX, 1703 "skb 0x%p, NOT ready 0x%lx.\n", 1704 skb, cxgbi_skcb_flags(skb)); 1705 break; 1706 } 1707 __skb_unlink(skb, &csk->receive_queue); 1708 1709 read += cxgbi_skcb_rx_pdulen(skb); 1710 log_debug(1 << CXGBI_DBG_PDU_RX, 1711 "csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n", 1712 csk, skb, skb->len, cxgbi_skcb_flags(skb), 1713 cxgbi_skcb_rx_pdulen(skb)); 1714 1715 if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) { 1716 err = skb_read_pdu_bhs(conn, skb); 1717 if (err < 0) { 1718 pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, " 1719 "f 0x%lx, plen %u.\n", 1720 csk, skb, skb->len, 1721 cxgbi_skcb_flags(skb), 1722 cxgbi_skcb_rx_pdulen(skb)); 1723 goto skb_done; 1724 } 1725 err = skb_read_pdu_data(conn, skb, skb, 1726 err + cdev->skb_rx_extra); 1727 if (err < 0) 1728 pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, " 1729 "f 0x%lx, plen %u.\n", 1730 csk, skb, skb->len, 1731 cxgbi_skcb_flags(skb), 1732 cxgbi_skcb_rx_pdulen(skb)); 1733 } else { 1734 err = skb_read_pdu_bhs(conn, skb); 1735 if (err < 0) { 1736 pr_err("bhs, csk 0x%p, skb 0x%p,%u, " 1737 "f 0x%lx, plen %u.\n", 1738 csk, skb, skb->len, 1739 cxgbi_skcb_flags(skb), 1740 cxgbi_skcb_rx_pdulen(skb)); 1741 goto skb_done; 1742 } 1743 1744 if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) { 1745 struct sk_buff *dskb; 1746 1747 dskb = skb_peek(&csk->receive_queue); 1748 if (!dskb) { 1749 pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx," 1750 " plen %u, NO data.\n", 1751 csk, skb, skb->len, 1752 cxgbi_skcb_flags(skb), 1753 cxgbi_skcb_rx_pdulen(skb)); 1754 err = -EIO; 1755 goto skb_done; 1756 } 1757 __skb_unlink(dskb, &csk->receive_queue); 1758 1759 err = skb_read_pdu_data(conn, skb, dskb, 0); 1760 if (err < 0) 1761 pr_err("data, csk 0x%p, skb 0x%p,%u, " 1762 "f 0x%lx, plen %u, dskb 0x%p," 1763 "%u.\n", 1764 csk, skb, skb->len, 1765 cxgbi_skcb_flags(skb), 1766 cxgbi_skcb_rx_pdulen(skb), 1767 dskb, dskb->len); 1768 __kfree_skb(dskb); 1769 } else 1770 err = skb_read_pdu_data(conn, skb, skb, 0); 1771 } 1772 skb_done: 1773 __kfree_skb(skb); 1774 1775 if (err < 0) 1776 break; 1777 } 1778 1779 log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read); 1780 if (read) { 1781 csk->copied_seq += read; 1782 csk_return_rx_credits(csk, read); 1783 conn->rxdata_octets += read; 1784 } 1785 1786 if (err < 0) { 1787 pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n", 1788 csk, conn, err, read); 1789 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1790 } 1791 } 1792 EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready); 1793 1794 static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt, 1795 unsigned int offset, unsigned int *off, 1796 struct scatterlist **sgp) 1797 { 1798 int i; 1799 struct scatterlist *sg; 1800 1801 for_each_sg(sgl, sg, sgcnt, i) { 1802 if (offset < sg->length) { 1803 *off = offset; 1804 *sgp = sg; 1805 return 0; 1806 } 1807 offset -= sg->length; 1808 } 1809 return -EFAULT; 1810 } 1811 1812 static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset, 1813 unsigned int dlen, struct page_frag *frags, 1814 int frag_max) 1815 { 1816 unsigned int datalen = dlen; 1817 unsigned int sglen = sg->length - sgoffset; 1818 struct page *page = sg_page(sg); 1819 int i; 1820 1821 i = 0; 1822 do { 1823 unsigned int copy; 1824 1825 if (!sglen) { 1826 sg = sg_next(sg); 1827 if (!sg) { 1828 pr_warn("sg %d NULL, len %u/%u.\n", 1829 i, datalen, dlen); 1830 return -EINVAL; 1831 } 1832 sgoffset = 0; 1833 sglen = sg->length; 1834 page = sg_page(sg); 1835 1836 } 1837 copy = min(datalen, sglen); 1838 if (i && page == frags[i - 1].page && 1839 sgoffset + sg->offset == 1840 frags[i - 1].offset + frags[i - 1].size) { 1841 frags[i - 1].size += copy; 1842 } else { 1843 if (i >= frag_max) { 1844 pr_warn("too many pages %u, dlen %u.\n", 1845 frag_max, dlen); 1846 return -EINVAL; 1847 } 1848 1849 frags[i].page = page; 1850 frags[i].offset = sg->offset + sgoffset; 1851 frags[i].size = copy; 1852 i++; 1853 } 1854 datalen -= copy; 1855 sgoffset += copy; 1856 sglen -= copy; 1857 } while (datalen); 1858 1859 return i; 1860 } 1861 1862 int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode) 1863 { 1864 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; 1865 struct cxgbi_conn *cconn = tcp_conn->dd_data; 1866 struct cxgbi_device *cdev = cconn->chba->cdev; 1867 struct iscsi_conn *conn = task->conn; 1868 struct iscsi_tcp_task *tcp_task = task->dd_data; 1869 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); 1870 struct scsi_cmnd *sc = task->sc; 1871 int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX; 1872 1873 tcp_task->dd_data = tdata; 1874 task->hdr = NULL; 1875 1876 if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) && 1877 (opcode == ISCSI_OP_SCSI_DATA_OUT || 1878 (opcode == ISCSI_OP_SCSI_CMD && 1879 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE)))) 1880 /* data could goes into skb head */ 1881 headroom += min_t(unsigned int, 1882 SKB_MAX_HEAD(cdev->skb_tx_rsvd), 1883 conn->max_xmit_dlength); 1884 1885 tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC); 1886 if (!tdata->skb) { 1887 struct cxgbi_sock *csk = cconn->cep->csk; 1888 struct net_device *ndev = cdev->ports[csk->port_id]; 1889 ndev->stats.tx_dropped++; 1890 return -ENOMEM; 1891 } 1892 1893 skb_reserve(tdata->skb, cdev->skb_tx_rsvd); 1894 task->hdr = (struct iscsi_hdr *)tdata->skb->data; 1895 task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */ 1896 1897 /* data_out uses scsi_cmd's itt */ 1898 if (opcode != ISCSI_OP_SCSI_DATA_OUT) 1899 task_reserve_itt(task, &task->hdr->itt); 1900 1901 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 1902 "task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n", 1903 task, opcode, tdata->skb, cdev->skb_tx_rsvd, headroom, 1904 conn->max_xmit_dlength, ntohl(task->hdr->itt)); 1905 1906 return 0; 1907 } 1908 EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu); 1909 1910 static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc) 1911 { 1912 if (hcrc || dcrc) { 1913 u8 submode = 0; 1914 1915 if (hcrc) 1916 submode |= 1; 1917 if (dcrc) 1918 submode |= 2; 1919 cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode; 1920 } else 1921 cxgbi_skcb_ulp_mode(skb) = 0; 1922 } 1923 1924 int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset, 1925 unsigned int count) 1926 { 1927 struct iscsi_conn *conn = task->conn; 1928 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); 1929 struct sk_buff *skb = tdata->skb; 1930 unsigned int datalen = count; 1931 int i, padlen = iscsi_padding(count); 1932 struct page *pg; 1933 1934 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 1935 "task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n", 1936 task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK, 1937 ntohl(task->cmdsn), ntohl(task->hdr->itt), offset, count); 1938 1939 skb_put(skb, task->hdr_len); 1940 tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0); 1941 if (!count) 1942 return 0; 1943 1944 if (task->sc) { 1945 struct scsi_data_buffer *sdb = scsi_out(task->sc); 1946 struct scatterlist *sg = NULL; 1947 int err; 1948 1949 tdata->offset = offset; 1950 tdata->count = count; 1951 err = sgl_seek_offset( 1952 sdb->table.sgl, sdb->table.nents, 1953 tdata->offset, &tdata->sgoffset, &sg); 1954 if (err < 0) { 1955 pr_warn("tpdu, sgl %u, bad offset %u/%u.\n", 1956 sdb->table.nents, tdata->offset, sdb->length); 1957 return err; 1958 } 1959 err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count, 1960 tdata->frags, MAX_PDU_FRAGS); 1961 if (err < 0) { 1962 pr_warn("tpdu, sgl %u, bad offset %u + %u.\n", 1963 sdb->table.nents, tdata->offset, tdata->count); 1964 return err; 1965 } 1966 tdata->nr_frags = err; 1967 1968 if (tdata->nr_frags > MAX_SKB_FRAGS || 1969 (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) { 1970 char *dst = skb->data + task->hdr_len; 1971 struct page_frag *frag = tdata->frags; 1972 1973 /* data fits in the skb's headroom */ 1974 for (i = 0; i < tdata->nr_frags; i++, frag++) { 1975 char *src = kmap_atomic(frag->page); 1976 1977 memcpy(dst, src+frag->offset, frag->size); 1978 dst += frag->size; 1979 kunmap_atomic(src); 1980 } 1981 if (padlen) { 1982 memset(dst, 0, padlen); 1983 padlen = 0; 1984 } 1985 skb_put(skb, count + padlen); 1986 } else { 1987 /* data fit into frag_list */ 1988 for (i = 0; i < tdata->nr_frags; i++) { 1989 __skb_fill_page_desc(skb, i, 1990 tdata->frags[i].page, 1991 tdata->frags[i].offset, 1992 tdata->frags[i].size); 1993 skb_frag_ref(skb, i); 1994 } 1995 skb_shinfo(skb)->nr_frags = tdata->nr_frags; 1996 skb->len += count; 1997 skb->data_len += count; 1998 skb->truesize += count; 1999 } 2000 2001 } else { 2002 pg = virt_to_page(task->data); 2003 2004 get_page(pg); 2005 skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data), 2006 count); 2007 skb->len += count; 2008 skb->data_len += count; 2009 skb->truesize += count; 2010 } 2011 2012 if (padlen) { 2013 i = skb_shinfo(skb)->nr_frags; 2014 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 2015 virt_to_page(padding), offset_in_page(padding), 2016 padlen); 2017 2018 skb->data_len += padlen; 2019 skb->truesize += padlen; 2020 skb->len += padlen; 2021 } 2022 2023 return 0; 2024 } 2025 EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu); 2026 2027 int cxgbi_conn_xmit_pdu(struct iscsi_task *task) 2028 { 2029 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; 2030 struct cxgbi_conn *cconn = tcp_conn->dd_data; 2031 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); 2032 struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; 2033 struct sk_buff *skb = tdata->skb; 2034 struct cxgbi_sock *csk = NULL; 2035 unsigned int datalen; 2036 int err; 2037 2038 if (!skb) { 2039 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 2040 "task 0x%p, skb NULL.\n", task); 2041 return 0; 2042 } 2043 2044 if (cconn && cconn->cep) 2045 csk = cconn->cep->csk; 2046 if (!csk) { 2047 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 2048 "task 0x%p, csk gone.\n", task); 2049 return -EPIPE; 2050 } 2051 2052 datalen = skb->data_len; 2053 tdata->skb = NULL; 2054 2055 /* write ppod first if using ofldq to write ppod */ 2056 if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) { 2057 struct cxgbi_ppm *ppm = csk->cdev->cdev2ppm(csk->cdev); 2058 2059 ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_VALID; 2060 if (csk->cdev->csk_ddp_set_map(ppm, csk, ttinfo) < 0) 2061 pr_err("task 0x%p, ppod writing using ofldq failed.\n", 2062 task); 2063 /* continue. Let fl get the data */ 2064 } 2065 2066 err = cxgbi_sock_send_pdus(cconn->cep->csk, skb); 2067 if (err > 0) { 2068 int pdulen = err; 2069 2070 log_debug(1 << CXGBI_DBG_PDU_TX, 2071 "task 0x%p,0x%p, skb 0x%p, len %u/%u, rv %d.\n", 2072 task, task->sc, skb, skb->len, skb->data_len, err); 2073 2074 if (task->conn->hdrdgst_en) 2075 pdulen += ISCSI_DIGEST_SIZE; 2076 2077 if (datalen && task->conn->datadgst_en) 2078 pdulen += ISCSI_DIGEST_SIZE; 2079 2080 task->conn->txdata_octets += pdulen; 2081 return 0; 2082 } 2083 2084 if (err == -EAGAIN || err == -ENOBUFS) { 2085 log_debug(1 << CXGBI_DBG_PDU_TX, 2086 "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n", 2087 task, skb, skb->len, skb->data_len, err); 2088 /* reset skb to send when we are called again */ 2089 tdata->skb = skb; 2090 return err; 2091 } 2092 2093 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 2094 "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n", 2095 task->itt, skb, skb->len, skb->data_len, err); 2096 2097 kfree_skb(skb); 2098 2099 iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); 2100 iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED); 2101 return err; 2102 } 2103 EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu); 2104 2105 void cxgbi_cleanup_task(struct iscsi_task *task) 2106 { 2107 struct iscsi_tcp_task *tcp_task = task->dd_data; 2108 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); 2109 2110 log_debug(1 << CXGBI_DBG_ISCSI, 2111 "task 0x%p, skb 0x%p, itt 0x%x.\n", 2112 task, tdata->skb, task->hdr_itt); 2113 2114 tcp_task->dd_data = NULL; 2115 /* never reached the xmit task callout */ 2116 if (tdata->skb) 2117 __kfree_skb(tdata->skb); 2118 2119 task_release_itt(task, task->hdr_itt); 2120 memset(tdata, 0, sizeof(*tdata)); 2121 2122 iscsi_tcp_cleanup_task(task); 2123 } 2124 EXPORT_SYMBOL_GPL(cxgbi_cleanup_task); 2125 2126 void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn, 2127 struct iscsi_stats *stats) 2128 { 2129 struct iscsi_conn *conn = cls_conn->dd_data; 2130 2131 stats->txdata_octets = conn->txdata_octets; 2132 stats->rxdata_octets = conn->rxdata_octets; 2133 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; 2134 stats->dataout_pdus = conn->dataout_pdus_cnt; 2135 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; 2136 stats->datain_pdus = conn->datain_pdus_cnt; 2137 stats->r2t_pdus = conn->r2t_pdus_cnt; 2138 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; 2139 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; 2140 stats->digest_err = 0; 2141 stats->timeout_err = 0; 2142 stats->custom_length = 1; 2143 strcpy(stats->custom[0].desc, "eh_abort_cnt"); 2144 stats->custom[0].value = conn->eh_abort_cnt; 2145 } 2146 EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats); 2147 2148 static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn) 2149 { 2150 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 2151 struct cxgbi_conn *cconn = tcp_conn->dd_data; 2152 struct cxgbi_device *cdev = cconn->chba->cdev; 2153 unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd); 2154 unsigned int max_def = 512 * MAX_SKB_FRAGS; 2155 unsigned int max = max(max_def, headroom); 2156 2157 max = min(cconn->chba->cdev->tx_max_size, max); 2158 if (conn->max_xmit_dlength) 2159 conn->max_xmit_dlength = min(conn->max_xmit_dlength, max); 2160 else 2161 conn->max_xmit_dlength = max; 2162 cxgbi_align_pdu_size(conn->max_xmit_dlength); 2163 2164 return 0; 2165 } 2166 2167 static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn) 2168 { 2169 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 2170 struct cxgbi_conn *cconn = tcp_conn->dd_data; 2171 unsigned int max = cconn->chba->cdev->rx_max_size; 2172 2173 cxgbi_align_pdu_size(max); 2174 2175 if (conn->max_recv_dlength) { 2176 if (conn->max_recv_dlength > max) { 2177 pr_err("MaxRecvDataSegmentLength %u > %u.\n", 2178 conn->max_recv_dlength, max); 2179 return -EINVAL; 2180 } 2181 conn->max_recv_dlength = min(conn->max_recv_dlength, max); 2182 cxgbi_align_pdu_size(conn->max_recv_dlength); 2183 } else 2184 conn->max_recv_dlength = max; 2185 2186 return 0; 2187 } 2188 2189 int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn, 2190 enum iscsi_param param, char *buf, int buflen) 2191 { 2192 struct iscsi_conn *conn = cls_conn->dd_data; 2193 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 2194 struct cxgbi_conn *cconn = tcp_conn->dd_data; 2195 struct cxgbi_sock *csk = cconn->cep->csk; 2196 int err; 2197 2198 log_debug(1 << CXGBI_DBG_ISCSI, 2199 "cls_conn 0x%p, param %d, buf(%d) %s.\n", 2200 cls_conn, param, buflen, buf); 2201 2202 switch (param) { 2203 case ISCSI_PARAM_HDRDGST_EN: 2204 err = iscsi_set_param(cls_conn, param, buf, buflen); 2205 if (!err && conn->hdrdgst_en) 2206 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, 2207 conn->hdrdgst_en, 2208 conn->datadgst_en, 0); 2209 break; 2210 case ISCSI_PARAM_DATADGST_EN: 2211 err = iscsi_set_param(cls_conn, param, buf, buflen); 2212 if (!err && conn->datadgst_en) 2213 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, 2214 conn->hdrdgst_en, 2215 conn->datadgst_en, 0); 2216 break; 2217 case ISCSI_PARAM_MAX_R2T: 2218 return iscsi_tcp_set_max_r2t(conn, buf); 2219 case ISCSI_PARAM_MAX_RECV_DLENGTH: 2220 err = iscsi_set_param(cls_conn, param, buf, buflen); 2221 if (!err) 2222 err = cxgbi_conn_max_recv_dlength(conn); 2223 break; 2224 case ISCSI_PARAM_MAX_XMIT_DLENGTH: 2225 err = iscsi_set_param(cls_conn, param, buf, buflen); 2226 if (!err) 2227 err = cxgbi_conn_max_xmit_dlength(conn); 2228 break; 2229 default: 2230 return iscsi_set_param(cls_conn, param, buf, buflen); 2231 } 2232 return err; 2233 } 2234 EXPORT_SYMBOL_GPL(cxgbi_set_conn_param); 2235 2236 static inline int csk_print_port(struct cxgbi_sock *csk, char *buf) 2237 { 2238 int len; 2239 2240 cxgbi_sock_get(csk); 2241 len = sprintf(buf, "%hu\n", ntohs(csk->daddr.sin_port)); 2242 cxgbi_sock_put(csk); 2243 2244 return len; 2245 } 2246 2247 static inline int csk_print_ip(struct cxgbi_sock *csk, char *buf) 2248 { 2249 int len; 2250 2251 cxgbi_sock_get(csk); 2252 if (csk->csk_family == AF_INET) 2253 len = sprintf(buf, "%pI4", 2254 &csk->daddr.sin_addr.s_addr); 2255 else 2256 len = sprintf(buf, "%pI6", 2257 &csk->daddr6.sin6_addr); 2258 2259 cxgbi_sock_put(csk); 2260 2261 return len; 2262 } 2263 2264 int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param, 2265 char *buf) 2266 { 2267 struct cxgbi_endpoint *cep = ep->dd_data; 2268 struct cxgbi_sock *csk; 2269 int len; 2270 2271 log_debug(1 << CXGBI_DBG_ISCSI, 2272 "cls_conn 0x%p, param %d.\n", ep, param); 2273 2274 switch (param) { 2275 case ISCSI_PARAM_CONN_PORT: 2276 case ISCSI_PARAM_CONN_ADDRESS: 2277 if (!cep) 2278 return -ENOTCONN; 2279 2280 csk = cep->csk; 2281 if (!csk) 2282 return -ENOTCONN; 2283 2284 return iscsi_conn_get_addr_param((struct sockaddr_storage *) 2285 &csk->daddr, param, buf); 2286 default: 2287 return -ENOSYS; 2288 } 2289 return len; 2290 } 2291 EXPORT_SYMBOL_GPL(cxgbi_get_ep_param); 2292 2293 struct iscsi_cls_conn * 2294 cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid) 2295 { 2296 struct iscsi_cls_conn *cls_conn; 2297 struct iscsi_conn *conn; 2298 struct iscsi_tcp_conn *tcp_conn; 2299 struct cxgbi_conn *cconn; 2300 2301 cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid); 2302 if (!cls_conn) 2303 return NULL; 2304 2305 conn = cls_conn->dd_data; 2306 tcp_conn = conn->dd_data; 2307 cconn = tcp_conn->dd_data; 2308 cconn->iconn = conn; 2309 2310 log_debug(1 << CXGBI_DBG_ISCSI, 2311 "cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n", 2312 cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn); 2313 2314 return cls_conn; 2315 } 2316 EXPORT_SYMBOL_GPL(cxgbi_create_conn); 2317 2318 int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, 2319 struct iscsi_cls_conn *cls_conn, 2320 u64 transport_eph, int is_leading) 2321 { 2322 struct iscsi_conn *conn = cls_conn->dd_data; 2323 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 2324 struct cxgbi_conn *cconn = tcp_conn->dd_data; 2325 struct cxgbi_ppm *ppm; 2326 struct iscsi_endpoint *ep; 2327 struct cxgbi_endpoint *cep; 2328 struct cxgbi_sock *csk; 2329 int err; 2330 2331 ep = iscsi_lookup_endpoint(transport_eph); 2332 if (!ep) 2333 return -EINVAL; 2334 2335 /* setup ddp pagesize */ 2336 cep = ep->dd_data; 2337 csk = cep->csk; 2338 2339 ppm = csk->cdev->cdev2ppm(csk->cdev); 2340 err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, 2341 ppm->tformat.pgsz_idx_dflt, 0); 2342 if (err < 0) 2343 return err; 2344 2345 err = iscsi_conn_bind(cls_session, cls_conn, is_leading); 2346 if (err) 2347 return -EINVAL; 2348 2349 /* calculate the tag idx bits needed for this conn based on cmds_max */ 2350 cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1; 2351 2352 write_lock_bh(&csk->callback_lock); 2353 csk->user_data = conn; 2354 cconn->chba = cep->chba; 2355 cconn->cep = cep; 2356 cep->cconn = cconn; 2357 write_unlock_bh(&csk->callback_lock); 2358 2359 cxgbi_conn_max_xmit_dlength(conn); 2360 cxgbi_conn_max_recv_dlength(conn); 2361 2362 log_debug(1 << CXGBI_DBG_ISCSI, 2363 "cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n", 2364 cls_session, cls_conn, ep, cconn, csk); 2365 /* init recv engine */ 2366 iscsi_tcp_hdr_recv_prep(tcp_conn); 2367 2368 return 0; 2369 } 2370 EXPORT_SYMBOL_GPL(cxgbi_bind_conn); 2371 2372 struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep, 2373 u16 cmds_max, u16 qdepth, 2374 u32 initial_cmdsn) 2375 { 2376 struct cxgbi_endpoint *cep; 2377 struct cxgbi_hba *chba; 2378 struct Scsi_Host *shost; 2379 struct iscsi_cls_session *cls_session; 2380 struct iscsi_session *session; 2381 2382 if (!ep) { 2383 pr_err("missing endpoint.\n"); 2384 return NULL; 2385 } 2386 2387 cep = ep->dd_data; 2388 chba = cep->chba; 2389 shost = chba->shost; 2390 2391 BUG_ON(chba != iscsi_host_priv(shost)); 2392 2393 cls_session = iscsi_session_setup(chba->cdev->itp, shost, 2394 cmds_max, 0, 2395 sizeof(struct iscsi_tcp_task) + 2396 sizeof(struct cxgbi_task_data), 2397 initial_cmdsn, ISCSI_MAX_TARGET); 2398 if (!cls_session) 2399 return NULL; 2400 2401 session = cls_session->dd_data; 2402 if (iscsi_tcp_r2tpool_alloc(session)) 2403 goto remove_session; 2404 2405 log_debug(1 << CXGBI_DBG_ISCSI, 2406 "ep 0x%p, cls sess 0x%p.\n", ep, cls_session); 2407 return cls_session; 2408 2409 remove_session: 2410 iscsi_session_teardown(cls_session); 2411 return NULL; 2412 } 2413 EXPORT_SYMBOL_GPL(cxgbi_create_session); 2414 2415 void cxgbi_destroy_session(struct iscsi_cls_session *cls_session) 2416 { 2417 log_debug(1 << CXGBI_DBG_ISCSI, 2418 "cls sess 0x%p.\n", cls_session); 2419 2420 iscsi_tcp_r2tpool_free(cls_session->dd_data); 2421 iscsi_session_teardown(cls_session); 2422 } 2423 EXPORT_SYMBOL_GPL(cxgbi_destroy_session); 2424 2425 int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, 2426 char *buf, int buflen) 2427 { 2428 struct cxgbi_hba *chba = iscsi_host_priv(shost); 2429 2430 if (!chba->ndev) { 2431 shost_printk(KERN_ERR, shost, "Could not get host param. " 2432 "netdev for host not set.\n"); 2433 return -ENODEV; 2434 } 2435 2436 log_debug(1 << CXGBI_DBG_ISCSI, 2437 "shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n", 2438 shost, chba, chba->ndev->name, param, buflen, buf); 2439 2440 switch (param) { 2441 case ISCSI_HOST_PARAM_IPADDRESS: 2442 { 2443 __be32 addr = in_aton(buf); 2444 log_debug(1 << CXGBI_DBG_ISCSI, 2445 "hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr); 2446 cxgbi_set_iscsi_ipv4(chba, addr); 2447 return 0; 2448 } 2449 case ISCSI_HOST_PARAM_HWADDRESS: 2450 case ISCSI_HOST_PARAM_NETDEV_NAME: 2451 return 0; 2452 default: 2453 return iscsi_host_set_param(shost, param, buf, buflen); 2454 } 2455 } 2456 EXPORT_SYMBOL_GPL(cxgbi_set_host_param); 2457 2458 int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, 2459 char *buf) 2460 { 2461 struct cxgbi_hba *chba = iscsi_host_priv(shost); 2462 int len = 0; 2463 2464 if (!chba->ndev) { 2465 shost_printk(KERN_ERR, shost, "Could not get host param. " 2466 "netdev for host not set.\n"); 2467 return -ENODEV; 2468 } 2469 2470 log_debug(1 << CXGBI_DBG_ISCSI, 2471 "shost 0x%p, hba 0x%p,%s, param %d.\n", 2472 shost, chba, chba->ndev->name, param); 2473 2474 switch (param) { 2475 case ISCSI_HOST_PARAM_HWADDRESS: 2476 len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6); 2477 break; 2478 case ISCSI_HOST_PARAM_NETDEV_NAME: 2479 len = sprintf(buf, "%s\n", chba->ndev->name); 2480 break; 2481 case ISCSI_HOST_PARAM_IPADDRESS: 2482 { 2483 struct cxgbi_sock *csk = find_sock_on_port(chba->cdev, 2484 chba->port_id); 2485 if (csk) { 2486 len = sprintf(buf, "%pIS", 2487 (struct sockaddr *)&csk->saddr); 2488 } 2489 log_debug(1 << CXGBI_DBG_ISCSI, 2490 "hba %s, addr %s.\n", chba->ndev->name, buf); 2491 break; 2492 } 2493 default: 2494 return iscsi_host_get_param(shost, param, buf); 2495 } 2496 2497 return len; 2498 } 2499 EXPORT_SYMBOL_GPL(cxgbi_get_host_param); 2500 2501 struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost, 2502 struct sockaddr *dst_addr, 2503 int non_blocking) 2504 { 2505 struct iscsi_endpoint *ep; 2506 struct cxgbi_endpoint *cep; 2507 struct cxgbi_hba *hba = NULL; 2508 struct cxgbi_sock *csk; 2509 int err = -EINVAL; 2510 2511 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, 2512 "shost 0x%p, non_blocking %d, dst_addr 0x%p.\n", 2513 shost, non_blocking, dst_addr); 2514 2515 if (shost) { 2516 hba = iscsi_host_priv(shost); 2517 if (!hba) { 2518 pr_info("shost 0x%p, priv NULL.\n", shost); 2519 goto err_out; 2520 } 2521 } 2522 2523 if (dst_addr->sa_family == AF_INET) { 2524 csk = cxgbi_check_route(dst_addr); 2525 #if IS_ENABLED(CONFIG_IPV6) 2526 } else if (dst_addr->sa_family == AF_INET6) { 2527 csk = cxgbi_check_route6(dst_addr); 2528 #endif 2529 } else { 2530 pr_info("address family 0x%x NOT supported.\n", 2531 dst_addr->sa_family); 2532 err = -EAFNOSUPPORT; 2533 return (struct iscsi_endpoint *)ERR_PTR(err); 2534 } 2535 2536 if (IS_ERR(csk)) 2537 return (struct iscsi_endpoint *)csk; 2538 cxgbi_sock_get(csk); 2539 2540 if (!hba) 2541 hba = csk->cdev->hbas[csk->port_id]; 2542 else if (hba != csk->cdev->hbas[csk->port_id]) { 2543 pr_info("Could not connect through requested host %u" 2544 "hba 0x%p != 0x%p (%u).\n", 2545 shost->host_no, hba, 2546 csk->cdev->hbas[csk->port_id], csk->port_id); 2547 err = -ENOSPC; 2548 goto release_conn; 2549 } 2550 2551 err = sock_get_port(csk); 2552 if (err) 2553 goto release_conn; 2554 2555 cxgbi_sock_set_state(csk, CTP_CONNECTING); 2556 err = csk->cdev->csk_init_act_open(csk); 2557 if (err) 2558 goto release_conn; 2559 2560 if (cxgbi_sock_is_closing(csk)) { 2561 err = -ENOSPC; 2562 pr_info("csk 0x%p is closing.\n", csk); 2563 goto release_conn; 2564 } 2565 2566 ep = iscsi_create_endpoint(sizeof(*cep)); 2567 if (!ep) { 2568 err = -ENOMEM; 2569 pr_info("iscsi alloc ep, OOM.\n"); 2570 goto release_conn; 2571 } 2572 2573 cep = ep->dd_data; 2574 cep->csk = csk; 2575 cep->chba = hba; 2576 2577 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, 2578 "ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n", 2579 ep, cep, csk, hba, hba->ndev->name); 2580 return ep; 2581 2582 release_conn: 2583 cxgbi_sock_put(csk); 2584 cxgbi_sock_closed(csk); 2585 err_out: 2586 return ERR_PTR(err); 2587 } 2588 EXPORT_SYMBOL_GPL(cxgbi_ep_connect); 2589 2590 int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) 2591 { 2592 struct cxgbi_endpoint *cep = ep->dd_data; 2593 struct cxgbi_sock *csk = cep->csk; 2594 2595 if (!cxgbi_sock_is_established(csk)) 2596 return 0; 2597 return 1; 2598 } 2599 EXPORT_SYMBOL_GPL(cxgbi_ep_poll); 2600 2601 void cxgbi_ep_disconnect(struct iscsi_endpoint *ep) 2602 { 2603 struct cxgbi_endpoint *cep = ep->dd_data; 2604 struct cxgbi_conn *cconn = cep->cconn; 2605 struct cxgbi_sock *csk = cep->csk; 2606 2607 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, 2608 "ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n", 2609 ep, cep, cconn, csk, csk->state, csk->flags); 2610 2611 if (cconn && cconn->iconn) { 2612 iscsi_suspend_tx(cconn->iconn); 2613 write_lock_bh(&csk->callback_lock); 2614 cep->csk->user_data = NULL; 2615 cconn->cep = NULL; 2616 write_unlock_bh(&csk->callback_lock); 2617 } 2618 iscsi_destroy_endpoint(ep); 2619 2620 if (likely(csk->state >= CTP_ESTABLISHED)) 2621 need_active_close(csk); 2622 else 2623 cxgbi_sock_closed(csk); 2624 2625 cxgbi_sock_put(csk); 2626 } 2627 EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect); 2628 2629 int cxgbi_iscsi_init(struct iscsi_transport *itp, 2630 struct scsi_transport_template **stt) 2631 { 2632 *stt = iscsi_register_transport(itp); 2633 if (*stt == NULL) { 2634 pr_err("unable to register %s transport 0x%p.\n", 2635 itp->name, itp); 2636 return -ENODEV; 2637 } 2638 log_debug(1 << CXGBI_DBG_ISCSI, 2639 "%s, registered iscsi transport 0x%p.\n", 2640 itp->name, stt); 2641 return 0; 2642 } 2643 EXPORT_SYMBOL_GPL(cxgbi_iscsi_init); 2644 2645 void cxgbi_iscsi_cleanup(struct iscsi_transport *itp, 2646 struct scsi_transport_template **stt) 2647 { 2648 if (*stt) { 2649 log_debug(1 << CXGBI_DBG_ISCSI, 2650 "de-register transport 0x%p, %s, stt 0x%p.\n", 2651 itp, itp->name, *stt); 2652 *stt = NULL; 2653 iscsi_unregister_transport(itp); 2654 } 2655 } 2656 EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup); 2657 2658 umode_t cxgbi_attr_is_visible(int param_type, int param) 2659 { 2660 switch (param_type) { 2661 case ISCSI_HOST_PARAM: 2662 switch (param) { 2663 case ISCSI_HOST_PARAM_NETDEV_NAME: 2664 case ISCSI_HOST_PARAM_HWADDRESS: 2665 case ISCSI_HOST_PARAM_IPADDRESS: 2666 case ISCSI_HOST_PARAM_INITIATOR_NAME: 2667 return S_IRUGO; 2668 default: 2669 return 0; 2670 } 2671 case ISCSI_PARAM: 2672 switch (param) { 2673 case ISCSI_PARAM_MAX_RECV_DLENGTH: 2674 case ISCSI_PARAM_MAX_XMIT_DLENGTH: 2675 case ISCSI_PARAM_HDRDGST_EN: 2676 case ISCSI_PARAM_DATADGST_EN: 2677 case ISCSI_PARAM_CONN_ADDRESS: 2678 case ISCSI_PARAM_CONN_PORT: 2679 case ISCSI_PARAM_EXP_STATSN: 2680 case ISCSI_PARAM_PERSISTENT_ADDRESS: 2681 case ISCSI_PARAM_PERSISTENT_PORT: 2682 case ISCSI_PARAM_PING_TMO: 2683 case ISCSI_PARAM_RECV_TMO: 2684 case ISCSI_PARAM_INITIAL_R2T_EN: 2685 case ISCSI_PARAM_MAX_R2T: 2686 case ISCSI_PARAM_IMM_DATA_EN: 2687 case ISCSI_PARAM_FIRST_BURST: 2688 case ISCSI_PARAM_MAX_BURST: 2689 case ISCSI_PARAM_PDU_INORDER_EN: 2690 case ISCSI_PARAM_DATASEQ_INORDER_EN: 2691 case ISCSI_PARAM_ERL: 2692 case ISCSI_PARAM_TARGET_NAME: 2693 case ISCSI_PARAM_TPGT: 2694 case ISCSI_PARAM_USERNAME: 2695 case ISCSI_PARAM_PASSWORD: 2696 case ISCSI_PARAM_USERNAME_IN: 2697 case ISCSI_PARAM_PASSWORD_IN: 2698 case ISCSI_PARAM_FAST_ABORT: 2699 case ISCSI_PARAM_ABORT_TMO: 2700 case ISCSI_PARAM_LU_RESET_TMO: 2701 case ISCSI_PARAM_TGT_RESET_TMO: 2702 case ISCSI_PARAM_IFACE_NAME: 2703 case ISCSI_PARAM_INITIATOR_NAME: 2704 return S_IRUGO; 2705 default: 2706 return 0; 2707 } 2708 } 2709 2710 return 0; 2711 } 2712 EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible); 2713 2714 static int __init libcxgbi_init_module(void) 2715 { 2716 pr_info("%s", version); 2717 return 0; 2718 } 2719 2720 static void __exit libcxgbi_exit_module(void) 2721 { 2722 cxgbi_device_unregister_all(0xFF); 2723 return; 2724 } 2725 2726 module_init(libcxgbi_init_module); 2727 module_exit(libcxgbi_exit_module); 2728