1 /* 2 * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver. 3 * 4 * Copyright (c) 2010 Chelsio Communications, Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation. 9 * 10 * Written by: Karen Xie (kxie@chelsio.com) 11 * Written by: Rakesh Ranjan (rranjan@chelsio.com) 12 */ 13 14 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ 15 16 #include <linux/skbuff.h> 17 #include <linux/crypto.h> 18 #include <linux/scatterlist.h> 19 #include <linux/pci.h> 20 #include <scsi/scsi.h> 21 #include <scsi/scsi_cmnd.h> 22 #include <scsi/scsi_host.h> 23 #include <linux/if_vlan.h> 24 #include <linux/inet.h> 25 #include <net/dst.h> 26 #include <net/route.h> 27 #include <net/ipv6.h> 28 #include <net/ip6_route.h> 29 #include <net/addrconf.h> 30 31 #include <linux/inetdevice.h> /* ip_dev_find */ 32 #include <linux/module.h> 33 #include <net/tcp.h> 34 35 static unsigned int dbg_level; 36 37 #include "libcxgbi.h" 38 39 #define DRV_MODULE_NAME "libcxgbi" 40 #define DRV_MODULE_DESC "Chelsio iSCSI driver library" 41 #define DRV_MODULE_VERSION "0.9.0" 42 #define DRV_MODULE_RELDATE "Jun. 2010" 43 44 MODULE_AUTHOR("Chelsio Communications, Inc."); 45 MODULE_DESCRIPTION(DRV_MODULE_DESC); 46 MODULE_VERSION(DRV_MODULE_VERSION); 47 MODULE_LICENSE("GPL"); 48 49 module_param(dbg_level, uint, 0644); 50 MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)"); 51 52 53 /* 54 * cxgbi device management 55 * maintains a list of the cxgbi devices 56 */ 57 static LIST_HEAD(cdev_list); 58 static DEFINE_MUTEX(cdev_mutex); 59 60 int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base, 61 unsigned int max_conn) 62 { 63 struct cxgbi_ports_map *pmap = &cdev->pmap; 64 65 pmap->port_csk = cxgbi_alloc_big_mem(max_conn * 66 sizeof(struct cxgbi_sock *), 67 GFP_KERNEL); 68 if (!pmap->port_csk) { 69 pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn); 70 return -ENOMEM; 71 } 72 73 pmap->max_connect = max_conn; 74 pmap->sport_base = base; 75 spin_lock_init(&pmap->lock); 76 return 0; 77 } 78 EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create); 79 80 void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev) 81 { 82 struct cxgbi_ports_map *pmap = &cdev->pmap; 83 struct cxgbi_sock *csk; 84 int i; 85 86 for (i = 0; i < pmap->max_connect; i++) { 87 if (pmap->port_csk[i]) { 88 csk = pmap->port_csk[i]; 89 pmap->port_csk[i] = NULL; 90 log_debug(1 << CXGBI_DBG_SOCK, 91 "csk 0x%p, cdev 0x%p, offload down.\n", 92 csk, cdev); 93 spin_lock_bh(&csk->lock); 94 cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN); 95 cxgbi_sock_closed(csk); 96 spin_unlock_bh(&csk->lock); 97 cxgbi_sock_put(csk); 98 } 99 } 100 } 101 EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup); 102 103 static inline void cxgbi_device_destroy(struct cxgbi_device *cdev) 104 { 105 log_debug(1 << CXGBI_DBG_DEV, 106 "cdev 0x%p, p# %u.\n", cdev, cdev->nports); 107 cxgbi_hbas_remove(cdev); 108 cxgbi_device_portmap_cleanup(cdev); 109 if (cdev->dev_ddp_cleanup) 110 cdev->dev_ddp_cleanup(cdev); 111 else 112 cxgbi_ddp_cleanup(cdev); 113 if (cdev->ddp) 114 cxgbi_ddp_cleanup(cdev); 115 if (cdev->pmap.max_connect) 116 cxgbi_free_big_mem(cdev->pmap.port_csk); 117 kfree(cdev); 118 } 119 120 struct cxgbi_device *cxgbi_device_register(unsigned int extra, 121 unsigned int nports) 122 { 123 struct cxgbi_device *cdev; 124 125 cdev = kzalloc(sizeof(*cdev) + extra + nports * 126 (sizeof(struct cxgbi_hba *) + 127 sizeof(struct net_device *)), 128 
GFP_KERNEL); 129 if (!cdev) { 130 pr_warn("nport %d, OOM.\n", nports); 131 return NULL; 132 } 133 cdev->ports = (struct net_device **)(cdev + 1); 134 cdev->hbas = (struct cxgbi_hba **)(((char*)cdev->ports) + nports * 135 sizeof(struct net_device *)); 136 if (extra) 137 cdev->dd_data = ((char *)cdev->hbas) + 138 nports * sizeof(struct cxgbi_hba *); 139 spin_lock_init(&cdev->pmap.lock); 140 141 mutex_lock(&cdev_mutex); 142 list_add_tail(&cdev->list_head, &cdev_list); 143 mutex_unlock(&cdev_mutex); 144 145 log_debug(1 << CXGBI_DBG_DEV, 146 "cdev 0x%p, p# %u.\n", cdev, nports); 147 return cdev; 148 } 149 EXPORT_SYMBOL_GPL(cxgbi_device_register); 150 151 void cxgbi_device_unregister(struct cxgbi_device *cdev) 152 { 153 log_debug(1 << CXGBI_DBG_DEV, 154 "cdev 0x%p, p# %u,%s.\n", 155 cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : ""); 156 mutex_lock(&cdev_mutex); 157 list_del(&cdev->list_head); 158 mutex_unlock(&cdev_mutex); 159 cxgbi_device_destroy(cdev); 160 } 161 EXPORT_SYMBOL_GPL(cxgbi_device_unregister); 162 163 void cxgbi_device_unregister_all(unsigned int flag) 164 { 165 struct cxgbi_device *cdev, *tmp; 166 167 mutex_lock(&cdev_mutex); 168 list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { 169 if ((cdev->flags & flag) == flag) { 170 log_debug(1 << CXGBI_DBG_DEV, 171 "cdev 0x%p, p# %u,%s.\n", 172 cdev, cdev->nports, cdev->nports ? 173 cdev->ports[0]->name : ""); 174 list_del(&cdev->list_head); 175 cxgbi_device_destroy(cdev); 176 } 177 } 178 mutex_unlock(&cdev_mutex); 179 } 180 EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all); 181 182 struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev) 183 { 184 struct cxgbi_device *cdev, *tmp; 185 186 mutex_lock(&cdev_mutex); 187 list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { 188 if (cdev->lldev == lldev) { 189 mutex_unlock(&cdev_mutex); 190 return cdev; 191 } 192 } 193 mutex_unlock(&cdev_mutex); 194 log_debug(1 << CXGBI_DBG_DEV, 195 "lldev 0x%p, NO match found.\n", lldev); 196 return NULL; 197 } 198 EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev); 199 200 struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev, 201 int *port) 202 { 203 struct net_device *vdev = NULL; 204 struct cxgbi_device *cdev, *tmp; 205 int i; 206 207 if (ndev->priv_flags & IFF_802_1Q_VLAN) { 208 vdev = ndev; 209 ndev = vlan_dev_real_dev(ndev); 210 log_debug(1 << CXGBI_DBG_DEV, 211 "vlan dev %s -> %s.\n", vdev->name, ndev->name); 212 } 213 214 mutex_lock(&cdev_mutex); 215 list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { 216 for (i = 0; i < cdev->nports; i++) { 217 if (ndev == cdev->ports[i]) { 218 cdev->hbas[i]->vdev = vdev; 219 mutex_unlock(&cdev_mutex); 220 if (port) 221 *port = i; 222 return cdev; 223 } 224 } 225 } 226 mutex_unlock(&cdev_mutex); 227 log_debug(1 << CXGBI_DBG_DEV, 228 "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name); 229 return NULL; 230 } 231 EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev); 232 233 static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev, 234 int *port) 235 { 236 struct net_device *vdev = NULL; 237 struct cxgbi_device *cdev, *tmp; 238 int i; 239 240 if (ndev->priv_flags & IFF_802_1Q_VLAN) { 241 vdev = ndev; 242 ndev = vlan_dev_real_dev(ndev); 243 pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name); 244 } 245 246 mutex_lock(&cdev_mutex); 247 list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { 248 for (i = 0; i < cdev->nports; i++) { 249 if (!memcmp(ndev->dev_addr, cdev->ports[i]->dev_addr, 250 MAX_ADDR_LEN)) { 251 cdev->hbas[i]->vdev = vdev; 252 
mutex_unlock(&cdev_mutex); 253 if (port) 254 *port = i; 255 return cdev; 256 } 257 } 258 } 259 mutex_unlock(&cdev_mutex); 260 log_debug(1 << CXGBI_DBG_DEV, 261 "ndev 0x%p, %s, NO match mac found.\n", 262 ndev, ndev->name); 263 return NULL; 264 } 265 266 void cxgbi_hbas_remove(struct cxgbi_device *cdev) 267 { 268 int i; 269 struct cxgbi_hba *chba; 270 271 log_debug(1 << CXGBI_DBG_DEV, 272 "cdev 0x%p, p#%u.\n", cdev, cdev->nports); 273 274 for (i = 0; i < cdev->nports; i++) { 275 chba = cdev->hbas[i]; 276 if (chba) { 277 cdev->hbas[i] = NULL; 278 iscsi_host_remove(chba->shost); 279 pci_dev_put(cdev->pdev); 280 iscsi_host_free(chba->shost); 281 } 282 } 283 } 284 EXPORT_SYMBOL_GPL(cxgbi_hbas_remove); 285 286 int cxgbi_hbas_add(struct cxgbi_device *cdev, unsigned int max_lun, 287 unsigned int max_id, struct scsi_host_template *sht, 288 struct scsi_transport_template *stt) 289 { 290 struct cxgbi_hba *chba; 291 struct Scsi_Host *shost; 292 int i, err; 293 294 log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports); 295 296 for (i = 0; i < cdev->nports; i++) { 297 shost = iscsi_host_alloc(sht, sizeof(*chba), 1); 298 if (!shost) { 299 pr_info("0x%p, p%d, %s, host alloc failed.\n", 300 cdev, i, cdev->ports[i]->name); 301 err = -ENOMEM; 302 goto err_out; 303 } 304 305 shost->transportt = stt; 306 shost->max_lun = max_lun; 307 shost->max_id = max_id; 308 shost->max_channel = 0; 309 shost->max_cmd_len = 16; 310 311 chba = iscsi_host_priv(shost); 312 chba->cdev = cdev; 313 chba->ndev = cdev->ports[i]; 314 chba->shost = shost; 315 316 log_debug(1 << CXGBI_DBG_DEV, 317 "cdev 0x%p, p#%d %s: chba 0x%p.\n", 318 cdev, i, cdev->ports[i]->name, chba); 319 320 pci_dev_get(cdev->pdev); 321 err = iscsi_host_add(shost, &cdev->pdev->dev); 322 if (err) { 323 pr_info("cdev 0x%p, p#%d %s, host add failed.\n", 324 cdev, i, cdev->ports[i]->name); 325 pci_dev_put(cdev->pdev); 326 scsi_host_put(shost); 327 goto err_out; 328 } 329 330 cdev->hbas[i] = chba; 331 } 332 333 return 0; 334 335 err_out: 336 cxgbi_hbas_remove(cdev); 337 return err; 338 } 339 EXPORT_SYMBOL_GPL(cxgbi_hbas_add); 340 341 /* 342 * iSCSI offload 343 * 344 * - source port management 345 * To find a free source port in the port allocation map we use a very simple 346 * rotor scheme to look for the next free port. 347 * 348 * If a source port has been specified make sure that it doesn't collide with 349 * our normal source port allocation map. If it's outside the range of our 350 * allocation/deallocation scheme just let them use it. 351 * 352 * If the source port is outside our allocation range, the caller is 353 * responsible for keeping track of their port usage. 
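 *
 * Concretely, sock_get_port() below scans pmap->port_csk[] circularly,
 * starting just past pmap->next; a free slot idx maps to source port
 * pmap->sport_base + idx (e.g. with sport_base 1024, slot 7 is port 1031).
 * sock_put_port() reverses the mapping and clears the slot.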
354 */ 355 static int sock_get_port(struct cxgbi_sock *csk) 356 { 357 struct cxgbi_device *cdev = csk->cdev; 358 struct cxgbi_ports_map *pmap = &cdev->pmap; 359 unsigned int start; 360 int idx; 361 __be16 *port; 362 363 if (!pmap->max_connect) { 364 pr_err("cdev 0x%p, p#%u %s, NO port map.\n", 365 cdev, csk->port_id, cdev->ports[csk->port_id]->name); 366 return -EADDRNOTAVAIL; 367 } 368 369 if (csk->csk_family == AF_INET) 370 port = &csk->saddr.sin_port; 371 else /* ipv6 */ 372 port = &csk->saddr6.sin6_port; 373 374 if (*port) { 375 pr_err("source port NON-ZERO %u.\n", 376 ntohs(*port)); 377 return -EADDRINUSE; 378 } 379 380 spin_lock_bh(&pmap->lock); 381 if (pmap->used >= pmap->max_connect) { 382 spin_unlock_bh(&pmap->lock); 383 pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n", 384 cdev, csk->port_id, cdev->ports[csk->port_id]->name); 385 return -EADDRNOTAVAIL; 386 } 387 388 start = idx = pmap->next; 389 do { 390 if (++idx >= pmap->max_connect) 391 idx = 0; 392 if (!pmap->port_csk[idx]) { 393 pmap->used++; 394 *port = htons(pmap->sport_base + idx); 395 pmap->next = idx; 396 pmap->port_csk[idx] = csk; 397 spin_unlock_bh(&pmap->lock); 398 cxgbi_sock_get(csk); 399 log_debug(1 << CXGBI_DBG_SOCK, 400 "cdev 0x%p, p#%u %s, p %u, %u.\n", 401 cdev, csk->port_id, 402 cdev->ports[csk->port_id]->name, 403 pmap->sport_base + idx, pmap->next); 404 return 0; 405 } 406 } while (idx != start); 407 spin_unlock_bh(&pmap->lock); 408 409 /* should not happen */ 410 pr_warn("cdev 0x%p, p#%u %s, next %u?\n", 411 cdev, csk->port_id, cdev->ports[csk->port_id]->name, 412 pmap->next); 413 return -EADDRNOTAVAIL; 414 } 415 416 static void sock_put_port(struct cxgbi_sock *csk) 417 { 418 struct cxgbi_device *cdev = csk->cdev; 419 struct cxgbi_ports_map *pmap = &cdev->pmap; 420 __be16 *port; 421 422 if (csk->csk_family == AF_INET) 423 port = &csk->saddr.sin_port; 424 else /* ipv6 */ 425 port = &csk->saddr6.sin6_port; 426 427 if (*port) { 428 int idx = ntohs(*port) - pmap->sport_base; 429 430 *port = 0; 431 if (idx < 0 || idx >= pmap->max_connect) { 432 pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n", 433 cdev, csk->port_id, 434 cdev->ports[csk->port_id]->name, 435 ntohs(*port)); 436 return; 437 } 438 439 spin_lock_bh(&pmap->lock); 440 pmap->port_csk[idx] = NULL; 441 pmap->used--; 442 spin_unlock_bh(&pmap->lock); 443 444 log_debug(1 << CXGBI_DBG_SOCK, 445 "cdev 0x%p, p#%u %s, release %u.\n", 446 cdev, csk->port_id, cdev->ports[csk->port_id]->name, 447 pmap->sport_base + idx); 448 449 cxgbi_sock_put(csk); 450 } 451 } 452 453 /* 454 * iscsi tcp connection 455 */ 456 void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk) 457 { 458 if (csk->cpl_close) { 459 kfree_skb(csk->cpl_close); 460 csk->cpl_close = NULL; 461 } 462 if (csk->cpl_abort_req) { 463 kfree_skb(csk->cpl_abort_req); 464 csk->cpl_abort_req = NULL; 465 } 466 if (csk->cpl_abort_rpl) { 467 kfree_skb(csk->cpl_abort_rpl); 468 csk->cpl_abort_rpl = NULL; 469 } 470 } 471 EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs); 472 473 static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev) 474 { 475 struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO); 476 477 if (!csk) { 478 pr_info("alloc csk %zu failed.\n", sizeof(*csk)); 479 return NULL; 480 } 481 482 if (cdev->csk_alloc_cpls(csk) < 0) { 483 pr_info("csk 0x%p, alloc cpls failed.\n", csk); 484 kfree(csk); 485 return NULL; 486 } 487 488 spin_lock_init(&csk->lock); 489 kref_init(&csk->refcnt); 490 skb_queue_head_init(&csk->receive_queue); 491 skb_queue_head_init(&csk->write_queue); 492 setup_timer(&csk->retry_timer, 
NULL, (unsigned long)csk); 493 rwlock_init(&csk->callback_lock); 494 csk->cdev = cdev; 495 csk->flags = 0; 496 cxgbi_sock_set_state(csk, CTP_CLOSED); 497 498 log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk); 499 500 return csk; 501 } 502 503 static struct rtable *find_route_ipv4(struct flowi4 *fl4, 504 __be32 saddr, __be32 daddr, 505 __be16 sport, __be16 dport, u8 tos) 506 { 507 struct rtable *rt; 508 509 rt = ip_route_output_ports(&init_net, fl4, NULL, daddr, saddr, 510 dport, sport, IPPROTO_TCP, tos, 0); 511 if (IS_ERR(rt)) 512 return NULL; 513 514 return rt; 515 } 516 517 static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) 518 { 519 struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr; 520 struct dst_entry *dst; 521 struct net_device *ndev; 522 struct cxgbi_device *cdev; 523 struct rtable *rt = NULL; 524 struct neighbour *n; 525 struct flowi4 fl4; 526 struct cxgbi_sock *csk = NULL; 527 unsigned int mtu = 0; 528 int port = 0xFFFF; 529 int err = 0; 530 531 rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0); 532 if (!rt) { 533 pr_info("no route to ipv4 0x%x, port %u.\n", 534 be32_to_cpu(daddr->sin_addr.s_addr), 535 be16_to_cpu(daddr->sin_port)); 536 err = -ENETUNREACH; 537 goto err_out; 538 } 539 dst = &rt->dst; 540 n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr); 541 if (!n) { 542 err = -ENODEV; 543 goto rel_rt; 544 } 545 ndev = n->dev; 546 547 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { 548 pr_info("multi-cast route %pI4, port %u, dev %s.\n", 549 &daddr->sin_addr.s_addr, ntohs(daddr->sin_port), 550 ndev->name); 551 err = -ENETUNREACH; 552 goto rel_neigh; 553 } 554 555 if (ndev->flags & IFF_LOOPBACK) { 556 ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr); 557 mtu = ndev->mtu; 558 pr_info("rt dev %s, loopback -> %s, mtu %u.\n", 559 n->dev->name, ndev->name, mtu); 560 } 561 562 cdev = cxgbi_device_find_by_netdev(ndev, &port); 563 if (!cdev) { 564 pr_info("dst %pI4, %s, NOT cxgbi device.\n", 565 &daddr->sin_addr.s_addr, ndev->name); 566 err = -ENETUNREACH; 567 goto rel_neigh; 568 } 569 log_debug(1 << CXGBI_DBG_SOCK, 570 "route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n", 571 &daddr->sin_addr.s_addr, ntohs(daddr->sin_port), 572 port, ndev->name, cdev); 573 574 csk = cxgbi_sock_create(cdev); 575 if (!csk) { 576 err = -ENOMEM; 577 goto rel_neigh; 578 } 579 csk->cdev = cdev; 580 csk->port_id = port; 581 csk->mtu = mtu; 582 csk->dst = dst; 583 584 csk->csk_family = AF_INET; 585 csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr; 586 csk->daddr.sin_port = daddr->sin_port; 587 csk->daddr.sin_family = daddr->sin_family; 588 csk->saddr.sin_family = daddr->sin_family; 589 csk->saddr.sin_addr.s_addr = fl4.saddr; 590 neigh_release(n); 591 592 return csk; 593 594 rel_neigh: 595 neigh_release(n); 596 597 rel_rt: 598 ip_rt_put(rt); 599 if (csk) 600 cxgbi_sock_closed(csk); 601 err_out: 602 return ERR_PTR(err); 603 } 604 605 #if IS_ENABLED(CONFIG_IPV6) 606 static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr, 607 const struct in6_addr *daddr) 608 { 609 struct flowi6 fl; 610 611 if (saddr) 612 memcpy(&fl.saddr, saddr, sizeof(struct in6_addr)); 613 if (daddr) 614 memcpy(&fl.daddr, daddr, sizeof(struct in6_addr)); 615 return (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl); 616 } 617 618 static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr) 619 { 620 struct sockaddr_in6 *daddr6 = (struct sockaddr_in6 *)dst_addr; 621 struct dst_entry *dst; 622 struct net_device *ndev; 623 struct 
cxgbi_device *cdev; 624 struct rt6_info *rt = NULL; 625 struct neighbour *n; 626 struct in6_addr pref_saddr; 627 struct cxgbi_sock *csk = NULL; 628 unsigned int mtu = 0; 629 int port = 0xFFFF; 630 int err = 0; 631 632 rt = find_route_ipv6(NULL, &daddr6->sin6_addr); 633 634 if (!rt) { 635 pr_info("no route to ipv6 %pI6 port %u\n", 636 daddr6->sin6_addr.s6_addr, 637 be16_to_cpu(daddr6->sin6_port)); 638 err = -ENETUNREACH; 639 goto err_out; 640 } 641 642 dst = &rt->dst; 643 644 n = dst_neigh_lookup(dst, &daddr6->sin6_addr); 645 646 if (!n) { 647 pr_info("%pI6, port %u, dst no neighbour.\n", 648 daddr6->sin6_addr.s6_addr, 649 be16_to_cpu(daddr6->sin6_port)); 650 err = -ENETUNREACH; 651 goto rel_rt; 652 } 653 ndev = n->dev; 654 655 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) { 656 pr_info("multi-cast route %pI6 port %u, dev %s.\n", 657 daddr6->sin6_addr.s6_addr, 658 ntohs(daddr6->sin6_port), ndev->name); 659 err = -ENETUNREACH; 660 goto rel_rt; 661 } 662 663 cdev = cxgbi_device_find_by_netdev(ndev, &port); 664 if (!cdev) 665 cdev = cxgbi_device_find_by_mac(ndev, &port); 666 if (!cdev) { 667 pr_info("dst %pI6 %s, NOT cxgbi device.\n", 668 daddr6->sin6_addr.s6_addr, ndev->name); 669 err = -ENETUNREACH; 670 goto rel_rt; 671 } 672 log_debug(1 << CXGBI_DBG_SOCK, 673 "route to %pI6 :%u, ndev p#%d,%s, cdev 0x%p.\n", 674 daddr6->sin6_addr.s6_addr, ntohs(daddr6->sin6_port), port, 675 ndev->name, cdev); 676 677 csk = cxgbi_sock_create(cdev); 678 if (!csk) { 679 err = -ENOMEM; 680 goto rel_rt; 681 } 682 csk->cdev = cdev; 683 csk->port_id = port; 684 csk->mtu = mtu; 685 csk->dst = dst; 686 687 if (ipv6_addr_any(&rt->rt6i_prefsrc.addr)) { 688 struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt); 689 690 err = ipv6_dev_get_saddr(&init_net, idev ? idev->dev : NULL, 691 &daddr6->sin6_addr, 0, &pref_saddr); 692 if (err) { 693 pr_info("failed to get source address to reach %pI6\n", 694 &daddr6->sin6_addr); 695 goto rel_rt; 696 } 697 } else { 698 pref_saddr = rt->rt6i_prefsrc.addr; 699 } 700 701 csk->csk_family = AF_INET6; 702 csk->daddr6.sin6_addr = daddr6->sin6_addr; 703 csk->daddr6.sin6_port = daddr6->sin6_port; 704 csk->daddr6.sin6_family = daddr6->sin6_family; 705 csk->saddr6.sin6_addr = pref_saddr; 706 707 neigh_release(n); 708 return csk; 709 710 rel_rt: 711 if (n) 712 neigh_release(n); 713 714 ip6_rt_put(rt); 715 if (csk) 716 cxgbi_sock_closed(csk); 717 err_out: 718 return ERR_PTR(err); 719 } 720 #endif /* IS_ENABLED(CONFIG_IPV6) */ 721 722 void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn, 723 unsigned int opt) 724 { 725 csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn; 726 dst_confirm(csk->dst); 727 smp_mb(); 728 cxgbi_sock_set_state(csk, CTP_ESTABLISHED); 729 } 730 EXPORT_SYMBOL_GPL(cxgbi_sock_established); 731 732 static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk) 733 { 734 log_debug(1 << CXGBI_DBG_SOCK, 735 "csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n", 736 csk, csk->state, csk->flags, csk->user_data); 737 738 if (csk->state != CTP_ESTABLISHED) { 739 read_lock_bh(&csk->callback_lock); 740 if (csk->user_data) 741 iscsi_conn_failure(csk->user_data, 742 ISCSI_ERR_CONN_FAILED); 743 read_unlock_bh(&csk->callback_lock); 744 } 745 } 746 747 void cxgbi_sock_closed(struct cxgbi_sock *csk) 748 { 749 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 750 csk, (csk)->state, (csk)->flags, (csk)->tid); 751 cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED); 752 if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED) 753 return; 754 if 
(csk->saddr.sin_port) 755 sock_put_port(csk); 756 if (csk->dst) 757 dst_release(csk->dst); 758 csk->cdev->csk_release_offload_resources(csk); 759 cxgbi_sock_set_state(csk, CTP_CLOSED); 760 cxgbi_inform_iscsi_conn_closing(csk); 761 cxgbi_sock_put(csk); 762 } 763 EXPORT_SYMBOL_GPL(cxgbi_sock_closed); 764 765 static void need_active_close(struct cxgbi_sock *csk) 766 { 767 int data_lost; 768 int close_req = 0; 769 770 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 771 csk, (csk)->state, (csk)->flags, (csk)->tid); 772 spin_lock_bh(&csk->lock); 773 dst_confirm(csk->dst); 774 data_lost = skb_queue_len(&csk->receive_queue); 775 __skb_queue_purge(&csk->receive_queue); 776 777 if (csk->state == CTP_ACTIVE_OPEN) 778 cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED); 779 else if (csk->state == CTP_ESTABLISHED) { 780 close_req = 1; 781 cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE); 782 } else if (csk->state == CTP_PASSIVE_CLOSE) { 783 close_req = 1; 784 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2); 785 } 786 787 if (close_req) { 788 if (data_lost) 789 csk->cdev->csk_send_abort_req(csk); 790 else 791 csk->cdev->csk_send_close_req(csk); 792 } 793 794 spin_unlock_bh(&csk->lock); 795 } 796 797 void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno) 798 { 799 pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n", 800 csk, csk->state, csk->flags, 801 &csk->saddr.sin_addr.s_addr, csk->saddr.sin_port, 802 &csk->daddr.sin_addr.s_addr, csk->daddr.sin_port, 803 errno); 804 805 cxgbi_sock_set_state(csk, CTP_CONNECTING); 806 csk->err = errno; 807 cxgbi_sock_closed(csk); 808 } 809 EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open); 810 811 void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb) 812 { 813 struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk; 814 815 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 816 csk, (csk)->state, (csk)->flags, (csk)->tid); 817 cxgbi_sock_get(csk); 818 spin_lock_bh(&csk->lock); 819 if (csk->state == CTP_ACTIVE_OPEN) 820 cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH); 821 spin_unlock_bh(&csk->lock); 822 cxgbi_sock_put(csk); 823 __kfree_skb(skb); 824 } 825 EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure); 826 827 void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk) 828 { 829 cxgbi_sock_get(csk); 830 spin_lock_bh(&csk->lock); 831 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { 832 if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_RCVD)) 833 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD); 834 else { 835 cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_RCVD); 836 cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING); 837 if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) 838 pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n", 839 csk, csk->state, csk->flags, csk->tid); 840 cxgbi_sock_closed(csk); 841 } 842 } 843 spin_unlock_bh(&csk->lock); 844 cxgbi_sock_put(csk); 845 } 846 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl); 847 848 void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk) 849 { 850 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 851 csk, (csk)->state, (csk)->flags, (csk)->tid); 852 cxgbi_sock_get(csk); 853 spin_lock_bh(&csk->lock); 854 855 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) 856 goto done; 857 858 switch (csk->state) { 859 case CTP_ESTABLISHED: 860 cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE); 861 break; 862 case CTP_ACTIVE_CLOSE: 863 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2); 864 break; 865 case CTP_CLOSE_WAIT_1: 866 cxgbi_sock_closed(csk); 867 break; 868 case CTP_ABORTING: 869 break; 870 default: 871 pr_err("csk 
0x%p,%u,0x%lx,%u, bad state.\n", 872 csk, csk->state, csk->flags, csk->tid); 873 } 874 cxgbi_inform_iscsi_conn_closing(csk); 875 done: 876 spin_unlock_bh(&csk->lock); 877 cxgbi_sock_put(csk); 878 } 879 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close); 880 881 void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt) 882 { 883 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 884 csk, (csk)->state, (csk)->flags, (csk)->tid); 885 cxgbi_sock_get(csk); 886 spin_lock_bh(&csk->lock); 887 888 csk->snd_una = snd_nxt - 1; 889 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) 890 goto done; 891 892 switch (csk->state) { 893 case CTP_ACTIVE_CLOSE: 894 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1); 895 break; 896 case CTP_CLOSE_WAIT_1: 897 case CTP_CLOSE_WAIT_2: 898 cxgbi_sock_closed(csk); 899 break; 900 case CTP_ABORTING: 901 break; 902 default: 903 pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n", 904 csk, csk->state, csk->flags, csk->tid); 905 } 906 done: 907 spin_unlock_bh(&csk->lock); 908 cxgbi_sock_put(csk); 909 } 910 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl); 911 912 void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits, 913 unsigned int snd_una, int seq_chk) 914 { 915 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 916 "csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n", 917 csk, csk->state, csk->flags, csk->tid, credits, 918 csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk); 919 920 spin_lock_bh(&csk->lock); 921 922 csk->wr_cred += credits; 923 if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred) 924 csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred; 925 926 while (credits) { 927 struct sk_buff *p = cxgbi_sock_peek_wr(csk); 928 929 if (unlikely(!p)) { 930 pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n", 931 csk, csk->state, csk->flags, csk->tid, credits, 932 csk->wr_cred, csk->wr_una_cred); 933 break; 934 } 935 936 if (unlikely(credits < p->csum)) { 937 pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n", 938 csk, csk->state, csk->flags, csk->tid, 939 credits, csk->wr_cred, csk->wr_una_cred, 940 p->csum); 941 p->csum -= credits; 942 break; 943 } else { 944 cxgbi_sock_dequeue_wr(csk); 945 credits -= p->csum; 946 kfree_skb(p); 947 } 948 } 949 950 cxgbi_sock_check_wr_invariants(csk); 951 952 if (seq_chk) { 953 if (unlikely(before(snd_una, csk->snd_una))) { 954 pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.", 955 csk, csk->state, csk->flags, csk->tid, snd_una, 956 csk->snd_una); 957 goto done; 958 } 959 960 if (csk->snd_una != snd_una) { 961 csk->snd_una = snd_una; 962 dst_confirm(csk->dst); 963 } 964 } 965 966 if (skb_queue_len(&csk->write_queue)) { 967 if (csk->cdev->csk_push_tx_frames(csk, 0)) 968 cxgbi_conn_tx_open(csk); 969 } else 970 cxgbi_conn_tx_open(csk); 971 done: 972 spin_unlock_bh(&csk->lock); 973 } 974 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack); 975 976 static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk, 977 unsigned short mtu) 978 { 979 int i = 0; 980 981 while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu) 982 ++i; 983 984 return i; 985 } 986 987 unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu) 988 { 989 unsigned int idx; 990 struct dst_entry *dst = csk->dst; 991 992 csk->advmss = dst_metric_advmss(dst); 993 994 if (csk->advmss > pmtu - 40) 995 csk->advmss = pmtu - 40; 996 if (csk->advmss < csk->cdev->mtus[0] - 40) 997 csk->advmss = csk->cdev->mtus[0] - 40; 998 idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40); 999 1000 return idx; 1001 } 1002 
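/*
 * Note: the 40 bytes subtracted above account for the IPv4 + TCP headers;
 * the value returned by cxgbi_sock_select_mss() is an index into
 * cdev->mtus[], not the MSS itself.
 */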
EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss); 1003 1004 void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb) 1005 { 1006 cxgbi_skcb_tcp_seq(skb) = csk->write_seq; 1007 __skb_queue_tail(&csk->write_queue, skb); 1008 } 1009 EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail); 1010 1011 void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk) 1012 { 1013 struct sk_buff *skb; 1014 1015 while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL) 1016 kfree_skb(skb); 1017 } 1018 EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue); 1019 1020 void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk) 1021 { 1022 int pending = cxgbi_sock_count_pending_wrs(csk); 1023 1024 if (unlikely(csk->wr_cred + pending != csk->wr_max_cred)) 1025 pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n", 1026 csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred); 1027 } 1028 EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants); 1029 1030 static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb) 1031 { 1032 struct cxgbi_device *cdev = csk->cdev; 1033 struct sk_buff *next; 1034 int err, copied = 0; 1035 1036 spin_lock_bh(&csk->lock); 1037 1038 if (csk->state != CTP_ESTABLISHED) { 1039 log_debug(1 << CXGBI_DBG_PDU_TX, 1040 "csk 0x%p,%u,0x%lx,%u, EAGAIN.\n", 1041 csk, csk->state, csk->flags, csk->tid); 1042 err = -EAGAIN; 1043 goto out_err; 1044 } 1045 1046 if (csk->err) { 1047 log_debug(1 << CXGBI_DBG_PDU_TX, 1048 "csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n", 1049 csk, csk->state, csk->flags, csk->tid, csk->err); 1050 err = -EPIPE; 1051 goto out_err; 1052 } 1053 1054 if (csk->write_seq - csk->snd_una >= cdev->snd_win) { 1055 log_debug(1 << CXGBI_DBG_PDU_TX, 1056 "csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n", 1057 csk, csk->state, csk->flags, csk->tid, csk->write_seq, 1058 csk->snd_una, cdev->snd_win); 1059 err = -ENOBUFS; 1060 goto out_err; 1061 } 1062 1063 while (skb) { 1064 int frags = skb_shinfo(skb)->nr_frags + 1065 (skb->len != skb->data_len); 1066 1067 if (unlikely(skb_headroom(skb) < cdev->skb_tx_rsvd)) { 1068 pr_err("csk 0x%p, skb head %u < %u.\n", 1069 csk, skb_headroom(skb), cdev->skb_tx_rsvd); 1070 err = -EINVAL; 1071 goto out_err; 1072 } 1073 1074 if (frags >= SKB_WR_LIST_SIZE) { 1075 pr_err("csk 0x%p, frags %d, %u,%u >%u.\n", 1076 csk, skb_shinfo(skb)->nr_frags, skb->len, 1077 skb->data_len, (uint)(SKB_WR_LIST_SIZE)); 1078 err = -EINVAL; 1079 goto out_err; 1080 } 1081 1082 next = skb->next; 1083 skb->next = NULL; 1084 cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR); 1085 cxgbi_sock_skb_entail(csk, skb); 1086 copied += skb->len; 1087 csk->write_seq += skb->len + 1088 cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb)); 1089 skb = next; 1090 } 1091 done: 1092 if (likely(skb_queue_len(&csk->write_queue))) 1093 cdev->csk_push_tx_frames(csk, 1); 1094 spin_unlock_bh(&csk->lock); 1095 return copied; 1096 1097 out_err: 1098 if (copied == 0 && err == -EPIPE) 1099 copied = csk->err ? csk->err : -EPIPE; 1100 else 1101 copied = err; 1102 goto done; 1103 } 1104 1105 /* 1106 * Direct Data Placement - 1107 * Directly place the iSCSI Data-In or Data-Out PDU's payload into pre-posted 1108 * final destination host-memory buffers based on the Initiator Task Tag (ITT) 1109 * in Data-In or Target Task Tag (TTT) in Data-Out PDUs. 1110 * The host memory address is programmed into h/w in the format of pagepod 1111 * entries. 1112 * The location of the pagepod entry is encoded into ddp tag which is used as 1113 * the base for ITT/TTT. 
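 *
 * A reserved (ddp) tag is built in ddp_tag_reserve() below by merging the
 * sw tag (itt/age bits) with the starting pagepod index shifted left by
 * PPOD_IDX_SHIFT; ddp_tag_release() recovers that index as
 * (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask. A gather list of gl->nelem pages
 * consumes (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT consecutive
 * pagepod entries.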
1114 */ 1115 1116 static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4}; 1117 static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16}; 1118 static unsigned char page_idx = DDP_PGIDX_MAX; 1119 1120 static unsigned char sw_tag_idx_bits; 1121 static unsigned char sw_tag_age_bits; 1122 1123 /* 1124 * Direct-Data Placement page size adjustment 1125 */ 1126 static int ddp_adjust_page_table(void) 1127 { 1128 int i; 1129 unsigned int base_order, order; 1130 1131 if (PAGE_SIZE < (1UL << ddp_page_shift[0])) { 1132 pr_info("PAGE_SIZE 0x%lx too small, min 0x%lx\n", 1133 PAGE_SIZE, 1UL << ddp_page_shift[0]); 1134 return -EINVAL; 1135 } 1136 1137 base_order = get_order(1UL << ddp_page_shift[0]); 1138 order = get_order(1UL << PAGE_SHIFT); 1139 1140 for (i = 0; i < DDP_PGIDX_MAX; i++) { 1141 /* first is the kernel page size, then just doubling */ 1142 ddp_page_order[i] = order - base_order + i; 1143 ddp_page_shift[i] = PAGE_SHIFT + i; 1144 } 1145 return 0; 1146 } 1147 1148 static int ddp_find_page_index(unsigned long pgsz) 1149 { 1150 int i; 1151 1152 for (i = 0; i < DDP_PGIDX_MAX; i++) { 1153 if (pgsz == (1UL << ddp_page_shift[i])) 1154 return i; 1155 } 1156 pr_info("ddp page size %lu not supported.\n", pgsz); 1157 return DDP_PGIDX_MAX; 1158 } 1159 1160 static void ddp_setup_host_page_size(void) 1161 { 1162 if (page_idx == DDP_PGIDX_MAX) { 1163 page_idx = ddp_find_page_index(PAGE_SIZE); 1164 1165 if (page_idx == DDP_PGIDX_MAX) { 1166 pr_info("system PAGE %lu, update hw.\n", PAGE_SIZE); 1167 if (ddp_adjust_page_table() < 0) { 1168 pr_info("PAGE %lu, disable ddp.\n", PAGE_SIZE); 1169 return; 1170 } 1171 page_idx = ddp_find_page_index(PAGE_SIZE); 1172 } 1173 pr_info("system PAGE %lu, ddp idx %u.\n", PAGE_SIZE, page_idx); 1174 } 1175 } 1176 1177 void cxgbi_ddp_page_size_factor(int *pgsz_factor) 1178 { 1179 int i; 1180 1181 for (i = 0; i < DDP_PGIDX_MAX; i++) 1182 pgsz_factor[i] = ddp_page_order[i]; 1183 } 1184 EXPORT_SYMBOL_GPL(cxgbi_ddp_page_size_factor); 1185 1186 /* 1187 * DDP setup & teardown 1188 */ 1189 1190 void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *ppod, 1191 struct cxgbi_pagepod_hdr *hdr, 1192 struct cxgbi_gather_list *gl, unsigned int gidx) 1193 { 1194 int i; 1195 1196 memcpy(ppod, hdr, sizeof(*hdr)); 1197 for (i = 0; i < (PPOD_PAGES_MAX + 1); i++, gidx++) { 1198 ppod->addr[i] = gidx < gl->nelem ? 
1199 cpu_to_be64(gl->phys_addr[gidx]) : 0ULL; 1200 } 1201 } 1202 EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_set); 1203 1204 void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *ppod) 1205 { 1206 memset(ppod, 0, sizeof(*ppod)); 1207 } 1208 EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_clear); 1209 1210 static inline int ddp_find_unused_entries(struct cxgbi_ddp_info *ddp, 1211 unsigned int start, unsigned int max, 1212 unsigned int count, 1213 struct cxgbi_gather_list *gl) 1214 { 1215 unsigned int i, j, k; 1216 1217 /* not enough entries */ 1218 if ((max - start) < count) { 1219 log_debug(1 << CXGBI_DBG_DDP, 1220 "NOT enough entries %u+%u < %u.\n", start, count, max); 1221 return -EBUSY; 1222 } 1223 1224 max -= count; 1225 spin_lock(&ddp->map_lock); 1226 for (i = start; i < max;) { 1227 for (j = 0, k = i; j < count; j++, k++) { 1228 if (ddp->gl_map[k]) 1229 break; 1230 } 1231 if (j == count) { 1232 for (j = 0, k = i; j < count; j++, k++) 1233 ddp->gl_map[k] = gl; 1234 spin_unlock(&ddp->map_lock); 1235 return i; 1236 } 1237 i += j + 1; 1238 } 1239 spin_unlock(&ddp->map_lock); 1240 log_debug(1 << CXGBI_DBG_DDP, 1241 "NO suitable entries %u available.\n", count); 1242 return -EBUSY; 1243 } 1244 1245 static inline void ddp_unmark_entries(struct cxgbi_ddp_info *ddp, 1246 int start, int count) 1247 { 1248 spin_lock(&ddp->map_lock); 1249 memset(&ddp->gl_map[start], 0, 1250 count * sizeof(struct cxgbi_gather_list *)); 1251 spin_unlock(&ddp->map_lock); 1252 } 1253 1254 static inline void ddp_gl_unmap(struct pci_dev *pdev, 1255 struct cxgbi_gather_list *gl) 1256 { 1257 int i; 1258 1259 for (i = 0; i < gl->nelem; i++) 1260 dma_unmap_page(&pdev->dev, gl->phys_addr[i], PAGE_SIZE, 1261 PCI_DMA_FROMDEVICE); 1262 } 1263 1264 static inline int ddp_gl_map(struct pci_dev *pdev, 1265 struct cxgbi_gather_list *gl) 1266 { 1267 int i; 1268 1269 for (i = 0; i < gl->nelem; i++) { 1270 gl->phys_addr[i] = dma_map_page(&pdev->dev, gl->pages[i], 0, 1271 PAGE_SIZE, 1272 PCI_DMA_FROMDEVICE); 1273 if (unlikely(dma_mapping_error(&pdev->dev, gl->phys_addr[i]))) { 1274 log_debug(1 << CXGBI_DBG_DDP, 1275 "page %d 0x%p, 0x%p dma mapping err.\n", 1276 i, gl->pages[i], pdev); 1277 goto unmap; 1278 } 1279 } 1280 return i; 1281 unmap: 1282 if (i) { 1283 unsigned int nelem = gl->nelem; 1284 1285 gl->nelem = i; 1286 ddp_gl_unmap(pdev, gl); 1287 gl->nelem = nelem; 1288 } 1289 return -EINVAL; 1290 } 1291 1292 static void ddp_release_gl(struct cxgbi_gather_list *gl, 1293 struct pci_dev *pdev) 1294 { 1295 ddp_gl_unmap(pdev, gl); 1296 kfree(gl); 1297 } 1298 1299 static struct cxgbi_gather_list *ddp_make_gl(unsigned int xferlen, 1300 struct scatterlist *sgl, 1301 unsigned int sgcnt, 1302 struct pci_dev *pdev, 1303 gfp_t gfp) 1304 { 1305 struct cxgbi_gather_list *gl; 1306 struct scatterlist *sg = sgl; 1307 struct page *sgpage = sg_page(sg); 1308 unsigned int sglen = sg->length; 1309 unsigned int sgoffset = sg->offset; 1310 unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >> 1311 PAGE_SHIFT; 1312 int i = 1, j = 0; 1313 1314 if (xferlen < DDP_THRESHOLD) { 1315 log_debug(1 << CXGBI_DBG_DDP, 1316 "xfer %u < threshold %u, no ddp.\n", 1317 xferlen, DDP_THRESHOLD); 1318 return NULL; 1319 } 1320 1321 gl = kzalloc(sizeof(struct cxgbi_gather_list) + 1322 npages * (sizeof(dma_addr_t) + 1323 sizeof(struct page *)), gfp); 1324 if (!gl) { 1325 log_debug(1 << CXGBI_DBG_DDP, 1326 "xfer %u, %u pages, OOM.\n", xferlen, npages); 1327 return NULL; 1328 } 1329 1330 log_debug(1 << CXGBI_DBG_DDP, 1331 "xfer %u, sgl %u, gl max %u.\n", xferlen, sgcnt, npages); 1332 1333 gl->pages = 
(struct page **)&gl->phys_addr[npages]; 1334 gl->nelem = npages; 1335 gl->length = xferlen; 1336 gl->offset = sgoffset; 1337 gl->pages[0] = sgpage; 1338 1339 for (i = 1, sg = sg_next(sgl), j = 0; i < sgcnt; 1340 i++, sg = sg_next(sg)) { 1341 struct page *page = sg_page(sg); 1342 1343 if (sgpage == page && sg->offset == sgoffset + sglen) 1344 sglen += sg->length; 1345 else { 1346 /* make sure the sgl is fit for ddp: 1347 * each has the same page size, and 1348 * all of the middle pages are used completely 1349 */ 1350 if ((j && sgoffset) || ((i != sgcnt - 1) && 1351 ((sglen + sgoffset) & ~PAGE_MASK))) { 1352 log_debug(1 << CXGBI_DBG_DDP, 1353 "page %d/%u, %u + %u.\n", 1354 i, sgcnt, sgoffset, sglen); 1355 goto error_out; 1356 } 1357 1358 j++; 1359 if (j == gl->nelem || sg->offset) { 1360 log_debug(1 << CXGBI_DBG_DDP, 1361 "page %d/%u, offset %u.\n", 1362 j, gl->nelem, sg->offset); 1363 goto error_out; 1364 } 1365 gl->pages[j] = page; 1366 sglen = sg->length; 1367 sgoffset = sg->offset; 1368 sgpage = page; 1369 } 1370 } 1371 gl->nelem = ++j; 1372 1373 if (ddp_gl_map(pdev, gl) < 0) 1374 goto error_out; 1375 1376 return gl; 1377 1378 error_out: 1379 kfree(gl); 1380 return NULL; 1381 } 1382 1383 static void ddp_tag_release(struct cxgbi_hba *chba, u32 tag) 1384 { 1385 struct cxgbi_device *cdev = chba->cdev; 1386 struct cxgbi_ddp_info *ddp = cdev->ddp; 1387 u32 idx; 1388 1389 idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask; 1390 if (idx < ddp->nppods) { 1391 struct cxgbi_gather_list *gl = ddp->gl_map[idx]; 1392 unsigned int npods; 1393 1394 if (!gl || !gl->nelem) { 1395 pr_warn("tag 0x%x, idx %u, gl 0x%p, %u.\n", 1396 tag, idx, gl, gl ? gl->nelem : 0); 1397 return; 1398 } 1399 npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT; 1400 log_debug(1 << CXGBI_DBG_DDP, 1401 "tag 0x%x, release idx %u, npods %u.\n", 1402 tag, idx, npods); 1403 cdev->csk_ddp_clear(chba, tag, idx, npods); 1404 ddp_unmark_entries(ddp, idx, npods); 1405 ddp_release_gl(gl, ddp->pdev); 1406 } else 1407 pr_warn("tag 0x%x, idx %u > max %u.\n", tag, idx, ddp->nppods); 1408 } 1409 1410 static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid, 1411 u32 sw_tag, u32 *tagp, struct cxgbi_gather_list *gl, 1412 gfp_t gfp) 1413 { 1414 struct cxgbi_device *cdev = csk->cdev; 1415 struct cxgbi_ddp_info *ddp = cdev->ddp; 1416 struct cxgbi_tag_format *tformat = &cdev->tag_format; 1417 struct cxgbi_pagepod_hdr hdr; 1418 unsigned int npods; 1419 int idx = -1; 1420 int err = -ENOMEM; 1421 u32 tag; 1422 1423 npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT; 1424 if (ddp->idx_last == ddp->nppods) 1425 idx = ddp_find_unused_entries(ddp, 0, ddp->nppods, 1426 npods, gl); 1427 else { 1428 idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1, 1429 ddp->nppods, npods, 1430 gl); 1431 if (idx < 0 && ddp->idx_last >= npods) { 1432 idx = ddp_find_unused_entries(ddp, 0, 1433 min(ddp->idx_last + npods, ddp->nppods), 1434 npods, gl); 1435 } 1436 } 1437 if (idx < 0) { 1438 log_debug(1 << CXGBI_DBG_DDP, 1439 "xferlen %u, gl %u, npods %u NO DDP.\n", 1440 gl->length, gl->nelem, npods); 1441 return idx; 1442 } 1443 1444 tag = cxgbi_ddp_tag_base(tformat, sw_tag); 1445 tag |= idx << PPOD_IDX_SHIFT; 1446 1447 hdr.rsvd = 0; 1448 hdr.vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid)); 1449 hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask); 1450 hdr.max_offset = htonl(gl->length); 1451 hdr.page_offset = htonl(gl->offset); 1452 1453 err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl); 1454 if (err < 0) 1455 goto unmark_entries; 1456 1457 
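	/* pagepods are now programmed in h/w; remember this index so the
	 * next search starts just after the block we consumed.
	 */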
ddp->idx_last = idx; 1458 log_debug(1 << CXGBI_DBG_DDP, 1459 "xfer %u, gl %u,%u, tid 0x%x, tag 0x%x->0x%x(%u,%u).\n", 1460 gl->length, gl->nelem, gl->offset, tid, sw_tag, tag, idx, 1461 npods); 1462 *tagp = tag; 1463 return 0; 1464 1465 unmark_entries: 1466 ddp_unmark_entries(ddp, idx, npods); 1467 return err; 1468 } 1469 1470 int cxgbi_ddp_reserve(struct cxgbi_sock *csk, unsigned int *tagp, 1471 unsigned int sw_tag, unsigned int xferlen, 1472 struct scatterlist *sgl, unsigned int sgcnt, gfp_t gfp) 1473 { 1474 struct cxgbi_device *cdev = csk->cdev; 1475 struct cxgbi_tag_format *tformat = &cdev->tag_format; 1476 struct cxgbi_gather_list *gl; 1477 int err; 1478 1479 if (page_idx >= DDP_PGIDX_MAX || !cdev->ddp || 1480 xferlen < DDP_THRESHOLD) { 1481 log_debug(1 << CXGBI_DBG_DDP, 1482 "pgidx %u, xfer %u, NO ddp.\n", page_idx, xferlen); 1483 return -EINVAL; 1484 } 1485 1486 if (!cxgbi_sw_tag_usable(tformat, sw_tag)) { 1487 log_debug(1 << CXGBI_DBG_DDP, 1488 "sw_tag 0x%x NOT usable.\n", sw_tag); 1489 return -EINVAL; 1490 } 1491 1492 gl = ddp_make_gl(xferlen, sgl, sgcnt, cdev->pdev, gfp); 1493 if (!gl) 1494 return -ENOMEM; 1495 1496 err = ddp_tag_reserve(csk, csk->tid, sw_tag, tagp, gl, gfp); 1497 if (err < 0) 1498 ddp_release_gl(gl, cdev->pdev); 1499 1500 return err; 1501 } 1502 1503 static void ddp_destroy(struct kref *kref) 1504 { 1505 struct cxgbi_ddp_info *ddp = container_of(kref, 1506 struct cxgbi_ddp_info, 1507 refcnt); 1508 struct cxgbi_device *cdev = ddp->cdev; 1509 int i = 0; 1510 1511 pr_info("kref 0, destroy ddp 0x%p, cdev 0x%p.\n", ddp, cdev); 1512 1513 while (i < ddp->nppods) { 1514 struct cxgbi_gather_list *gl = ddp->gl_map[i]; 1515 1516 if (gl) { 1517 int npods = (gl->nelem + PPOD_PAGES_MAX - 1) 1518 >> PPOD_PAGES_SHIFT; 1519 pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods); 1520 kfree(gl); 1521 i += npods; 1522 } else 1523 i++; 1524 } 1525 cxgbi_free_big_mem(ddp); 1526 } 1527 1528 int cxgbi_ddp_cleanup(struct cxgbi_device *cdev) 1529 { 1530 struct cxgbi_ddp_info *ddp = cdev->ddp; 1531 1532 log_debug(1 << CXGBI_DBG_DDP, 1533 "cdev 0x%p, release ddp 0x%p.\n", cdev, ddp); 1534 cdev->ddp = NULL; 1535 if (ddp) 1536 return kref_put(&ddp->refcnt, ddp_destroy); 1537 return 0; 1538 } 1539 EXPORT_SYMBOL_GPL(cxgbi_ddp_cleanup); 1540 1541 int cxgbi_ddp_init(struct cxgbi_device *cdev, 1542 unsigned int llimit, unsigned int ulimit, 1543 unsigned int max_txsz, unsigned int max_rxsz) 1544 { 1545 struct cxgbi_ddp_info *ddp; 1546 unsigned int ppmax, bits; 1547 1548 ppmax = (ulimit - llimit + 1) >> PPOD_SIZE_SHIFT; 1549 bits = __ilog2_u32(ppmax) + 1; 1550 if (bits > PPOD_IDX_MAX_SIZE) 1551 bits = PPOD_IDX_MAX_SIZE; 1552 ppmax = (1 << (bits - 1)) - 1; 1553 1554 ddp = cxgbi_alloc_big_mem(sizeof(struct cxgbi_ddp_info) + 1555 ppmax * (sizeof(struct cxgbi_gather_list *) + 1556 sizeof(struct sk_buff *)), 1557 GFP_KERNEL); 1558 if (!ddp) { 1559 pr_warn("cdev 0x%p, ddp ppmax %u OOM.\n", cdev, ppmax); 1560 return -ENOMEM; 1561 } 1562 ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1); 1563 cdev->ddp = ddp; 1564 1565 spin_lock_init(&ddp->map_lock); 1566 kref_init(&ddp->refcnt); 1567 1568 ddp->cdev = cdev; 1569 ddp->pdev = cdev->pdev; 1570 ddp->llimit = llimit; 1571 ddp->ulimit = ulimit; 1572 ddp->max_txsz = min_t(unsigned int, max_txsz, ULP2_MAX_PKT_SIZE); 1573 ddp->max_rxsz = min_t(unsigned int, max_rxsz, ULP2_MAX_PKT_SIZE); 1574 ddp->nppods = ppmax; 1575 ddp->idx_last = ppmax; 1576 ddp->idx_bits = bits; 1577 ddp->idx_mask = (1 << bits) - 1; 1578 ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 
1; 1579 1580 cdev->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits; 1581 cdev->tag_format.rsvd_bits = ddp->idx_bits; 1582 cdev->tag_format.rsvd_shift = PPOD_IDX_SHIFT; 1583 cdev->tag_format.rsvd_mask = (1 << cdev->tag_format.rsvd_bits) - 1; 1584 1585 pr_info("%s tag format, sw %u, rsvd %u,%u, mask 0x%x.\n", 1586 cdev->ports[0]->name, cdev->tag_format.sw_bits, 1587 cdev->tag_format.rsvd_bits, cdev->tag_format.rsvd_shift, 1588 cdev->tag_format.rsvd_mask); 1589 1590 cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, 1591 ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN); 1592 cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, 1593 ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN); 1594 1595 log_debug(1 << CXGBI_DBG_DDP, 1596 "%s max payload size: %u/%u, %u/%u.\n", 1597 cdev->ports[0]->name, cdev->tx_max_size, ddp->max_txsz, 1598 cdev->rx_max_size, ddp->max_rxsz); 1599 return 0; 1600 } 1601 EXPORT_SYMBOL_GPL(cxgbi_ddp_init); 1602 1603 /* 1604 * APIs interacting with open-iscsi libraries 1605 */ 1606 1607 static unsigned char padding[4]; 1608 1609 static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt) 1610 { 1611 struct scsi_cmnd *sc = task->sc; 1612 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; 1613 struct cxgbi_conn *cconn = tcp_conn->dd_data; 1614 struct cxgbi_hba *chba = cconn->chba; 1615 struct cxgbi_tag_format *tformat = &chba->cdev->tag_format; 1616 u32 tag = ntohl((__force u32)hdr_itt); 1617 1618 log_debug(1 << CXGBI_DBG_DDP, 1619 "cdev 0x%p, release tag 0x%x.\n", chba->cdev, tag); 1620 if (sc && 1621 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) && 1622 cxgbi_is_ddp_tag(tformat, tag)) 1623 ddp_tag_release(chba, tag); 1624 } 1625 1626 static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt) 1627 { 1628 struct scsi_cmnd *sc = task->sc; 1629 struct iscsi_conn *conn = task->conn; 1630 struct iscsi_session *sess = conn->session; 1631 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1632 struct cxgbi_conn *cconn = tcp_conn->dd_data; 1633 struct cxgbi_hba *chba = cconn->chba; 1634 struct cxgbi_tag_format *tformat = &chba->cdev->tag_format; 1635 u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt; 1636 u32 tag = 0; 1637 int err = -EINVAL; 1638 1639 if (sc && 1640 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE)) { 1641 err = cxgbi_ddp_reserve(cconn->cep->csk, &tag, sw_tag, 1642 scsi_in(sc)->length, 1643 scsi_in(sc)->table.sgl, 1644 scsi_in(sc)->table.nents, 1645 GFP_ATOMIC); 1646 if (err < 0) 1647 log_debug(1 << CXGBI_DBG_DDP, 1648 "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n", 1649 cconn->cep->csk, task, scsi_in(sc)->length, 1650 scsi_in(sc)->table.nents); 1651 } 1652 1653 if (err < 0) 1654 tag = cxgbi_set_non_ddp_tag(tformat, sw_tag); 1655 /* the itt need to sent in big-endian order */ 1656 *hdr_itt = (__force itt_t)htonl(tag); 1657 1658 log_debug(1 << CXGBI_DBG_DDP, 1659 "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n", 1660 chba->cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt); 1661 return 0; 1662 } 1663 1664 void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age) 1665 { 1666 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1667 struct cxgbi_conn *cconn = tcp_conn->dd_data; 1668 struct cxgbi_device *cdev = cconn->chba->cdev; 1669 u32 tag = ntohl((__force u32) itt); 1670 u32 sw_bits; 1671 1672 sw_bits = cxgbi_tag_nonrsvd_bits(&cdev->tag_format, tag); 1673 if (idx) 1674 *idx = sw_bits & ((1 << cconn->task_idx_bits) - 1); 1675 if (age) 1676 *age = (sw_bits >> 
cconn->task_idx_bits) & ISCSI_AGE_MASK; 1677 1678 log_debug(1 << CXGBI_DBG_DDP, 1679 "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n", 1680 cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF, 1681 age ? *age : 0xFF); 1682 } 1683 EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt); 1684 1685 void cxgbi_conn_tx_open(struct cxgbi_sock *csk) 1686 { 1687 struct iscsi_conn *conn = csk->user_data; 1688 1689 if (conn) { 1690 log_debug(1 << CXGBI_DBG_SOCK, 1691 "csk 0x%p, cid %d.\n", csk, conn->id); 1692 iscsi_conn_queue_work(conn); 1693 } 1694 } 1695 EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open); 1696 1697 /* 1698 * pdu receive, interact with libiscsi_tcp 1699 */ 1700 static inline int read_pdu_skb(struct iscsi_conn *conn, 1701 struct sk_buff *skb, 1702 unsigned int offset, 1703 int offloaded) 1704 { 1705 int status = 0; 1706 int bytes_read; 1707 1708 bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status); 1709 switch (status) { 1710 case ISCSI_TCP_CONN_ERR: 1711 pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n", 1712 skb, offset, offloaded); 1713 return -EIO; 1714 case ISCSI_TCP_SUSPENDED: 1715 log_debug(1 << CXGBI_DBG_PDU_RX, 1716 "skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n", 1717 skb, offset, offloaded, bytes_read); 1718 /* no transfer - just have caller flush queue */ 1719 return bytes_read; 1720 case ISCSI_TCP_SKB_DONE: 1721 pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n", 1722 skb, offset, offloaded); 1723 /* 1724 * pdus should always fit in the skb and we should get 1725 * segment done notifcation. 1726 */ 1727 iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb."); 1728 return -EFAULT; 1729 case ISCSI_TCP_SEGMENT_DONE: 1730 log_debug(1 << CXGBI_DBG_PDU_RX, 1731 "skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n", 1732 skb, offset, offloaded, bytes_read); 1733 return bytes_read; 1734 default: 1735 pr_info("skb 0x%p, off %u, %d, invalid status %d.\n", 1736 skb, offset, offloaded, status); 1737 return -EINVAL; 1738 } 1739 } 1740 1741 static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb) 1742 { 1743 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1744 1745 log_debug(1 << CXGBI_DBG_PDU_RX, 1746 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", 1747 conn, skb, skb->len, cxgbi_skcb_flags(skb)); 1748 1749 if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) { 1750 pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb); 1751 iscsi_conn_failure(conn, ISCSI_ERR_PROTO); 1752 return -EIO; 1753 } 1754 1755 if (conn->hdrdgst_en && 1756 cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) { 1757 pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb); 1758 iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST); 1759 return -EIO; 1760 } 1761 1762 return read_pdu_skb(conn, skb, 0, 0); 1763 } 1764 1765 static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb, 1766 struct sk_buff *skb, unsigned int offset) 1767 { 1768 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1769 bool offloaded = 0; 1770 int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK; 1771 1772 log_debug(1 << CXGBI_DBG_PDU_RX, 1773 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", 1774 conn, skb, skb->len, cxgbi_skcb_flags(skb)); 1775 1776 if (conn->datadgst_en && 1777 cxgbi_skcb_test_flag(lskb, SKCBF_RX_DCRC_ERR)) { 1778 pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n", 1779 conn, lskb, cxgbi_skcb_flags(lskb)); 1780 iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST); 1781 return -EIO; 1782 } 1783 1784 if (iscsi_tcp_recv_segment_is_hdr(tcp_conn)) 1785 return 0; 1786 1787 /* coalesced, add header digest length */ 1788 if (lskb == skb && conn->hdrdgst_en) 1789 offset 
+= ISCSI_DIGEST_SIZE; 1790 1791 if (cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA_DDPD)) 1792 offloaded = 1; 1793 1794 if (opcode == ISCSI_OP_SCSI_DATA_IN) 1795 log_debug(1 << CXGBI_DBG_PDU_RX, 1796 "skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n", 1797 skb, opcode, ntohl(tcp_conn->in.hdr->itt), 1798 tcp_conn->in.datalen, offloaded ? "is" : "not"); 1799 1800 return read_pdu_skb(conn, skb, offset, offloaded); 1801 } 1802 1803 static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied) 1804 { 1805 struct cxgbi_device *cdev = csk->cdev; 1806 int must_send; 1807 u32 credits; 1808 1809 log_debug(1 << CXGBI_DBG_PDU_RX, 1810 "csk 0x%p,%u,0x%lu,%u, seq %u, wup %u, thre %u, %u.\n", 1811 csk, csk->state, csk->flags, csk->tid, csk->copied_seq, 1812 csk->rcv_wup, cdev->rx_credit_thres, 1813 cdev->rcv_win); 1814 1815 if (csk->state != CTP_ESTABLISHED) 1816 return; 1817 1818 credits = csk->copied_seq - csk->rcv_wup; 1819 if (unlikely(!credits)) 1820 return; 1821 if (unlikely(cdev->rx_credit_thres == 0)) 1822 return; 1823 1824 must_send = credits + 16384 >= cdev->rcv_win; 1825 if (must_send || credits >= cdev->rx_credit_thres) 1826 csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits); 1827 } 1828 1829 void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk) 1830 { 1831 struct cxgbi_device *cdev = csk->cdev; 1832 struct iscsi_conn *conn = csk->user_data; 1833 struct sk_buff *skb; 1834 unsigned int read = 0; 1835 int err = 0; 1836 1837 log_debug(1 << CXGBI_DBG_PDU_RX, 1838 "csk 0x%p, conn 0x%p.\n", csk, conn); 1839 1840 if (unlikely(!conn || conn->suspend_rx)) { 1841 log_debug(1 << CXGBI_DBG_PDU_RX, 1842 "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n", 1843 csk, conn, conn ? conn->id : 0xFF, 1844 conn ? conn->suspend_rx : 0xFF); 1845 return; 1846 } 1847 1848 while (!err) { 1849 skb = skb_peek(&csk->receive_queue); 1850 if (!skb || 1851 !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) { 1852 if (skb) 1853 log_debug(1 << CXGBI_DBG_PDU_RX, 1854 "skb 0x%p, NOT ready 0x%lx.\n", 1855 skb, cxgbi_skcb_flags(skb)); 1856 break; 1857 } 1858 __skb_unlink(skb, &csk->receive_queue); 1859 1860 read += cxgbi_skcb_rx_pdulen(skb); 1861 log_debug(1 << CXGBI_DBG_PDU_RX, 1862 "csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n", 1863 csk, skb, skb->len, cxgbi_skcb_flags(skb), 1864 cxgbi_skcb_rx_pdulen(skb)); 1865 1866 if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) { 1867 err = skb_read_pdu_bhs(conn, skb); 1868 if (err < 0) { 1869 pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, " 1870 "f 0x%lx, plen %u.\n", 1871 csk, skb, skb->len, 1872 cxgbi_skcb_flags(skb), 1873 cxgbi_skcb_rx_pdulen(skb)); 1874 goto skb_done; 1875 } 1876 err = skb_read_pdu_data(conn, skb, skb, 1877 err + cdev->skb_rx_extra); 1878 if (err < 0) 1879 pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, " 1880 "f 0x%lx, plen %u.\n", 1881 csk, skb, skb->len, 1882 cxgbi_skcb_flags(skb), 1883 cxgbi_skcb_rx_pdulen(skb)); 1884 } else { 1885 err = skb_read_pdu_bhs(conn, skb); 1886 if (err < 0) { 1887 pr_err("bhs, csk 0x%p, skb 0x%p,%u, " 1888 "f 0x%lx, plen %u.\n", 1889 csk, skb, skb->len, 1890 cxgbi_skcb_flags(skb), 1891 cxgbi_skcb_rx_pdulen(skb)); 1892 goto skb_done; 1893 } 1894 1895 if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) { 1896 struct sk_buff *dskb; 1897 1898 dskb = skb_peek(&csk->receive_queue); 1899 if (!dskb) { 1900 pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx," 1901 " plen %u, NO data.\n", 1902 csk, skb, skb->len, 1903 cxgbi_skcb_flags(skb), 1904 cxgbi_skcb_rx_pdulen(skb)); 1905 err = -EIO; 1906 goto skb_done; 1907 } 1908 __skb_unlink(dskb, &csk->receive_queue); 1909 1910 
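				/* the BHS came in 'skb'; its payload arrived
				 * in a separate skb (dskb), so read the data
				 * starting at offset 0.
				 */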
err = skb_read_pdu_data(conn, skb, dskb, 0); 1911 if (err < 0) 1912 pr_err("data, csk 0x%p, skb 0x%p,%u, " 1913 "f 0x%lx, plen %u, dskb 0x%p," 1914 "%u.\n", 1915 csk, skb, skb->len, 1916 cxgbi_skcb_flags(skb), 1917 cxgbi_skcb_rx_pdulen(skb), 1918 dskb, dskb->len); 1919 __kfree_skb(dskb); 1920 } else 1921 err = skb_read_pdu_data(conn, skb, skb, 0); 1922 } 1923 skb_done: 1924 __kfree_skb(skb); 1925 1926 if (err < 0) 1927 break; 1928 } 1929 1930 log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read); 1931 if (read) { 1932 csk->copied_seq += read; 1933 csk_return_rx_credits(csk, read); 1934 conn->rxdata_octets += read; 1935 } 1936 1937 if (err < 0) { 1938 pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n", 1939 csk, conn, err, read); 1940 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1941 } 1942 } 1943 EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready); 1944 1945 static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt, 1946 unsigned int offset, unsigned int *off, 1947 struct scatterlist **sgp) 1948 { 1949 int i; 1950 struct scatterlist *sg; 1951 1952 for_each_sg(sgl, sg, sgcnt, i) { 1953 if (offset < sg->length) { 1954 *off = offset; 1955 *sgp = sg; 1956 return 0; 1957 } 1958 offset -= sg->length; 1959 } 1960 return -EFAULT; 1961 } 1962 1963 static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset, 1964 unsigned int dlen, struct page_frag *frags, 1965 int frag_max) 1966 { 1967 unsigned int datalen = dlen; 1968 unsigned int sglen = sg->length - sgoffset; 1969 struct page *page = sg_page(sg); 1970 int i; 1971 1972 i = 0; 1973 do { 1974 unsigned int copy; 1975 1976 if (!sglen) { 1977 sg = sg_next(sg); 1978 if (!sg) { 1979 pr_warn("sg %d NULL, len %u/%u.\n", 1980 i, datalen, dlen); 1981 return -EINVAL; 1982 } 1983 sgoffset = 0; 1984 sglen = sg->length; 1985 page = sg_page(sg); 1986 1987 } 1988 copy = min(datalen, sglen); 1989 if (i && page == frags[i - 1].page && 1990 sgoffset + sg->offset == 1991 frags[i - 1].offset + frags[i - 1].size) { 1992 frags[i - 1].size += copy; 1993 } else { 1994 if (i >= frag_max) { 1995 pr_warn("too many pages %u, dlen %u.\n", 1996 frag_max, dlen); 1997 return -EINVAL; 1998 } 1999 2000 frags[i].page = page; 2001 frags[i].offset = sg->offset + sgoffset; 2002 frags[i].size = copy; 2003 i++; 2004 } 2005 datalen -= copy; 2006 sgoffset += copy; 2007 sglen -= copy; 2008 } while (datalen); 2009 2010 return i; 2011 } 2012 2013 int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode) 2014 { 2015 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; 2016 struct cxgbi_conn *cconn = tcp_conn->dd_data; 2017 struct cxgbi_device *cdev = cconn->chba->cdev; 2018 struct iscsi_conn *conn = task->conn; 2019 struct iscsi_tcp_task *tcp_task = task->dd_data; 2020 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); 2021 struct scsi_cmnd *sc = task->sc; 2022 int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX; 2023 2024 tcp_task->dd_data = tdata; 2025 task->hdr = NULL; 2026 2027 if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) && 2028 (opcode == ISCSI_OP_SCSI_DATA_OUT || 2029 (opcode == ISCSI_OP_SCSI_CMD && 2030 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE)))) 2031 /* data could goes into skb head */ 2032 headroom += min_t(unsigned int, 2033 SKB_MAX_HEAD(cdev->skb_tx_rsvd), 2034 conn->max_xmit_dlength); 2035 2036 tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC); 2037 if (!tdata->skb) { 2038 struct cxgbi_sock *csk = cconn->cep->csk; 2039 struct net_device *ndev = cdev->ports[csk->port_id]; 2040 
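		/* charge the failed PDU allocation to the egress port's
		 * drop counter before returning -ENOMEM.
		 */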
ndev->stats.tx_dropped++; 2041 return -ENOMEM; 2042 } 2043 2044 skb_reserve(tdata->skb, cdev->skb_tx_rsvd); 2045 task->hdr = (struct iscsi_hdr *)tdata->skb->data; 2046 task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */ 2047 2048 /* data_out uses scsi_cmd's itt */ 2049 if (opcode != ISCSI_OP_SCSI_DATA_OUT) 2050 task_reserve_itt(task, &task->hdr->itt); 2051 2052 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 2053 "task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n", 2054 task, opcode, tdata->skb, cdev->skb_tx_rsvd, headroom, 2055 conn->max_xmit_dlength, ntohl(task->hdr->itt)); 2056 2057 return 0; 2058 } 2059 EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu); 2060 2061 static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc) 2062 { 2063 if (hcrc || dcrc) { 2064 u8 submode = 0; 2065 2066 if (hcrc) 2067 submode |= 1; 2068 if (dcrc) 2069 submode |= 2; 2070 cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode; 2071 } else 2072 cxgbi_skcb_ulp_mode(skb) = 0; 2073 } 2074 2075 int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset, 2076 unsigned int count) 2077 { 2078 struct iscsi_conn *conn = task->conn; 2079 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); 2080 struct sk_buff *skb = tdata->skb; 2081 unsigned int datalen = count; 2082 int i, padlen = iscsi_padding(count); 2083 struct page *pg; 2084 2085 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 2086 "task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n", 2087 task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK, 2088 ntohl(task->cmdsn), ntohl(task->hdr->itt), offset, count); 2089 2090 skb_put(skb, task->hdr_len); 2091 tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0); 2092 if (!count) 2093 return 0; 2094 2095 if (task->sc) { 2096 struct scsi_data_buffer *sdb = scsi_out(task->sc); 2097 struct scatterlist *sg = NULL; 2098 int err; 2099 2100 tdata->offset = offset; 2101 tdata->count = count; 2102 err = sgl_seek_offset( 2103 sdb->table.sgl, sdb->table.nents, 2104 tdata->offset, &tdata->sgoffset, &sg); 2105 if (err < 0) { 2106 pr_warn("tpdu, sgl %u, bad offset %u/%u.\n", 2107 sdb->table.nents, tdata->offset, sdb->length); 2108 return err; 2109 } 2110 err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count, 2111 tdata->frags, MAX_PDU_FRAGS); 2112 if (err < 0) { 2113 pr_warn("tpdu, sgl %u, bad offset %u + %u.\n", 2114 sdb->table.nents, tdata->offset, tdata->count); 2115 return err; 2116 } 2117 tdata->nr_frags = err; 2118 2119 if (tdata->nr_frags > MAX_SKB_FRAGS || 2120 (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) { 2121 char *dst = skb->data + task->hdr_len; 2122 struct page_frag *frag = tdata->frags; 2123 2124 /* data fits in the skb's headroom */ 2125 for (i = 0; i < tdata->nr_frags; i++, frag++) { 2126 char *src = kmap_atomic(frag->page); 2127 2128 memcpy(dst, src+frag->offset, frag->size); 2129 dst += frag->size; 2130 kunmap_atomic(src); 2131 } 2132 if (padlen) { 2133 memset(dst, 0, padlen); 2134 padlen = 0; 2135 } 2136 skb_put(skb, count + padlen); 2137 } else { 2138 /* data fit into frag_list */ 2139 for (i = 0; i < tdata->nr_frags; i++) { 2140 __skb_fill_page_desc(skb, i, 2141 tdata->frags[i].page, 2142 tdata->frags[i].offset, 2143 tdata->frags[i].size); 2144 skb_frag_ref(skb, i); 2145 } 2146 skb_shinfo(skb)->nr_frags = tdata->nr_frags; 2147 skb->len += count; 2148 skb->data_len += count; 2149 skb->truesize += count; 2150 } 2151 2152 } else { 2153 pg = virt_to_page(task->data); 2154 2155 get_page(pg); 2156 skb_fill_page_desc(skb, 0, pg, 
offset_in_page(task->data), 2157 count); 2158 skb->len += count; 2159 skb->data_len += count; 2160 skb->truesize += count; 2161 } 2162 2163 if (padlen) { 2164 i = skb_shinfo(skb)->nr_frags; 2165 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 2166 virt_to_page(padding), offset_in_page(padding), 2167 padlen); 2168 2169 skb->data_len += padlen; 2170 skb->truesize += padlen; 2171 skb->len += padlen; 2172 } 2173 2174 return 0; 2175 } 2176 EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu); 2177 2178 int cxgbi_conn_xmit_pdu(struct iscsi_task *task) 2179 { 2180 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; 2181 struct cxgbi_conn *cconn = tcp_conn->dd_data; 2182 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); 2183 struct sk_buff *skb = tdata->skb; 2184 unsigned int datalen; 2185 int err; 2186 2187 if (!skb) { 2188 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 2189 "task 0x%p, skb NULL.\n", task); 2190 return 0; 2191 } 2192 2193 datalen = skb->data_len; 2194 tdata->skb = NULL; 2195 err = cxgbi_sock_send_pdus(cconn->cep->csk, skb); 2196 if (err > 0) { 2197 int pdulen = err; 2198 2199 log_debug(1 << CXGBI_DBG_PDU_TX, 2200 "task 0x%p,0x%p, skb 0x%p, len %u/%u, rv %d.\n", 2201 task, task->sc, skb, skb->len, skb->data_len, err); 2202 2203 if (task->conn->hdrdgst_en) 2204 pdulen += ISCSI_DIGEST_SIZE; 2205 2206 if (datalen && task->conn->datadgst_en) 2207 pdulen += ISCSI_DIGEST_SIZE; 2208 2209 task->conn->txdata_octets += pdulen; 2210 return 0; 2211 } 2212 2213 if (err == -EAGAIN || err == -ENOBUFS) { 2214 log_debug(1 << CXGBI_DBG_PDU_TX, 2215 "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n", 2216 task, skb, skb->len, skb->data_len, err); 2217 /* reset skb to send when we are called again */ 2218 tdata->skb = skb; 2219 return err; 2220 } 2221 2222 kfree_skb(skb); 2223 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 2224 "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n", 2225 task->itt, skb, skb->len, skb->data_len, err); 2226 iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); 2227 iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED); 2228 return err; 2229 } 2230 EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu); 2231 2232 void cxgbi_cleanup_task(struct iscsi_task *task) 2233 { 2234 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); 2235 2236 log_debug(1 << CXGBI_DBG_ISCSI, 2237 "task 0x%p, skb 0x%p, itt 0x%x.\n", 2238 task, tdata->skb, task->hdr_itt); 2239 2240 /* never reached the xmit task callout */ 2241 if (tdata->skb) 2242 __kfree_skb(tdata->skb); 2243 memset(tdata, 0, sizeof(*tdata)); 2244 2245 task_release_itt(task, task->hdr_itt); 2246 iscsi_tcp_cleanup_task(task); 2247 } 2248 EXPORT_SYMBOL_GPL(cxgbi_cleanup_task); 2249 2250 void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn, 2251 struct iscsi_stats *stats) 2252 { 2253 struct iscsi_conn *conn = cls_conn->dd_data; 2254 2255 stats->txdata_octets = conn->txdata_octets; 2256 stats->rxdata_octets = conn->rxdata_octets; 2257 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; 2258 stats->dataout_pdus = conn->dataout_pdus_cnt; 2259 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; 2260 stats->datain_pdus = conn->datain_pdus_cnt; 2261 stats->r2t_pdus = conn->r2t_pdus_cnt; 2262 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; 2263 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; 2264 stats->digest_err = 0; 2265 stats->timeout_err = 0; 2266 stats->custom_length = 1; 2267 strcpy(stats->custom[0].desc, "eh_abort_cnt"); 2268 stats->custom[0].value = conn->eh_abort_cnt; 2269 } 2270 EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats); 2271 2272 
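/*
 * Clamp MaxXmitDataSegmentLength to what a single tx skb can carry: the
 * larger of 512 * MAX_SKB_FRAGS (typically 512 * 17 = 8704 with 4KB
 * pages) and the usable skb headroom, further capped by the adapter's
 * tx_max_size and then rounded by cxgbi_align_pdu_size().
 */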
static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn) 2273 { 2274 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 2275 struct cxgbi_conn *cconn = tcp_conn->dd_data; 2276 struct cxgbi_device *cdev = cconn->chba->cdev; 2277 unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd); 2278 unsigned int max_def = 512 * MAX_SKB_FRAGS; 2279 unsigned int max = max(max_def, headroom); 2280 2281 max = min(cconn->chba->cdev->tx_max_size, max); 2282 if (conn->max_xmit_dlength) 2283 conn->max_xmit_dlength = min(conn->max_xmit_dlength, max); 2284 else 2285 conn->max_xmit_dlength = max; 2286 cxgbi_align_pdu_size(conn->max_xmit_dlength); 2287 2288 return 0; 2289 } 2290 2291 static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn) 2292 { 2293 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 2294 struct cxgbi_conn *cconn = tcp_conn->dd_data; 2295 unsigned int max = cconn->chba->cdev->rx_max_size; 2296 2297 cxgbi_align_pdu_size(max); 2298 2299 if (conn->max_recv_dlength) { 2300 if (conn->max_recv_dlength > max) { 2301 pr_err("MaxRecvDataSegmentLength %u > %u.\n", 2302 conn->max_recv_dlength, max); 2303 return -EINVAL; 2304 } 2305 conn->max_recv_dlength = min(conn->max_recv_dlength, max); 2306 cxgbi_align_pdu_size(conn->max_recv_dlength); 2307 } else 2308 conn->max_recv_dlength = max; 2309 2310 return 0; 2311 } 2312 2313 int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn, 2314 enum iscsi_param param, char *buf, int buflen) 2315 { 2316 struct iscsi_conn *conn = cls_conn->dd_data; 2317 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 2318 struct cxgbi_conn *cconn = tcp_conn->dd_data; 2319 struct cxgbi_sock *csk = cconn->cep->csk; 2320 int err; 2321 2322 log_debug(1 << CXGBI_DBG_ISCSI, 2323 "cls_conn 0x%p, param %d, buf(%d) %s.\n", 2324 cls_conn, param, buflen, buf); 2325 2326 switch (param) { 2327 case ISCSI_PARAM_HDRDGST_EN: 2328 err = iscsi_set_param(cls_conn, param, buf, buflen); 2329 if (!err && conn->hdrdgst_en) 2330 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, 2331 conn->hdrdgst_en, 2332 conn->datadgst_en, 0); 2333 break; 2334 case ISCSI_PARAM_DATADGST_EN: 2335 err = iscsi_set_param(cls_conn, param, buf, buflen); 2336 if (!err && conn->datadgst_en) 2337 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, 2338 conn->hdrdgst_en, 2339 conn->datadgst_en, 0); 2340 break; 2341 case ISCSI_PARAM_MAX_R2T: 2342 return iscsi_tcp_set_max_r2t(conn, buf); 2343 case ISCSI_PARAM_MAX_RECV_DLENGTH: 2344 err = iscsi_set_param(cls_conn, param, buf, buflen); 2345 if (!err) 2346 err = cxgbi_conn_max_recv_dlength(conn); 2347 break; 2348 case ISCSI_PARAM_MAX_XMIT_DLENGTH: 2349 err = iscsi_set_param(cls_conn, param, buf, buflen); 2350 if (!err) 2351 err = cxgbi_conn_max_xmit_dlength(conn); 2352 break; 2353 default: 2354 return iscsi_set_param(cls_conn, param, buf, buflen); 2355 } 2356 return err; 2357 } 2358 EXPORT_SYMBOL_GPL(cxgbi_set_conn_param); 2359 2360 static inline int csk_print_port(struct cxgbi_sock *csk, char *buf) 2361 { 2362 int len; 2363 2364 cxgbi_sock_get(csk); 2365 len = sprintf(buf, "%hu\n", ntohs(csk->daddr.sin_port)); 2366 cxgbi_sock_put(csk); 2367 2368 return len; 2369 } 2370 2371 static inline int csk_print_ip(struct cxgbi_sock *csk, char *buf) 2372 { 2373 int len; 2374 2375 cxgbi_sock_get(csk); 2376 if (csk->csk_family == AF_INET) 2377 len = sprintf(buf, "%pI4", 2378 &csk->daddr.sin_addr.s_addr); 2379 else 2380 len = sprintf(buf, "%pI6", 2381 &csk->daddr6.sin6_addr); 2382 2383 cxgbi_sock_put(csk); 2384 2385 return len; 2386 } 2387 2388 int cxgbi_get_ep_param(struct 
iscsi_endpoint *ep, enum iscsi_param param, 2389 char *buf) 2390 { 2391 struct cxgbi_endpoint *cep = ep->dd_data; 2392 struct cxgbi_sock *csk; 2393 int len; 2394 2395 log_debug(1 << CXGBI_DBG_ISCSI, 2396 "cls_conn 0x%p, param %d.\n", ep, param); 2397 2398 switch (param) { 2399 case ISCSI_PARAM_CONN_PORT: 2400 case ISCSI_PARAM_CONN_ADDRESS: 2401 if (!cep) 2402 return -ENOTCONN; 2403 2404 csk = cep->csk; 2405 if (!csk) 2406 return -ENOTCONN; 2407 2408 return iscsi_conn_get_addr_param((struct sockaddr_storage *) 2409 &csk->daddr, param, buf); 2410 default: 2411 return -ENOSYS; 2412 } 2413 return len; 2414 } 2415 EXPORT_SYMBOL_GPL(cxgbi_get_ep_param); 2416 2417 struct iscsi_cls_conn * 2418 cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid) 2419 { 2420 struct iscsi_cls_conn *cls_conn; 2421 struct iscsi_conn *conn; 2422 struct iscsi_tcp_conn *tcp_conn; 2423 struct cxgbi_conn *cconn; 2424 2425 cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid); 2426 if (!cls_conn) 2427 return NULL; 2428 2429 conn = cls_conn->dd_data; 2430 tcp_conn = conn->dd_data; 2431 cconn = tcp_conn->dd_data; 2432 cconn->iconn = conn; 2433 2434 log_debug(1 << CXGBI_DBG_ISCSI, 2435 "cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n", 2436 cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn); 2437 2438 return cls_conn; 2439 } 2440 EXPORT_SYMBOL_GPL(cxgbi_create_conn); 2441 2442 int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, 2443 struct iscsi_cls_conn *cls_conn, 2444 u64 transport_eph, int is_leading) 2445 { 2446 struct iscsi_conn *conn = cls_conn->dd_data; 2447 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 2448 struct cxgbi_conn *cconn = tcp_conn->dd_data; 2449 struct iscsi_endpoint *ep; 2450 struct cxgbi_endpoint *cep; 2451 struct cxgbi_sock *csk; 2452 int err; 2453 2454 ep = iscsi_lookup_endpoint(transport_eph); 2455 if (!ep) 2456 return -EINVAL; 2457 2458 /* setup ddp pagesize */ 2459 cep = ep->dd_data; 2460 csk = cep->csk; 2461 err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, page_idx, 0); 2462 if (err < 0) 2463 return err; 2464 2465 err = iscsi_conn_bind(cls_session, cls_conn, is_leading); 2466 if (err) 2467 return -EINVAL; 2468 2469 /* calculate the tag idx bits needed for this conn based on cmds_max */ 2470 cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1; 2471 2472 write_lock_bh(&csk->callback_lock); 2473 csk->user_data = conn; 2474 cconn->chba = cep->chba; 2475 cconn->cep = cep; 2476 cep->cconn = cconn; 2477 write_unlock_bh(&csk->callback_lock); 2478 2479 cxgbi_conn_max_xmit_dlength(conn); 2480 cxgbi_conn_max_recv_dlength(conn); 2481 2482 log_debug(1 << CXGBI_DBG_ISCSI, 2483 "cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n", 2484 cls_session, cls_conn, ep, cconn, csk); 2485 /* init recv engine */ 2486 iscsi_tcp_hdr_recv_prep(tcp_conn); 2487 2488 return 0; 2489 } 2490 EXPORT_SYMBOL_GPL(cxgbi_bind_conn); 2491 2492 struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep, 2493 u16 cmds_max, u16 qdepth, 2494 u32 initial_cmdsn) 2495 { 2496 struct cxgbi_endpoint *cep; 2497 struct cxgbi_hba *chba; 2498 struct Scsi_Host *shost; 2499 struct iscsi_cls_session *cls_session; 2500 struct iscsi_session *session; 2501 2502 if (!ep) { 2503 pr_err("missing endpoint.\n"); 2504 return NULL; 2505 } 2506 2507 cep = ep->dd_data; 2508 chba = cep->chba; 2509 shost = chba->shost; 2510 2511 BUG_ON(chba != iscsi_host_priv(shost)); 2512 2513 cls_session = iscsi_session_setup(chba->cdev->itp, shost, 2514 cmds_max, 0, 2515 sizeof(struct iscsi_tcp_task) + 2516 
sizeof(struct cxgbi_task_data), 2517 initial_cmdsn, ISCSI_MAX_TARGET); 2518 if (!cls_session) 2519 return NULL; 2520 2521 session = cls_session->dd_data; 2522 if (iscsi_tcp_r2tpool_alloc(session)) 2523 goto remove_session; 2524 2525 log_debug(1 << CXGBI_DBG_ISCSI, 2526 "ep 0x%p, cls sess 0x%p.\n", ep, cls_session); 2527 return cls_session; 2528 2529 remove_session: 2530 iscsi_session_teardown(cls_session); 2531 return NULL; 2532 } 2533 EXPORT_SYMBOL_GPL(cxgbi_create_session); 2534 2535 void cxgbi_destroy_session(struct iscsi_cls_session *cls_session) 2536 { 2537 log_debug(1 << CXGBI_DBG_ISCSI, 2538 "cls sess 0x%p.\n", cls_session); 2539 2540 iscsi_tcp_r2tpool_free(cls_session->dd_data); 2541 iscsi_session_teardown(cls_session); 2542 } 2543 EXPORT_SYMBOL_GPL(cxgbi_destroy_session); 2544 2545 int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, 2546 char *buf, int buflen) 2547 { 2548 struct cxgbi_hba *chba = iscsi_host_priv(shost); 2549 2550 if (!chba->ndev) { 2551 shost_printk(KERN_ERR, shost, "Could not get host param. " 2552 "netdev for host not set.\n"); 2553 return -ENODEV; 2554 } 2555 2556 log_debug(1 << CXGBI_DBG_ISCSI, 2557 "shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n", 2558 shost, chba, chba->ndev->name, param, buflen, buf); 2559 2560 switch (param) { 2561 case ISCSI_HOST_PARAM_IPADDRESS: 2562 { 2563 __be32 addr = in_aton(buf); 2564 log_debug(1 << CXGBI_DBG_ISCSI, 2565 "hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr); 2566 cxgbi_set_iscsi_ipv4(chba, addr); 2567 return 0; 2568 } 2569 case ISCSI_HOST_PARAM_HWADDRESS: 2570 case ISCSI_HOST_PARAM_NETDEV_NAME: 2571 return 0; 2572 default: 2573 return iscsi_host_set_param(shost, param, buf, buflen); 2574 } 2575 } 2576 EXPORT_SYMBOL_GPL(cxgbi_set_host_param); 2577 2578 int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, 2579 char *buf) 2580 { 2581 struct cxgbi_hba *chba = iscsi_host_priv(shost); 2582 int len = 0; 2583 2584 if (!chba->ndev) { 2585 shost_printk(KERN_ERR, shost, "Could not get host param. 
" 2586 "netdev for host not set.\n"); 2587 return -ENODEV; 2588 } 2589 2590 log_debug(1 << CXGBI_DBG_ISCSI, 2591 "shost 0x%p, hba 0x%p,%s, param %d.\n", 2592 shost, chba, chba->ndev->name, param); 2593 2594 switch (param) { 2595 case ISCSI_HOST_PARAM_HWADDRESS: 2596 len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6); 2597 break; 2598 case ISCSI_HOST_PARAM_NETDEV_NAME: 2599 len = sprintf(buf, "%s\n", chba->ndev->name); 2600 break; 2601 case ISCSI_HOST_PARAM_IPADDRESS: 2602 { 2603 __be32 addr; 2604 2605 addr = cxgbi_get_iscsi_ipv4(chba); 2606 len = sprintf(buf, "%pI4", &addr); 2607 log_debug(1 << CXGBI_DBG_ISCSI, 2608 "hba %s, ipv4 %pI4.\n", chba->ndev->name, &addr); 2609 break; 2610 } 2611 default: 2612 return iscsi_host_get_param(shost, param, buf); 2613 } 2614 2615 return len; 2616 } 2617 EXPORT_SYMBOL_GPL(cxgbi_get_host_param); 2618 2619 struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost, 2620 struct sockaddr *dst_addr, 2621 int non_blocking) 2622 { 2623 struct iscsi_endpoint *ep; 2624 struct cxgbi_endpoint *cep; 2625 struct cxgbi_hba *hba = NULL; 2626 struct cxgbi_sock *csk; 2627 int err = -EINVAL; 2628 2629 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, 2630 "shost 0x%p, non_blocking %d, dst_addr 0x%p.\n", 2631 shost, non_blocking, dst_addr); 2632 2633 if (shost) { 2634 hba = iscsi_host_priv(shost); 2635 if (!hba) { 2636 pr_info("shost 0x%p, priv NULL.\n", shost); 2637 goto err_out; 2638 } 2639 } 2640 2641 if (dst_addr->sa_family == AF_INET) { 2642 csk = cxgbi_check_route(dst_addr); 2643 #if IS_ENABLED(CONFIG_IPV6) 2644 } else if (dst_addr->sa_family == AF_INET6) { 2645 csk = cxgbi_check_route6(dst_addr); 2646 #endif 2647 } else { 2648 pr_info("address family 0x%x NOT supported.\n", 2649 dst_addr->sa_family); 2650 err = -EAFNOSUPPORT; 2651 return (struct iscsi_endpoint *)ERR_PTR(err); 2652 } 2653 2654 if (IS_ERR(csk)) 2655 return (struct iscsi_endpoint *)csk; 2656 cxgbi_sock_get(csk); 2657 2658 if (!hba) 2659 hba = csk->cdev->hbas[csk->port_id]; 2660 else if (hba != csk->cdev->hbas[csk->port_id]) { 2661 pr_info("Could not connect through requested host %u" 2662 "hba 0x%p != 0x%p (%u).\n", 2663 shost->host_no, hba, 2664 csk->cdev->hbas[csk->port_id], csk->port_id); 2665 err = -ENOSPC; 2666 goto release_conn; 2667 } 2668 2669 err = sock_get_port(csk); 2670 if (err) 2671 goto release_conn; 2672 2673 cxgbi_sock_set_state(csk, CTP_CONNECTING); 2674 err = csk->cdev->csk_init_act_open(csk); 2675 if (err) 2676 goto release_conn; 2677 2678 if (cxgbi_sock_is_closing(csk)) { 2679 err = -ENOSPC; 2680 pr_info("csk 0x%p is closing.\n", csk); 2681 goto release_conn; 2682 } 2683 2684 ep = iscsi_create_endpoint(sizeof(*cep)); 2685 if (!ep) { 2686 err = -ENOMEM; 2687 pr_info("iscsi alloc ep, OOM.\n"); 2688 goto release_conn; 2689 } 2690 2691 cep = ep->dd_data; 2692 cep->csk = csk; 2693 cep->chba = hba; 2694 2695 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, 2696 "ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n", 2697 ep, cep, csk, hba, hba->ndev->name); 2698 return ep; 2699 2700 release_conn: 2701 cxgbi_sock_put(csk); 2702 cxgbi_sock_closed(csk); 2703 err_out: 2704 return ERR_PTR(err); 2705 } 2706 EXPORT_SYMBOL_GPL(cxgbi_ep_connect); 2707 2708 int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) 2709 { 2710 struct cxgbi_endpoint *cep = ep->dd_data; 2711 struct cxgbi_sock *csk = cep->csk; 2712 2713 if (!cxgbi_sock_is_established(csk)) 2714 return 0; 2715 return 1; 2716 } 2717 EXPORT_SYMBOL_GPL(cxgbi_ep_poll); 2718 2719 void cxgbi_ep_disconnect(struct iscsi_endpoint *ep) 
2720 { 2721 struct cxgbi_endpoint *cep = ep->dd_data; 2722 struct cxgbi_conn *cconn = cep->cconn; 2723 struct cxgbi_sock *csk = cep->csk; 2724 2725 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, 2726 "ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n", 2727 ep, cep, cconn, csk, csk->state, csk->flags); 2728 2729 if (cconn && cconn->iconn) { 2730 iscsi_suspend_tx(cconn->iconn); 2731 write_lock_bh(&csk->callback_lock); 2732 cep->csk->user_data = NULL; 2733 cconn->cep = NULL; 2734 write_unlock_bh(&csk->callback_lock); 2735 } 2736 iscsi_destroy_endpoint(ep); 2737 2738 if (likely(csk->state >= CTP_ESTABLISHED)) 2739 need_active_close(csk); 2740 else 2741 cxgbi_sock_closed(csk); 2742 2743 cxgbi_sock_put(csk); 2744 } 2745 EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect); 2746 2747 int cxgbi_iscsi_init(struct iscsi_transport *itp, 2748 struct scsi_transport_template **stt) 2749 { 2750 *stt = iscsi_register_transport(itp); 2751 if (*stt == NULL) { 2752 pr_err("unable to register %s transport 0x%p.\n", 2753 itp->name, itp); 2754 return -ENODEV; 2755 } 2756 log_debug(1 << CXGBI_DBG_ISCSI, 2757 "%s, registered iscsi transport 0x%p.\n", 2758 itp->name, stt); 2759 return 0; 2760 } 2761 EXPORT_SYMBOL_GPL(cxgbi_iscsi_init); 2762 2763 void cxgbi_iscsi_cleanup(struct iscsi_transport *itp, 2764 struct scsi_transport_template **stt) 2765 { 2766 if (*stt) { 2767 log_debug(1 << CXGBI_DBG_ISCSI, 2768 "de-register transport 0x%p, %s, stt 0x%p.\n", 2769 itp, itp->name, *stt); 2770 *stt = NULL; 2771 iscsi_unregister_transport(itp); 2772 } 2773 } 2774 EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup); 2775 2776 umode_t cxgbi_attr_is_visible(int param_type, int param) 2777 { 2778 switch (param_type) { 2779 case ISCSI_HOST_PARAM: 2780 switch (param) { 2781 case ISCSI_HOST_PARAM_NETDEV_NAME: 2782 case ISCSI_HOST_PARAM_HWADDRESS: 2783 case ISCSI_HOST_PARAM_IPADDRESS: 2784 case ISCSI_HOST_PARAM_INITIATOR_NAME: 2785 return S_IRUGO; 2786 default: 2787 return 0; 2788 } 2789 case ISCSI_PARAM: 2790 switch (param) { 2791 case ISCSI_PARAM_MAX_RECV_DLENGTH: 2792 case ISCSI_PARAM_MAX_XMIT_DLENGTH: 2793 case ISCSI_PARAM_HDRDGST_EN: 2794 case ISCSI_PARAM_DATADGST_EN: 2795 case ISCSI_PARAM_CONN_ADDRESS: 2796 case ISCSI_PARAM_CONN_PORT: 2797 case ISCSI_PARAM_EXP_STATSN: 2798 case ISCSI_PARAM_PERSISTENT_ADDRESS: 2799 case ISCSI_PARAM_PERSISTENT_PORT: 2800 case ISCSI_PARAM_PING_TMO: 2801 case ISCSI_PARAM_RECV_TMO: 2802 case ISCSI_PARAM_INITIAL_R2T_EN: 2803 case ISCSI_PARAM_MAX_R2T: 2804 case ISCSI_PARAM_IMM_DATA_EN: 2805 case ISCSI_PARAM_FIRST_BURST: 2806 case ISCSI_PARAM_MAX_BURST: 2807 case ISCSI_PARAM_PDU_INORDER_EN: 2808 case ISCSI_PARAM_DATASEQ_INORDER_EN: 2809 case ISCSI_PARAM_ERL: 2810 case ISCSI_PARAM_TARGET_NAME: 2811 case ISCSI_PARAM_TPGT: 2812 case ISCSI_PARAM_USERNAME: 2813 case ISCSI_PARAM_PASSWORD: 2814 case ISCSI_PARAM_USERNAME_IN: 2815 case ISCSI_PARAM_PASSWORD_IN: 2816 case ISCSI_PARAM_FAST_ABORT: 2817 case ISCSI_PARAM_ABORT_TMO: 2818 case ISCSI_PARAM_LU_RESET_TMO: 2819 case ISCSI_PARAM_TGT_RESET_TMO: 2820 case ISCSI_PARAM_IFACE_NAME: 2821 case ISCSI_PARAM_INITIATOR_NAME: 2822 return S_IRUGO; 2823 default: 2824 return 0; 2825 } 2826 } 2827 2828 return 0; 2829 } 2830 EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible); 2831 2832 static int __init libcxgbi_init_module(void) 2833 { 2834 sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1; 2835 sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1; 2836 2837 pr_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n", 2838 ISCSI_ITT_MASK, sw_tag_idx_bits, 2839 ISCSI_AGE_MASK, sw_tag_age_bits); 2840 2841 
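	/*
	 * The itt/age widths computed above define the sw tag layout used
	 * for all cxgbi devices; next, set up DDP for the host page size.
	 */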
ddp_setup_host_page_size(); 2842 return 0; 2843 } 2844 2845 static void __exit libcxgbi_exit_module(void) 2846 { 2847 cxgbi_device_unregister_all(0xFF); 2848 return; 2849 } 2850 2851 module_init(libcxgbi_init_module); 2852 module_exit(libcxgbi_exit_module); 2853
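/*
 * Illustrative sketch only (LLD glue, not code this library provides):
 * a lower-level driver pairs cxgbi_iscsi_init()/cxgbi_iscsi_cleanup()
 * around its lifetime and points its struct iscsi_transport at the
 * helpers exported above, e.g. (field subset; LLD names hypothetical):
 *
 *	static struct scsi_transport_template *my_stt;
 *	static struct iscsi_transport my_transport = {
 *		.create_session		= cxgbi_create_session,
 *		.destroy_session	= cxgbi_destroy_session,
 *		.create_conn		= cxgbi_create_conn,
 *		.bind_conn		= cxgbi_bind_conn,
 *		.get_stats		= cxgbi_get_conn_stats,
 *		.attr_is_visible	= cxgbi_attr_is_visible,
 *		.ep_connect		= cxgbi_ep_connect,
 *		.ep_poll		= cxgbi_ep_poll,
 *		.ep_disconnect		= cxgbi_ep_disconnect,
 *		.alloc_pdu		= cxgbi_conn_alloc_pdu,
 *		.init_pdu		= cxgbi_conn_init_pdu,
 *		.xmit_pdu		= cxgbi_conn_xmit_pdu,
 *		.cleanup_task		= cxgbi_cleanup_task,
 *	};
 *	...
 *	err = cxgbi_iscsi_init(&my_transport, &my_stt);
 *	...
 *	cxgbi_iscsi_cleanup(&my_transport, &my_stt);
 */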