/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
#include "cnic.h"
#include "cnic_defs.h"

#define DRV_MODULE_NAME		"cnic"

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));
}

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

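/*
 * UIO open/release handlers for the userspace side of the driver.
 * udev->uio_dev doubles as the "in use" marker: -1 means no userspace
 * consumer, otherwise it holds the UIO minor of the opener.  An open
 * requires CAP_NET_ADMIN and an up device, and resets the L2 rings so
 * the new consumer starts from a clean ring state.
 */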
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

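/*
 * Notify the underlying bnx2/bnx2x driver, through the same drv_ctl()
 * callback used by the register/context accessors above, that a ULP has
 * been registered or unregistered on this device.  On FCoE registration
 * the device's fcoe_capabilities are passed along in the request.
 */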
static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct fcoe_capabilities *fcoe_cap =
		&info.data.register_data.fcoe_features;

	if (reg) {
		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
			memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
	} else {
		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
	}

	info.data.ulp_type = ulp_type;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	if (!cp->ctx_tbl)
		return -EINVAL;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}

static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return rc;
}

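/*
 * iSCSI path resolution: cnic_send_nlmsg() above pushes an
 * ISCSI_KEVENT_PATH_REQ (or IF_DOWN) event to the iSCSI ULP, retrying a
 * PATH_REQ up to three times.  The userspace reply arrives through
 * cnic_iscsi_nl_msg_recv() below as ISCSI_UEVENT_PATH_UPDATE, which copies
 * the resolved VLAN, MAC and source IP into the cnic_sock and then either
 * starts the PG offload (cnic_cm_set_pg) or, if no valid MAC was found,
 * signals connect completion back to the ULP and clears SK_F_CONNECT_START.
 */
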
static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			csk->vlan_id = path_resp->vlan_id;

			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}

static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}

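/*
 * Illustrative sketch only (hypothetical names, not taken from an in-tree
 * ULP driver): an upper-layer protocol module registers a cnic_ulp_ops
 * table once at module init, using only the callbacks referenced in this
 * file, roughly:
 *
 *	static struct cnic_ulp_ops my_iscsi_ulp_ops = {
 *		.cnic_init	 = my_ulp_init,
 *		.cnic_start	 = my_ulp_start,
 *		.indicate_kcqes	 = my_ulp_kcqe_handler,
 *	};
 *
 *	rc = cnic_register_driver(CNIC_ULP_ISCSI, &my_iscsi_ulp_ops);
 *
 * cnic_register_driver() above then calls ->cnic_init() for every device
 * already on cnic_dev_list; binding a ULP context to an individual device
 * happens later through cnic_register_device().
 */
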
int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		pr_warn("%s: Failed waiting for ref count to go to zero\n",
			__func__);
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	cnic_ulp_ctl(dev, ulp_type, true);

	return 0;

}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
	else if (ulp_type == CNIC_ULP_FCOE)
		dev->fcoe_cap = NULL;

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	cnic_ulp_ctl(dev, ulp_type, false);

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
			    u32 next)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = next;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >=
id_tbl->max) 668 return ret; 669 670 spin_lock(&id_tbl->lock); 671 if (!test_bit(id, id_tbl->table)) { 672 set_bit(id, id_tbl->table); 673 ret = 0; 674 } 675 spin_unlock(&id_tbl->lock); 676 return ret; 677 } 678 679 /* Returns -1 if not successful */ 680 static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl) 681 { 682 u32 id; 683 684 spin_lock(&id_tbl->lock); 685 id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next); 686 if (id >= id_tbl->max) { 687 id = -1; 688 if (id_tbl->next != 0) { 689 id = find_first_zero_bit(id_tbl->table, id_tbl->next); 690 if (id >= id_tbl->next) 691 id = -1; 692 } 693 } 694 695 if (id < id_tbl->max) { 696 set_bit(id, id_tbl->table); 697 id_tbl->next = (id + 1) & (id_tbl->max - 1); 698 id += id_tbl->start; 699 } 700 701 spin_unlock(&id_tbl->lock); 702 703 return id; 704 } 705 706 static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id) 707 { 708 if (id == -1) 709 return; 710 711 id -= id_tbl->start; 712 if (id >= id_tbl->max) 713 return; 714 715 clear_bit(id, id_tbl->table); 716 } 717 718 static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma) 719 { 720 int i; 721 722 if (!dma->pg_arr) 723 return; 724 725 for (i = 0; i < dma->num_pages; i++) { 726 if (dma->pg_arr[i]) { 727 dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE, 728 dma->pg_arr[i], dma->pg_map_arr[i]); 729 dma->pg_arr[i] = NULL; 730 } 731 } 732 if (dma->pgtbl) { 733 dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size, 734 dma->pgtbl, dma->pgtbl_map); 735 dma->pgtbl = NULL; 736 } 737 kfree(dma->pg_arr); 738 dma->pg_arr = NULL; 739 dma->num_pages = 0; 740 } 741 742 static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma) 743 { 744 int i; 745 __le32 *page_table = (__le32 *) dma->pgtbl; 746 747 for (i = 0; i < dma->num_pages; i++) { 748 /* Each entry needs to be in big endian format. */ 749 *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32); 750 page_table++; 751 *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff); 752 page_table++; 753 } 754 } 755 756 static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma) 757 { 758 int i; 759 __le32 *page_table = (__le32 *) dma->pgtbl; 760 761 for (i = 0; i < dma->num_pages; i++) { 762 /* Each entry needs to be in little endian format. 
*/ 763 *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff); 764 page_table++; 765 *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32); 766 page_table++; 767 } 768 } 769 770 static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, 771 int pages, int use_pg_tbl) 772 { 773 int i, size; 774 struct cnic_local *cp = dev->cnic_priv; 775 776 size = pages * (sizeof(void *) + sizeof(dma_addr_t)); 777 dma->pg_arr = kzalloc(size, GFP_ATOMIC); 778 if (dma->pg_arr == NULL) 779 return -ENOMEM; 780 781 dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages); 782 dma->num_pages = pages; 783 784 for (i = 0; i < pages; i++) { 785 dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev, 786 BCM_PAGE_SIZE, 787 &dma->pg_map_arr[i], 788 GFP_ATOMIC); 789 if (dma->pg_arr[i] == NULL) 790 goto error; 791 } 792 if (!use_pg_tbl) 793 return 0; 794 795 dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) & 796 ~(BCM_PAGE_SIZE - 1); 797 dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size, 798 &dma->pgtbl_map, GFP_ATOMIC); 799 if (dma->pgtbl == NULL) 800 goto error; 801 802 cp->setup_pgtbl(dev, dma); 803 804 return 0; 805 806 error: 807 cnic_free_dma(dev, dma); 808 return -ENOMEM; 809 } 810 811 static void cnic_free_context(struct cnic_dev *dev) 812 { 813 struct cnic_local *cp = dev->cnic_priv; 814 int i; 815 816 for (i = 0; i < cp->ctx_blks; i++) { 817 if (cp->ctx_arr[i].ctx) { 818 dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size, 819 cp->ctx_arr[i].ctx, 820 cp->ctx_arr[i].mapping); 821 cp->ctx_arr[i].ctx = NULL; 822 } 823 } 824 } 825 826 static void __cnic_free_uio(struct cnic_uio_dev *udev) 827 { 828 uio_unregister_device(&udev->cnic_uinfo); 829 830 if (udev->l2_buf) { 831 dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size, 832 udev->l2_buf, udev->l2_buf_map); 833 udev->l2_buf = NULL; 834 } 835 836 if (udev->l2_ring) { 837 dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size, 838 udev->l2_ring, udev->l2_ring_map); 839 udev->l2_ring = NULL; 840 } 841 842 pci_dev_put(udev->pdev); 843 kfree(udev); 844 } 845 846 static void cnic_free_uio(struct cnic_uio_dev *udev) 847 { 848 if (!udev) 849 return; 850 851 write_lock(&cnic_dev_lock); 852 list_del_init(&udev->list); 853 write_unlock(&cnic_dev_lock); 854 __cnic_free_uio(udev); 855 } 856 857 static void cnic_free_resc(struct cnic_dev *dev) 858 { 859 struct cnic_local *cp = dev->cnic_priv; 860 struct cnic_uio_dev *udev = cp->udev; 861 862 if (udev) { 863 udev->dev = NULL; 864 cp->udev = NULL; 865 } 866 867 cnic_free_context(dev); 868 kfree(cp->ctx_arr); 869 cp->ctx_arr = NULL; 870 cp->ctx_blks = 0; 871 872 cnic_free_dma(dev, &cp->gbl_buf_info); 873 cnic_free_dma(dev, &cp->kwq_info); 874 cnic_free_dma(dev, &cp->kwq_16_data_info); 875 cnic_free_dma(dev, &cp->kcq2.dma); 876 cnic_free_dma(dev, &cp->kcq1.dma); 877 kfree(cp->iscsi_tbl); 878 cp->iscsi_tbl = NULL; 879 kfree(cp->ctx_tbl); 880 cp->ctx_tbl = NULL; 881 882 cnic_free_id_tbl(&cp->fcoe_cid_tbl); 883 cnic_free_id_tbl(&cp->cid_tbl); 884 } 885 886 static int cnic_alloc_context(struct cnic_dev *dev) 887 { 888 struct cnic_local *cp = dev->cnic_priv; 889 890 if (CHIP_NUM(cp) == CHIP_NUM_5709) { 891 int i, k, arr_size; 892 893 cp->ctx_blk_size = BCM_PAGE_SIZE; 894 cp->cids_per_blk = BCM_PAGE_SIZE / 128; 895 arr_size = BNX2_MAX_CID / cp->cids_per_blk * 896 sizeof(struct cnic_ctx); 897 cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL); 898 if (cp->ctx_arr == NULL) 899 return -ENOMEM; 900 901 k = 0; 902 for (i = 0; i < 2; i++) { 903 u32 j, reg, off, lo, hi; 904 905 if (i == 0) 906 off = 
BNX2_PG_CTX_MAP; 907 else 908 off = BNX2_ISCSI_CTX_MAP; 909 910 reg = cnic_reg_rd_ind(dev, off); 911 lo = reg >> 16; 912 hi = reg & 0xffff; 913 for (j = lo; j < hi; j += cp->cids_per_blk, k++) 914 cp->ctx_arr[k].cid = j; 915 } 916 917 cp->ctx_blks = k; 918 if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) { 919 cp->ctx_blks = 0; 920 return -ENOMEM; 921 } 922 923 for (i = 0; i < cp->ctx_blks; i++) { 924 cp->ctx_arr[i].ctx = 925 dma_alloc_coherent(&dev->pcidev->dev, 926 BCM_PAGE_SIZE, 927 &cp->ctx_arr[i].mapping, 928 GFP_KERNEL); 929 if (cp->ctx_arr[i].ctx == NULL) 930 return -ENOMEM; 931 } 932 } 933 return 0; 934 } 935 936 static u16 cnic_bnx2_next_idx(u16 idx) 937 { 938 return idx + 1; 939 } 940 941 static u16 cnic_bnx2_hw_idx(u16 idx) 942 { 943 return idx; 944 } 945 946 static u16 cnic_bnx2x_next_idx(u16 idx) 947 { 948 idx++; 949 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) 950 idx++; 951 952 return idx; 953 } 954 955 static u16 cnic_bnx2x_hw_idx(u16 idx) 956 { 957 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) 958 idx++; 959 return idx; 960 } 961 962 static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info, 963 bool use_pg_tbl) 964 { 965 int err, i, use_page_tbl = 0; 966 struct kcqe **kcq; 967 968 if (use_pg_tbl) 969 use_page_tbl = 1; 970 971 err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl); 972 if (err) 973 return err; 974 975 kcq = (struct kcqe **) info->dma.pg_arr; 976 info->kcq = kcq; 977 978 info->next_idx = cnic_bnx2_next_idx; 979 info->hw_idx = cnic_bnx2_hw_idx; 980 if (use_pg_tbl) 981 return 0; 982 983 info->next_idx = cnic_bnx2x_next_idx; 984 info->hw_idx = cnic_bnx2x_hw_idx; 985 986 for (i = 0; i < KCQ_PAGE_CNT; i++) { 987 struct bnx2x_bd_chain_next *next = 988 (struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT]; 989 int j = i + 1; 990 991 if (j >= KCQ_PAGE_CNT) 992 j = 0; 993 next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32; 994 next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff; 995 } 996 return 0; 997 } 998 999 static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages) 1000 { 1001 struct cnic_local *cp = dev->cnic_priv; 1002 struct cnic_uio_dev *udev; 1003 1004 read_lock(&cnic_dev_lock); 1005 list_for_each_entry(udev, &cnic_udev_list, list) { 1006 if (udev->pdev == dev->pcidev) { 1007 udev->dev = dev; 1008 cp->udev = udev; 1009 read_unlock(&cnic_dev_lock); 1010 return 0; 1011 } 1012 } 1013 read_unlock(&cnic_dev_lock); 1014 1015 udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC); 1016 if (!udev) 1017 return -ENOMEM; 1018 1019 udev->uio_dev = -1; 1020 1021 udev->dev = dev; 1022 udev->pdev = dev->pcidev; 1023 udev->l2_ring_size = pages * BCM_PAGE_SIZE; 1024 udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size, 1025 &udev->l2_ring_map, 1026 GFP_KERNEL | __GFP_COMP); 1027 if (!udev->l2_ring) 1028 goto err_udev; 1029 1030 udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; 1031 udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size); 1032 udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size, 1033 &udev->l2_buf_map, 1034 GFP_KERNEL | __GFP_COMP); 1035 if (!udev->l2_buf) 1036 goto err_dma; 1037 1038 write_lock(&cnic_dev_lock); 1039 list_add(&udev->list, &cnic_udev_list); 1040 write_unlock(&cnic_dev_lock); 1041 1042 pci_dev_get(udev->pdev); 1043 1044 cp->udev = udev; 1045 1046 return 0; 1047 err_dma: 1048 dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size, 1049 udev->l2_ring, udev->l2_ring_map); 1050 err_udev: 1051 kfree(udev); 1052 return -ENOMEM; 1053 } 1054 1055 static int 
cnic_init_uio(struct cnic_dev *dev) 1056 { 1057 struct cnic_local *cp = dev->cnic_priv; 1058 struct cnic_uio_dev *udev = cp->udev; 1059 struct uio_info *uinfo; 1060 int ret = 0; 1061 1062 if (!udev) 1063 return -ENOMEM; 1064 1065 uinfo = &udev->cnic_uinfo; 1066 1067 uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0); 1068 uinfo->mem[0].internal_addr = dev->regview; 1069 uinfo->mem[0].memtype = UIO_MEM_PHYS; 1070 1071 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 1072 uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID + 1073 TX_MAX_TSS_RINGS + 1); 1074 uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen & 1075 PAGE_MASK; 1076 if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) 1077 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; 1078 else 1079 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE; 1080 1081 uinfo->name = "bnx2_cnic"; 1082 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 1083 uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0); 1084 1085 uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk & 1086 PAGE_MASK; 1087 uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk); 1088 1089 uinfo->name = "bnx2x_cnic"; 1090 } 1091 1092 uinfo->mem[1].memtype = UIO_MEM_LOGICAL; 1093 1094 uinfo->mem[2].addr = (unsigned long) udev->l2_ring; 1095 uinfo->mem[2].size = udev->l2_ring_size; 1096 uinfo->mem[2].memtype = UIO_MEM_LOGICAL; 1097 1098 uinfo->mem[3].addr = (unsigned long) udev->l2_buf; 1099 uinfo->mem[3].size = udev->l2_buf_size; 1100 uinfo->mem[3].memtype = UIO_MEM_LOGICAL; 1101 1102 uinfo->version = CNIC_MODULE_VERSION; 1103 uinfo->irq = UIO_IRQ_CUSTOM; 1104 1105 uinfo->open = cnic_uio_open; 1106 uinfo->release = cnic_uio_close; 1107 1108 if (udev->uio_dev == -1) { 1109 if (!uinfo->priv) { 1110 uinfo->priv = udev; 1111 1112 ret = uio_register_device(&udev->pdev->dev, uinfo); 1113 } 1114 } else { 1115 cnic_init_rings(dev); 1116 } 1117 1118 return ret; 1119 } 1120 1121 static int cnic_alloc_bnx2_resc(struct cnic_dev *dev) 1122 { 1123 struct cnic_local *cp = dev->cnic_priv; 1124 int ret; 1125 1126 ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1); 1127 if (ret) 1128 goto error; 1129 cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr; 1130 1131 ret = cnic_alloc_kcq(dev, &cp->kcq1, true); 1132 if (ret) 1133 goto error; 1134 1135 ret = cnic_alloc_context(dev); 1136 if (ret) 1137 goto error; 1138 1139 ret = cnic_alloc_uio_rings(dev, 2); 1140 if (ret) 1141 goto error; 1142 1143 ret = cnic_init_uio(dev); 1144 if (ret) 1145 goto error; 1146 1147 return 0; 1148 1149 error: 1150 cnic_free_resc(dev); 1151 return ret; 1152 } 1153 1154 static int cnic_alloc_bnx2x_context(struct cnic_dev *dev) 1155 { 1156 struct cnic_local *cp = dev->cnic_priv; 1157 int ctx_blk_size = cp->ethdev->ctx_blk_size; 1158 int total_mem, blks, i; 1159 1160 total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space; 1161 blks = total_mem / ctx_blk_size; 1162 if (total_mem % ctx_blk_size) 1163 blks++; 1164 1165 if (blks > cp->ethdev->ctx_tbl_len) 1166 return -ENOMEM; 1167 1168 cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL); 1169 if (cp->ctx_arr == NULL) 1170 return -ENOMEM; 1171 1172 cp->ctx_blks = blks; 1173 cp->ctx_blk_size = ctx_blk_size; 1174 if (!BNX2X_CHIP_IS_57710(cp->chip_id)) 1175 cp->ctx_align = 0; 1176 else 1177 cp->ctx_align = ctx_blk_size; 1178 1179 cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE; 1180 1181 for (i = 0; i < blks; i++) { 1182 cp->ctx_arr[i].ctx = 1183 dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size, 1184 &cp->ctx_arr[i].mapping, 1185 
GFP_KERNEL); 1186 if (cp->ctx_arr[i].ctx == NULL) 1187 return -ENOMEM; 1188 1189 if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) { 1190 if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) { 1191 cnic_free_context(dev); 1192 cp->ctx_blk_size += cp->ctx_align; 1193 i = -1; 1194 continue; 1195 } 1196 } 1197 } 1198 return 0; 1199 } 1200 1201 static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) 1202 { 1203 struct cnic_local *cp = dev->cnic_priv; 1204 struct cnic_eth_dev *ethdev = cp->ethdev; 1205 u32 start_cid = ethdev->starting_cid; 1206 int i, j, n, ret, pages; 1207 struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info; 1208 1209 cp->iro_arr = ethdev->iro_arr; 1210 1211 cp->max_cid_space = MAX_ISCSI_TBL_SZ; 1212 cp->iscsi_start_cid = start_cid; 1213 cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ; 1214 1215 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 1216 cp->max_cid_space += dev->max_fcoe_conn; 1217 cp->fcoe_init_cid = ethdev->fcoe_init_cid; 1218 if (!cp->fcoe_init_cid) 1219 cp->fcoe_init_cid = 0x10; 1220 } 1221 1222 cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ, 1223 GFP_KERNEL); 1224 if (!cp->iscsi_tbl) 1225 goto error; 1226 1227 cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) * 1228 cp->max_cid_space, GFP_KERNEL); 1229 if (!cp->ctx_tbl) 1230 goto error; 1231 1232 for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) { 1233 cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i]; 1234 cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI; 1235 } 1236 1237 for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++) 1238 cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE; 1239 1240 pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) / 1241 PAGE_SIZE; 1242 1243 ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); 1244 if (ret) 1245 return -ENOMEM; 1246 1247 n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; 1248 for (i = 0, j = 0; i < cp->max_cid_space; i++) { 1249 long off = CNIC_KWQ16_DATA_SIZE * (i % n); 1250 1251 cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off; 1252 cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] + 1253 off; 1254 1255 if ((i % n) == (n - 1)) 1256 j++; 1257 } 1258 1259 ret = cnic_alloc_kcq(dev, &cp->kcq1, false); 1260 if (ret) 1261 goto error; 1262 1263 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 1264 ret = cnic_alloc_kcq(dev, &cp->kcq2, true); 1265 if (ret) 1266 goto error; 1267 } 1268 1269 pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE; 1270 ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0); 1271 if (ret) 1272 goto error; 1273 1274 ret = cnic_alloc_bnx2x_context(dev); 1275 if (ret) 1276 goto error; 1277 1278 cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk; 1279 1280 cp->l2_rx_ring_size = 15; 1281 1282 ret = cnic_alloc_uio_rings(dev, 4); 1283 if (ret) 1284 goto error; 1285 1286 ret = cnic_init_uio(dev); 1287 if (ret) 1288 goto error; 1289 1290 return 0; 1291 1292 error: 1293 cnic_free_resc(dev); 1294 return -ENOMEM; 1295 } 1296 1297 static inline u32 cnic_kwq_avail(struct cnic_local *cp) 1298 { 1299 return cp->max_kwq_idx - 1300 ((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx); 1301 } 1302 1303 static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], 1304 u32 num_wqes) 1305 { 1306 struct cnic_local *cp = dev->cnic_priv; 1307 struct kwqe *prod_qe; 1308 u16 prod, sw_prod, i; 1309 1310 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 1311 return -EAGAIN; /* bnx2 is down */ 1312 1313 spin_lock_bh(&cp->cnic_ulp_lock); 1314 if (num_wqes > cnic_kwq_avail(cp) && 1315 !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) { 1316 
spin_unlock_bh(&cp->cnic_ulp_lock); 1317 return -EAGAIN; 1318 } 1319 1320 clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags); 1321 1322 prod = cp->kwq_prod_idx; 1323 sw_prod = prod & MAX_KWQ_IDX; 1324 for (i = 0; i < num_wqes; i++) { 1325 prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)]; 1326 memcpy(prod_qe, wqes[i], sizeof(struct kwqe)); 1327 prod++; 1328 sw_prod = prod & MAX_KWQ_IDX; 1329 } 1330 cp->kwq_prod_idx = prod; 1331 1332 CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx); 1333 1334 spin_unlock_bh(&cp->cnic_ulp_lock); 1335 return 0; 1336 } 1337 1338 static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid, 1339 union l5cm_specific_data *l5_data) 1340 { 1341 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 1342 dma_addr_t map; 1343 1344 map = ctx->kwqe_data_mapping; 1345 l5_data->phy_address.lo = (u64) map & 0xffffffff; 1346 l5_data->phy_address.hi = (u64) map >> 32; 1347 return ctx->kwqe_data; 1348 } 1349 1350 static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid, 1351 u32 type, union l5cm_specific_data *l5_data) 1352 { 1353 struct cnic_local *cp = dev->cnic_priv; 1354 struct l5cm_spe kwqe; 1355 struct kwqe_16 *kwq[1]; 1356 u16 type_16; 1357 int ret; 1358 1359 kwqe.hdr.conn_and_cmd_data = 1360 cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) | 1361 BNX2X_HW_CID(cp, cid))); 1362 1363 type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; 1364 type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) & 1365 SPE_HDR_FUNCTION_ID; 1366 1367 kwqe.hdr.type = cpu_to_le16(type_16); 1368 kwqe.hdr.reserved1 = 0; 1369 kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo); 1370 kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi); 1371 1372 kwq[0] = (struct kwqe_16 *) &kwqe; 1373 1374 spin_lock_bh(&cp->cnic_ulp_lock); 1375 ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1); 1376 spin_unlock_bh(&cp->cnic_ulp_lock); 1377 1378 if (ret == 1) 1379 return 0; 1380 1381 return ret; 1382 } 1383 1384 static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type, 1385 struct kcqe *cqes[], u32 num_cqes) 1386 { 1387 struct cnic_local *cp = dev->cnic_priv; 1388 struct cnic_ulp_ops *ulp_ops; 1389 1390 rcu_read_lock(); 1391 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); 1392 if (likely(ulp_ops)) { 1393 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], 1394 cqes, num_cqes); 1395 } 1396 rcu_read_unlock(); 1397 } 1398 1399 static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) 1400 { 1401 struct cnic_local *cp = dev->cnic_priv; 1402 struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe; 1403 int hq_bds, pages; 1404 u32 pfid = cp->pfid; 1405 1406 cp->num_iscsi_tasks = req1->num_tasks_per_conn; 1407 cp->num_ccells = req1->num_ccells_per_conn; 1408 cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE * 1409 cp->num_iscsi_tasks; 1410 cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS * 1411 BNX2X_ISCSI_R2TQE_SIZE; 1412 cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE; 1413 pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; 1414 hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE); 1415 cp->num_cqs = req1->num_cqs; 1416 1417 if (!dev->max_iscsi_conn) 1418 return 0; 1419 1420 /* init Tstorm RAM */ 1421 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid), 1422 req1->rq_num_wqes); 1423 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), 1424 PAGE_SIZE); 1425 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 1426 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); 1427 
CNIC_WR16(dev, BAR_TSTRORM_INTMEM + 1428 TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), 1429 req1->num_tasks_per_conn); 1430 1431 /* init Ustorm RAM */ 1432 CNIC_WR16(dev, BAR_USTRORM_INTMEM + 1433 USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid), 1434 req1->rq_buffer_size); 1435 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), 1436 PAGE_SIZE); 1437 CNIC_WR8(dev, BAR_USTRORM_INTMEM + 1438 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); 1439 CNIC_WR16(dev, BAR_USTRORM_INTMEM + 1440 USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), 1441 req1->num_tasks_per_conn); 1442 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid), 1443 req1->rq_num_wqes); 1444 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid), 1445 req1->cq_num_wqes); 1446 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid), 1447 cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS); 1448 1449 /* init Xstorm RAM */ 1450 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), 1451 PAGE_SIZE); 1452 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1453 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); 1454 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + 1455 XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), 1456 req1->num_tasks_per_conn); 1457 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid), 1458 hq_bds); 1459 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid), 1460 req1->num_tasks_per_conn); 1461 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid), 1462 cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS); 1463 1464 /* init Cstorm RAM */ 1465 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), 1466 PAGE_SIZE); 1467 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 1468 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); 1469 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 1470 CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), 1471 req1->num_tasks_per_conn); 1472 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid), 1473 req1->cq_num_wqes); 1474 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid), 1475 hq_bds); 1476 1477 return 0; 1478 } 1479 1480 static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe) 1481 { 1482 struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe; 1483 struct cnic_local *cp = dev->cnic_priv; 1484 u32 pfid = cp->pfid; 1485 struct iscsi_kcqe kcqe; 1486 struct kcqe *cqes[1]; 1487 1488 memset(&kcqe, 0, sizeof(kcqe)); 1489 if (!dev->max_iscsi_conn) { 1490 kcqe.completion_status = 1491 ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED; 1492 goto done; 1493 } 1494 1495 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 1496 TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]); 1497 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 1498 TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4, 1499 req2->error_bit_map[1]); 1500 1501 CNIC_WR16(dev, BAR_USTRORM_INTMEM + 1502 USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn); 1503 CNIC_WR(dev, BAR_USTRORM_INTMEM + 1504 USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]); 1505 CNIC_WR(dev, BAR_USTRORM_INTMEM + 1506 USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4, 1507 req2->error_bit_map[1]); 1508 1509 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 1510 CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn); 1511 1512 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; 1513 1514 done: 1515 kcqe.op_code = ISCSI_KCQE_OPCODE_INIT; 1516 cqes[0] = (struct kcqe *) &kcqe; 1517 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); 1518 1519 return 0; 
1520 } 1521 1522 static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid) 1523 { 1524 struct cnic_local *cp = dev->cnic_priv; 1525 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 1526 1527 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) { 1528 struct cnic_iscsi *iscsi = ctx->proto.iscsi; 1529 1530 cnic_free_dma(dev, &iscsi->hq_info); 1531 cnic_free_dma(dev, &iscsi->r2tq_info); 1532 cnic_free_dma(dev, &iscsi->task_array_info); 1533 cnic_free_id(&cp->cid_tbl, ctx->cid); 1534 } else { 1535 cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid); 1536 } 1537 1538 ctx->cid = 0; 1539 } 1540 1541 static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid) 1542 { 1543 u32 cid; 1544 int ret, pages; 1545 struct cnic_local *cp = dev->cnic_priv; 1546 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 1547 struct cnic_iscsi *iscsi = ctx->proto.iscsi; 1548 1549 if (ctx->ulp_proto_id == CNIC_ULP_FCOE) { 1550 cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl); 1551 if (cid == -1) { 1552 ret = -ENOMEM; 1553 goto error; 1554 } 1555 ctx->cid = cid; 1556 return 0; 1557 } 1558 1559 cid = cnic_alloc_new_id(&cp->cid_tbl); 1560 if (cid == -1) { 1561 ret = -ENOMEM; 1562 goto error; 1563 } 1564 1565 ctx->cid = cid; 1566 pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE; 1567 1568 ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1); 1569 if (ret) 1570 goto error; 1571 1572 pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE; 1573 ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1); 1574 if (ret) 1575 goto error; 1576 1577 pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; 1578 ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1); 1579 if (ret) 1580 goto error; 1581 1582 return 0; 1583 1584 error: 1585 cnic_free_bnx2x_conn_resc(dev, l5_cid); 1586 return ret; 1587 } 1588 1589 static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init, 1590 struct regpair *ctx_addr) 1591 { 1592 struct cnic_local *cp = dev->cnic_priv; 1593 struct cnic_eth_dev *ethdev = cp->ethdev; 1594 int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk; 1595 int off = (cid - ethdev->starting_cid) % cp->cids_per_blk; 1596 unsigned long align_off = 0; 1597 dma_addr_t ctx_map; 1598 void *ctx; 1599 1600 if (cp->ctx_align) { 1601 unsigned long mask = cp->ctx_align - 1; 1602 1603 if (cp->ctx_arr[blk].mapping & mask) 1604 align_off = cp->ctx_align - 1605 (cp->ctx_arr[blk].mapping & mask); 1606 } 1607 ctx_map = cp->ctx_arr[blk].mapping + align_off + 1608 (off * BNX2X_CONTEXT_MEM_SIZE); 1609 ctx = cp->ctx_arr[blk].ctx + align_off + 1610 (off * BNX2X_CONTEXT_MEM_SIZE); 1611 if (init) 1612 memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE); 1613 1614 ctx_addr->lo = ctx_map & 0xffffffff; 1615 ctx_addr->hi = (u64) ctx_map >> 32; 1616 return ctx; 1617 } 1618 1619 static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], 1620 u32 num) 1621 { 1622 struct cnic_local *cp = dev->cnic_priv; 1623 struct iscsi_kwqe_conn_offload1 *req1 = 1624 (struct iscsi_kwqe_conn_offload1 *) wqes[0]; 1625 struct iscsi_kwqe_conn_offload2 *req2 = 1626 (struct iscsi_kwqe_conn_offload2 *) wqes[1]; 1627 struct iscsi_kwqe_conn_offload3 *req3; 1628 struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id]; 1629 struct cnic_iscsi *iscsi = ctx->proto.iscsi; 1630 u32 cid = ctx->cid; 1631 u32 hw_cid = BNX2X_HW_CID(cp, cid); 1632 struct iscsi_context *ictx; 1633 struct regpair context_addr; 1634 int i, j, n = 2, n_max; 1635 u8 port = CNIC_PORT(cp); 1636 1637 ctx->ctx_flags = 0; 1638 if (!req2->num_additional_wqes) 1639 return -EINVAL; 1640 1641 n_max = 
req2->num_additional_wqes + 2; 1642 1643 ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr); 1644 if (ictx == NULL) 1645 return -ENOMEM; 1646 1647 req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++]; 1648 1649 ictx->xstorm_ag_context.hq_prod = 1; 1650 1651 ictx->xstorm_st_context.iscsi.first_burst_length = 1652 ISCSI_DEF_FIRST_BURST_LEN; 1653 ictx->xstorm_st_context.iscsi.max_send_pdu_length = 1654 ISCSI_DEF_MAX_RECV_SEG_LEN; 1655 ictx->xstorm_st_context.iscsi.sq_pbl_base.lo = 1656 req1->sq_page_table_addr_lo; 1657 ictx->xstorm_st_context.iscsi.sq_pbl_base.hi = 1658 req1->sq_page_table_addr_hi; 1659 ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi; 1660 ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo; 1661 ictx->xstorm_st_context.iscsi.hq_pbl_base.lo = 1662 iscsi->hq_info.pgtbl_map & 0xffffffff; 1663 ictx->xstorm_st_context.iscsi.hq_pbl_base.hi = 1664 (u64) iscsi->hq_info.pgtbl_map >> 32; 1665 ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo = 1666 iscsi->hq_info.pgtbl[0]; 1667 ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi = 1668 iscsi->hq_info.pgtbl[1]; 1669 ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo = 1670 iscsi->r2tq_info.pgtbl_map & 0xffffffff; 1671 ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi = 1672 (u64) iscsi->r2tq_info.pgtbl_map >> 32; 1673 ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo = 1674 iscsi->r2tq_info.pgtbl[0]; 1675 ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi = 1676 iscsi->r2tq_info.pgtbl[1]; 1677 ictx->xstorm_st_context.iscsi.task_pbl_base.lo = 1678 iscsi->task_array_info.pgtbl_map & 0xffffffff; 1679 ictx->xstorm_st_context.iscsi.task_pbl_base.hi = 1680 (u64) iscsi->task_array_info.pgtbl_map >> 32; 1681 ictx->xstorm_st_context.iscsi.task_pbl_cache_idx = 1682 BNX2X_ISCSI_PBL_NOT_CACHED; 1683 ictx->xstorm_st_context.iscsi.flags.flags |= 1684 XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA; 1685 ictx->xstorm_st_context.iscsi.flags.flags |= 1686 XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T; 1687 ictx->xstorm_st_context.common.ethernet.reserved_vlan_type = 1688 ETH_P_8021Q; 1689 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) && 1690 cp->port_mode == CHIP_2_PORT_MODE) { 1691 1692 port = 0; 1693 } 1694 ictx->xstorm_st_context.common.flags = 1695 1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT; 1696 ictx->xstorm_st_context.common.flags = 1697 port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT; 1698 1699 ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE; 1700 /* TSTORM requires the base address of RQ DB & not PTE */ 1701 ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo = 1702 req2->rq_page_table_addr_lo & PAGE_MASK; 1703 ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi = 1704 req2->rq_page_table_addr_hi; 1705 ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id; 1706 ictx->tstorm_st_context.tcp.cwnd = 0x5A8; 1707 ictx->tstorm_st_context.tcp.flags2 |= 1708 TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN; 1709 ictx->tstorm_st_context.tcp.ooo_support_mode = 1710 TCP_TSTORM_OOO_DROP_AND_PROC_ACK; 1711 1712 ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG; 1713 1714 ictx->ustorm_st_context.ring.rq.pbl_base.lo = 1715 req2->rq_page_table_addr_lo; 1716 ictx->ustorm_st_context.ring.rq.pbl_base.hi = 1717 req2->rq_page_table_addr_hi; 1718 ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi; 1719 ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo; 1720 ictx->ustorm_st_context.ring.r2tq.pbl_base.lo = 1721 iscsi->r2tq_info.pgtbl_map & 0xffffffff; 1722 
ictx->ustorm_st_context.ring.r2tq.pbl_base.hi = 1723 (u64) iscsi->r2tq_info.pgtbl_map >> 32; 1724 ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo = 1725 iscsi->r2tq_info.pgtbl[0]; 1726 ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi = 1727 iscsi->r2tq_info.pgtbl[1]; 1728 ictx->ustorm_st_context.ring.cq_pbl_base.lo = 1729 req1->cq_page_table_addr_lo; 1730 ictx->ustorm_st_context.ring.cq_pbl_base.hi = 1731 req1->cq_page_table_addr_hi; 1732 ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN; 1733 ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi; 1734 ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo; 1735 ictx->ustorm_st_context.task_pbe_cache_index = 1736 BNX2X_ISCSI_PBL_NOT_CACHED; 1737 ictx->ustorm_st_context.task_pdu_cache_index = 1738 BNX2X_ISCSI_PDU_HEADER_NOT_CACHED; 1739 1740 for (i = 1, j = 1; i < cp->num_cqs; i++, j++) { 1741 if (j == 3) { 1742 if (n >= n_max) 1743 break; 1744 req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++]; 1745 j = 0; 1746 } 1747 ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN; 1748 ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo = 1749 req3->qp_first_pte[j].hi; 1750 ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi = 1751 req3->qp_first_pte[j].lo; 1752 } 1753 1754 ictx->ustorm_st_context.task_pbl_base.lo = 1755 iscsi->task_array_info.pgtbl_map & 0xffffffff; 1756 ictx->ustorm_st_context.task_pbl_base.hi = 1757 (u64) iscsi->task_array_info.pgtbl_map >> 32; 1758 ictx->ustorm_st_context.tce_phy_addr.lo = 1759 iscsi->task_array_info.pgtbl[0]; 1760 ictx->ustorm_st_context.tce_phy_addr.hi = 1761 iscsi->task_array_info.pgtbl[1]; 1762 ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id; 1763 ictx->ustorm_st_context.num_cqs = cp->num_cqs; 1764 ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN; 1765 ictx->ustorm_st_context.negotiated_rx_and_flags |= 1766 ISCSI_DEF_MAX_BURST_LEN; 1767 ictx->ustorm_st_context.negotiated_rx |= 1768 ISCSI_DEFAULT_MAX_OUTSTANDING_R2T << 1769 USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT; 1770 1771 ictx->cstorm_st_context.hq_pbl_base.lo = 1772 iscsi->hq_info.pgtbl_map & 0xffffffff; 1773 ictx->cstorm_st_context.hq_pbl_base.hi = 1774 (u64) iscsi->hq_info.pgtbl_map >> 32; 1775 ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0]; 1776 ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1]; 1777 ictx->cstorm_st_context.task_pbl_base.lo = 1778 iscsi->task_array_info.pgtbl_map & 0xffffffff; 1779 ictx->cstorm_st_context.task_pbl_base.hi = 1780 (u64) iscsi->task_array_info.pgtbl_map >> 32; 1781 /* CSTORM and USTORM initialization is different, CSTORM requires 1782 * CQ DB base & not PTE addr */ 1783 ictx->cstorm_st_context.cq_db_base.lo = 1784 req1->cq_page_table_addr_lo & PAGE_MASK; 1785 ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi; 1786 ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id; 1787 ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1; 1788 for (i = 0; i < cp->num_cqs; i++) { 1789 ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] = 1790 ISCSI_INITIAL_SN; 1791 ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] = 1792 ISCSI_INITIAL_SN; 1793 } 1794 1795 ictx->xstorm_ag_context.cdu_reserved = 1796 CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG, 1797 ISCSI_CONNECTION_TYPE); 1798 ictx->ustorm_ag_context.cdu_usage = 1799 CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG, 1800 ISCSI_CONNECTION_TYPE); 1801 return 0; 1802 1803 } 1804 1805 static int cnic_bnx2x_iscsi_ofld1(struct 
cnic_dev *dev, struct kwqe *wqes[], 1806 u32 num, int *work) 1807 { 1808 struct iscsi_kwqe_conn_offload1 *req1; 1809 struct iscsi_kwqe_conn_offload2 *req2; 1810 struct cnic_local *cp = dev->cnic_priv; 1811 struct cnic_context *ctx; 1812 struct iscsi_kcqe kcqe; 1813 struct kcqe *cqes[1]; 1814 u32 l5_cid; 1815 int ret = 0; 1816 1817 if (num < 2) { 1818 *work = num; 1819 return -EINVAL; 1820 } 1821 1822 req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0]; 1823 req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1]; 1824 if ((num - 2) < req2->num_additional_wqes) { 1825 *work = num; 1826 return -EINVAL; 1827 } 1828 *work = 2 + req2->num_additional_wqes; 1829 1830 l5_cid = req1->iscsi_conn_id; 1831 if (l5_cid >= MAX_ISCSI_TBL_SZ) 1832 return -EINVAL; 1833 1834 memset(&kcqe, 0, sizeof(kcqe)); 1835 kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN; 1836 kcqe.iscsi_conn_id = l5_cid; 1837 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE; 1838 1839 ctx = &cp->ctx_tbl[l5_cid]; 1840 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) { 1841 kcqe.completion_status = 1842 ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY; 1843 goto done; 1844 } 1845 1846 if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) { 1847 atomic_dec(&cp->iscsi_conn); 1848 goto done; 1849 } 1850 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid); 1851 if (ret) { 1852 atomic_dec(&cp->iscsi_conn); 1853 ret = 0; 1854 goto done; 1855 } 1856 ret = cnic_setup_bnx2x_ctx(dev, wqes, num); 1857 if (ret < 0) { 1858 cnic_free_bnx2x_conn_resc(dev, l5_cid); 1859 atomic_dec(&cp->iscsi_conn); 1860 goto done; 1861 } 1862 1863 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; 1864 kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid); 1865 1866 done: 1867 cqes[0] = (struct kcqe *) &kcqe; 1868 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); 1869 return 0; 1870 } 1871 1872 1873 static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe) 1874 { 1875 struct cnic_local *cp = dev->cnic_priv; 1876 struct iscsi_kwqe_conn_update *req = 1877 (struct iscsi_kwqe_conn_update *) kwqe; 1878 void *data; 1879 union l5cm_specific_data l5_data; 1880 u32 l5_cid, cid = BNX2X_SW_CID(req->context_id); 1881 int ret; 1882 1883 if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0) 1884 return -EINVAL; 1885 1886 data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 1887 if (!data) 1888 return -ENOMEM; 1889 1890 memcpy(data, kwqe, sizeof(struct kwqe)); 1891 1892 ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN, 1893 req->context_id, ISCSI_CONNECTION_TYPE, &l5_data); 1894 return ret; 1895 } 1896 1897 static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid) 1898 { 1899 struct cnic_local *cp = dev->cnic_priv; 1900 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 1901 union l5cm_specific_data l5_data; 1902 int ret; 1903 u32 hw_cid; 1904 1905 init_waitqueue_head(&ctx->waitq); 1906 ctx->wait_cond = 0; 1907 memset(&l5_data, 0, sizeof(l5_data)); 1908 hw_cid = BNX2X_HW_CID(cp, ctx->cid); 1909 1910 ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL, 1911 hw_cid, NONE_CONNECTION_TYPE, &l5_data); 1912 1913 if (ret == 0) { 1914 wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO); 1915 if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags))) 1916 return -EBUSY; 1917 } 1918 1919 return 0; 1920 } 1921 1922 static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe) 1923 { 1924 struct cnic_local *cp = dev->cnic_priv; 1925 struct iscsi_kwqe_conn_destroy *req = 1926 (struct 
iscsi_kwqe_conn_destroy *) kwqe; 1927 u32 l5_cid = req->reserved0; 1928 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 1929 int ret = 0; 1930 struct iscsi_kcqe kcqe; 1931 struct kcqe *cqes[1]; 1932 1933 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) 1934 goto skip_cfc_delete; 1935 1936 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) { 1937 unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies; 1938 1939 if (delta > (2 * HZ)) 1940 delta = 0; 1941 1942 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags); 1943 queue_delayed_work(cnic_wq, &cp->delete_task, delta); 1944 goto destroy_reply; 1945 } 1946 1947 ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid); 1948 1949 skip_cfc_delete: 1950 cnic_free_bnx2x_conn_resc(dev, l5_cid); 1951 1952 if (!ret) { 1953 atomic_dec(&cp->iscsi_conn); 1954 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); 1955 } 1956 1957 destroy_reply: 1958 memset(&kcqe, 0, sizeof(kcqe)); 1959 kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN; 1960 kcqe.iscsi_conn_id = l5_cid; 1961 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; 1962 kcqe.iscsi_conn_context_id = req->context_id; 1963 1964 cqes[0] = (struct kcqe *) &kcqe; 1965 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); 1966 1967 return 0; 1968 } 1969 1970 static void cnic_init_storm_conn_bufs(struct cnic_dev *dev, 1971 struct l4_kwq_connect_req1 *kwqe1, 1972 struct l4_kwq_connect_req3 *kwqe3, 1973 struct l5cm_active_conn_buffer *conn_buf) 1974 { 1975 struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf; 1976 struct l5cm_xstorm_conn_buffer *xstorm_buf = 1977 &conn_buf->xstorm_conn_buffer; 1978 struct l5cm_tstorm_conn_buffer *tstorm_buf = 1979 &conn_buf->tstorm_conn_buffer; 1980 struct regpair context_addr; 1981 u32 cid = BNX2X_SW_CID(kwqe1->cid); 1982 struct in6_addr src_ip, dst_ip; 1983 int i; 1984 u32 *addrp; 1985 1986 addrp = (u32 *) &conn_addr->local_ip_addr; 1987 for (i = 0; i < 4; i++, addrp++) 1988 src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp); 1989 1990 addrp = (u32 *) &conn_addr->remote_ip_addr; 1991 for (i = 0; i < 4; i++, addrp++) 1992 dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp); 1993 1994 cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr); 1995 1996 xstorm_buf->context_addr.hi = context_addr.hi; 1997 xstorm_buf->context_addr.lo = context_addr.lo; 1998 xstorm_buf->mss = 0xffff; 1999 xstorm_buf->rcv_buf = kwqe3->rcv_buf; 2000 if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE) 2001 xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE; 2002 xstorm_buf->pseudo_header_checksum = 2003 swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0)); 2004 2005 if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK)) 2006 tstorm_buf->params |= 2007 L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE; 2008 if (kwqe3->ka_timeout) { 2009 tstorm_buf->ka_enable = 1; 2010 tstorm_buf->ka_timeout = kwqe3->ka_timeout; 2011 tstorm_buf->ka_interval = kwqe3->ka_interval; 2012 tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count; 2013 } 2014 tstorm_buf->max_rt_time = 0xffffffff; 2015 } 2016 2017 static void cnic_init_bnx2x_mac(struct cnic_dev *dev) 2018 { 2019 struct cnic_local *cp = dev->cnic_priv; 2020 u32 pfid = cp->pfid; 2021 u8 *mac = dev->mac_addr; 2022 2023 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 2024 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]); 2025 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 2026 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]); 2027 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 2028 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]); 2029 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 2030 
XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]); 2031 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 2032 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]); 2033 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 2034 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]); 2035 2036 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 2037 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]); 2038 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 2039 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, 2040 mac[4]); 2041 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 2042 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]); 2043 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 2044 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, 2045 mac[2]); 2046 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 2047 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]); 2048 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 2049 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, 2050 mac[0]); 2051 } 2052 2053 static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts) 2054 { 2055 struct cnic_local *cp = dev->cnic_priv; 2056 u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN; 2057 u16 tstorm_flags = 0; 2058 2059 if (tcp_ts) { 2060 xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED; 2061 tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED; 2062 } 2063 2064 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 2065 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags); 2066 2067 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + 2068 TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags); 2069 } 2070 2071 static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[], 2072 u32 num, int *work) 2073 { 2074 struct cnic_local *cp = dev->cnic_priv; 2075 struct l4_kwq_connect_req1 *kwqe1 = 2076 (struct l4_kwq_connect_req1 *) wqes[0]; 2077 struct l4_kwq_connect_req3 *kwqe3; 2078 struct l5cm_active_conn_buffer *conn_buf; 2079 struct l5cm_conn_addr_params *conn_addr; 2080 union l5cm_specific_data l5_data; 2081 u32 l5_cid = kwqe1->pg_cid; 2082 struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; 2083 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 2084 int ret; 2085 2086 if (num < 2) { 2087 *work = num; 2088 return -EINVAL; 2089 } 2090 2091 if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) 2092 *work = 3; 2093 else 2094 *work = 2; 2095 2096 if (num < *work) { 2097 *work = num; 2098 return -EINVAL; 2099 } 2100 2101 if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) { 2102 netdev_err(dev->netdev, "conn_buf size too big\n"); 2103 return -ENOMEM; 2104 } 2105 conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 2106 if (!conn_buf) 2107 return -ENOMEM; 2108 2109 memset(conn_buf, 0, sizeof(*conn_buf)); 2110 2111 conn_addr = &conn_buf->conn_addr_buf; 2112 conn_addr->remote_addr_0 = csk->ha[0]; 2113 conn_addr->remote_addr_1 = csk->ha[1]; 2114 conn_addr->remote_addr_2 = csk->ha[2]; 2115 conn_addr->remote_addr_3 = csk->ha[3]; 2116 conn_addr->remote_addr_4 = csk->ha[4]; 2117 conn_addr->remote_addr_5 = csk->ha[5]; 2118 2119 if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) { 2120 struct l4_kwq_connect_req2 *kwqe2 = 2121 (struct l4_kwq_connect_req2 *) wqes[1]; 2122 2123 conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4; 2124 conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3; 2125 conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2; 2126 2127 conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4; 2128 conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3; 2129 conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2; 2130 conn_addr->params |= 
L5CM_CONN_ADDR_PARAMS_IP_VERSION; 2131 } 2132 kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1]; 2133 2134 conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip; 2135 conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip; 2136 conn_addr->local_tcp_port = kwqe1->src_port; 2137 conn_addr->remote_tcp_port = kwqe1->dst_port; 2138 2139 conn_addr->pmtu = kwqe3->pmtu; 2140 cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf); 2141 2142 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + 2143 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id); 2144 2145 cnic_bnx2x_set_tcp_timestamp(dev, 2146 kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP); 2147 2148 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT, 2149 kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data); 2150 if (!ret) 2151 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); 2152 2153 return ret; 2154 } 2155 2156 static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe) 2157 { 2158 struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe; 2159 union l5cm_specific_data l5_data; 2160 int ret; 2161 2162 memset(&l5_data, 0, sizeof(l5_data)); 2163 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE, 2164 req->cid, ISCSI_CONNECTION_TYPE, &l5_data); 2165 return ret; 2166 } 2167 2168 static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe) 2169 { 2170 struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe; 2171 union l5cm_specific_data l5_data; 2172 int ret; 2173 2174 memset(&l5_data, 0, sizeof(l5_data)); 2175 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT, 2176 req->cid, ISCSI_CONNECTION_TYPE, &l5_data); 2177 return ret; 2178 } 2179 static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe) 2180 { 2181 struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe; 2182 struct l4_kcq kcqe; 2183 struct kcqe *cqes[1]; 2184 2185 memset(&kcqe, 0, sizeof(kcqe)); 2186 kcqe.pg_host_opaque = req->host_opaque; 2187 kcqe.pg_cid = req->host_opaque; 2188 kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG; 2189 cqes[0] = (struct kcqe *) &kcqe; 2190 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); 2191 return 0; 2192 } 2193 2194 static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe) 2195 { 2196 struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe; 2197 struct l4_kcq kcqe; 2198 struct kcqe *cqes[1]; 2199 2200 memset(&kcqe, 0, sizeof(kcqe)); 2201 kcqe.pg_host_opaque = req->pg_host_opaque; 2202 kcqe.pg_cid = req->pg_cid; 2203 kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG; 2204 cqes[0] = (struct kcqe *) &kcqe; 2205 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); 2206 return 0; 2207 } 2208 2209 static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe) 2210 { 2211 struct fcoe_kwqe_stat *req; 2212 struct fcoe_stat_ramrod_params *fcoe_stat; 2213 union l5cm_specific_data l5_data; 2214 struct cnic_local *cp = dev->cnic_priv; 2215 int ret; 2216 u32 cid; 2217 2218 req = (struct fcoe_kwqe_stat *) kwqe; 2219 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); 2220 2221 fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data); 2222 if (!fcoe_stat) 2223 return -ENOMEM; 2224 2225 memset(fcoe_stat, 0, sizeof(*fcoe_stat)); 2226 memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req)); 2227 2228 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid, 2229 FCOE_CONNECTION_TYPE, &l5_data); 2230 return ret; 2231 } 2232 2233 static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[], 2234 u32 num, int *work) 2235 { 2236 int ret; 2237 struct cnic_local 
*cp = dev->cnic_priv; 2238 u32 cid; 2239 struct fcoe_init_ramrod_params *fcoe_init; 2240 struct fcoe_kwqe_init1 *req1; 2241 struct fcoe_kwqe_init2 *req2; 2242 struct fcoe_kwqe_init3 *req3; 2243 union l5cm_specific_data l5_data; 2244 2245 if (num < 3) { 2246 *work = num; 2247 return -EINVAL; 2248 } 2249 req1 = (struct fcoe_kwqe_init1 *) wqes[0]; 2250 req2 = (struct fcoe_kwqe_init2 *) wqes[1]; 2251 req3 = (struct fcoe_kwqe_init3 *) wqes[2]; 2252 if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) { 2253 *work = 1; 2254 return -EINVAL; 2255 } 2256 if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) { 2257 *work = 2; 2258 return -EINVAL; 2259 } 2260 2261 if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) { 2262 netdev_err(dev->netdev, "fcoe_init size too big\n"); 2263 return -ENOMEM; 2264 } 2265 fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data); 2266 if (!fcoe_init) 2267 return -ENOMEM; 2268 2269 memset(fcoe_init, 0, sizeof(*fcoe_init)); 2270 memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1)); 2271 memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2)); 2272 memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3)); 2273 fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff; 2274 fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32; 2275 fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages; 2276 2277 fcoe_init->sb_num = cp->status_blk_num; 2278 fcoe_init->eq_prod = MAX_KCQ_IDX; 2279 fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS; 2280 cp->kcq2.sw_prod_idx = 0; 2281 2282 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); 2283 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid, 2284 FCOE_CONNECTION_TYPE, &l5_data); 2285 *work = 3; 2286 return ret; 2287 } 2288 2289 static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], 2290 u32 num, int *work) 2291 { 2292 int ret = 0; 2293 u32 cid = -1, l5_cid; 2294 struct cnic_local *cp = dev->cnic_priv; 2295 struct fcoe_kwqe_conn_offload1 *req1; 2296 struct fcoe_kwqe_conn_offload2 *req2; 2297 struct fcoe_kwqe_conn_offload3 *req3; 2298 struct fcoe_kwqe_conn_offload4 *req4; 2299 struct fcoe_conn_offload_ramrod_params *fcoe_offload; 2300 struct cnic_context *ctx; 2301 struct fcoe_context *fctx; 2302 struct regpair ctx_addr; 2303 union l5cm_specific_data l5_data; 2304 struct fcoe_kcqe kcqe; 2305 struct kcqe *cqes[1]; 2306 2307 if (num < 4) { 2308 *work = num; 2309 return -EINVAL; 2310 } 2311 req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0]; 2312 req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1]; 2313 req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2]; 2314 req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3]; 2315 2316 *work = 4; 2317 2318 l5_cid = req1->fcoe_conn_id; 2319 if (l5_cid >= dev->max_fcoe_conn) 2320 goto err_reply; 2321 2322 l5_cid += BNX2X_FCOE_L5_CID_BASE; 2323 2324 ctx = &cp->ctx_tbl[l5_cid]; 2325 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) 2326 goto err_reply; 2327 2328 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid); 2329 if (ret) { 2330 ret = 0; 2331 goto err_reply; 2332 } 2333 cid = ctx->cid; 2334 2335 fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr); 2336 if (fctx) { 2337 u32 hw_cid = BNX2X_HW_CID(cp, cid); 2338 u32 val; 2339 2340 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG, 2341 FCOE_CONNECTION_TYPE); 2342 fctx->xstorm_ag_context.cdu_reserved = val; 2343 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG, 2344 FCOE_CONNECTION_TYPE); 2345 fctx->ustorm_ag_context.cdu_usage = val; 2346 } 2347 if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) { 2348 netdev_err(dev->netdev, 
"fcoe_offload size too big\n"); 2349 goto err_reply; 2350 } 2351 fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 2352 if (!fcoe_offload) 2353 goto err_reply; 2354 2355 memset(fcoe_offload, 0, sizeof(*fcoe_offload)); 2356 memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1)); 2357 memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2)); 2358 memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3)); 2359 memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4)); 2360 2361 cid = BNX2X_HW_CID(cp, cid); 2362 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid, 2363 FCOE_CONNECTION_TYPE, &l5_data); 2364 if (!ret) 2365 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); 2366 2367 return ret; 2368 2369 err_reply: 2370 if (cid != -1) 2371 cnic_free_bnx2x_conn_resc(dev, l5_cid); 2372 2373 memset(&kcqe, 0, sizeof(kcqe)); 2374 kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN; 2375 kcqe.fcoe_conn_id = req1->fcoe_conn_id; 2376 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE; 2377 2378 cqes[0] = (struct kcqe *) &kcqe; 2379 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1); 2380 return ret; 2381 } 2382 2383 static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe) 2384 { 2385 struct fcoe_kwqe_conn_enable_disable *req; 2386 struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable; 2387 union l5cm_specific_data l5_data; 2388 int ret; 2389 u32 cid, l5_cid; 2390 struct cnic_local *cp = dev->cnic_priv; 2391 2392 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe; 2393 cid = req->context_id; 2394 l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE; 2395 2396 if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) { 2397 netdev_err(dev->netdev, "fcoe_enable size too big\n"); 2398 return -ENOMEM; 2399 } 2400 fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 2401 if (!fcoe_enable) 2402 return -ENOMEM; 2403 2404 memset(fcoe_enable, 0, sizeof(*fcoe_enable)); 2405 memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req)); 2406 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid, 2407 FCOE_CONNECTION_TYPE, &l5_data); 2408 return ret; 2409 } 2410 2411 static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe) 2412 { 2413 struct fcoe_kwqe_conn_enable_disable *req; 2414 struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable; 2415 union l5cm_specific_data l5_data; 2416 int ret; 2417 u32 cid, l5_cid; 2418 struct cnic_local *cp = dev->cnic_priv; 2419 2420 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe; 2421 cid = req->context_id; 2422 l5_cid = req->conn_id; 2423 if (l5_cid >= dev->max_fcoe_conn) 2424 return -EINVAL; 2425 2426 l5_cid += BNX2X_FCOE_L5_CID_BASE; 2427 2428 if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) { 2429 netdev_err(dev->netdev, "fcoe_disable size too big\n"); 2430 return -ENOMEM; 2431 } 2432 fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 2433 if (!fcoe_disable) 2434 return -ENOMEM; 2435 2436 memset(fcoe_disable, 0, sizeof(*fcoe_disable)); 2437 memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req)); 2438 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid, 2439 FCOE_CONNECTION_TYPE, &l5_data); 2440 return ret; 2441 } 2442 2443 static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe) 2444 { 2445 struct fcoe_kwqe_conn_destroy *req; 2446 union l5cm_specific_data l5_data; 2447 int ret; 2448 u32 cid, l5_cid; 2449 struct cnic_local *cp = dev->cnic_priv; 2450 struct cnic_context *ctx; 2451 struct fcoe_kcqe kcqe; 2452 struct kcqe *cqes[1]; 2453 
2454 req = (struct fcoe_kwqe_conn_destroy *) kwqe; 2455 cid = req->context_id; 2456 l5_cid = req->conn_id; 2457 if (l5_cid >= dev->max_fcoe_conn) 2458 return -EINVAL; 2459 2460 l5_cid += BNX2X_FCOE_L5_CID_BASE; 2461 2462 ctx = &cp->ctx_tbl[l5_cid]; 2463 2464 init_waitqueue_head(&ctx->waitq); 2465 ctx->wait_cond = 0; 2466 2467 memset(&kcqe, 0, sizeof(kcqe)); 2468 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR; 2469 memset(&l5_data, 0, sizeof(l5_data)); 2470 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid, 2471 FCOE_CONNECTION_TYPE, &l5_data); 2472 if (ret == 0) { 2473 wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO); 2474 if (ctx->wait_cond) 2475 kcqe.completion_status = 0; 2476 } 2477 2478 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags); 2479 queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000)); 2480 2481 kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN; 2482 kcqe.fcoe_conn_id = req->conn_id; 2483 kcqe.fcoe_conn_context_id = cid; 2484 2485 cqes[0] = (struct kcqe *) &kcqe; 2486 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1); 2487 return ret; 2488 } 2489 2490 static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid) 2491 { 2492 struct cnic_local *cp = dev->cnic_priv; 2493 u32 i; 2494 2495 for (i = start_cid; i < cp->max_cid_space; i++) { 2496 struct cnic_context *ctx = &cp->ctx_tbl[i]; 2497 int j; 2498 2499 while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) 2500 msleep(10); 2501 2502 for (j = 0; j < 5; j++) { 2503 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) 2504 break; 2505 msleep(20); 2506 } 2507 2508 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) 2509 netdev_warn(dev->netdev, "CID %x not deleted\n", 2510 ctx->cid); 2511 } 2512 } 2513 2514 static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe) 2515 { 2516 struct fcoe_kwqe_destroy *req; 2517 union l5cm_specific_data l5_data; 2518 struct cnic_local *cp = dev->cnic_priv; 2519 int ret; 2520 u32 cid; 2521 2522 cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ); 2523 2524 req = (struct fcoe_kwqe_destroy *) kwqe; 2525 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); 2526 2527 memset(&l5_data, 0, sizeof(l5_data)); 2528 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid, 2529 FCOE_CONNECTION_TYPE, &l5_data); 2530 return ret; 2531 } 2532 2533 static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe) 2534 { 2535 struct cnic_local *cp = dev->cnic_priv; 2536 struct kcqe kcqe; 2537 struct kcqe *cqes[1]; 2538 u32 cid; 2539 u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); 2540 u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK; 2541 u32 kcqe_op; 2542 int ulp_type; 2543 2544 cid = kwqe->kwqe_info0; 2545 memset(&kcqe, 0, sizeof(kcqe)); 2546 2547 if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) { 2548 u32 l5_cid = 0; 2549 2550 ulp_type = CNIC_ULP_FCOE; 2551 if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) { 2552 struct fcoe_kwqe_conn_enable_disable *req; 2553 2554 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe; 2555 kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN; 2556 cid = req->context_id; 2557 l5_cid = req->conn_id; 2558 } else if (opcode == FCOE_KWQE_OPCODE_DESTROY) { 2559 kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC; 2560 } else { 2561 return; 2562 } 2563 kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT; 2564 kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE; 2565 kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR; 2566 kcqe.kcqe_info2 = cid; 2567 kcqe.kcqe_info0 = l5_cid; 2568 2569 } else if (layer_code == 
KWQE_FLAGS_LAYER_MASK_L5_ISCSI) { 2570 ulp_type = CNIC_ULP_ISCSI; 2571 if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN) 2572 cid = kwqe->kwqe_info1; 2573 2574 kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT; 2575 kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI; 2576 kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR; 2577 kcqe.kcqe_info2 = cid; 2578 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0); 2579 2580 } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) { 2581 struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe; 2582 2583 ulp_type = CNIC_ULP_L4; 2584 if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1) 2585 kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE; 2586 else if (opcode == L4_KWQE_OPCODE_VALUE_RESET) 2587 kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP; 2588 else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE) 2589 kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; 2590 else 2591 return; 2592 2593 kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) | 2594 KCQE_FLAGS_LAYER_MASK_L4; 2595 l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR; 2596 l4kcqe->cid = cid; 2597 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id); 2598 } else { 2599 return; 2600 } 2601 2602 cqes[0] = &kcqe; 2603 cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1); 2604 } 2605 2606 static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev, 2607 struct kwqe *wqes[], u32 num_wqes) 2608 { 2609 int i, work, ret; 2610 u32 opcode; 2611 struct kwqe *kwqe; 2612 2613 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 2614 return -EAGAIN; /* bnx2 is down */ 2615 2616 for (i = 0; i < num_wqes; ) { 2617 kwqe = wqes[i]; 2618 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); 2619 work = 1; 2620 2621 switch (opcode) { 2622 case ISCSI_KWQE_OPCODE_INIT1: 2623 ret = cnic_bnx2x_iscsi_init1(dev, kwqe); 2624 break; 2625 case ISCSI_KWQE_OPCODE_INIT2: 2626 ret = cnic_bnx2x_iscsi_init2(dev, kwqe); 2627 break; 2628 case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1: 2629 ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i], 2630 num_wqes - i, &work); 2631 break; 2632 case ISCSI_KWQE_OPCODE_UPDATE_CONN: 2633 ret = cnic_bnx2x_iscsi_update(dev, kwqe); 2634 break; 2635 case ISCSI_KWQE_OPCODE_DESTROY_CONN: 2636 ret = cnic_bnx2x_iscsi_destroy(dev, kwqe); 2637 break; 2638 case L4_KWQE_OPCODE_VALUE_CONNECT1: 2639 ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i, 2640 &work); 2641 break; 2642 case L4_KWQE_OPCODE_VALUE_CLOSE: 2643 ret = cnic_bnx2x_close(dev, kwqe); 2644 break; 2645 case L4_KWQE_OPCODE_VALUE_RESET: 2646 ret = cnic_bnx2x_reset(dev, kwqe); 2647 break; 2648 case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG: 2649 ret = cnic_bnx2x_offload_pg(dev, kwqe); 2650 break; 2651 case L4_KWQE_OPCODE_VALUE_UPDATE_PG: 2652 ret = cnic_bnx2x_update_pg(dev, kwqe); 2653 break; 2654 case L4_KWQE_OPCODE_VALUE_UPLOAD_PG: 2655 ret = 0; 2656 break; 2657 default: 2658 ret = 0; 2659 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n", 2660 opcode); 2661 break; 2662 } 2663 if (ret < 0) { 2664 netdev_err(dev->netdev, "KWQE(0x%x) failed\n", 2665 opcode); 2666 2667 /* Possibly bnx2x parity error, send completion 2668 * to ulp drivers with error code to speed up 2669 * cleanup and reset recovery. 
2670 */ 2671 if (ret == -EIO || ret == -EAGAIN) 2672 cnic_bnx2x_kwqe_err(dev, kwqe); 2673 } 2674 i += work; 2675 } 2676 return 0; 2677 } 2678 2679 static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev, 2680 struct kwqe *wqes[], u32 num_wqes) 2681 { 2682 struct cnic_local *cp = dev->cnic_priv; 2683 int i, work, ret; 2684 u32 opcode; 2685 struct kwqe *kwqe; 2686 2687 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 2688 return -EAGAIN; /* bnx2 is down */ 2689 2690 if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) 2691 return -EINVAL; 2692 2693 for (i = 0; i < num_wqes; ) { 2694 kwqe = wqes[i]; 2695 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); 2696 work = 1; 2697 2698 switch (opcode) { 2699 case FCOE_KWQE_OPCODE_INIT1: 2700 ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i], 2701 num_wqes - i, &work); 2702 break; 2703 case FCOE_KWQE_OPCODE_OFFLOAD_CONN1: 2704 ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i], 2705 num_wqes - i, &work); 2706 break; 2707 case FCOE_KWQE_OPCODE_ENABLE_CONN: 2708 ret = cnic_bnx2x_fcoe_enable(dev, kwqe); 2709 break; 2710 case FCOE_KWQE_OPCODE_DISABLE_CONN: 2711 ret = cnic_bnx2x_fcoe_disable(dev, kwqe); 2712 break; 2713 case FCOE_KWQE_OPCODE_DESTROY_CONN: 2714 ret = cnic_bnx2x_fcoe_destroy(dev, kwqe); 2715 break; 2716 case FCOE_KWQE_OPCODE_DESTROY: 2717 ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe); 2718 break; 2719 case FCOE_KWQE_OPCODE_STAT: 2720 ret = cnic_bnx2x_fcoe_stat(dev, kwqe); 2721 break; 2722 default: 2723 ret = 0; 2724 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n", 2725 opcode); 2726 break; 2727 } 2728 if (ret < 0) { 2729 netdev_err(dev->netdev, "KWQE(0x%x) failed\n", 2730 opcode); 2731 2732 /* Possibly bnx2x parity error, send completion 2733 * to ulp drivers with error code to speed up 2734 * cleanup and reset recovery. 2735 */ 2736 if (ret == -EIO || ret == -EAGAIN) 2737 cnic_bnx2x_kwqe_err(dev, kwqe); 2738 } 2739 i += work; 2740 } 2741 return 0; 2742 } 2743 2744 static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], 2745 u32 num_wqes) 2746 { 2747 int ret = -EINVAL; 2748 u32 layer_code; 2749 2750 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 2751 return -EAGAIN; /* bnx2x is down */ 2752 2753 if (!num_wqes) 2754 return 0; 2755 2756 layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK; 2757 switch (layer_code) { 2758 case KWQE_FLAGS_LAYER_MASK_L5_ISCSI: 2759 case KWQE_FLAGS_LAYER_MASK_L4: 2760 case KWQE_FLAGS_LAYER_MASK_L2: 2761 ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes); 2762 break; 2763 2764 case KWQE_FLAGS_LAYER_MASK_L5_FCOE: 2765 ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes); 2766 break; 2767 } 2768 return ret; 2769 } 2770 2771 static inline u32 cnic_get_kcqe_layer_mask(u32 opflag) 2772 { 2773 if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN)) 2774 return KCQE_FLAGS_LAYER_MASK_L4; 2775 2776 return opflag & KCQE_FLAGS_LAYER_MASK; 2777 } 2778 2779 static void service_kcqes(struct cnic_dev *dev, int num_cqes) 2780 { 2781 struct cnic_local *cp = dev->cnic_priv; 2782 int i, j, comp = 0; 2783 2784 i = 0; 2785 j = 1; 2786 while (num_cqes) { 2787 struct cnic_ulp_ops *ulp_ops; 2788 int ulp_type; 2789 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag; 2790 u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag); 2791 2792 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION)) 2793 comp++; 2794 2795 while (j < num_cqes) { 2796 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag; 2797 2798 if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer) 2799 break; 2800 2801 if (unlikely(next_op & KCQE_RAMROD_COMPLETION)) 
2802 comp++; 2803 j++; 2804 } 2805 2806 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA) 2807 ulp_type = CNIC_ULP_RDMA; 2808 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI) 2809 ulp_type = CNIC_ULP_ISCSI; 2810 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE) 2811 ulp_type = CNIC_ULP_FCOE; 2812 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4) 2813 ulp_type = CNIC_ULP_L4; 2814 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2) 2815 goto end; 2816 else { 2817 netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n", 2818 kcqe_op_flag); 2819 goto end; 2820 } 2821 2822 rcu_read_lock(); 2823 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); 2824 if (likely(ulp_ops)) { 2825 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], 2826 cp->completed_kcq + i, j); 2827 } 2828 rcu_read_unlock(); 2829 end: 2830 num_cqes -= j; 2831 i += j; 2832 j = 1; 2833 } 2834 if (unlikely(comp)) 2835 cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp); 2836 } 2837 2838 static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info) 2839 { 2840 struct cnic_local *cp = dev->cnic_priv; 2841 u16 i, ri, hw_prod, last; 2842 struct kcqe *kcqe; 2843 int kcqe_cnt = 0, last_cnt = 0; 2844 2845 i = ri = last = info->sw_prod_idx; 2846 ri &= MAX_KCQ_IDX; 2847 hw_prod = *info->hw_prod_idx_ptr; 2848 hw_prod = info->hw_idx(hw_prod); 2849 2850 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) { 2851 kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)]; 2852 cp->completed_kcq[kcqe_cnt++] = kcqe; 2853 i = info->next_idx(i); 2854 ri = i & MAX_KCQ_IDX; 2855 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) { 2856 last_cnt = kcqe_cnt; 2857 last = i; 2858 } 2859 } 2860 2861 info->sw_prod_idx = last; 2862 return last_cnt; 2863 } 2864 2865 static int cnic_l2_completion(struct cnic_local *cp) 2866 { 2867 u16 hw_cons, sw_cons; 2868 struct cnic_uio_dev *udev = cp->udev; 2869 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) 2870 (udev->l2_ring + (2 * BCM_PAGE_SIZE)); 2871 u32 cmd; 2872 int comp = 0; 2873 2874 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags)) 2875 return 0; 2876 2877 hw_cons = *cp->rx_cons_ptr; 2878 if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT) 2879 hw_cons++; 2880 2881 sw_cons = cp->rx_cons; 2882 while (sw_cons != hw_cons) { 2883 u8 cqe_fp_flags; 2884 2885 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT]; 2886 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; 2887 if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) { 2888 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data); 2889 cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT; 2890 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP || 2891 cmd == RAMROD_CMD_ID_ETH_HALT) 2892 comp++; 2893 } 2894 sw_cons = BNX2X_NEXT_RCQE(sw_cons); 2895 } 2896 return comp; 2897 } 2898 2899 static void cnic_chk_pkt_rings(struct cnic_local *cp) 2900 { 2901 u16 rx_cons, tx_cons; 2902 int comp = 0; 2903 2904 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) 2905 return; 2906 2907 rx_cons = *cp->rx_cons_ptr; 2908 tx_cons = *cp->tx_cons_ptr; 2909 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { 2910 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 2911 comp = cnic_l2_completion(cp); 2912 2913 cp->tx_cons = tx_cons; 2914 cp->rx_cons = rx_cons; 2915 2916 if (cp->udev) 2917 uio_event_notify(&cp->udev->cnic_uinfo); 2918 } 2919 if (comp) 2920 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 2921 } 2922 2923 static u32 cnic_service_bnx2_queues(struct cnic_dev *dev) 2924 { 2925 struct cnic_local *cp = dev->cnic_priv; 2926 u32 status_idx = 
(u16) *cp->kcq1.status_idx_ptr; 2927 int kcqe_cnt; 2928 2929 /* status block index must be read before reading other fields */ 2930 rmb(); 2931 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2932 2933 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { 2934 2935 service_kcqes(dev, kcqe_cnt); 2936 2937 /* Tell compiler that status_blk fields can change. */ 2938 barrier(); 2939 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2940 /* status block index must be read first */ 2941 rmb(); 2942 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2943 } 2944 2945 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx); 2946 2947 cnic_chk_pkt_rings(cp); 2948 2949 return status_idx; 2950 } 2951 2952 static int cnic_service_bnx2(void *data, void *status_blk) 2953 { 2954 struct cnic_dev *dev = data; 2955 2956 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) { 2957 struct status_block *sblk = status_blk; 2958 2959 return sblk->status_idx; 2960 } 2961 2962 return cnic_service_bnx2_queues(dev); 2963 } 2964 2965 static void cnic_service_bnx2_msix(unsigned long data) 2966 { 2967 struct cnic_dev *dev = (struct cnic_dev *) data; 2968 struct cnic_local *cp = dev->cnic_priv; 2969 2970 cp->last_status_idx = cnic_service_bnx2_queues(dev); 2971 2972 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 2973 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); 2974 } 2975 2976 static void cnic_doirq(struct cnic_dev *dev) 2977 { 2978 struct cnic_local *cp = dev->cnic_priv; 2979 2980 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) { 2981 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX; 2982 2983 prefetch(cp->status_blk.gen); 2984 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); 2985 2986 tasklet_schedule(&cp->cnic_irq_task); 2987 } 2988 } 2989 2990 static irqreturn_t cnic_irq(int irq, void *dev_instance) 2991 { 2992 struct cnic_dev *dev = dev_instance; 2993 struct cnic_local *cp = dev->cnic_priv; 2994 2995 if (cp->ack_int) 2996 cp->ack_int(dev); 2997 2998 cnic_doirq(dev); 2999 3000 return IRQ_HANDLED; 3001 } 3002 3003 static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm, 3004 u16 index, u8 op, u8 update) 3005 { 3006 struct cnic_local *cp = dev->cnic_priv; 3007 u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 + 3008 COMMAND_REG_INT_ACK); 3009 struct igu_ack_register igu_ack; 3010 3011 igu_ack.status_block_index = index; 3012 igu_ack.sb_id_and_flags = 3013 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | 3014 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | 3015 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | 3016 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); 3017 3018 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack)); 3019 } 3020 3021 static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment, 3022 u16 index, u8 op, u8 update) 3023 { 3024 struct igu_regular cmd_data; 3025 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8; 3026 3027 cmd_data.sb_id_and_flags = 3028 (index << IGU_REGULAR_SB_INDEX_SHIFT) | 3029 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | 3030 (update << IGU_REGULAR_BUPDATE_SHIFT) | 3031 (op << IGU_REGULAR_ENABLE_INT_SHIFT); 3032 3033 3034 CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags); 3035 } 3036 3037 static void cnic_ack_bnx2x_msix(struct cnic_dev *dev) 3038 { 3039 struct cnic_local *cp = dev->cnic_priv; 3040 3041 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0, 3042 IGU_INT_DISABLE, 0); 3043 } 3044 3045 static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev) 3046 { 3047 struct cnic_local *cp = dev->cnic_priv; 3048 3049 
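	/* Disable the interrupt by writing the IGU command registers directly
	 * (cnic_ack_igu_sb) instead of going through the HC command register
	 * as cnic_ack_bnx2x_msix does.
	 */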
cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0, 3050 IGU_INT_DISABLE, 0); 3051 } 3052 3053 static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) 3054 { 3055 u32 last_status = *info->status_idx_ptr; 3056 int kcqe_cnt; 3057 3058 /* status block index must be read before reading the KCQ */ 3059 rmb(); 3060 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { 3061 3062 service_kcqes(dev, kcqe_cnt); 3063 3064 /* Tell compiler that sblk fields can change. */ 3065 barrier(); 3066 3067 last_status = *info->status_idx_ptr; 3068 /* status block index must be read before reading the KCQ */ 3069 rmb(); 3070 } 3071 return last_status; 3072 } 3073 3074 static void cnic_service_bnx2x_bh(unsigned long data) 3075 { 3076 struct cnic_dev *dev = (struct cnic_dev *) data; 3077 struct cnic_local *cp = dev->cnic_priv; 3078 u32 status_idx, new_status_idx; 3079 3080 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) 3081 return; 3082 3083 while (1) { 3084 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); 3085 3086 CNIC_WR16(dev, cp->kcq1.io_addr, 3087 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); 3088 3089 if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 3090 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, 3091 status_idx, IGU_INT_ENABLE, 1); 3092 break; 3093 } 3094 3095 new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); 3096 3097 if (new_status_idx != status_idx) 3098 continue; 3099 3100 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + 3101 MAX_KCQ_IDX); 3102 3103 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 3104 status_idx, IGU_INT_ENABLE, 1); 3105 3106 break; 3107 } 3108 } 3109 3110 static int cnic_service_bnx2x(void *data, void *status_blk) 3111 { 3112 struct cnic_dev *dev = data; 3113 struct cnic_local *cp = dev->cnic_priv; 3114 3115 if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) 3116 cnic_doirq(dev); 3117 3118 cnic_chk_pkt_rings(cp); 3119 3120 return 0; 3121 } 3122 3123 static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type) 3124 { 3125 struct cnic_ulp_ops *ulp_ops; 3126 3127 if (if_type == CNIC_ULP_ISCSI) 3128 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); 3129 3130 mutex_lock(&cnic_lock); 3131 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], 3132 lockdep_is_held(&cnic_lock)); 3133 if (!ulp_ops) { 3134 mutex_unlock(&cnic_lock); 3135 return; 3136 } 3137 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 3138 mutex_unlock(&cnic_lock); 3139 3140 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type])) 3141 ulp_ops->cnic_stop(cp->ulp_handle[if_type]); 3142 3143 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 3144 } 3145 3146 static void cnic_ulp_stop(struct cnic_dev *dev) 3147 { 3148 struct cnic_local *cp = dev->cnic_priv; 3149 int if_type; 3150 3151 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) 3152 cnic_ulp_stop_one(cp, if_type); 3153 } 3154 3155 static void cnic_ulp_start(struct cnic_dev *dev) 3156 { 3157 struct cnic_local *cp = dev->cnic_priv; 3158 int if_type; 3159 3160 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 3161 struct cnic_ulp_ops *ulp_ops; 3162 3163 mutex_lock(&cnic_lock); 3164 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], 3165 lockdep_is_held(&cnic_lock)); 3166 if (!ulp_ops || !ulp_ops->cnic_start) { 3167 mutex_unlock(&cnic_lock); 3168 continue; 3169 } 3170 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 3171 mutex_unlock(&cnic_lock); 3172 3173 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type])) 3174 
ulp_ops->cnic_start(cp->ulp_handle[if_type]); 3175 3176 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 3177 } 3178 } 3179 3180 static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type) 3181 { 3182 struct cnic_local *cp = dev->cnic_priv; 3183 struct cnic_ulp_ops *ulp_ops; 3184 int rc; 3185 3186 mutex_lock(&cnic_lock); 3187 ulp_ops = cnic_ulp_tbl_prot(ulp_type); 3188 if (ulp_ops && ulp_ops->cnic_get_stats) 3189 rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]); 3190 else 3191 rc = -ENODEV; 3192 mutex_unlock(&cnic_lock); 3193 return rc; 3194 } 3195 3196 static int cnic_ctl(void *data, struct cnic_ctl_info *info) 3197 { 3198 struct cnic_dev *dev = data; 3199 int ulp_type = CNIC_ULP_ISCSI; 3200 3201 switch (info->cmd) { 3202 case CNIC_CTL_STOP_CMD: 3203 cnic_hold(dev); 3204 3205 cnic_ulp_stop(dev); 3206 cnic_stop_hw(dev); 3207 3208 cnic_put(dev); 3209 break; 3210 case CNIC_CTL_START_CMD: 3211 cnic_hold(dev); 3212 3213 if (!cnic_start_hw(dev)) 3214 cnic_ulp_start(dev); 3215 3216 cnic_put(dev); 3217 break; 3218 case CNIC_CTL_STOP_ISCSI_CMD: { 3219 struct cnic_local *cp = dev->cnic_priv; 3220 set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags); 3221 queue_delayed_work(cnic_wq, &cp->delete_task, 0); 3222 break; 3223 } 3224 case CNIC_CTL_COMPLETION_CMD: { 3225 struct cnic_ctl_completion *comp = &info->data.comp; 3226 u32 cid = BNX2X_SW_CID(comp->cid); 3227 u32 l5_cid; 3228 struct cnic_local *cp = dev->cnic_priv; 3229 3230 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 3231 break; 3232 3233 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) { 3234 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 3235 3236 if (unlikely(comp->error)) { 3237 set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags); 3238 netdev_err(dev->netdev, 3239 "CID %x CFC delete comp error %x\n", 3240 cid, comp->error); 3241 } 3242 3243 ctx->wait_cond = 1; 3244 wake_up(&ctx->waitq); 3245 } 3246 break; 3247 } 3248 case CNIC_CTL_FCOE_STATS_GET_CMD: 3249 ulp_type = CNIC_ULP_FCOE; 3250 /* fall through */ 3251 case CNIC_CTL_ISCSI_STATS_GET_CMD: 3252 cnic_hold(dev); 3253 cnic_copy_ulp_stats(dev, ulp_type); 3254 cnic_put(dev); 3255 break; 3256 3257 default: 3258 return -EINVAL; 3259 } 3260 return 0; 3261 } 3262 3263 static void cnic_ulp_init(struct cnic_dev *dev) 3264 { 3265 int i; 3266 struct cnic_local *cp = dev->cnic_priv; 3267 3268 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { 3269 struct cnic_ulp_ops *ulp_ops; 3270 3271 mutex_lock(&cnic_lock); 3272 ulp_ops = cnic_ulp_tbl_prot(i); 3273 if (!ulp_ops || !ulp_ops->cnic_init) { 3274 mutex_unlock(&cnic_lock); 3275 continue; 3276 } 3277 ulp_get(ulp_ops); 3278 mutex_unlock(&cnic_lock); 3279 3280 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i])) 3281 ulp_ops->cnic_init(dev); 3282 3283 ulp_put(ulp_ops); 3284 } 3285 } 3286 3287 static void cnic_ulp_exit(struct cnic_dev *dev) 3288 { 3289 int i; 3290 struct cnic_local *cp = dev->cnic_priv; 3291 3292 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { 3293 struct cnic_ulp_ops *ulp_ops; 3294 3295 mutex_lock(&cnic_lock); 3296 ulp_ops = cnic_ulp_tbl_prot(i); 3297 if (!ulp_ops || !ulp_ops->cnic_exit) { 3298 mutex_unlock(&cnic_lock); 3299 continue; 3300 } 3301 ulp_get(ulp_ops); 3302 mutex_unlock(&cnic_lock); 3303 3304 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i])) 3305 ulp_ops->cnic_exit(dev); 3306 3307 ulp_put(ulp_ops); 3308 } 3309 } 3310 3311 static int cnic_cm_offload_pg(struct cnic_sock *csk) 3312 { 3313 struct cnic_dev *dev = csk->dev; 3314 struct l4_kwq_offload_pg *l4kwqe; 3315 struct kwqe *wqes[1]; 3316 3317 l4kwqe = (struct 
l4_kwq_offload_pg *) &csk->kwqe1; 3318 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3319 wqes[0] = (struct kwqe *) l4kwqe; 3320 3321 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG; 3322 l4kwqe->flags = 3323 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT; 3324 l4kwqe->l2hdr_nbytes = ETH_HLEN; 3325 3326 l4kwqe->da0 = csk->ha[0]; 3327 l4kwqe->da1 = csk->ha[1]; 3328 l4kwqe->da2 = csk->ha[2]; 3329 l4kwqe->da3 = csk->ha[3]; 3330 l4kwqe->da4 = csk->ha[4]; 3331 l4kwqe->da5 = csk->ha[5]; 3332 3333 l4kwqe->sa0 = dev->mac_addr[0]; 3334 l4kwqe->sa1 = dev->mac_addr[1]; 3335 l4kwqe->sa2 = dev->mac_addr[2]; 3336 l4kwqe->sa3 = dev->mac_addr[3]; 3337 l4kwqe->sa4 = dev->mac_addr[4]; 3338 l4kwqe->sa5 = dev->mac_addr[5]; 3339 3340 l4kwqe->etype = ETH_P_IP; 3341 l4kwqe->ipid_start = DEF_IPID_START; 3342 l4kwqe->host_opaque = csk->l5_cid; 3343 3344 if (csk->vlan_id) { 3345 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING; 3346 l4kwqe->vlan_tag = csk->vlan_id; 3347 l4kwqe->l2hdr_nbytes += 4; 3348 } 3349 3350 return dev->submit_kwqes(dev, wqes, 1); 3351 } 3352 3353 static int cnic_cm_update_pg(struct cnic_sock *csk) 3354 { 3355 struct cnic_dev *dev = csk->dev; 3356 struct l4_kwq_update_pg *l4kwqe; 3357 struct kwqe *wqes[1]; 3358 3359 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1; 3360 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3361 wqes[0] = (struct kwqe *) l4kwqe; 3362 3363 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG; 3364 l4kwqe->flags = 3365 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT; 3366 l4kwqe->pg_cid = csk->pg_cid; 3367 3368 l4kwqe->da0 = csk->ha[0]; 3369 l4kwqe->da1 = csk->ha[1]; 3370 l4kwqe->da2 = csk->ha[2]; 3371 l4kwqe->da3 = csk->ha[3]; 3372 l4kwqe->da4 = csk->ha[4]; 3373 l4kwqe->da5 = csk->ha[5]; 3374 3375 l4kwqe->pg_host_opaque = csk->l5_cid; 3376 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA; 3377 3378 return dev->submit_kwqes(dev, wqes, 1); 3379 } 3380 3381 static int cnic_cm_upload_pg(struct cnic_sock *csk) 3382 { 3383 struct cnic_dev *dev = csk->dev; 3384 struct l4_kwq_upload *l4kwqe; 3385 struct kwqe *wqes[1]; 3386 3387 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1; 3388 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3389 wqes[0] = (struct kwqe *) l4kwqe; 3390 3391 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG; 3392 l4kwqe->flags = 3393 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT; 3394 l4kwqe->cid = csk->pg_cid; 3395 3396 return dev->submit_kwqes(dev, wqes, 1); 3397 } 3398 3399 static int cnic_cm_conn_req(struct cnic_sock *csk) 3400 { 3401 struct cnic_dev *dev = csk->dev; 3402 struct l4_kwq_connect_req1 *l4kwqe1; 3403 struct l4_kwq_connect_req2 *l4kwqe2; 3404 struct l4_kwq_connect_req3 *l4kwqe3; 3405 struct kwqe *wqes[3]; 3406 u8 tcp_flags = 0; 3407 int num_wqes = 2; 3408 3409 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1; 3410 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2; 3411 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3; 3412 memset(l4kwqe1, 0, sizeof(*l4kwqe1)); 3413 memset(l4kwqe2, 0, sizeof(*l4kwqe2)); 3414 memset(l4kwqe3, 0, sizeof(*l4kwqe3)); 3415 3416 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3; 3417 l4kwqe3->flags = 3418 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT; 3419 l4kwqe3->ka_timeout = csk->ka_timeout; 3420 l4kwqe3->ka_interval = csk->ka_interval; 3421 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count; 3422 l4kwqe3->tos = csk->tos; 3423 l4kwqe3->ttl = csk->ttl; 3424 l4kwqe3->snd_seq_scale = csk->snd_seq_scale; 3425 l4kwqe3->pmtu = csk->mtu; 3426 l4kwqe3->rcv_buf = csk->rcv_buf; 3427 l4kwqe3->snd_buf = csk->snd_buf; 3428 l4kwqe3->seed 
= csk->seed; 3429 3430 wqes[0] = (struct kwqe *) l4kwqe1; 3431 if (test_bit(SK_F_IPV6, &csk->flags)) { 3432 wqes[1] = (struct kwqe *) l4kwqe2; 3433 wqes[2] = (struct kwqe *) l4kwqe3; 3434 num_wqes = 3; 3435 3436 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6; 3437 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2; 3438 l4kwqe2->flags = 3439 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT | 3440 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT; 3441 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]); 3442 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]); 3443 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]); 3444 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]); 3445 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]); 3446 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]); 3447 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) - 3448 sizeof(struct tcphdr); 3449 } else { 3450 wqes[1] = (struct kwqe *) l4kwqe3; 3451 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) - 3452 sizeof(struct tcphdr); 3453 } 3454 3455 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1; 3456 l4kwqe1->flags = 3457 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) | 3458 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT; 3459 l4kwqe1->cid = csk->cid; 3460 l4kwqe1->pg_cid = csk->pg_cid; 3461 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]); 3462 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]); 3463 l4kwqe1->src_port = be16_to_cpu(csk->src_port); 3464 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port); 3465 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK) 3466 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK; 3467 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE) 3468 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE; 3469 if (csk->tcp_flags & SK_TCP_NAGLE) 3470 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE; 3471 if (csk->tcp_flags & SK_TCP_TIMESTAMP) 3472 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP; 3473 if (csk->tcp_flags & SK_TCP_SACK) 3474 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK; 3475 if (csk->tcp_flags & SK_TCP_SEG_SCALING) 3476 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING; 3477 3478 l4kwqe1->tcp_flags = tcp_flags; 3479 3480 return dev->submit_kwqes(dev, wqes, num_wqes); 3481 } 3482 3483 static int cnic_cm_close_req(struct cnic_sock *csk) 3484 { 3485 struct cnic_dev *dev = csk->dev; 3486 struct l4_kwq_close_req *l4kwqe; 3487 struct kwqe *wqes[1]; 3488 3489 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2; 3490 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3491 wqes[0] = (struct kwqe *) l4kwqe; 3492 3493 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE; 3494 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT; 3495 l4kwqe->cid = csk->cid; 3496 3497 return dev->submit_kwqes(dev, wqes, 1); 3498 } 3499 3500 static int cnic_cm_abort_req(struct cnic_sock *csk) 3501 { 3502 struct cnic_dev *dev = csk->dev; 3503 struct l4_kwq_reset_req *l4kwqe; 3504 struct kwqe *wqes[1]; 3505 3506 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2; 3507 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3508 wqes[0] = (struct kwqe *) l4kwqe; 3509 3510 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET; 3511 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT; 3512 l4kwqe->cid = csk->cid; 3513 3514 return dev->submit_kwqes(dev, wqes, 1); 3515 } 3516 3517 static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid, 3518 u32 l5_cid, struct cnic_sock **csk, void *context) 3519 { 3520 struct cnic_local *cp = dev->cnic_priv; 3521 struct cnic_sock *csk1; 3522 3523 if (l5_cid >= MAX_CM_SK_TBL_SZ) 3524 return -EINVAL; 3525 3526 if (cp->ctx_tbl) { 3527 struct cnic_context *ctx = 
&cp->ctx_tbl[l5_cid]; 3528 3529 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) 3530 return -EAGAIN; 3531 } 3532 3533 csk1 = &cp->csk_tbl[l5_cid]; 3534 if (atomic_read(&csk1->ref_count)) 3535 return -EAGAIN; 3536 3537 if (test_and_set_bit(SK_F_INUSE, &csk1->flags)) 3538 return -EBUSY; 3539 3540 csk1->dev = dev; 3541 csk1->cid = cid; 3542 csk1->l5_cid = l5_cid; 3543 csk1->ulp_type = ulp_type; 3544 csk1->context = context; 3545 3546 csk1->ka_timeout = DEF_KA_TIMEOUT; 3547 csk1->ka_interval = DEF_KA_INTERVAL; 3548 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT; 3549 csk1->tos = DEF_TOS; 3550 csk1->ttl = DEF_TTL; 3551 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE; 3552 csk1->rcv_buf = DEF_RCV_BUF; 3553 csk1->snd_buf = DEF_SND_BUF; 3554 csk1->seed = DEF_SEED; 3555 3556 *csk = csk1; 3557 return 0; 3558 } 3559 3560 static void cnic_cm_cleanup(struct cnic_sock *csk) 3561 { 3562 if (csk->src_port) { 3563 struct cnic_dev *dev = csk->dev; 3564 struct cnic_local *cp = dev->cnic_priv; 3565 3566 cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port)); 3567 csk->src_port = 0; 3568 } 3569 } 3570 3571 static void cnic_close_conn(struct cnic_sock *csk) 3572 { 3573 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) { 3574 cnic_cm_upload_pg(csk); 3575 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); 3576 } 3577 cnic_cm_cleanup(csk); 3578 } 3579 3580 static int cnic_cm_destroy(struct cnic_sock *csk) 3581 { 3582 if (!cnic_in_use(csk)) 3583 return -EINVAL; 3584 3585 csk_hold(csk); 3586 clear_bit(SK_F_INUSE, &csk->flags); 3587 smp_mb__after_clear_bit(); 3588 while (atomic_read(&csk->ref_count) != 1) 3589 msleep(1); 3590 cnic_cm_cleanup(csk); 3591 3592 csk->flags = 0; 3593 csk_put(csk); 3594 return 0; 3595 } 3596 3597 static inline u16 cnic_get_vlan(struct net_device *dev, 3598 struct net_device **vlan_dev) 3599 { 3600 if (dev->priv_flags & IFF_802_1Q_VLAN) { 3601 *vlan_dev = vlan_dev_real_dev(dev); 3602 return vlan_dev_vlan_id(dev); 3603 } 3604 *vlan_dev = dev; 3605 return 0; 3606 } 3607 3608 static int cnic_get_v4_route(struct sockaddr_in *dst_addr, 3609 struct dst_entry **dst) 3610 { 3611 #if defined(CONFIG_INET) 3612 struct rtable *rt; 3613 3614 rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0); 3615 if (!IS_ERR(rt)) { 3616 *dst = &rt->dst; 3617 return 0; 3618 } 3619 return PTR_ERR(rt); 3620 #else 3621 return -ENETUNREACH; 3622 #endif 3623 } 3624 3625 static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, 3626 struct dst_entry **dst) 3627 { 3628 #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE)) 3629 struct flowi6 fl6; 3630 3631 memset(&fl6, 0, sizeof(fl6)); 3632 fl6.daddr = dst_addr->sin6_addr; 3633 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) 3634 fl6.flowi6_oif = dst_addr->sin6_scope_id; 3635 3636 *dst = ip6_route_output(&init_net, NULL, &fl6); 3637 if ((*dst)->error) { 3638 dst_release(*dst); 3639 *dst = NULL; 3640 return -ENETUNREACH; 3641 } else 3642 return 0; 3643 #endif 3644 3645 return -ENETUNREACH; 3646 } 3647 3648 static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr, 3649 int ulp_type) 3650 { 3651 struct cnic_dev *dev = NULL; 3652 struct dst_entry *dst; 3653 struct net_device *netdev = NULL; 3654 int err = -ENETUNREACH; 3655 3656 if (dst_addr->sin_family == AF_INET) 3657 err = cnic_get_v4_route(dst_addr, &dst); 3658 else if (dst_addr->sin_family == AF_INET6) { 3659 struct sockaddr_in6 *dst_addr6 = 3660 (struct sockaddr_in6 *) dst_addr; 3661 3662 err = cnic_get_v6_route(dst_addr6, &dst); 3663 } else 3664 return NULL; 3665 
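	/* The route lookup yields the egress device; strip any VLAN upper
	 * device and map the real netdev back to its registered cnic_dev.
	 */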
3666 if (err) 3667 return NULL; 3668 3669 if (!dst->dev) 3670 goto done; 3671 3672 cnic_get_vlan(dst->dev, &netdev); 3673 3674 dev = cnic_from_netdev(netdev); 3675 3676 done: 3677 dst_release(dst); 3678 if (dev) 3679 cnic_put(dev); 3680 return dev; 3681 } 3682 3683 static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 3684 { 3685 struct cnic_dev *dev = csk->dev; 3686 struct cnic_local *cp = dev->cnic_priv; 3687 3688 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk); 3689 } 3690 3691 static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 3692 { 3693 struct cnic_dev *dev = csk->dev; 3694 struct cnic_local *cp = dev->cnic_priv; 3695 int is_v6, rc = 0; 3696 struct dst_entry *dst = NULL; 3697 struct net_device *realdev; 3698 __be16 local_port; 3699 u32 port_id; 3700 3701 if (saddr->local.v6.sin6_family == AF_INET6 && 3702 saddr->remote.v6.sin6_family == AF_INET6) 3703 is_v6 = 1; 3704 else if (saddr->local.v4.sin_family == AF_INET && 3705 saddr->remote.v4.sin_family == AF_INET) 3706 is_v6 = 0; 3707 else 3708 return -EINVAL; 3709 3710 clear_bit(SK_F_IPV6, &csk->flags); 3711 3712 if (is_v6) { 3713 set_bit(SK_F_IPV6, &csk->flags); 3714 cnic_get_v6_route(&saddr->remote.v6, &dst); 3715 3716 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr, 3717 sizeof(struct in6_addr)); 3718 csk->dst_port = saddr->remote.v6.sin6_port; 3719 local_port = saddr->local.v6.sin6_port; 3720 3721 } else { 3722 cnic_get_v4_route(&saddr->remote.v4, &dst); 3723 3724 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr; 3725 csk->dst_port = saddr->remote.v4.sin_port; 3726 local_port = saddr->local.v4.sin_port; 3727 } 3728 3729 csk->vlan_id = 0; 3730 csk->mtu = dev->netdev->mtu; 3731 if (dst && dst->dev) { 3732 u16 vlan = cnic_get_vlan(dst->dev, &realdev); 3733 if (realdev == dev->netdev) { 3734 csk->vlan_id = vlan; 3735 csk->mtu = dst_mtu(dst); 3736 } 3737 } 3738 3739 port_id = be16_to_cpu(local_port); 3740 if (port_id >= CNIC_LOCAL_PORT_MIN && 3741 port_id < CNIC_LOCAL_PORT_MAX) { 3742 if (cnic_alloc_id(&cp->csk_port_tbl, port_id)) 3743 port_id = 0; 3744 } else 3745 port_id = 0; 3746 3747 if (!port_id) { 3748 port_id = cnic_alloc_new_id(&cp->csk_port_tbl); 3749 if (port_id == -1) { 3750 rc = -ENOMEM; 3751 goto err_out; 3752 } 3753 local_port = cpu_to_be16(port_id); 3754 } 3755 csk->src_port = local_port; 3756 3757 err_out: 3758 dst_release(dst); 3759 return rc; 3760 } 3761 3762 static void cnic_init_csk_state(struct cnic_sock *csk) 3763 { 3764 csk->state = 0; 3765 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3766 clear_bit(SK_F_CLOSING, &csk->flags); 3767 } 3768 3769 static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 3770 { 3771 struct cnic_local *cp = csk->dev->cnic_priv; 3772 int err = 0; 3773 3774 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI) 3775 return -EOPNOTSUPP; 3776 3777 if (!cnic_in_use(csk)) 3778 return -EINVAL; 3779 3780 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags)) 3781 return -EINVAL; 3782 3783 cnic_init_csk_state(csk); 3784 3785 err = cnic_get_route(csk, saddr); 3786 if (err) 3787 goto err_out; 3788 3789 err = cnic_resolve_addr(csk, saddr); 3790 if (!err) 3791 return 0; 3792 3793 err_out: 3794 clear_bit(SK_F_CONNECT_START, &csk->flags); 3795 return err; 3796 } 3797 3798 static int cnic_cm_abort(struct cnic_sock *csk) 3799 { 3800 struct cnic_local *cp = csk->dev->cnic_priv; 3801 u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP; 3802 3803 if (!cnic_in_use(csk)) 3804 return -EINVAL; 3805 3806 if (cnic_abort_prep(csk)) 3807 
return cnic_cm_abort_req(csk); 3808 3809 /* Getting here means that we haven't started connect, or 3810 * connect was not successful. 3811 */ 3812 3813 cp->close_conn(csk, opcode); 3814 if (csk->state != opcode) 3815 return -EALREADY; 3816 3817 return 0; 3818 } 3819 3820 static int cnic_cm_close(struct cnic_sock *csk) 3821 { 3822 if (!cnic_in_use(csk)) 3823 return -EINVAL; 3824 3825 if (cnic_close_prep(csk)) { 3826 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; 3827 return cnic_cm_close_req(csk); 3828 } else { 3829 return -EALREADY; 3830 } 3831 return 0; 3832 } 3833 3834 static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk, 3835 u8 opcode) 3836 { 3837 struct cnic_ulp_ops *ulp_ops; 3838 int ulp_type = csk->ulp_type; 3839 3840 rcu_read_lock(); 3841 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); 3842 if (ulp_ops) { 3843 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE) 3844 ulp_ops->cm_connect_complete(csk); 3845 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) 3846 ulp_ops->cm_close_complete(csk); 3847 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) 3848 ulp_ops->cm_remote_abort(csk); 3849 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) 3850 ulp_ops->cm_abort_complete(csk); 3851 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED) 3852 ulp_ops->cm_remote_close(csk); 3853 } 3854 rcu_read_unlock(); 3855 } 3856 3857 static int cnic_cm_set_pg(struct cnic_sock *csk) 3858 { 3859 if (cnic_offld_prep(csk)) { 3860 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) 3861 cnic_cm_update_pg(csk); 3862 else 3863 cnic_cm_offload_pg(csk); 3864 } 3865 return 0; 3866 } 3867 3868 static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe) 3869 { 3870 struct cnic_local *cp = dev->cnic_priv; 3871 u32 l5_cid = kcqe->pg_host_opaque; 3872 u8 opcode = kcqe->op_code; 3873 struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; 3874 3875 csk_hold(csk); 3876 if (!cnic_in_use(csk)) 3877 goto done; 3878 3879 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { 3880 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3881 goto done; 3882 } 3883 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */ 3884 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) { 3885 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3886 cnic_cm_upcall(cp, csk, 3887 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); 3888 goto done; 3889 } 3890 3891 csk->pg_cid = kcqe->pg_cid; 3892 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); 3893 cnic_cm_conn_req(csk); 3894 3895 done: 3896 csk_put(csk); 3897 } 3898 3899 static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe) 3900 { 3901 struct cnic_local *cp = dev->cnic_priv; 3902 struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe; 3903 u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE; 3904 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 3905 3906 ctx->timestamp = jiffies; 3907 ctx->wait_cond = 1; 3908 wake_up(&ctx->waitq); 3909 } 3910 3911 static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) 3912 { 3913 struct cnic_local *cp = dev->cnic_priv; 3914 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe; 3915 u8 opcode = l4kcqe->op_code; 3916 u32 l5_cid; 3917 struct cnic_sock *csk; 3918 3919 if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) { 3920 cnic_process_fcoe_term_conn(dev, kcqe); 3921 return; 3922 } 3923 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG || 3924 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { 3925 cnic_cm_process_offld_pg(dev, l4kcqe); 3926 return; 3927 } 3928 3929 l5_cid = l4kcqe->conn_id; 3930 if 
(opcode & 0x80) 3931 l5_cid = l4kcqe->cid; 3932 if (l5_cid >= MAX_CM_SK_TBL_SZ) 3933 return; 3934 3935 csk = &cp->csk_tbl[l5_cid]; 3936 csk_hold(csk); 3937 3938 if (!cnic_in_use(csk)) { 3939 csk_put(csk); 3940 return; 3941 } 3942 3943 switch (opcode) { 3944 case L5CM_RAMROD_CMD_ID_TCP_CONNECT: 3945 if (l4kcqe->status != 0) { 3946 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3947 cnic_cm_upcall(cp, csk, 3948 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); 3949 } 3950 break; 3951 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE: 3952 if (l4kcqe->status == 0) 3953 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags); 3954 else if (l4kcqe->status == 3955 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR) 3956 set_bit(SK_F_HW_ERR, &csk->flags); 3957 3958 smp_mb__before_clear_bit(); 3959 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3960 cnic_cm_upcall(cp, csk, opcode); 3961 break; 3962 3963 case L5CM_RAMROD_CMD_ID_CLOSE: 3964 if (l4kcqe->status != 0) { 3965 netdev_warn(dev->netdev, "RAMROD CLOSE compl with " 3966 "status 0x%x\n", l4kcqe->status); 3967 opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; 3968 /* Fall through */ 3969 } else { 3970 break; 3971 } 3972 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: 3973 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: 3974 case L4_KCQE_OPCODE_VALUE_RESET_COMP: 3975 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: 3976 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: 3977 if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR) 3978 set_bit(SK_F_HW_ERR, &csk->flags); 3979 3980 cp->close_conn(csk, opcode); 3981 break; 3982 3983 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED: 3984 /* after we already sent CLOSE_REQ */ 3985 if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) && 3986 !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) && 3987 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) 3988 cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP); 3989 else 3990 cnic_cm_upcall(cp, csk, opcode); 3991 break; 3992 } 3993 csk_put(csk); 3994 } 3995 3996 static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num) 3997 { 3998 struct cnic_dev *dev = data; 3999 int i; 4000 4001 for (i = 0; i < num; i++) 4002 cnic_cm_process_kcqe(dev, kcqe[i]); 4003 } 4004 4005 static struct cnic_ulp_ops cm_ulp_ops = { 4006 .indicate_kcqes = cnic_cm_indicate_kcqe, 4007 }; 4008 4009 static void cnic_cm_free_mem(struct cnic_dev *dev) 4010 { 4011 struct cnic_local *cp = dev->cnic_priv; 4012 4013 kfree(cp->csk_tbl); 4014 cp->csk_tbl = NULL; 4015 cnic_free_id_tbl(&cp->csk_port_tbl); 4016 } 4017 4018 static int cnic_cm_alloc_mem(struct cnic_dev *dev) 4019 { 4020 struct cnic_local *cp = dev->cnic_priv; 4021 u32 port_id; 4022 4023 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ, 4024 GFP_KERNEL); 4025 if (!cp->csk_tbl) 4026 return -ENOMEM; 4027 4028 port_id = random32(); 4029 port_id %= CNIC_LOCAL_PORT_RANGE; 4030 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE, 4031 CNIC_LOCAL_PORT_MIN, port_id)) { 4032 cnic_cm_free_mem(dev); 4033 return -ENOMEM; 4034 } 4035 return 0; 4036 } 4037 4038 static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode) 4039 { 4040 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { 4041 /* Unsolicited RESET_COMP or RESET_RECEIVED */ 4042 opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED; 4043 csk->state = opcode; 4044 } 4045 4046 /* 1. If event opcode matches the expected event in csk->state 4047 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any 4048 * event 4049 * 3. 
If the expected event is 0, meaning the connection was never
 *    established, we accept the opcode from cm_abort.
 */
	if (opcode == csk->state || csk->state == 0 ||
	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
	    csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
			if (csk->state == 0)
				csk->state = opcode;
			return 1;
		}
	}
	return 0;
}

static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
		cnic_cm_upcall(cp, csk, opcode);
		return;
	}

	clear_bit(SK_F_CONNECT_START, &csk->flags);
	cnic_close_conn(csk);
	csk->state = opcode;
	cnic_cm_upcall(cp, csk, opcode);
}

static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
{
}

static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
	u32 seed;

	seed = random32();
	cnic_ctx_wr(dev, 45, 0, seed);
	return 0;
}

static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
	union l5cm_specific_data l5_data;
	u32 cmd = 0;
	int close_complete = 0;

	switch (opcode) {
	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
		if (cnic_ready_to_close(csk, opcode)) {
			if (test_bit(SK_F_HW_ERR, &csk->flags))
				close_complete = 1;
			else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
			else
				close_complete = 1;
		}
		break;
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
		break;
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		close_complete = 1;
		break;
	}
	if (cmd) {
		memset(&l5_data, 0, sizeof(l5_data));

		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
				    &l5_data);
	} else if (close_complete) {
		ctx->timestamp = jiffies;
		cnic_close_conn(csk);
		cnic_cm_upcall(cp, csk, csk->state);
	}
}

static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (!cp->ctx_tbl)
		return;

	if (!netif_running(dev->netdev))
		return;

	cnic_bnx2x_delete_wait(dev, 0);

	cancel_delayed_work(&cp->delete_task);
	flush_workqueue(cnic_wq);

	if (atomic_read(&cp->iscsi_conn) != 0)
		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
			    atomic_read(&cp->iscsi_conn));
}

static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u32 port = CNIC_PORT(cp);

	cnic_init_bnx2x_mac(dev);
	cnic_bnx2x_set_tcp_timestamp(dev, 1);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);

	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
		DEF_MAX_DA_COUNT);

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL); 4174 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 4175 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS); 4176 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 4177 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2); 4178 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 4179 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER); 4180 4181 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid), 4182 DEF_MAX_CWND); 4183 return 0; 4184 } 4185 4186 static void cnic_delete_task(struct work_struct *work) 4187 { 4188 struct cnic_local *cp; 4189 struct cnic_dev *dev; 4190 u32 i; 4191 int need_resched = 0; 4192 4193 cp = container_of(work, struct cnic_local, delete_task.work); 4194 dev = cp->dev; 4195 4196 if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) { 4197 struct drv_ctl_info info; 4198 4199 cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI); 4200 4201 info.cmd = DRV_CTL_ISCSI_STOPPED_CMD; 4202 cp->ethdev->drv_ctl(dev->netdev, &info); 4203 } 4204 4205 for (i = 0; i < cp->max_cid_space; i++) { 4206 struct cnic_context *ctx = &cp->ctx_tbl[i]; 4207 int err; 4208 4209 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) || 4210 !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) 4211 continue; 4212 4213 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) { 4214 need_resched = 1; 4215 continue; 4216 } 4217 4218 if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) 4219 continue; 4220 4221 err = cnic_bnx2x_destroy_ramrod(dev, i); 4222 4223 cnic_free_bnx2x_conn_resc(dev, i); 4224 if (!err) { 4225 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) 4226 atomic_dec(&cp->iscsi_conn); 4227 4228 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); 4229 } 4230 } 4231 4232 if (need_resched) 4233 queue_delayed_work(cnic_wq, &cp->delete_task, 4234 msecs_to_jiffies(10)); 4235 4236 } 4237 4238 static int cnic_cm_open(struct cnic_dev *dev) 4239 { 4240 struct cnic_local *cp = dev->cnic_priv; 4241 int err; 4242 4243 err = cnic_cm_alloc_mem(dev); 4244 if (err) 4245 return err; 4246 4247 err = cp->start_cm(dev); 4248 4249 if (err) 4250 goto err_out; 4251 4252 INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task); 4253 4254 dev->cm_create = cnic_cm_create; 4255 dev->cm_destroy = cnic_cm_destroy; 4256 dev->cm_connect = cnic_cm_connect; 4257 dev->cm_abort = cnic_cm_abort; 4258 dev->cm_close = cnic_cm_close; 4259 dev->cm_select_dev = cnic_cm_select_dev; 4260 4261 cp->ulp_handle[CNIC_ULP_L4] = dev; 4262 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops); 4263 return 0; 4264 4265 err_out: 4266 cnic_cm_free_mem(dev); 4267 return err; 4268 } 4269 4270 static int cnic_cm_shutdown(struct cnic_dev *dev) 4271 { 4272 struct cnic_local *cp = dev->cnic_priv; 4273 int i; 4274 4275 if (!cp->csk_tbl) 4276 return 0; 4277 4278 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) { 4279 struct cnic_sock *csk = &cp->csk_tbl[i]; 4280 4281 clear_bit(SK_F_INUSE, &csk->flags); 4282 cnic_cm_cleanup(csk); 4283 } 4284 cnic_cm_free_mem(dev); 4285 4286 return 0; 4287 } 4288 4289 static void cnic_init_context(struct cnic_dev *dev, u32 cid) 4290 { 4291 u32 cid_addr; 4292 int i; 4293 4294 cid_addr = GET_CID_ADDR(cid); 4295 4296 for (i = 0; i < CTX_SIZE; i += 4) 4297 cnic_ctx_wr(dev, cid_addr, i, 0); 4298 } 4299 4300 static int cnic_setup_5709_context(struct cnic_dev *dev, int valid) 4301 { 4302 struct cnic_local *cp = dev->cnic_priv; 4303 int ret = 0, i; 4304 u32 valid_bit = valid ? 
BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0; 4305 4306 if (CHIP_NUM(cp) != CHIP_NUM_5709) 4307 return 0; 4308 4309 for (i = 0; i < cp->ctx_blks; i++) { 4310 int j; 4311 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk; 4312 u32 val; 4313 4314 memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE); 4315 4316 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0, 4317 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); 4318 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1, 4319 (u64) cp->ctx_arr[i].mapping >> 32); 4320 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx | 4321 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); 4322 for (j = 0; j < 10; j++) { 4323 4324 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL); 4325 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ)) 4326 break; 4327 udelay(5); 4328 } 4329 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) { 4330 ret = -EBUSY; 4331 break; 4332 } 4333 } 4334 return ret; 4335 } 4336 4337 static void cnic_free_irq(struct cnic_dev *dev) 4338 { 4339 struct cnic_local *cp = dev->cnic_priv; 4340 struct cnic_eth_dev *ethdev = cp->ethdev; 4341 4342 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 4343 cp->disable_int_sync(dev); 4344 tasklet_kill(&cp->cnic_irq_task); 4345 free_irq(ethdev->irq_arr[0].vector, dev); 4346 } 4347 } 4348 4349 static int cnic_request_irq(struct cnic_dev *dev) 4350 { 4351 struct cnic_local *cp = dev->cnic_priv; 4352 struct cnic_eth_dev *ethdev = cp->ethdev; 4353 int err; 4354 4355 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev); 4356 if (err) 4357 tasklet_disable(&cp->cnic_irq_task); 4358 4359 return err; 4360 } 4361 4362 static int cnic_init_bnx2_irq(struct cnic_dev *dev) 4363 { 4364 struct cnic_local *cp = dev->cnic_priv; 4365 struct cnic_eth_dev *ethdev = cp->ethdev; 4366 4367 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 4368 int err, i = 0; 4369 int sblk_num = cp->status_blk_num; 4370 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) + 4371 BNX2_HC_SB_CONFIG_1; 4372 4373 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT); 4374 4375 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8); 4376 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220); 4377 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220); 4378 4379 cp->last_status_idx = cp->status_blk.bnx2->status_idx; 4380 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix, 4381 (unsigned long) dev); 4382 err = cnic_request_irq(dev); 4383 if (err) 4384 return err; 4385 4386 while (cp->status_blk.bnx2->status_completion_producer_index && 4387 i < 10) { 4388 CNIC_WR(dev, BNX2_HC_COALESCE_NOW, 4389 1 << (11 + sblk_num)); 4390 udelay(10); 4391 i++; 4392 barrier(); 4393 } 4394 if (cp->status_blk.bnx2->status_completion_producer_index) { 4395 cnic_free_irq(dev); 4396 goto failed; 4397 } 4398 4399 } else { 4400 struct status_block *sblk = cp->status_blk.gen; 4401 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND); 4402 int i = 0; 4403 4404 while (sblk->status_completion_producer_index && i < 10) { 4405 CNIC_WR(dev, BNX2_HC_COMMAND, 4406 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); 4407 udelay(10); 4408 i++; 4409 barrier(); 4410 } 4411 if (sblk->status_completion_producer_index) 4412 goto failed; 4413 4414 } 4415 return 0; 4416 4417 failed: 4418 netdev_err(dev->netdev, "KCQ index not resetting to 0\n"); 4419 return -EBUSY; 4420 } 4421 4422 static void cnic_enable_bnx2_int(struct cnic_dev *dev) 4423 { 4424 struct cnic_local *cp = dev->cnic_priv; 4425 struct cnic_eth_dev *ethdev = cp->ethdev; 4426 4427 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) 4428 return; 4429 4430 
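	/* Re-arm the MSI-X vector: ack the interrupt up to the last
	 * status index we have seen.
	 */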
CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 4431 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); 4432 } 4433 4434 static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev) 4435 { 4436 struct cnic_local *cp = dev->cnic_priv; 4437 struct cnic_eth_dev *ethdev = cp->ethdev; 4438 4439 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) 4440 return; 4441 4442 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 4443 BNX2_PCICFG_INT_ACK_CMD_MASK_INT); 4444 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD); 4445 synchronize_irq(ethdev->irq_arr[0].vector); 4446 } 4447 4448 static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) 4449 { 4450 struct cnic_local *cp = dev->cnic_priv; 4451 struct cnic_eth_dev *ethdev = cp->ethdev; 4452 struct cnic_uio_dev *udev = cp->udev; 4453 u32 cid_addr, tx_cid, sb_id; 4454 u32 val, offset0, offset1, offset2, offset3; 4455 int i; 4456 struct tx_bd *txbd; 4457 dma_addr_t buf_map, ring_map = udev->l2_ring_map; 4458 struct status_block *s_blk = cp->status_blk.gen; 4459 4460 sb_id = cp->status_blk_num; 4461 tx_cid = 20; 4462 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2; 4463 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 4464 struct status_block_msix *sblk = cp->status_blk.bnx2; 4465 4466 tx_cid = TX_TSS_CID + sb_id - 1; 4467 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) | 4468 (TX_TSS_CID << 7)); 4469 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index; 4470 } 4471 cp->tx_cons = *cp->tx_cons_ptr; 4472 4473 cid_addr = GET_CID_ADDR(tx_cid); 4474 if (CHIP_NUM(cp) == CHIP_NUM_5709) { 4475 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40; 4476 4477 for (i = 0; i < PHY_CTX_SIZE; i += 4) 4478 cnic_ctx_wr(dev, cid_addr2, i, 0); 4479 4480 offset0 = BNX2_L2CTX_TYPE_XI; 4481 offset1 = BNX2_L2CTX_CMD_TYPE_XI; 4482 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; 4483 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; 4484 } else { 4485 cnic_init_context(dev, tx_cid); 4486 cnic_init_context(dev, tx_cid + 1); 4487 4488 offset0 = BNX2_L2CTX_TYPE; 4489 offset1 = BNX2_L2CTX_CMD_TYPE; 4490 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; 4491 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO; 4492 } 4493 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2; 4494 cnic_ctx_wr(dev, cid_addr, offset0, val); 4495 4496 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 4497 cnic_ctx_wr(dev, cid_addr, offset1, val); 4498 4499 txbd = udev->l2_ring; 4500 4501 buf_map = udev->l2_buf_map; 4502 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) { 4503 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32; 4504 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff; 4505 } 4506 val = (u64) ring_map >> 32; 4507 cnic_ctx_wr(dev, cid_addr, offset2, val); 4508 txbd->tx_bd_haddr_hi = val; 4509 4510 val = (u64) ring_map & 0xffffffff; 4511 cnic_ctx_wr(dev, cid_addr, offset3, val); 4512 txbd->tx_bd_haddr_lo = val; 4513 } 4514 4515 static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) 4516 { 4517 struct cnic_local *cp = dev->cnic_priv; 4518 struct cnic_eth_dev *ethdev = cp->ethdev; 4519 struct cnic_uio_dev *udev = cp->udev; 4520 u32 cid_addr, sb_id, val, coal_reg, coal_val; 4521 int i; 4522 struct rx_bd *rxbd; 4523 struct status_block *s_blk = cp->status_blk.gen; 4524 dma_addr_t ring_map = udev->l2_ring_map; 4525 4526 sb_id = cp->status_blk_num; 4527 cnic_init_context(dev, 2); 4528 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2; 4529 coal_reg = BNX2_HC_COMMAND; 4530 coal_val = CNIC_RD(dev, coal_reg); 4531 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 4532 struct status_block_msix *sblk = cp->status_blk.bnx2; 4533 4534 
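	/* With MSI-X, the RX consumer index and the coalesce-now trigger
	 * come from the per-vector status block instead of the default one.
	 */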
cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index; 4535 coal_reg = BNX2_HC_COALESCE_NOW; 4536 coal_val = 1 << (11 + sb_id); 4537 } 4538 i = 0; 4539 while (!(*cp->rx_cons_ptr != 0) && i < 10) { 4540 CNIC_WR(dev, coal_reg, coal_val); 4541 udelay(10); 4542 i++; 4543 barrier(); 4544 } 4545 cp->rx_cons = *cp->rx_cons_ptr; 4546 4547 cid_addr = GET_CID_ADDR(2); 4548 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | 4549 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8); 4550 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val); 4551 4552 if (sb_id == 0) 4553 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT; 4554 else 4555 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id); 4556 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); 4557 4558 rxbd = udev->l2_ring + BCM_PAGE_SIZE; 4559 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) { 4560 dma_addr_t buf_map; 4561 int n = (i % cp->l2_rx_ring_size) + 1; 4562 4563 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size); 4564 rxbd->rx_bd_len = cp->l2_single_buf_size; 4565 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; 4566 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; 4567 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; 4568 } 4569 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32; 4570 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); 4571 rxbd->rx_bd_haddr_hi = val; 4572 4573 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff; 4574 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); 4575 rxbd->rx_bd_haddr_lo = val; 4576 4577 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD); 4578 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2)); 4579 } 4580 4581 static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev) 4582 { 4583 struct kwqe *wqes[1], l2kwqe; 4584 4585 memset(&l2kwqe, 0, sizeof(l2kwqe)); 4586 wqes[0] = &l2kwqe; 4587 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) | 4588 (L2_KWQE_OPCODE_VALUE_FLUSH << 4589 KWQE_OPCODE_SHIFT) | 2; 4590 dev->submit_kwqes(dev, wqes, 1); 4591 } 4592 4593 static void cnic_set_bnx2_mac(struct cnic_dev *dev) 4594 { 4595 struct cnic_local *cp = dev->cnic_priv; 4596 u32 val; 4597 4598 val = cp->func << 2; 4599 4600 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val); 4601 4602 val = cnic_reg_rd_ind(dev, cp->shmem_base + 4603 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER); 4604 dev->mac_addr[0] = (u8) (val >> 8); 4605 dev->mac_addr[1] = (u8) val; 4606 4607 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val); 4608 4609 val = cnic_reg_rd_ind(dev, cp->shmem_base + 4610 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER); 4611 dev->mac_addr[2] = (u8) (val >> 24); 4612 dev->mac_addr[3] = (u8) (val >> 16); 4613 dev->mac_addr[4] = (u8) (val >> 8); 4614 dev->mac_addr[5] = (u8) val; 4615 4616 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val); 4617 4618 val = 4 | BNX2_RPM_SORT_USER2_BC_EN; 4619 if (CHIP_NUM(cp) != CHIP_NUM_5709) 4620 val |= BNX2_RPM_SORT_USER2_PROM_VLAN; 4621 4622 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0); 4623 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val); 4624 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA); 4625 } 4626 4627 static int cnic_start_bnx2_hw(struct cnic_dev *dev) 4628 { 4629 struct cnic_local *cp = dev->cnic_priv; 4630 struct cnic_eth_dev *ethdev = cp->ethdev; 4631 struct status_block *sblk = cp->status_blk.gen; 4632 u32 val, kcq_cid_addr, kwq_cid_addr; 4633 int err; 4634 4635 cnic_set_bnx2_mac(dev); 4636 4637 val = CNIC_RD(dev, BNX2_MQ_CONFIG); 4638 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; 4639 if (BCM_PAGE_BITS > 12) 4640 val |= (12 - 8) << 4; 4641 else 4642 val |= (BCM_PAGE_BITS - 8) << 4; 4643 
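	/* Commit the kernel bypass block size computed above (host page
	 * size, capped at 4K) to the MQ configuration.
	 */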
	CNIC_WR(dev, BNX2_MQ_CONFIG, val);

	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);

	err = cnic_setup_5709_context(dev, 1);
	if (err)
		return err;

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->max_kwq_idx = MAX_KWQ_IDX;
	cp->kwq_prod_idx = 0;
	cp->kwq_con_idx = 0;
	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
	else
		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;

	/* Initialize the kernel work queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kwq_info.pgtbl_map;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->kcq1.sw_prod_idx = 0;
	cp->kcq1.hw_prod_idx_ptr =
		&sblk->status_completion_producer_index;

	cp->kcq1.status_idx_ptr = &sblk->status_idx;

	/* Initialize the kernel complete queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kcq1.dma.pgtbl_map;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->int_num = 0;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *msblk = cp->status_blk.bnx2;
		u32 sb_id = cp->status_blk_num;
		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);

		cp->kcq1.hw_prod_idx_ptr =
			&msblk->status_completion_producer_index;
		cp->kcq1.status_idx_ptr = &msblk->status_idx;
		cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
	}

	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts.
	 */
	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

	/* Enable Command Scheduler notification when we write to either
	 * the Send Queue or Receive Queue producer indexes of the kernel
	 * bypass contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

	/* Notify COM when the driver posts an application buffer. */
	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

	/* Set the CP and COM doorbells.  These two processors poll the
	 * doorbell for a non-zero value before running.  This must be done
	 * after setting up the kernel queue contexts. */
	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	err = cnic_init_bnx2_irq(dev);
	if (err) {
		netdev_err(dev->netdev, "cnic_init_irq failed\n");
		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
		return err;
	}

	return 0;
}

static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_offset = ethdev->ctx_tbl_offset;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		struct cnic_ctx *ctx = &cp->ctx_arr[i];
		dma_addr_t map = ctx->mapping;

		if (cp->ctx_align) {
			unsigned long mask = cp->ctx_align - 1;

			map = (map + mask) & ~mask;
		}

		cnic_ctx_tbl_wr(dev, start_offset + i, map);
	}
}

static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0;

	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
		     (unsigned long) dev);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
		err = cnic_request_irq(dev);

	return err;
}

static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
						u16 sb_id, u8 sb_index,
						u8 disable)
{

	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = CNIC_RD16(dev, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
		HC_INDEX_DATA_HC_ENABLED);
	CNIC_WR16(dev, addr, flags);
}

static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 sb_id = cp->status_blk_num;

	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
		 offsetof(struct hc_status_block_data_e1x, index_data) +
		 sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
		 offsetof(struct hc_index_data, timeout), 64 / 4);
	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
}

static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}

static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
	dma_addr_t buf_map, ring_map =
udev->l2_ring_map; 4838 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; 4839 int i; 4840 u32 cli = cp->ethdev->iscsi_l2_client_id; 4841 u32 val; 4842 4843 memset(txbd, 0, BCM_PAGE_SIZE); 4844 4845 buf_map = udev->l2_buf_map; 4846 for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) { 4847 struct eth_tx_start_bd *start_bd = &txbd->start_bd; 4848 struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd); 4849 4850 start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32); 4851 start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); 4852 reg_bd->addr_hi = start_bd->addr_hi; 4853 reg_bd->addr_lo = start_bd->addr_lo + 0x10; 4854 start_bd->nbytes = cpu_to_le16(0x10); 4855 start_bd->nbd = cpu_to_le16(3); 4856 start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 4857 start_bd->general_data = (UNICAST_ADDRESS << 4858 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); 4859 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 4860 4861 } 4862 4863 val = (u64) ring_map >> 32; 4864 txbd->next_bd.addr_hi = cpu_to_le32(val); 4865 4866 data->tx.tx_bd_page_base.hi = cpu_to_le32(val); 4867 4868 val = (u64) ring_map & 0xffffffff; 4869 txbd->next_bd.addr_lo = cpu_to_le32(val); 4870 4871 data->tx.tx_bd_page_base.lo = cpu_to_le32(val); 4872 4873 /* Other ramrod params */ 4874 data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS; 4875 data->tx.tx_status_block_id = BNX2X_DEF_SB_ID; 4876 4877 /* reset xstorm per client statistics */ 4878 if (cli < MAX_STAT_COUNTER_ID) { 4879 data->general.statistics_zero_flg = 1; 4880 data->general.statistics_en_flg = 1; 4881 data->general.statistics_counter_id = cli; 4882 } 4883 4884 cp->tx_cons_ptr = 4885 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS]; 4886 } 4887 4888 static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, 4889 struct client_init_ramrod_data *data) 4890 { 4891 struct cnic_local *cp = dev->cnic_priv; 4892 struct cnic_uio_dev *udev = cp->udev; 4893 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring + 4894 BCM_PAGE_SIZE); 4895 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) 4896 (udev->l2_ring + (2 * BCM_PAGE_SIZE)); 4897 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; 4898 int i; 4899 u32 cli = cp->ethdev->iscsi_l2_client_id; 4900 int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); 4901 u32 val; 4902 dma_addr_t ring_map = udev->l2_ring_map; 4903 4904 /* General data */ 4905 data->general.client_id = cli; 4906 data->general.activate_flg = 1; 4907 data->general.sp_client_id = cli; 4908 data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14); 4909 data->general.func_id = cp->pfid; 4910 4911 for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) { 4912 dma_addr_t buf_map; 4913 int n = (i % cp->l2_rx_ring_size) + 1; 4914 4915 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size); 4916 rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32); 4917 rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); 4918 } 4919 4920 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32; 4921 rxbd->addr_hi = cpu_to_le32(val); 4922 data->rx.bd_page_base.hi = cpu_to_le32(val); 4923 4924 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff; 4925 rxbd->addr_lo = cpu_to_le32(val); 4926 data->rx.bd_page_base.lo = cpu_to_le32(val); 4927 4928 rxcqe += BNX2X_MAX_RCQ_DESC_CNT; 4929 val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32; 4930 rxcqe->addr_hi = cpu_to_le32(val); 4931 data->rx.cqe_page_base.hi = cpu_to_le32(val); 4932 4933 val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff; 4934 rxcqe->addr_lo = cpu_to_le32(val); 4935 
data->rx.cqe_page_base.lo = cpu_to_le32(val); 4936 4937 /* Other ramrod params */ 4938 data->rx.client_qzone_id = cl_qzone_id; 4939 data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS; 4940 data->rx.status_block_id = BNX2X_DEF_SB_ID; 4941 4942 data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT; 4943 4944 data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size); 4945 data->rx.outer_vlan_removal_enable_flg = 1; 4946 data->rx.silent_vlan_removal_flg = 1; 4947 data->rx.silent_vlan_value = 0; 4948 data->rx.silent_vlan_mask = 0xffff; 4949 4950 cp->rx_cons_ptr = 4951 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS]; 4952 cp->rx_cons = *cp->rx_cons_ptr; 4953 } 4954 4955 static void cnic_init_bnx2x_kcq(struct cnic_dev *dev) 4956 { 4957 struct cnic_local *cp = dev->cnic_priv; 4958 u32 pfid = cp->pfid; 4959 4960 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM + 4961 CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0); 4962 cp->kcq1.sw_prod_idx = 0; 4963 4964 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 4965 struct host_hc_status_block_e2 *sb = cp->status_blk.gen; 4966 4967 cp->kcq1.hw_prod_idx_ptr = 4968 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]; 4969 cp->kcq1.status_idx_ptr = 4970 &sb->sb.running_index[SM_RX_ID]; 4971 } else { 4972 struct host_hc_status_block_e1x *sb = cp->status_blk.gen; 4973 4974 cp->kcq1.hw_prod_idx_ptr = 4975 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]; 4976 cp->kcq1.status_idx_ptr = 4977 &sb->sb.running_index[SM_RX_ID]; 4978 } 4979 4980 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 4981 struct host_hc_status_block_e2 *sb = cp->status_blk.gen; 4982 4983 cp->kcq2.io_addr = BAR_USTRORM_INTMEM + 4984 USTORM_FCOE_EQ_PROD_OFFSET(pfid); 4985 cp->kcq2.sw_prod_idx = 0; 4986 cp->kcq2.hw_prod_idx_ptr = 4987 &sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS]; 4988 cp->kcq2.status_idx_ptr = 4989 &sb->sb.running_index[SM_RX_ID]; 4990 } 4991 } 4992 4993 static int cnic_start_bnx2x_hw(struct cnic_dev *dev) 4994 { 4995 struct cnic_local *cp = dev->cnic_priv; 4996 struct cnic_eth_dev *ethdev = cp->ethdev; 4997 int func = CNIC_FUNC(cp), ret; 4998 u32 pfid; 4999 5000 dev->stats_addr = ethdev->addr_drv_info_to_mcp; 5001 cp->port_mode = CHIP_PORT_MODE_NONE; 5002 5003 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 5004 u32 val; 5005 5006 pci_read_config_dword(dev->pcidev, PCICFG_ME_REGISTER, &val); 5007 cp->func = (u8) ((val & ME_REG_ABS_PF_NUM) >> 5008 ME_REG_ABS_PF_NUM_SHIFT); 5009 func = CNIC_FUNC(cp); 5010 5011 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR); 5012 if (!(val & 1)) 5013 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN); 5014 else 5015 val = (val >> 1) & 1; 5016 5017 if (val) { 5018 cp->port_mode = CHIP_4_PORT_MODE; 5019 cp->pfid = func >> 1; 5020 } else { 5021 cp->port_mode = CHIP_2_PORT_MODE; 5022 cp->pfid = func & 0x6; 5023 } 5024 } else { 5025 cp->pfid = func; 5026 } 5027 pfid = cp->pfid; 5028 5029 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, 5030 cp->iscsi_start_cid, 0); 5031 5032 if (ret) 5033 return -ENOMEM; 5034 5035 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 5036 ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn, 5037 cp->fcoe_start_cid, 0); 5038 5039 if (ret) 5040 return -ENOMEM; 5041 } 5042 5043 cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2; 5044 5045 cnic_init_bnx2x_kcq(dev); 5046 5047 /* Only 1 EQ */ 5048 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX); 5049 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 5050 CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0); 5051 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 5052 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0), 5053 cp->kcq1.dma.pg_map_arr[1] 
& 0xffffffff); 5054 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 5055 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4, 5056 (u64) cp->kcq1.dma.pg_map_arr[1] >> 32); 5057 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 5058 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0), 5059 cp->kcq1.dma.pg_map_arr[0] & 0xffffffff); 5060 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 5061 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4, 5062 (u64) cp->kcq1.dma.pg_map_arr[0] >> 32); 5063 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 5064 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1); 5065 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 5066 CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num); 5067 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 5068 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0), 5069 HC_INDEX_ISCSI_EQ_CONS); 5070 5071 CNIC_WR(dev, BAR_USTRORM_INTMEM + 5072 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid), 5073 cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff); 5074 CNIC_WR(dev, BAR_USTRORM_INTMEM + 5075 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4, 5076 (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32); 5077 5078 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 5079 TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF); 5080 5081 cnic_setup_bnx2x_context(dev); 5082 5083 ret = cnic_init_bnx2x_irq(dev); 5084 if (ret) 5085 return ret; 5086 5087 return 0; 5088 } 5089 5090 static void cnic_init_rings(struct cnic_dev *dev) 5091 { 5092 struct cnic_local *cp = dev->cnic_priv; 5093 struct cnic_uio_dev *udev = cp->udev; 5094 5095 if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) 5096 return; 5097 5098 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 5099 cnic_init_bnx2_tx_ring(dev); 5100 cnic_init_bnx2_rx_ring(dev); 5101 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); 5102 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 5103 u32 cli = cp->ethdev->iscsi_l2_client_id; 5104 u32 cid = cp->ethdev->iscsi_l2_cid; 5105 u32 cl_qzone_id; 5106 struct client_init_ramrod_data *data; 5107 union l5cm_specific_data l5_data; 5108 struct ustorm_eth_rx_producers rx_prods = {0}; 5109 u32 off, i, *cid_ptr; 5110 5111 rx_prods.bd_prod = 0; 5112 rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT; 5113 barrier(); 5114 5115 cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); 5116 5117 off = BAR_USTRORM_INTMEM + 5118 (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? 
5119 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) : 5120 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli)); 5121 5122 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++) 5123 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]); 5124 5125 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 5126 5127 data = udev->l2_buf; 5128 cid_ptr = udev->l2_buf + 12; 5129 5130 memset(data, 0, sizeof(*data)); 5131 5132 cnic_init_bnx2x_tx_ring(dev, data); 5133 cnic_init_bnx2x_rx_ring(dev, data); 5134 5135 l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff; 5136 l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32; 5137 5138 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); 5139 5140 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP, 5141 cid, ETH_CONNECTION_TYPE, &l5_data); 5142 5143 i = 0; 5144 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && 5145 ++i < 10) 5146 msleep(1); 5147 5148 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 5149 netdev_err(dev->netdev, 5150 "iSCSI CLIENT_SETUP did not complete\n"); 5151 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1); 5152 cnic_ring_ctl(dev, cid, cli, 1); 5153 *cid_ptr = cid; 5154 } 5155 } 5156 5157 static void cnic_shutdown_rings(struct cnic_dev *dev) 5158 { 5159 struct cnic_local *cp = dev->cnic_priv; 5160 struct cnic_uio_dev *udev = cp->udev; 5161 void *rx_ring; 5162 5163 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) 5164 return; 5165 5166 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 5167 cnic_shutdown_bnx2_rx_ring(dev); 5168 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 5169 u32 cli = cp->ethdev->iscsi_l2_client_id; 5170 u32 cid = cp->ethdev->iscsi_l2_cid; 5171 union l5cm_specific_data l5_data; 5172 int i; 5173 5174 cnic_ring_ctl(dev, cid, cli, 0); 5175 5176 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 5177 5178 l5_data.phy_address.lo = cli; 5179 l5_data.phy_address.hi = 0; 5180 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT, 5181 cid, ETH_CONNECTION_TYPE, &l5_data); 5182 i = 0; 5183 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && 5184 ++i < 10) 5185 msleep(1); 5186 5187 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 5188 netdev_err(dev->netdev, 5189 "iSCSI CLIENT_HALT did not complete\n"); 5190 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1); 5191 5192 memset(&l5_data, 0, sizeof(l5_data)); 5193 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL, 5194 cid, NONE_CONNECTION_TYPE, &l5_data); 5195 msleep(10); 5196 } 5197 clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); 5198 rx_ring = udev->l2_ring + BCM_PAGE_SIZE; 5199 memset(rx_ring, 0, BCM_PAGE_SIZE); 5200 } 5201 5202 static int cnic_register_netdev(struct cnic_dev *dev) 5203 { 5204 struct cnic_local *cp = dev->cnic_priv; 5205 struct cnic_eth_dev *ethdev = cp->ethdev; 5206 int err; 5207 5208 if (!ethdev) 5209 return -ENODEV; 5210 5211 if (ethdev->drv_state & CNIC_DRV_STATE_REGD) 5212 return 0; 5213 5214 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev); 5215 if (err) 5216 netdev_err(dev->netdev, "register_cnic failed\n"); 5217 5218 return err; 5219 } 5220 5221 static void cnic_unregister_netdev(struct cnic_dev *dev) 5222 { 5223 struct cnic_local *cp = dev->cnic_priv; 5224 struct cnic_eth_dev *ethdev = cp->ethdev; 5225 5226 if (!ethdev) 5227 return; 5228 5229 ethdev->drv_unregister_cnic(dev->netdev); 5230 } 5231 5232 static int cnic_start_hw(struct cnic_dev *dev) 5233 { 5234 struct cnic_local *cp = dev->cnic_priv; 5235 struct cnic_eth_dev *ethdev = 
cp->ethdev; 5236 int err; 5237 5238 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) 5239 return -EALREADY; 5240 5241 dev->regview = ethdev->io_base; 5242 pci_dev_get(dev->pcidev); 5243 cp->func = PCI_FUNC(dev->pcidev->devfn); 5244 cp->status_blk.gen = ethdev->irq_arr[0].status_blk; 5245 cp->status_blk_num = ethdev->irq_arr[0].status_blk_num; 5246 5247 err = cp->alloc_resc(dev); 5248 if (err) { 5249 netdev_err(dev->netdev, "allocate resource failure\n"); 5250 goto err1; 5251 } 5252 5253 err = cp->start_hw(dev); 5254 if (err) 5255 goto err1; 5256 5257 err = cnic_cm_open(dev); 5258 if (err) 5259 goto err1; 5260 5261 set_bit(CNIC_F_CNIC_UP, &dev->flags); 5262 5263 cp->enable_int(dev); 5264 5265 return 0; 5266 5267 err1: 5268 cp->free_resc(dev); 5269 pci_dev_put(dev->pcidev); 5270 return err; 5271 } 5272 5273 static void cnic_stop_bnx2_hw(struct cnic_dev *dev) 5274 { 5275 cnic_disable_bnx2_int_sync(dev); 5276 5277 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0); 5278 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0); 5279 5280 cnic_init_context(dev, KWQ_CID); 5281 cnic_init_context(dev, KCQ_CID); 5282 5283 cnic_setup_5709_context(dev, 0); 5284 cnic_free_irq(dev); 5285 5286 cnic_free_resc(dev); 5287 } 5288 5289 5290 static void cnic_stop_bnx2x_hw(struct cnic_dev *dev) 5291 { 5292 struct cnic_local *cp = dev->cnic_priv; 5293 5294 cnic_free_irq(dev); 5295 *cp->kcq1.hw_prod_idx_ptr = 0; 5296 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 5297 CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0); 5298 CNIC_WR16(dev, cp->kcq1.io_addr, 0); 5299 cnic_free_resc(dev); 5300 } 5301 5302 static void cnic_stop_hw(struct cnic_dev *dev) 5303 { 5304 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { 5305 struct cnic_local *cp = dev->cnic_priv; 5306 int i = 0; 5307 5308 /* Need to wait for the ring shutdown event to complete 5309 * before clearing the CNIC_UP flag. 
5310 */ 5311 while (cp->udev->uio_dev != -1 && i < 15) { 5312 msleep(100); 5313 i++; 5314 } 5315 cnic_shutdown_rings(dev); 5316 cp->stop_cm(dev); 5317 clear_bit(CNIC_F_CNIC_UP, &dev->flags); 5318 RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL); 5319 synchronize_rcu(); 5320 cnic_cm_shutdown(dev); 5321 cp->stop_hw(dev); 5322 pci_dev_put(dev->pcidev); 5323 } 5324 } 5325 5326 static void cnic_free_dev(struct cnic_dev *dev) 5327 { 5328 int i = 0; 5329 5330 while ((atomic_read(&dev->ref_count) != 0) && i < 10) { 5331 msleep(100); 5332 i++; 5333 } 5334 if (atomic_read(&dev->ref_count) != 0) 5335 netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n"); 5336 5337 netdev_info(dev->netdev, "Removed CNIC device\n"); 5338 dev_put(dev->netdev); 5339 kfree(dev); 5340 } 5341 5342 static struct cnic_dev *cnic_alloc_dev(struct net_device *dev, 5343 struct pci_dev *pdev) 5344 { 5345 struct cnic_dev *cdev; 5346 struct cnic_local *cp; 5347 int alloc_size; 5348 5349 alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local); 5350 5351 cdev = kzalloc(alloc_size , GFP_KERNEL); 5352 if (cdev == NULL) { 5353 netdev_err(dev, "allocate dev struct failure\n"); 5354 return NULL; 5355 } 5356 5357 cdev->netdev = dev; 5358 cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev); 5359 cdev->register_device = cnic_register_device; 5360 cdev->unregister_device = cnic_unregister_device; 5361 cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv; 5362 5363 cp = cdev->cnic_priv; 5364 cp->dev = cdev; 5365 cp->l2_single_buf_size = 0x400; 5366 cp->l2_rx_ring_size = 3; 5367 5368 spin_lock_init(&cp->cnic_ulp_lock); 5369 5370 netdev_info(dev, "Added CNIC device\n"); 5371 5372 return cdev; 5373 } 5374 5375 static struct cnic_dev *init_bnx2_cnic(struct net_device *dev) 5376 { 5377 struct pci_dev *pdev; 5378 struct cnic_dev *cdev; 5379 struct cnic_local *cp; 5380 struct cnic_eth_dev *ethdev = NULL; 5381 struct cnic_eth_dev *(*probe)(struct net_device *) = NULL; 5382 5383 probe = symbol_get(bnx2_cnic_probe); 5384 if (probe) { 5385 ethdev = (*probe)(dev); 5386 symbol_put(bnx2_cnic_probe); 5387 } 5388 if (!ethdev) 5389 return NULL; 5390 5391 pdev = ethdev->pdev; 5392 if (!pdev) 5393 return NULL; 5394 5395 dev_hold(dev); 5396 pci_dev_get(pdev); 5397 if ((pdev->device == PCI_DEVICE_ID_NX2_5709 || 5398 pdev->device == PCI_DEVICE_ID_NX2_5709S) && 5399 (pdev->revision < 0x10)) { 5400 pci_dev_put(pdev); 5401 goto cnic_err; 5402 } 5403 pci_dev_put(pdev); 5404 5405 cdev = cnic_alloc_dev(dev, pdev); 5406 if (cdev == NULL) 5407 goto cnic_err; 5408 5409 set_bit(CNIC_F_BNX2_CLASS, &cdev->flags); 5410 cdev->submit_kwqes = cnic_submit_bnx2_kwqes; 5411 5412 cp = cdev->cnic_priv; 5413 cp->ethdev = ethdev; 5414 cdev->pcidev = pdev; 5415 cp->chip_id = ethdev->chip_id; 5416 5417 cdev->max_iscsi_conn = ethdev->max_iscsi_conn; 5418 5419 cp->cnic_ops = &cnic_bnx2_ops; 5420 cp->start_hw = cnic_start_bnx2_hw; 5421 cp->stop_hw = cnic_stop_bnx2_hw; 5422 cp->setup_pgtbl = cnic_setup_page_tbl; 5423 cp->alloc_resc = cnic_alloc_bnx2_resc; 5424 cp->free_resc = cnic_free_resc; 5425 cp->start_cm = cnic_cm_init_bnx2_hw; 5426 cp->stop_cm = cnic_cm_stop_bnx2_hw; 5427 cp->enable_int = cnic_enable_bnx2_int; 5428 cp->disable_int_sync = cnic_disable_bnx2_int_sync; 5429 cp->close_conn = cnic_close_bnx2_conn; 5430 return cdev; 5431 5432 cnic_err: 5433 dev_put(dev); 5434 return NULL; 5435 } 5436 5437 static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) 5438 { 5439 struct pci_dev *pdev; 5440 struct cnic_dev *cdev; 5441 struct cnic_local *cp; 5442 struct 
cnic_eth_dev *ethdev = NULL; 5443 struct cnic_eth_dev *(*probe)(struct net_device *) = NULL; 5444 5445 probe = symbol_get(bnx2x_cnic_probe); 5446 if (probe) { 5447 ethdev = (*probe)(dev); 5448 symbol_put(bnx2x_cnic_probe); 5449 } 5450 if (!ethdev) 5451 return NULL; 5452 5453 pdev = ethdev->pdev; 5454 if (!pdev) 5455 return NULL; 5456 5457 dev_hold(dev); 5458 cdev = cnic_alloc_dev(dev, pdev); 5459 if (cdev == NULL) { 5460 dev_put(dev); 5461 return NULL; 5462 } 5463 5464 set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags); 5465 cdev->submit_kwqes = cnic_submit_bnx2x_kwqes; 5466 5467 cp = cdev->cnic_priv; 5468 cp->ethdev = ethdev; 5469 cdev->pcidev = pdev; 5470 cp->chip_id = ethdev->chip_id; 5471 5472 cdev->stats_addr = ethdev->addr_drv_info_to_mcp; 5473 5474 if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)) 5475 cdev->max_iscsi_conn = ethdev->max_iscsi_conn; 5476 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) && 5477 !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE)) 5478 cdev->max_fcoe_conn = ethdev->max_fcoe_conn; 5479 5480 if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS) 5481 cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS; 5482 5483 memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6); 5484 5485 cp->cnic_ops = &cnic_bnx2x_ops; 5486 cp->start_hw = cnic_start_bnx2x_hw; 5487 cp->stop_hw = cnic_stop_bnx2x_hw; 5488 cp->setup_pgtbl = cnic_setup_page_tbl_le; 5489 cp->alloc_resc = cnic_alloc_bnx2x_resc; 5490 cp->free_resc = cnic_free_resc; 5491 cp->start_cm = cnic_cm_init_bnx2x_hw; 5492 cp->stop_cm = cnic_cm_stop_bnx2x_hw; 5493 cp->enable_int = cnic_enable_bnx2x_int; 5494 cp->disable_int_sync = cnic_disable_bnx2x_int_sync; 5495 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) 5496 cp->ack_int = cnic_ack_bnx2x_e2_msix; 5497 else 5498 cp->ack_int = cnic_ack_bnx2x_msix; 5499 cp->close_conn = cnic_close_bnx2x_conn; 5500 return cdev; 5501 } 5502 5503 static struct cnic_dev *is_cnic_dev(struct net_device *dev) 5504 { 5505 struct ethtool_drvinfo drvinfo; 5506 struct cnic_dev *cdev = NULL; 5507 5508 if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) { 5509 memset(&drvinfo, 0, sizeof(drvinfo)); 5510 dev->ethtool_ops->get_drvinfo(dev, &drvinfo); 5511 5512 if (!strcmp(drvinfo.driver, "bnx2")) 5513 cdev = init_bnx2_cnic(dev); 5514 if (!strcmp(drvinfo.driver, "bnx2x")) 5515 cdev = init_bnx2x_cnic(dev); 5516 if (cdev) { 5517 write_lock(&cnic_dev_lock); 5518 list_add(&cdev->list, &cnic_dev_list); 5519 write_unlock(&cnic_dev_lock); 5520 } 5521 } 5522 return cdev; 5523 } 5524 5525 static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event, 5526 u16 vlan_id) 5527 { 5528 int if_type; 5529 5530 rcu_read_lock(); 5531 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 5532 struct cnic_ulp_ops *ulp_ops; 5533 void *ctx; 5534 5535 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); 5536 if (!ulp_ops || !ulp_ops->indicate_netevent) 5537 continue; 5538 5539 ctx = cp->ulp_handle[if_type]; 5540 5541 ulp_ops->indicate_netevent(ctx, event, vlan_id); 5542 } 5543 rcu_read_unlock(); 5544 } 5545 5546 /* netdev event handler */ 5547 static int cnic_netdev_event(struct notifier_block *this, unsigned long event, 5548 void *ptr) 5549 { 5550 struct net_device *netdev = ptr; 5551 struct cnic_dev *dev; 5552 int new_dev = 0; 5553 5554 dev = cnic_from_netdev(netdev); 5555 5556 if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) { 5557 /* Check for the hot-plug device */ 5558 dev = is_cnic_dev(netdev); 5559 if (dev) { 5560 new_dev = 1; 5561 cnic_hold(dev); 5562 } 5563 } 5564 if (dev) { 5565 struct cnic_local *cp = dev->cnic_priv; 
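		/* Bring the ULPs up on a newly discovered device;
		 * tear them down when the netdev is unregistering.
		 */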
5566 5567 if (new_dev) 5568 cnic_ulp_init(dev); 5569 else if (event == NETDEV_UNREGISTER) 5570 cnic_ulp_exit(dev); 5571 5572 if (event == NETDEV_UP || (new_dev && netif_running(netdev))) { 5573 if (cnic_register_netdev(dev) != 0) { 5574 cnic_put(dev); 5575 goto done; 5576 } 5577 if (!cnic_start_hw(dev)) 5578 cnic_ulp_start(dev); 5579 } 5580 5581 cnic_rcv_netevent(cp, event, 0); 5582 5583 if (event == NETDEV_GOING_DOWN) { 5584 cnic_ulp_stop(dev); 5585 cnic_stop_hw(dev); 5586 cnic_unregister_netdev(dev); 5587 } else if (event == NETDEV_UNREGISTER) { 5588 write_lock(&cnic_dev_lock); 5589 list_del_init(&dev->list); 5590 write_unlock(&cnic_dev_lock); 5591 5592 cnic_put(dev); 5593 cnic_free_dev(dev); 5594 goto done; 5595 } 5596 cnic_put(dev); 5597 } else { 5598 struct net_device *realdev; 5599 u16 vid; 5600 5601 vid = cnic_get_vlan(netdev, &realdev); 5602 if (realdev) { 5603 dev = cnic_from_netdev(realdev); 5604 if (dev) { 5605 vid |= VLAN_TAG_PRESENT; 5606 cnic_rcv_netevent(dev->cnic_priv, event, vid); 5607 cnic_put(dev); 5608 } 5609 } 5610 } 5611 done: 5612 return NOTIFY_DONE; 5613 } 5614 5615 static struct notifier_block cnic_netdev_notifier = { 5616 .notifier_call = cnic_netdev_event 5617 }; 5618 5619 static void cnic_release(void) 5620 { 5621 struct cnic_dev *dev; 5622 struct cnic_uio_dev *udev; 5623 5624 while (!list_empty(&cnic_dev_list)) { 5625 dev = list_entry(cnic_dev_list.next, struct cnic_dev, list); 5626 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { 5627 cnic_ulp_stop(dev); 5628 cnic_stop_hw(dev); 5629 } 5630 5631 cnic_ulp_exit(dev); 5632 cnic_unregister_netdev(dev); 5633 list_del_init(&dev->list); 5634 cnic_free_dev(dev); 5635 } 5636 while (!list_empty(&cnic_udev_list)) { 5637 udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev, 5638 list); 5639 cnic_free_uio(udev); 5640 } 5641 } 5642 5643 static int __init cnic_init(void) 5644 { 5645 int rc = 0; 5646 5647 pr_info("%s", version); 5648 5649 rc = register_netdevice_notifier(&cnic_netdev_notifier); 5650 if (rc) { 5651 cnic_release(); 5652 return rc; 5653 } 5654 5655 cnic_wq = create_singlethread_workqueue("cnic_wq"); 5656 if (!cnic_wq) { 5657 cnic_release(); 5658 unregister_netdevice_notifier(&cnic_netdev_notifier); 5659 return -ENOMEM; 5660 } 5661 5662 return 0; 5663 } 5664 5665 static void __exit cnic_exit(void) 5666 { 5667 unregister_netdevice_notifier(&cnic_netdev_notifier); 5668 cnic_release(); 5669 destroy_workqueue(cnic_wq); 5670 } 5671 5672 module_init(cnic_init); 5673 module_exit(cnic_exit); 5674
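/* Illustrative sketch (not part of the driver): how an upper layer
 * protocol module, in the spirit of bnx2i/bnx2fc, would typically attach
 * to cnic.  The my_ulp_* names and the callback bodies are hypothetical;
 * cnic_register_driver(), cnic_unregister_driver(), the per-device
 * register_device() method and CNIC_ULP_ISCSI come from cnic_if.h.
 *
 *	static void my_ulp_init(struct cnic_dev *cdev)
 *	{
 *		// Claim each cnic device for this ULP type and pass a
 *		// private context that later callbacks will receive.
 *		cdev->register_device(cdev, CNIC_ULP_ISCSI, my_ulp_ctx);
 *	}
 *
 *	static struct cnic_ulp_ops my_ulp_ops = {
 *		.cnic_init	= my_ulp_init,
 *		.cnic_exit	= my_ulp_exit,
 *		.cnic_start	= my_ulp_start,
 *		.cnic_stop	= my_ulp_stop,
 *		.indicate_kcqes	= my_ulp_indicate_kcqes,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	// module_init():  cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
 *	// module_exit():  cnic_unregister_driver(CNIC_ULP_ISCSI);
 */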