1 /* cnic.c: Broadcom CNIC core network driver. 2 * 3 * Copyright (c) 2006-2012 Broadcom Corporation 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation. 8 * 9 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com) 10 * Modified and maintained by: Michael Chan <mchan@broadcom.com> 11 */ 12 13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 15 #include <linux/module.h> 16 17 #include <linux/kernel.h> 18 #include <linux/errno.h> 19 #include <linux/list.h> 20 #include <linux/slab.h> 21 #include <linux/pci.h> 22 #include <linux/init.h> 23 #include <linux/netdevice.h> 24 #include <linux/uio_driver.h> 25 #include <linux/in.h> 26 #include <linux/dma-mapping.h> 27 #include <linux/delay.h> 28 #include <linux/ethtool.h> 29 #include <linux/if_vlan.h> 30 #include <linux/prefetch.h> 31 #include <linux/random.h> 32 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 33 #define BCM_VLAN 1 34 #endif 35 #include <net/ip.h> 36 #include <net/tcp.h> 37 #include <net/route.h> 38 #include <net/ipv6.h> 39 #include <net/ip6_route.h> 40 #include <net/ip6_checksum.h> 41 #include <scsi/iscsi_if.h> 42 43 #include "cnic_if.h" 44 #include "bnx2.h" 45 #include "bnx2x/bnx2x_reg.h" 46 #include "bnx2x/bnx2x_fw_defs.h" 47 #include "bnx2x/bnx2x_hsi.h" 48 #include "../../../scsi/bnx2i/57xx_iscsi_constants.h" 49 #include "../../../scsi/bnx2i/57xx_iscsi_hsi.h" 50 #include "../../../scsi/bnx2fc/bnx2fc_constants.h" 51 #include "cnic.h" 52 #include "cnic_defs.h" 53 54 #define DRV_MODULE_NAME "cnic" 55 56 static char version[] __devinitdata = 57 "Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n"; 58 59 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) " 60 "Chen (zongxi@broadcom.com"); 61 MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver"); 62 MODULE_LICENSE("GPL"); 63 MODULE_VERSION(CNIC_MODULE_VERSION); 64 65 /* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */ 66 static LIST_HEAD(cnic_dev_list); 67 static LIST_HEAD(cnic_udev_list); 68 static DEFINE_RWLOCK(cnic_dev_lock); 69 static DEFINE_MUTEX(cnic_lock); 70 71 static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE]; 72 73 /* helper function, assuming cnic_lock is held */ 74 static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type) 75 { 76 return rcu_dereference_protected(cnic_ulp_tbl[type], 77 lockdep_is_held(&cnic_lock)); 78 } 79 80 static int cnic_service_bnx2(void *, void *); 81 static int cnic_service_bnx2x(void *, void *); 82 static int cnic_ctl(void *, struct cnic_ctl_info *); 83 84 static struct cnic_ops cnic_bnx2_ops = { 85 .cnic_owner = THIS_MODULE, 86 .cnic_handler = cnic_service_bnx2, 87 .cnic_ctl = cnic_ctl, 88 }; 89 90 static struct cnic_ops cnic_bnx2x_ops = { 91 .cnic_owner = THIS_MODULE, 92 .cnic_handler = cnic_service_bnx2x, 93 .cnic_ctl = cnic_ctl, 94 }; 95 96 static struct workqueue_struct *cnic_wq; 97 98 static void cnic_shutdown_rings(struct cnic_dev *); 99 static void cnic_init_rings(struct cnic_dev *); 100 static int cnic_cm_set_pg(struct cnic_sock *); 101 102 static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode) 103 { 104 struct cnic_uio_dev *udev = uinfo->priv; 105 struct cnic_dev *dev; 106 107 if (!capable(CAP_NET_ADMIN)) 108 return -EPERM; 109 110 if (udev->uio_dev != -1) 111 return -EBUSY; 112 113 rtnl_lock(); 114 dev = udev->dev; 115 116 if (!dev || 
!test_bit(CNIC_F_CNIC_UP, &dev->flags)) { 117 rtnl_unlock(); 118 return -ENODEV; 119 } 120 121 udev->uio_dev = iminor(inode); 122 123 cnic_shutdown_rings(dev); 124 cnic_init_rings(dev); 125 rtnl_unlock(); 126 127 return 0; 128 } 129 130 static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode) 131 { 132 struct cnic_uio_dev *udev = uinfo->priv; 133 134 udev->uio_dev = -1; 135 return 0; 136 } 137 138 static inline void cnic_hold(struct cnic_dev *dev) 139 { 140 atomic_inc(&dev->ref_count); 141 } 142 143 static inline void cnic_put(struct cnic_dev *dev) 144 { 145 atomic_dec(&dev->ref_count); 146 } 147 148 static inline void csk_hold(struct cnic_sock *csk) 149 { 150 atomic_inc(&csk->ref_count); 151 } 152 153 static inline void csk_put(struct cnic_sock *csk) 154 { 155 atomic_dec(&csk->ref_count); 156 } 157 158 static struct cnic_dev *cnic_from_netdev(struct net_device *netdev) 159 { 160 struct cnic_dev *cdev; 161 162 read_lock(&cnic_dev_lock); 163 list_for_each_entry(cdev, &cnic_dev_list, list) { 164 if (netdev == cdev->netdev) { 165 cnic_hold(cdev); 166 read_unlock(&cnic_dev_lock); 167 return cdev; 168 } 169 } 170 read_unlock(&cnic_dev_lock); 171 return NULL; 172 } 173 174 static inline void ulp_get(struct cnic_ulp_ops *ulp_ops) 175 { 176 atomic_inc(&ulp_ops->ref_count); 177 } 178 179 static inline void ulp_put(struct cnic_ulp_ops *ulp_ops) 180 { 181 atomic_dec(&ulp_ops->ref_count); 182 } 183 184 static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val) 185 { 186 struct cnic_local *cp = dev->cnic_priv; 187 struct cnic_eth_dev *ethdev = cp->ethdev; 188 struct drv_ctl_info info; 189 struct drv_ctl_io *io = &info.data.io; 190 191 info.cmd = DRV_CTL_CTX_WR_CMD; 192 io->cid_addr = cid_addr; 193 io->offset = off; 194 io->data = val; 195 ethdev->drv_ctl(dev->netdev, &info); 196 } 197 198 static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr) 199 { 200 struct cnic_local *cp = dev->cnic_priv; 201 struct cnic_eth_dev *ethdev = cp->ethdev; 202 struct drv_ctl_info info; 203 struct drv_ctl_io *io = &info.data.io; 204 205 info.cmd = DRV_CTL_CTXTBL_WR_CMD; 206 io->offset = off; 207 io->dma_addr = addr; 208 ethdev->drv_ctl(dev->netdev, &info); 209 } 210 211 static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start) 212 { 213 struct cnic_local *cp = dev->cnic_priv; 214 struct cnic_eth_dev *ethdev = cp->ethdev; 215 struct drv_ctl_info info; 216 struct drv_ctl_l2_ring *ring = &info.data.ring; 217 218 if (start) 219 info.cmd = DRV_CTL_START_L2_CMD; 220 else 221 info.cmd = DRV_CTL_STOP_L2_CMD; 222 223 ring->cid = cid; 224 ring->client_id = cl_id; 225 ethdev->drv_ctl(dev->netdev, &info); 226 } 227 228 static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val) 229 { 230 struct cnic_local *cp = dev->cnic_priv; 231 struct cnic_eth_dev *ethdev = cp->ethdev; 232 struct drv_ctl_info info; 233 struct drv_ctl_io *io = &info.data.io; 234 235 info.cmd = DRV_CTL_IO_WR_CMD; 236 io->offset = off; 237 io->data = val; 238 ethdev->drv_ctl(dev->netdev, &info); 239 } 240 241 static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off) 242 { 243 struct cnic_local *cp = dev->cnic_priv; 244 struct cnic_eth_dev *ethdev = cp->ethdev; 245 struct drv_ctl_info info; 246 struct drv_ctl_io *io = &info.data.io; 247 248 info.cmd = DRV_CTL_IO_RD_CMD; 249 io->offset = off; 250 ethdev->drv_ctl(dev->netdev, &info); 251 return io->data; 252 } 253 254 static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg) 255 { 256 struct cnic_local *cp = dev->cnic_priv; 
257 struct cnic_eth_dev *ethdev = cp->ethdev; 258 struct drv_ctl_info info; 259 260 if (reg) 261 info.cmd = DRV_CTL_ULP_REGISTER_CMD; 262 else 263 info.cmd = DRV_CTL_ULP_UNREGISTER_CMD; 264 265 info.data.ulp_type = ulp_type; 266 ethdev->drv_ctl(dev->netdev, &info); 267 } 268 269 static int cnic_in_use(struct cnic_sock *csk) 270 { 271 return test_bit(SK_F_INUSE, &csk->flags); 272 } 273 274 static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count) 275 { 276 struct cnic_local *cp = dev->cnic_priv; 277 struct cnic_eth_dev *ethdev = cp->ethdev; 278 struct drv_ctl_info info; 279 280 info.cmd = cmd; 281 info.data.credit.credit_count = count; 282 ethdev->drv_ctl(dev->netdev, &info); 283 } 284 285 static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid) 286 { 287 u32 i; 288 289 for (i = 0; i < cp->max_cid_space; i++) { 290 if (cp->ctx_tbl[i].cid == cid) { 291 *l5_cid = i; 292 return 0; 293 } 294 } 295 return -EINVAL; 296 } 297 298 static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, 299 struct cnic_sock *csk) 300 { 301 struct iscsi_path path_req; 302 char *buf = NULL; 303 u16 len = 0; 304 u32 msg_type = ISCSI_KEVENT_IF_DOWN; 305 struct cnic_ulp_ops *ulp_ops; 306 struct cnic_uio_dev *udev = cp->udev; 307 int rc = 0, retry = 0; 308 309 if (!udev || udev->uio_dev == -1) 310 return -ENODEV; 311 312 if (csk) { 313 len = sizeof(path_req); 314 buf = (char *) &path_req; 315 memset(&path_req, 0, len); 316 317 msg_type = ISCSI_KEVENT_PATH_REQ; 318 path_req.handle = (u64) csk->l5_cid; 319 if (test_bit(SK_F_IPV6, &csk->flags)) { 320 memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0], 321 sizeof(struct in6_addr)); 322 path_req.ip_addr_len = 16; 323 } else { 324 memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0], 325 sizeof(struct in_addr)); 326 path_req.ip_addr_len = 4; 327 } 328 path_req.vlan_id = csk->vlan_id; 329 path_req.pmtu = csk->mtu; 330 } 331 332 while (retry < 3) { 333 rc = 0; 334 rcu_read_lock(); 335 ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]); 336 if (ulp_ops) 337 rc = ulp_ops->iscsi_nl_send_msg( 338 cp->ulp_handle[CNIC_ULP_ISCSI], 339 msg_type, buf, len); 340 rcu_read_unlock(); 341 if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ) 342 break; 343 344 msleep(100); 345 retry++; 346 } 347 return rc; 348 } 349 350 static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8); 351 352 static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type, 353 char *buf, u16 len) 354 { 355 int rc = -EINVAL; 356 357 switch (msg_type) { 358 case ISCSI_UEVENT_PATH_UPDATE: { 359 struct cnic_local *cp; 360 u32 l5_cid; 361 struct cnic_sock *csk; 362 struct iscsi_path *path_resp; 363 364 if (len < sizeof(*path_resp)) 365 break; 366 367 path_resp = (struct iscsi_path *) buf; 368 cp = dev->cnic_priv; 369 l5_cid = (u32) path_resp->handle; 370 if (l5_cid >= MAX_CM_SK_TBL_SZ) 371 break; 372 373 rcu_read_lock(); 374 if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) { 375 rc = -ENODEV; 376 rcu_read_unlock(); 377 break; 378 } 379 csk = &cp->csk_tbl[l5_cid]; 380 csk_hold(csk); 381 if (cnic_in_use(csk) && 382 test_bit(SK_F_CONNECT_START, &csk->flags)) { 383 384 csk->vlan_id = path_resp->vlan_id; 385 386 memcpy(csk->ha, path_resp->mac_addr, 6); 387 if (test_bit(SK_F_IPV6, &csk->flags)) 388 memcpy(&csk->src_ip[0], &path_resp->src.v6_addr, 389 sizeof(struct in6_addr)); 390 else 391 memcpy(&csk->src_ip[0], &path_resp->src.v4_addr, 392 sizeof(struct in_addr)); 393 394 if (is_valid_ether_addr(csk->ha)) { 395 cnic_cm_set_pg(csk); 396 } else if (!test_bit(SK_F_OFFLD_SCHED, 
&csk->flags) && 397 !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { 398 399 cnic_cm_upcall(cp, csk, 400 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); 401 clear_bit(SK_F_CONNECT_START, &csk->flags); 402 } 403 } 404 csk_put(csk); 405 rcu_read_unlock(); 406 rc = 0; 407 } 408 } 409 410 return rc; 411 } 412 413 static int cnic_offld_prep(struct cnic_sock *csk) 414 { 415 if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) 416 return 0; 417 418 if (!test_bit(SK_F_CONNECT_START, &csk->flags)) { 419 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 420 return 0; 421 } 422 423 return 1; 424 } 425 426 static int cnic_close_prep(struct cnic_sock *csk) 427 { 428 clear_bit(SK_F_CONNECT_START, &csk->flags); 429 smp_mb__after_clear_bit(); 430 431 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { 432 while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) 433 msleep(1); 434 435 return 1; 436 } 437 return 0; 438 } 439 440 static int cnic_abort_prep(struct cnic_sock *csk) 441 { 442 clear_bit(SK_F_CONNECT_START, &csk->flags); 443 smp_mb__after_clear_bit(); 444 445 while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) 446 msleep(1); 447 448 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { 449 csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP; 450 return 1; 451 } 452 453 return 0; 454 } 455 456 int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops) 457 { 458 struct cnic_dev *dev; 459 460 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { 461 pr_err("%s: Bad type %d\n", __func__, ulp_type); 462 return -EINVAL; 463 } 464 mutex_lock(&cnic_lock); 465 if (cnic_ulp_tbl_prot(ulp_type)) { 466 pr_err("%s: Type %d has already been registered\n", 467 __func__, ulp_type); 468 mutex_unlock(&cnic_lock); 469 return -EBUSY; 470 } 471 472 read_lock(&cnic_dev_lock); 473 list_for_each_entry(dev, &cnic_dev_list, list) { 474 struct cnic_local *cp = dev->cnic_priv; 475 476 clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]); 477 } 478 read_unlock(&cnic_dev_lock); 479 480 atomic_set(&ulp_ops->ref_count, 0); 481 rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops); 482 mutex_unlock(&cnic_lock); 483 484 /* Prevent race conditions with netdev_event */ 485 rtnl_lock(); 486 list_for_each_entry(dev, &cnic_dev_list, list) { 487 struct cnic_local *cp = dev->cnic_priv; 488 489 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type])) 490 ulp_ops->cnic_init(dev); 491 } 492 rtnl_unlock(); 493 494 return 0; 495 } 496 497 int cnic_unregister_driver(int ulp_type) 498 { 499 struct cnic_dev *dev; 500 struct cnic_ulp_ops *ulp_ops; 501 int i = 0; 502 503 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { 504 pr_err("%s: Bad type %d\n", __func__, ulp_type); 505 return -EINVAL; 506 } 507 mutex_lock(&cnic_lock); 508 ulp_ops = cnic_ulp_tbl_prot(ulp_type); 509 if (!ulp_ops) { 510 pr_err("%s: Type %d has not been registered\n", 511 __func__, ulp_type); 512 goto out_unlock; 513 } 514 read_lock(&cnic_dev_lock); 515 list_for_each_entry(dev, &cnic_dev_list, list) { 516 struct cnic_local *cp = dev->cnic_priv; 517 518 if (rcu_dereference(cp->ulp_ops[ulp_type])) { 519 pr_err("%s: Type %d still has devices registered\n", 520 __func__, ulp_type); 521 read_unlock(&cnic_dev_lock); 522 goto out_unlock; 523 } 524 } 525 read_unlock(&cnic_dev_lock); 526 527 RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL); 528 529 mutex_unlock(&cnic_lock); 530 synchronize_rcu(); 531 while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) { 532 msleep(100); 533 i++; 534 } 535 536 if (atomic_read(&ulp_ops->ref_count) != 0) 537 netdev_warn(dev->netdev, "Failed waiting 
for ref count to go to zero\n"); 538 return 0; 539 540 out_unlock: 541 mutex_unlock(&cnic_lock); 542 return -EINVAL; 543 } 544 545 static int cnic_start_hw(struct cnic_dev *); 546 static void cnic_stop_hw(struct cnic_dev *); 547 548 static int cnic_register_device(struct cnic_dev *dev, int ulp_type, 549 void *ulp_ctx) 550 { 551 struct cnic_local *cp = dev->cnic_priv; 552 struct cnic_ulp_ops *ulp_ops; 553 554 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { 555 pr_err("%s: Bad type %d\n", __func__, ulp_type); 556 return -EINVAL; 557 } 558 mutex_lock(&cnic_lock); 559 if (cnic_ulp_tbl_prot(ulp_type) == NULL) { 560 pr_err("%s: Driver with type %d has not been registered\n", 561 __func__, ulp_type); 562 mutex_unlock(&cnic_lock); 563 return -EAGAIN; 564 } 565 if (rcu_dereference(cp->ulp_ops[ulp_type])) { 566 pr_err("%s: Type %d has already been registered to this device\n", 567 __func__, ulp_type); 568 mutex_unlock(&cnic_lock); 569 return -EBUSY; 570 } 571 572 clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]); 573 cp->ulp_handle[ulp_type] = ulp_ctx; 574 ulp_ops = cnic_ulp_tbl_prot(ulp_type); 575 rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops); 576 cnic_hold(dev); 577 578 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) 579 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type])) 580 ulp_ops->cnic_start(cp->ulp_handle[ulp_type]); 581 582 mutex_unlock(&cnic_lock); 583 584 cnic_ulp_ctl(dev, ulp_type, true); 585 586 return 0; 587 588 } 589 EXPORT_SYMBOL(cnic_register_driver); 590 591 static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type) 592 { 593 struct cnic_local *cp = dev->cnic_priv; 594 int i = 0; 595 596 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { 597 pr_err("%s: Bad type %d\n", __func__, ulp_type); 598 return -EINVAL; 599 } 600 mutex_lock(&cnic_lock); 601 if (rcu_dereference(cp->ulp_ops[ulp_type])) { 602 RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL); 603 cnic_put(dev); 604 } else { 605 pr_err("%s: device not registered to this ulp type %d\n", 606 __func__, ulp_type); 607 mutex_unlock(&cnic_lock); 608 return -EINVAL; 609 } 610 mutex_unlock(&cnic_lock); 611 612 if (ulp_type == CNIC_ULP_ISCSI) 613 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); 614 615 synchronize_rcu(); 616 617 while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) && 618 i < 20) { 619 msleep(100); 620 i++; 621 } 622 if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type])) 623 netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n"); 624 625 cnic_ulp_ctl(dev, ulp_type, false); 626 627 return 0; 628 } 629 EXPORT_SYMBOL(cnic_unregister_driver); 630 631 static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id, 632 u32 next) 633 { 634 id_tbl->start = start_id; 635 id_tbl->max = size; 636 id_tbl->next = next; 637 spin_lock_init(&id_tbl->lock); 638 id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL); 639 if (!id_tbl->table) 640 return -ENOMEM; 641 642 return 0; 643 } 644 645 static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl) 646 { 647 kfree(id_tbl->table); 648 id_tbl->table = NULL; 649 } 650 651 static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id) 652 { 653 int ret = -1; 654 655 id -= id_tbl->start; 656 if (id >= id_tbl->max) 657 return ret; 658 659 spin_lock(&id_tbl->lock); 660 if (!test_bit(id, id_tbl->table)) { 661 set_bit(id, id_tbl->table); 662 ret = 0; 663 } 664 spin_unlock(&id_tbl->lock); 665 return ret; 666 } 667 668 /* Returns -1 if not successful */ 669 static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl) 670 { 671 u32 
id; 672 673 spin_lock(&id_tbl->lock); 674 id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next); 675 if (id >= id_tbl->max) { 676 id = -1; 677 if (id_tbl->next != 0) { 678 id = find_first_zero_bit(id_tbl->table, id_tbl->next); 679 if (id >= id_tbl->next) 680 id = -1; 681 } 682 } 683 684 if (id < id_tbl->max) { 685 set_bit(id, id_tbl->table); 686 id_tbl->next = (id + 1) & (id_tbl->max - 1); 687 id += id_tbl->start; 688 } 689 690 spin_unlock(&id_tbl->lock); 691 692 return id; 693 } 694 695 static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id) 696 { 697 if (id == -1) 698 return; 699 700 id -= id_tbl->start; 701 if (id >= id_tbl->max) 702 return; 703 704 clear_bit(id, id_tbl->table); 705 } 706 707 static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma) 708 { 709 int i; 710 711 if (!dma->pg_arr) 712 return; 713 714 for (i = 0; i < dma->num_pages; i++) { 715 if (dma->pg_arr[i]) { 716 dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE, 717 dma->pg_arr[i], dma->pg_map_arr[i]); 718 dma->pg_arr[i] = NULL; 719 } 720 } 721 if (dma->pgtbl) { 722 dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size, 723 dma->pgtbl, dma->pgtbl_map); 724 dma->pgtbl = NULL; 725 } 726 kfree(dma->pg_arr); 727 dma->pg_arr = NULL; 728 dma->num_pages = 0; 729 } 730 731 static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma) 732 { 733 int i; 734 __le32 *page_table = (__le32 *) dma->pgtbl; 735 736 for (i = 0; i < dma->num_pages; i++) { 737 /* Each entry needs to be in big endian format. */ 738 *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32); 739 page_table++; 740 *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff); 741 page_table++; 742 } 743 } 744 745 static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma) 746 { 747 int i; 748 __le32 *page_table = (__le32 *) dma->pgtbl; 749 750 for (i = 0; i < dma->num_pages; i++) { 751 /* Each entry needs to be in little endian format. 
*/ 752 *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff); 753 page_table++; 754 *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32); 755 page_table++; 756 } 757 } 758 759 static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, 760 int pages, int use_pg_tbl) 761 { 762 int i, size; 763 struct cnic_local *cp = dev->cnic_priv; 764 765 size = pages * (sizeof(void *) + sizeof(dma_addr_t)); 766 dma->pg_arr = kzalloc(size, GFP_ATOMIC); 767 if (dma->pg_arr == NULL) 768 return -ENOMEM; 769 770 dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages); 771 dma->num_pages = pages; 772 773 for (i = 0; i < pages; i++) { 774 dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev, 775 BCM_PAGE_SIZE, 776 &dma->pg_map_arr[i], 777 GFP_ATOMIC); 778 if (dma->pg_arr[i] == NULL) 779 goto error; 780 } 781 if (!use_pg_tbl) 782 return 0; 783 784 dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) & 785 ~(BCM_PAGE_SIZE - 1); 786 dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size, 787 &dma->pgtbl_map, GFP_ATOMIC); 788 if (dma->pgtbl == NULL) 789 goto error; 790 791 cp->setup_pgtbl(dev, dma); 792 793 return 0; 794 795 error: 796 cnic_free_dma(dev, dma); 797 return -ENOMEM; 798 } 799 800 static void cnic_free_context(struct cnic_dev *dev) 801 { 802 struct cnic_local *cp = dev->cnic_priv; 803 int i; 804 805 for (i = 0; i < cp->ctx_blks; i++) { 806 if (cp->ctx_arr[i].ctx) { 807 dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size, 808 cp->ctx_arr[i].ctx, 809 cp->ctx_arr[i].mapping); 810 cp->ctx_arr[i].ctx = NULL; 811 } 812 } 813 } 814 815 static void __cnic_free_uio(struct cnic_uio_dev *udev) 816 { 817 uio_unregister_device(&udev->cnic_uinfo); 818 819 if (udev->l2_buf) { 820 dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size, 821 udev->l2_buf, udev->l2_buf_map); 822 udev->l2_buf = NULL; 823 } 824 825 if (udev->l2_ring) { 826 dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size, 827 udev->l2_ring, udev->l2_ring_map); 828 udev->l2_ring = NULL; 829 } 830 831 pci_dev_put(udev->pdev); 832 kfree(udev); 833 } 834 835 static void cnic_free_uio(struct cnic_uio_dev *udev) 836 { 837 if (!udev) 838 return; 839 840 write_lock(&cnic_dev_lock); 841 list_del_init(&udev->list); 842 write_unlock(&cnic_dev_lock); 843 __cnic_free_uio(udev); 844 } 845 846 static void cnic_free_resc(struct cnic_dev *dev) 847 { 848 struct cnic_local *cp = dev->cnic_priv; 849 struct cnic_uio_dev *udev = cp->udev; 850 851 if (udev) { 852 udev->dev = NULL; 853 cp->udev = NULL; 854 } 855 856 cnic_free_context(dev); 857 kfree(cp->ctx_arr); 858 cp->ctx_arr = NULL; 859 cp->ctx_blks = 0; 860 861 cnic_free_dma(dev, &cp->gbl_buf_info); 862 cnic_free_dma(dev, &cp->kwq_info); 863 cnic_free_dma(dev, &cp->kwq_16_data_info); 864 cnic_free_dma(dev, &cp->kcq2.dma); 865 cnic_free_dma(dev, &cp->kcq1.dma); 866 kfree(cp->iscsi_tbl); 867 cp->iscsi_tbl = NULL; 868 kfree(cp->ctx_tbl); 869 cp->ctx_tbl = NULL; 870 871 cnic_free_id_tbl(&cp->fcoe_cid_tbl); 872 cnic_free_id_tbl(&cp->cid_tbl); 873 } 874 875 static int cnic_alloc_context(struct cnic_dev *dev) 876 { 877 struct cnic_local *cp = dev->cnic_priv; 878 879 if (CHIP_NUM(cp) == CHIP_NUM_5709) { 880 int i, k, arr_size; 881 882 cp->ctx_blk_size = BCM_PAGE_SIZE; 883 cp->cids_per_blk = BCM_PAGE_SIZE / 128; 884 arr_size = BNX2_MAX_CID / cp->cids_per_blk * 885 sizeof(struct cnic_ctx); 886 cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL); 887 if (cp->ctx_arr == NULL) 888 return -ENOMEM; 889 890 k = 0; 891 for (i = 0; i < 2; i++) { 892 u32 j, reg, off, lo, hi; 893 894 if (i == 0) 895 off = 
BNX2_PG_CTX_MAP; 896 else 897 off = BNX2_ISCSI_CTX_MAP; 898 899 reg = cnic_reg_rd_ind(dev, off); 900 lo = reg >> 16; 901 hi = reg & 0xffff; 902 for (j = lo; j < hi; j += cp->cids_per_blk, k++) 903 cp->ctx_arr[k].cid = j; 904 } 905 906 cp->ctx_blks = k; 907 if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) { 908 cp->ctx_blks = 0; 909 return -ENOMEM; 910 } 911 912 for (i = 0; i < cp->ctx_blks; i++) { 913 cp->ctx_arr[i].ctx = 914 dma_alloc_coherent(&dev->pcidev->dev, 915 BCM_PAGE_SIZE, 916 &cp->ctx_arr[i].mapping, 917 GFP_KERNEL); 918 if (cp->ctx_arr[i].ctx == NULL) 919 return -ENOMEM; 920 } 921 } 922 return 0; 923 } 924 925 static u16 cnic_bnx2_next_idx(u16 idx) 926 { 927 return idx + 1; 928 } 929 930 static u16 cnic_bnx2_hw_idx(u16 idx) 931 { 932 return idx; 933 } 934 935 static u16 cnic_bnx2x_next_idx(u16 idx) 936 { 937 idx++; 938 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) 939 idx++; 940 941 return idx; 942 } 943 944 static u16 cnic_bnx2x_hw_idx(u16 idx) 945 { 946 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) 947 idx++; 948 return idx; 949 } 950 951 static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info, 952 bool use_pg_tbl) 953 { 954 int err, i, use_page_tbl = 0; 955 struct kcqe **kcq; 956 957 if (use_pg_tbl) 958 use_page_tbl = 1; 959 960 err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl); 961 if (err) 962 return err; 963 964 kcq = (struct kcqe **) info->dma.pg_arr; 965 info->kcq = kcq; 966 967 info->next_idx = cnic_bnx2_next_idx; 968 info->hw_idx = cnic_bnx2_hw_idx; 969 if (use_pg_tbl) 970 return 0; 971 972 info->next_idx = cnic_bnx2x_next_idx; 973 info->hw_idx = cnic_bnx2x_hw_idx; 974 975 for (i = 0; i < KCQ_PAGE_CNT; i++) { 976 struct bnx2x_bd_chain_next *next = 977 (struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT]; 978 int j = i + 1; 979 980 if (j >= KCQ_PAGE_CNT) 981 j = 0; 982 next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32; 983 next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff; 984 } 985 return 0; 986 } 987 988 static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages) 989 { 990 struct cnic_local *cp = dev->cnic_priv; 991 struct cnic_uio_dev *udev; 992 993 read_lock(&cnic_dev_lock); 994 list_for_each_entry(udev, &cnic_udev_list, list) { 995 if (udev->pdev == dev->pcidev) { 996 udev->dev = dev; 997 cp->udev = udev; 998 read_unlock(&cnic_dev_lock); 999 return 0; 1000 } 1001 } 1002 read_unlock(&cnic_dev_lock); 1003 1004 udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC); 1005 if (!udev) 1006 return -ENOMEM; 1007 1008 udev->uio_dev = -1; 1009 1010 udev->dev = dev; 1011 udev->pdev = dev->pcidev; 1012 udev->l2_ring_size = pages * BCM_PAGE_SIZE; 1013 udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size, 1014 &udev->l2_ring_map, 1015 GFP_KERNEL | __GFP_COMP); 1016 if (!udev->l2_ring) 1017 goto err_udev; 1018 1019 udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; 1020 udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size); 1021 udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size, 1022 &udev->l2_buf_map, 1023 GFP_KERNEL | __GFP_COMP); 1024 if (!udev->l2_buf) 1025 goto err_dma; 1026 1027 write_lock(&cnic_dev_lock); 1028 list_add(&udev->list, &cnic_udev_list); 1029 write_unlock(&cnic_dev_lock); 1030 1031 pci_dev_get(udev->pdev); 1032 1033 cp->udev = udev; 1034 1035 return 0; 1036 err_dma: 1037 dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size, 1038 udev->l2_ring, udev->l2_ring_map); 1039 err_udev: 1040 kfree(udev); 1041 return -ENOMEM; 1042 } 1043 1044 static int cnic_init_uio(struct 
cnic_dev *dev) 1045 { 1046 struct cnic_local *cp = dev->cnic_priv; 1047 struct cnic_uio_dev *udev = cp->udev; 1048 struct uio_info *uinfo; 1049 int ret = 0; 1050 1051 if (!udev) 1052 return -ENOMEM; 1053 1054 uinfo = &udev->cnic_uinfo; 1055 1056 uinfo->mem[0].addr = dev->netdev->base_addr; 1057 uinfo->mem[0].internal_addr = dev->regview; 1058 uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start; 1059 uinfo->mem[0].memtype = UIO_MEM_PHYS; 1060 1061 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 1062 uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen & 1063 PAGE_MASK; 1064 if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) 1065 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; 1066 else 1067 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE; 1068 1069 uinfo->name = "bnx2_cnic"; 1070 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 1071 uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk & 1072 PAGE_MASK; 1073 uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk); 1074 1075 uinfo->name = "bnx2x_cnic"; 1076 } 1077 1078 uinfo->mem[1].memtype = UIO_MEM_LOGICAL; 1079 1080 uinfo->mem[2].addr = (unsigned long) udev->l2_ring; 1081 uinfo->mem[2].size = udev->l2_ring_size; 1082 uinfo->mem[2].memtype = UIO_MEM_LOGICAL; 1083 1084 uinfo->mem[3].addr = (unsigned long) udev->l2_buf; 1085 uinfo->mem[3].size = udev->l2_buf_size; 1086 uinfo->mem[3].memtype = UIO_MEM_LOGICAL; 1087 1088 uinfo->version = CNIC_MODULE_VERSION; 1089 uinfo->irq = UIO_IRQ_CUSTOM; 1090 1091 uinfo->open = cnic_uio_open; 1092 uinfo->release = cnic_uio_close; 1093 1094 if (udev->uio_dev == -1) { 1095 if (!uinfo->priv) { 1096 uinfo->priv = udev; 1097 1098 ret = uio_register_device(&udev->pdev->dev, uinfo); 1099 } 1100 } else { 1101 cnic_init_rings(dev); 1102 } 1103 1104 return ret; 1105 } 1106 1107 static int cnic_alloc_bnx2_resc(struct cnic_dev *dev) 1108 { 1109 struct cnic_local *cp = dev->cnic_priv; 1110 int ret; 1111 1112 ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1); 1113 if (ret) 1114 goto error; 1115 cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr; 1116 1117 ret = cnic_alloc_kcq(dev, &cp->kcq1, true); 1118 if (ret) 1119 goto error; 1120 1121 ret = cnic_alloc_context(dev); 1122 if (ret) 1123 goto error; 1124 1125 ret = cnic_alloc_uio_rings(dev, 2); 1126 if (ret) 1127 goto error; 1128 1129 ret = cnic_init_uio(dev); 1130 if (ret) 1131 goto error; 1132 1133 return 0; 1134 1135 error: 1136 cnic_free_resc(dev); 1137 return ret; 1138 } 1139 1140 static int cnic_alloc_bnx2x_context(struct cnic_dev *dev) 1141 { 1142 struct cnic_local *cp = dev->cnic_priv; 1143 int ctx_blk_size = cp->ethdev->ctx_blk_size; 1144 int total_mem, blks, i; 1145 1146 total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space; 1147 blks = total_mem / ctx_blk_size; 1148 if (total_mem % ctx_blk_size) 1149 blks++; 1150 1151 if (blks > cp->ethdev->ctx_tbl_len) 1152 return -ENOMEM; 1153 1154 cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL); 1155 if (cp->ctx_arr == NULL) 1156 return -ENOMEM; 1157 1158 cp->ctx_blks = blks; 1159 cp->ctx_blk_size = ctx_blk_size; 1160 if (!BNX2X_CHIP_IS_57710(cp->chip_id)) 1161 cp->ctx_align = 0; 1162 else 1163 cp->ctx_align = ctx_blk_size; 1164 1165 cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE; 1166 1167 for (i = 0; i < blks; i++) { 1168 cp->ctx_arr[i].ctx = 1169 dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size, 1170 &cp->ctx_arr[i].mapping, 1171 GFP_KERNEL); 1172 if (cp->ctx_arr[i].ctx == NULL) 1173 return -ENOMEM; 1174 1175 if (cp->ctx_align && 
cp->ctx_blk_size == ctx_blk_size) { 1176 if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) { 1177 cnic_free_context(dev); 1178 cp->ctx_blk_size += cp->ctx_align; 1179 i = -1; 1180 continue; 1181 } 1182 } 1183 } 1184 return 0; 1185 } 1186 1187 static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) 1188 { 1189 struct cnic_local *cp = dev->cnic_priv; 1190 struct cnic_eth_dev *ethdev = cp->ethdev; 1191 u32 start_cid = ethdev->starting_cid; 1192 int i, j, n, ret, pages; 1193 struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info; 1194 1195 cp->iro_arr = ethdev->iro_arr; 1196 1197 cp->max_cid_space = MAX_ISCSI_TBL_SZ; 1198 cp->iscsi_start_cid = start_cid; 1199 cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ; 1200 1201 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 1202 cp->max_cid_space += dev->max_fcoe_conn; 1203 cp->fcoe_init_cid = ethdev->fcoe_init_cid; 1204 if (!cp->fcoe_init_cid) 1205 cp->fcoe_init_cid = 0x10; 1206 } 1207 1208 cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ, 1209 GFP_KERNEL); 1210 if (!cp->iscsi_tbl) 1211 goto error; 1212 1213 cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) * 1214 cp->max_cid_space, GFP_KERNEL); 1215 if (!cp->ctx_tbl) 1216 goto error; 1217 1218 for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) { 1219 cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i]; 1220 cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI; 1221 } 1222 1223 for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++) 1224 cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE; 1225 1226 pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) / 1227 PAGE_SIZE; 1228 1229 ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); 1230 if (ret) 1231 return -ENOMEM; 1232 1233 n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; 1234 for (i = 0, j = 0; i < cp->max_cid_space; i++) { 1235 long off = CNIC_KWQ16_DATA_SIZE * (i % n); 1236 1237 cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off; 1238 cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] + 1239 off; 1240 1241 if ((i % n) == (n - 1)) 1242 j++; 1243 } 1244 1245 ret = cnic_alloc_kcq(dev, &cp->kcq1, false); 1246 if (ret) 1247 goto error; 1248 1249 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 1250 ret = cnic_alloc_kcq(dev, &cp->kcq2, true); 1251 if (ret) 1252 goto error; 1253 } 1254 1255 pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE; 1256 ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0); 1257 if (ret) 1258 goto error; 1259 1260 ret = cnic_alloc_bnx2x_context(dev); 1261 if (ret) 1262 goto error; 1263 1264 cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk; 1265 1266 cp->l2_rx_ring_size = 15; 1267 1268 ret = cnic_alloc_uio_rings(dev, 4); 1269 if (ret) 1270 goto error; 1271 1272 ret = cnic_init_uio(dev); 1273 if (ret) 1274 goto error; 1275 1276 return 0; 1277 1278 error: 1279 cnic_free_resc(dev); 1280 return -ENOMEM; 1281 } 1282 1283 static inline u32 cnic_kwq_avail(struct cnic_local *cp) 1284 { 1285 return cp->max_kwq_idx - 1286 ((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx); 1287 } 1288 1289 static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], 1290 u32 num_wqes) 1291 { 1292 struct cnic_local *cp = dev->cnic_priv; 1293 struct kwqe *prod_qe; 1294 u16 prod, sw_prod, i; 1295 1296 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 1297 return -EAGAIN; /* bnx2 is down */ 1298 1299 spin_lock_bh(&cp->cnic_ulp_lock); 1300 if (num_wqes > cnic_kwq_avail(cp) && 1301 !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) { 1302 spin_unlock_bh(&cp->cnic_ulp_lock); 1303 return -EAGAIN; 1304 } 1305 1306 clear_bit(CNIC_LCL_FL_KWQ_INIT, 
&cp->cnic_local_flags); 1307 1308 prod = cp->kwq_prod_idx; 1309 sw_prod = prod & MAX_KWQ_IDX; 1310 for (i = 0; i < num_wqes; i++) { 1311 prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)]; 1312 memcpy(prod_qe, wqes[i], sizeof(struct kwqe)); 1313 prod++; 1314 sw_prod = prod & MAX_KWQ_IDX; 1315 } 1316 cp->kwq_prod_idx = prod; 1317 1318 CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx); 1319 1320 spin_unlock_bh(&cp->cnic_ulp_lock); 1321 return 0; 1322 } 1323 1324 static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid, 1325 union l5cm_specific_data *l5_data) 1326 { 1327 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 1328 dma_addr_t map; 1329 1330 map = ctx->kwqe_data_mapping; 1331 l5_data->phy_address.lo = (u64) map & 0xffffffff; 1332 l5_data->phy_address.hi = (u64) map >> 32; 1333 return ctx->kwqe_data; 1334 } 1335 1336 static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid, 1337 u32 type, union l5cm_specific_data *l5_data) 1338 { 1339 struct cnic_local *cp = dev->cnic_priv; 1340 struct l5cm_spe kwqe; 1341 struct kwqe_16 *kwq[1]; 1342 u16 type_16; 1343 int ret; 1344 1345 kwqe.hdr.conn_and_cmd_data = 1346 cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) | 1347 BNX2X_HW_CID(cp, cid))); 1348 1349 type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; 1350 type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) & 1351 SPE_HDR_FUNCTION_ID; 1352 1353 kwqe.hdr.type = cpu_to_le16(type_16); 1354 kwqe.hdr.reserved1 = 0; 1355 kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo); 1356 kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi); 1357 1358 kwq[0] = (struct kwqe_16 *) &kwqe; 1359 1360 spin_lock_bh(&cp->cnic_ulp_lock); 1361 ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1); 1362 spin_unlock_bh(&cp->cnic_ulp_lock); 1363 1364 if (ret == 1) 1365 return 0; 1366 1367 return ret; 1368 } 1369 1370 static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type, 1371 struct kcqe *cqes[], u32 num_cqes) 1372 { 1373 struct cnic_local *cp = dev->cnic_priv; 1374 struct cnic_ulp_ops *ulp_ops; 1375 1376 rcu_read_lock(); 1377 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); 1378 if (likely(ulp_ops)) { 1379 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], 1380 cqes, num_cqes); 1381 } 1382 rcu_read_unlock(); 1383 } 1384 1385 static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) 1386 { 1387 struct cnic_local *cp = dev->cnic_priv; 1388 struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe; 1389 int hq_bds, pages; 1390 u32 pfid = cp->pfid; 1391 1392 cp->num_iscsi_tasks = req1->num_tasks_per_conn; 1393 cp->num_ccells = req1->num_ccells_per_conn; 1394 cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE * 1395 cp->num_iscsi_tasks; 1396 cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS * 1397 BNX2X_ISCSI_R2TQE_SIZE; 1398 cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE; 1399 pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; 1400 hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE); 1401 cp->num_cqs = req1->num_cqs; 1402 1403 if (!dev->max_iscsi_conn) 1404 return 0; 1405 1406 /* init Tstorm RAM */ 1407 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid), 1408 req1->rq_num_wqes); 1409 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), 1410 PAGE_SIZE); 1411 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 1412 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); 1413 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + 1414 TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), 1415 req1->num_tasks_per_conn); 
1416 1417 /* init Ustorm RAM */ 1418 CNIC_WR16(dev, BAR_USTRORM_INTMEM + 1419 USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid), 1420 req1->rq_buffer_size); 1421 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), 1422 PAGE_SIZE); 1423 CNIC_WR8(dev, BAR_USTRORM_INTMEM + 1424 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); 1425 CNIC_WR16(dev, BAR_USTRORM_INTMEM + 1426 USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), 1427 req1->num_tasks_per_conn); 1428 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid), 1429 req1->rq_num_wqes); 1430 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid), 1431 req1->cq_num_wqes); 1432 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid), 1433 cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS); 1434 1435 /* init Xstorm RAM */ 1436 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), 1437 PAGE_SIZE); 1438 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1439 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); 1440 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + 1441 XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), 1442 req1->num_tasks_per_conn); 1443 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid), 1444 hq_bds); 1445 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid), 1446 req1->num_tasks_per_conn); 1447 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid), 1448 cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS); 1449 1450 /* init Cstorm RAM */ 1451 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), 1452 PAGE_SIZE); 1453 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 1454 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); 1455 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 1456 CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), 1457 req1->num_tasks_per_conn); 1458 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid), 1459 req1->cq_num_wqes); 1460 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid), 1461 hq_bds); 1462 1463 return 0; 1464 } 1465 1466 static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe) 1467 { 1468 struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe; 1469 struct cnic_local *cp = dev->cnic_priv; 1470 u32 pfid = cp->pfid; 1471 struct iscsi_kcqe kcqe; 1472 struct kcqe *cqes[1]; 1473 1474 memset(&kcqe, 0, sizeof(kcqe)); 1475 if (!dev->max_iscsi_conn) { 1476 kcqe.completion_status = 1477 ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED; 1478 goto done; 1479 } 1480 1481 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 1482 TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]); 1483 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 1484 TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4, 1485 req2->error_bit_map[1]); 1486 1487 CNIC_WR16(dev, BAR_USTRORM_INTMEM + 1488 USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn); 1489 CNIC_WR(dev, BAR_USTRORM_INTMEM + 1490 USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]); 1491 CNIC_WR(dev, BAR_USTRORM_INTMEM + 1492 USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4, 1493 req2->error_bit_map[1]); 1494 1495 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 1496 CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn); 1497 1498 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; 1499 1500 done: 1501 kcqe.op_code = ISCSI_KCQE_OPCODE_INIT; 1502 cqes[0] = (struct kcqe *) &kcqe; 1503 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); 1504 1505 return 0; 1506 } 1507 1508 static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid) 1509 { 1510 struct 
cnic_local *cp = dev->cnic_priv; 1511 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 1512 1513 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) { 1514 struct cnic_iscsi *iscsi = ctx->proto.iscsi; 1515 1516 cnic_free_dma(dev, &iscsi->hq_info); 1517 cnic_free_dma(dev, &iscsi->r2tq_info); 1518 cnic_free_dma(dev, &iscsi->task_array_info); 1519 cnic_free_id(&cp->cid_tbl, ctx->cid); 1520 } else { 1521 cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid); 1522 } 1523 1524 ctx->cid = 0; 1525 } 1526 1527 static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid) 1528 { 1529 u32 cid; 1530 int ret, pages; 1531 struct cnic_local *cp = dev->cnic_priv; 1532 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 1533 struct cnic_iscsi *iscsi = ctx->proto.iscsi; 1534 1535 if (ctx->ulp_proto_id == CNIC_ULP_FCOE) { 1536 cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl); 1537 if (cid == -1) { 1538 ret = -ENOMEM; 1539 goto error; 1540 } 1541 ctx->cid = cid; 1542 return 0; 1543 } 1544 1545 cid = cnic_alloc_new_id(&cp->cid_tbl); 1546 if (cid == -1) { 1547 ret = -ENOMEM; 1548 goto error; 1549 } 1550 1551 ctx->cid = cid; 1552 pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE; 1553 1554 ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1); 1555 if (ret) 1556 goto error; 1557 1558 pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE; 1559 ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1); 1560 if (ret) 1561 goto error; 1562 1563 pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; 1564 ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1); 1565 if (ret) 1566 goto error; 1567 1568 return 0; 1569 1570 error: 1571 cnic_free_bnx2x_conn_resc(dev, l5_cid); 1572 return ret; 1573 } 1574 1575 static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init, 1576 struct regpair *ctx_addr) 1577 { 1578 struct cnic_local *cp = dev->cnic_priv; 1579 struct cnic_eth_dev *ethdev = cp->ethdev; 1580 int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk; 1581 int off = (cid - ethdev->starting_cid) % cp->cids_per_blk; 1582 unsigned long align_off = 0; 1583 dma_addr_t ctx_map; 1584 void *ctx; 1585 1586 if (cp->ctx_align) { 1587 unsigned long mask = cp->ctx_align - 1; 1588 1589 if (cp->ctx_arr[blk].mapping & mask) 1590 align_off = cp->ctx_align - 1591 (cp->ctx_arr[blk].mapping & mask); 1592 } 1593 ctx_map = cp->ctx_arr[blk].mapping + align_off + 1594 (off * BNX2X_CONTEXT_MEM_SIZE); 1595 ctx = cp->ctx_arr[blk].ctx + align_off + 1596 (off * BNX2X_CONTEXT_MEM_SIZE); 1597 if (init) 1598 memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE); 1599 1600 ctx_addr->lo = ctx_map & 0xffffffff; 1601 ctx_addr->hi = (u64) ctx_map >> 32; 1602 return ctx; 1603 } 1604 1605 static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], 1606 u32 num) 1607 { 1608 struct cnic_local *cp = dev->cnic_priv; 1609 struct iscsi_kwqe_conn_offload1 *req1 = 1610 (struct iscsi_kwqe_conn_offload1 *) wqes[0]; 1611 struct iscsi_kwqe_conn_offload2 *req2 = 1612 (struct iscsi_kwqe_conn_offload2 *) wqes[1]; 1613 struct iscsi_kwqe_conn_offload3 *req3; 1614 struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id]; 1615 struct cnic_iscsi *iscsi = ctx->proto.iscsi; 1616 u32 cid = ctx->cid; 1617 u32 hw_cid = BNX2X_HW_CID(cp, cid); 1618 struct iscsi_context *ictx; 1619 struct regpair context_addr; 1620 int i, j, n = 2, n_max; 1621 u8 port = CNIC_PORT(cp); 1622 1623 ctx->ctx_flags = 0; 1624 if (!req2->num_additional_wqes) 1625 return -EINVAL; 1626 1627 n_max = req2->num_additional_wqes + 2; 1628 1629 ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr); 1630 if (ictx == NULL) 1631 return 
-ENOMEM; 1632 1633 req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++]; 1634 1635 ictx->xstorm_ag_context.hq_prod = 1; 1636 1637 ictx->xstorm_st_context.iscsi.first_burst_length = 1638 ISCSI_DEF_FIRST_BURST_LEN; 1639 ictx->xstorm_st_context.iscsi.max_send_pdu_length = 1640 ISCSI_DEF_MAX_RECV_SEG_LEN; 1641 ictx->xstorm_st_context.iscsi.sq_pbl_base.lo = 1642 req1->sq_page_table_addr_lo; 1643 ictx->xstorm_st_context.iscsi.sq_pbl_base.hi = 1644 req1->sq_page_table_addr_hi; 1645 ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi; 1646 ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo; 1647 ictx->xstorm_st_context.iscsi.hq_pbl_base.lo = 1648 iscsi->hq_info.pgtbl_map & 0xffffffff; 1649 ictx->xstorm_st_context.iscsi.hq_pbl_base.hi = 1650 (u64) iscsi->hq_info.pgtbl_map >> 32; 1651 ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo = 1652 iscsi->hq_info.pgtbl[0]; 1653 ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi = 1654 iscsi->hq_info.pgtbl[1]; 1655 ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo = 1656 iscsi->r2tq_info.pgtbl_map & 0xffffffff; 1657 ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi = 1658 (u64) iscsi->r2tq_info.pgtbl_map >> 32; 1659 ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo = 1660 iscsi->r2tq_info.pgtbl[0]; 1661 ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi = 1662 iscsi->r2tq_info.pgtbl[1]; 1663 ictx->xstorm_st_context.iscsi.task_pbl_base.lo = 1664 iscsi->task_array_info.pgtbl_map & 0xffffffff; 1665 ictx->xstorm_st_context.iscsi.task_pbl_base.hi = 1666 (u64) iscsi->task_array_info.pgtbl_map >> 32; 1667 ictx->xstorm_st_context.iscsi.task_pbl_cache_idx = 1668 BNX2X_ISCSI_PBL_NOT_CACHED; 1669 ictx->xstorm_st_context.iscsi.flags.flags |= 1670 XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA; 1671 ictx->xstorm_st_context.iscsi.flags.flags |= 1672 XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T; 1673 ictx->xstorm_st_context.common.ethernet.reserved_vlan_type = 1674 ETH_P_8021Q; 1675 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) && 1676 cp->port_mode == CHIP_2_PORT_MODE) { 1677 1678 port = 0; 1679 } 1680 ictx->xstorm_st_context.common.flags = 1681 1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT; 1682 ictx->xstorm_st_context.common.flags = 1683 port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT; 1684 1685 ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE; 1686 /* TSTORM requires the base address of RQ DB & not PTE */ 1687 ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo = 1688 req2->rq_page_table_addr_lo & PAGE_MASK; 1689 ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi = 1690 req2->rq_page_table_addr_hi; 1691 ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id; 1692 ictx->tstorm_st_context.tcp.cwnd = 0x5A8; 1693 ictx->tstorm_st_context.tcp.flags2 |= 1694 TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN; 1695 ictx->tstorm_st_context.tcp.ooo_support_mode = 1696 TCP_TSTORM_OOO_DROP_AND_PROC_ACK; 1697 1698 ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG; 1699 1700 ictx->ustorm_st_context.ring.rq.pbl_base.lo = 1701 req2->rq_page_table_addr_lo; 1702 ictx->ustorm_st_context.ring.rq.pbl_base.hi = 1703 req2->rq_page_table_addr_hi; 1704 ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi; 1705 ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo; 1706 ictx->ustorm_st_context.ring.r2tq.pbl_base.lo = 1707 iscsi->r2tq_info.pgtbl_map & 0xffffffff; 1708 ictx->ustorm_st_context.ring.r2tq.pbl_base.hi = 1709 (u64) iscsi->r2tq_info.pgtbl_map >> 32; 1710 
ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo = 1711 iscsi->r2tq_info.pgtbl[0]; 1712 ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi = 1713 iscsi->r2tq_info.pgtbl[1]; 1714 ictx->ustorm_st_context.ring.cq_pbl_base.lo = 1715 req1->cq_page_table_addr_lo; 1716 ictx->ustorm_st_context.ring.cq_pbl_base.hi = 1717 req1->cq_page_table_addr_hi; 1718 ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN; 1719 ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi; 1720 ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo; 1721 ictx->ustorm_st_context.task_pbe_cache_index = 1722 BNX2X_ISCSI_PBL_NOT_CACHED; 1723 ictx->ustorm_st_context.task_pdu_cache_index = 1724 BNX2X_ISCSI_PDU_HEADER_NOT_CACHED; 1725 1726 for (i = 1, j = 1; i < cp->num_cqs; i++, j++) { 1727 if (j == 3) { 1728 if (n >= n_max) 1729 break; 1730 req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++]; 1731 j = 0; 1732 } 1733 ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN; 1734 ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo = 1735 req3->qp_first_pte[j].hi; 1736 ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi = 1737 req3->qp_first_pte[j].lo; 1738 } 1739 1740 ictx->ustorm_st_context.task_pbl_base.lo = 1741 iscsi->task_array_info.pgtbl_map & 0xffffffff; 1742 ictx->ustorm_st_context.task_pbl_base.hi = 1743 (u64) iscsi->task_array_info.pgtbl_map >> 32; 1744 ictx->ustorm_st_context.tce_phy_addr.lo = 1745 iscsi->task_array_info.pgtbl[0]; 1746 ictx->ustorm_st_context.tce_phy_addr.hi = 1747 iscsi->task_array_info.pgtbl[1]; 1748 ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id; 1749 ictx->ustorm_st_context.num_cqs = cp->num_cqs; 1750 ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN; 1751 ictx->ustorm_st_context.negotiated_rx_and_flags |= 1752 ISCSI_DEF_MAX_BURST_LEN; 1753 ictx->ustorm_st_context.negotiated_rx |= 1754 ISCSI_DEFAULT_MAX_OUTSTANDING_R2T << 1755 USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT; 1756 1757 ictx->cstorm_st_context.hq_pbl_base.lo = 1758 iscsi->hq_info.pgtbl_map & 0xffffffff; 1759 ictx->cstorm_st_context.hq_pbl_base.hi = 1760 (u64) iscsi->hq_info.pgtbl_map >> 32; 1761 ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0]; 1762 ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1]; 1763 ictx->cstorm_st_context.task_pbl_base.lo = 1764 iscsi->task_array_info.pgtbl_map & 0xffffffff; 1765 ictx->cstorm_st_context.task_pbl_base.hi = 1766 (u64) iscsi->task_array_info.pgtbl_map >> 32; 1767 /* CSTORM and USTORM initialization is different, CSTORM requires 1768 * CQ DB base & not PTE addr */ 1769 ictx->cstorm_st_context.cq_db_base.lo = 1770 req1->cq_page_table_addr_lo & PAGE_MASK; 1771 ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi; 1772 ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id; 1773 ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1; 1774 for (i = 0; i < cp->num_cqs; i++) { 1775 ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] = 1776 ISCSI_INITIAL_SN; 1777 ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] = 1778 ISCSI_INITIAL_SN; 1779 } 1780 1781 ictx->xstorm_ag_context.cdu_reserved = 1782 CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG, 1783 ISCSI_CONNECTION_TYPE); 1784 ictx->ustorm_ag_context.cdu_usage = 1785 CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG, 1786 ISCSI_CONNECTION_TYPE); 1787 return 0; 1788 1789 } 1790 1791 static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], 1792 u32 num, int *work) 1793 { 1794 struct 
iscsi_kwqe_conn_offload1 *req1; 1795 struct iscsi_kwqe_conn_offload2 *req2; 1796 struct cnic_local *cp = dev->cnic_priv; 1797 struct cnic_context *ctx; 1798 struct iscsi_kcqe kcqe; 1799 struct kcqe *cqes[1]; 1800 u32 l5_cid; 1801 int ret = 0; 1802 1803 if (num < 2) { 1804 *work = num; 1805 return -EINVAL; 1806 } 1807 1808 req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0]; 1809 req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1]; 1810 if ((num - 2) < req2->num_additional_wqes) { 1811 *work = num; 1812 return -EINVAL; 1813 } 1814 *work = 2 + req2->num_additional_wqes; 1815 1816 l5_cid = req1->iscsi_conn_id; 1817 if (l5_cid >= MAX_ISCSI_TBL_SZ) 1818 return -EINVAL; 1819 1820 memset(&kcqe, 0, sizeof(kcqe)); 1821 kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN; 1822 kcqe.iscsi_conn_id = l5_cid; 1823 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE; 1824 1825 ctx = &cp->ctx_tbl[l5_cid]; 1826 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) { 1827 kcqe.completion_status = 1828 ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY; 1829 goto done; 1830 } 1831 1832 if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) { 1833 atomic_dec(&cp->iscsi_conn); 1834 goto done; 1835 } 1836 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid); 1837 if (ret) { 1838 atomic_dec(&cp->iscsi_conn); 1839 ret = 0; 1840 goto done; 1841 } 1842 ret = cnic_setup_bnx2x_ctx(dev, wqes, num); 1843 if (ret < 0) { 1844 cnic_free_bnx2x_conn_resc(dev, l5_cid); 1845 atomic_dec(&cp->iscsi_conn); 1846 goto done; 1847 } 1848 1849 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; 1850 kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid); 1851 1852 done: 1853 cqes[0] = (struct kcqe *) &kcqe; 1854 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); 1855 return 0; 1856 } 1857 1858 1859 static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe) 1860 { 1861 struct cnic_local *cp = dev->cnic_priv; 1862 struct iscsi_kwqe_conn_update *req = 1863 (struct iscsi_kwqe_conn_update *) kwqe; 1864 void *data; 1865 union l5cm_specific_data l5_data; 1866 u32 l5_cid, cid = BNX2X_SW_CID(req->context_id); 1867 int ret; 1868 1869 if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0) 1870 return -EINVAL; 1871 1872 data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 1873 if (!data) 1874 return -ENOMEM; 1875 1876 memcpy(data, kwqe, sizeof(struct kwqe)); 1877 1878 ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN, 1879 req->context_id, ISCSI_CONNECTION_TYPE, &l5_data); 1880 return ret; 1881 } 1882 1883 static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid) 1884 { 1885 struct cnic_local *cp = dev->cnic_priv; 1886 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 1887 union l5cm_specific_data l5_data; 1888 int ret; 1889 u32 hw_cid; 1890 1891 init_waitqueue_head(&ctx->waitq); 1892 ctx->wait_cond = 0; 1893 memset(&l5_data, 0, sizeof(l5_data)); 1894 hw_cid = BNX2X_HW_CID(cp, ctx->cid); 1895 1896 ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL, 1897 hw_cid, NONE_CONNECTION_TYPE, &l5_data); 1898 1899 if (ret == 0) { 1900 wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO); 1901 if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags))) 1902 return -EBUSY; 1903 } 1904 1905 return 0; 1906 } 1907 1908 static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe) 1909 { 1910 struct cnic_local *cp = dev->cnic_priv; 1911 struct iscsi_kwqe_conn_destroy *req = 1912 (struct iscsi_kwqe_conn_destroy *) kwqe; 1913 u32 l5_cid = req->reserved0; 1914 struct 
cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 1915 int ret = 0; 1916 struct iscsi_kcqe kcqe; 1917 struct kcqe *cqes[1]; 1918 1919 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) 1920 goto skip_cfc_delete; 1921 1922 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) { 1923 unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies; 1924 1925 if (delta > (2 * HZ)) 1926 delta = 0; 1927 1928 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags); 1929 queue_delayed_work(cnic_wq, &cp->delete_task, delta); 1930 goto destroy_reply; 1931 } 1932 1933 ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid); 1934 1935 skip_cfc_delete: 1936 cnic_free_bnx2x_conn_resc(dev, l5_cid); 1937 1938 if (!ret) { 1939 atomic_dec(&cp->iscsi_conn); 1940 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); 1941 } 1942 1943 destroy_reply: 1944 memset(&kcqe, 0, sizeof(kcqe)); 1945 kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN; 1946 kcqe.iscsi_conn_id = l5_cid; 1947 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; 1948 kcqe.iscsi_conn_context_id = req->context_id; 1949 1950 cqes[0] = (struct kcqe *) &kcqe; 1951 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); 1952 1953 return 0; 1954 } 1955 1956 static void cnic_init_storm_conn_bufs(struct cnic_dev *dev, 1957 struct l4_kwq_connect_req1 *kwqe1, 1958 struct l4_kwq_connect_req3 *kwqe3, 1959 struct l5cm_active_conn_buffer *conn_buf) 1960 { 1961 struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf; 1962 struct l5cm_xstorm_conn_buffer *xstorm_buf = 1963 &conn_buf->xstorm_conn_buffer; 1964 struct l5cm_tstorm_conn_buffer *tstorm_buf = 1965 &conn_buf->tstorm_conn_buffer; 1966 struct regpair context_addr; 1967 u32 cid = BNX2X_SW_CID(kwqe1->cid); 1968 struct in6_addr src_ip, dst_ip; 1969 int i; 1970 u32 *addrp; 1971 1972 addrp = (u32 *) &conn_addr->local_ip_addr; 1973 for (i = 0; i < 4; i++, addrp++) 1974 src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp); 1975 1976 addrp = (u32 *) &conn_addr->remote_ip_addr; 1977 for (i = 0; i < 4; i++, addrp++) 1978 dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp); 1979 1980 cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr); 1981 1982 xstorm_buf->context_addr.hi = context_addr.hi; 1983 xstorm_buf->context_addr.lo = context_addr.lo; 1984 xstorm_buf->mss = 0xffff; 1985 xstorm_buf->rcv_buf = kwqe3->rcv_buf; 1986 if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE) 1987 xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE; 1988 xstorm_buf->pseudo_header_checksum = 1989 swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0)); 1990 1991 if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK)) 1992 tstorm_buf->params |= 1993 L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE; 1994 if (kwqe3->ka_timeout) { 1995 tstorm_buf->ka_enable = 1; 1996 tstorm_buf->ka_timeout = kwqe3->ka_timeout; 1997 tstorm_buf->ka_interval = kwqe3->ka_interval; 1998 tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count; 1999 } 2000 tstorm_buf->max_rt_time = 0xffffffff; 2001 } 2002 2003 static void cnic_init_bnx2x_mac(struct cnic_dev *dev) 2004 { 2005 struct cnic_local *cp = dev->cnic_priv; 2006 u32 pfid = cp->pfid; 2007 u8 *mac = dev->mac_addr; 2008 2009 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 2010 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]); 2011 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 2012 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]); 2013 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 2014 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]); 2015 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 2016 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]); 2017 CNIC_WR8(dev, 
BAR_XSTRORM_INTMEM + 2018 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]); 2019 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 2020 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]); 2021 2022 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 2023 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]); 2024 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 2025 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, 2026 mac[4]); 2027 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 2028 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]); 2029 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 2030 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, 2031 mac[2]); 2032 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 2033 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]); 2034 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 2035 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, 2036 mac[0]); 2037 } 2038 2039 static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts) 2040 { 2041 struct cnic_local *cp = dev->cnic_priv; 2042 u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN; 2043 u16 tstorm_flags = 0; 2044 2045 if (tcp_ts) { 2046 xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED; 2047 tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED; 2048 } 2049 2050 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 2051 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags); 2052 2053 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + 2054 TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags); 2055 } 2056 2057 static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[], 2058 u32 num, int *work) 2059 { 2060 struct cnic_local *cp = dev->cnic_priv; 2061 struct l4_kwq_connect_req1 *kwqe1 = 2062 (struct l4_kwq_connect_req1 *) wqes[0]; 2063 struct l4_kwq_connect_req3 *kwqe3; 2064 struct l5cm_active_conn_buffer *conn_buf; 2065 struct l5cm_conn_addr_params *conn_addr; 2066 union l5cm_specific_data l5_data; 2067 u32 l5_cid = kwqe1->pg_cid; 2068 struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; 2069 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 2070 int ret; 2071 2072 if (num < 2) { 2073 *work = num; 2074 return -EINVAL; 2075 } 2076 2077 if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) 2078 *work = 3; 2079 else 2080 *work = 2; 2081 2082 if (num < *work) { 2083 *work = num; 2084 return -EINVAL; 2085 } 2086 2087 if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) { 2088 netdev_err(dev->netdev, "conn_buf size too big\n"); 2089 return -ENOMEM; 2090 } 2091 conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 2092 if (!conn_buf) 2093 return -ENOMEM; 2094 2095 memset(conn_buf, 0, sizeof(*conn_buf)); 2096 2097 conn_addr = &conn_buf->conn_addr_buf; 2098 conn_addr->remote_addr_0 = csk->ha[0]; 2099 conn_addr->remote_addr_1 = csk->ha[1]; 2100 conn_addr->remote_addr_2 = csk->ha[2]; 2101 conn_addr->remote_addr_3 = csk->ha[3]; 2102 conn_addr->remote_addr_4 = csk->ha[4]; 2103 conn_addr->remote_addr_5 = csk->ha[5]; 2104 2105 if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) { 2106 struct l4_kwq_connect_req2 *kwqe2 = 2107 (struct l4_kwq_connect_req2 *) wqes[1]; 2108 2109 conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4; 2110 conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3; 2111 conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2; 2112 2113 conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4; 2114 conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3; 2115 conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2; 2116 conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION; 2117 } 2118 kwqe3 = (struct l4_kwq_connect_req3 *) 
wqes[*work - 1]; 2119 2120 conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip; 2121 conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip; 2122 conn_addr->local_tcp_port = kwqe1->src_port; 2123 conn_addr->remote_tcp_port = kwqe1->dst_port; 2124 2125 conn_addr->pmtu = kwqe3->pmtu; 2126 cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf); 2127 2128 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + 2129 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id); 2130 2131 cnic_bnx2x_set_tcp_timestamp(dev, 2132 kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP); 2133 2134 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT, 2135 kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data); 2136 if (!ret) 2137 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); 2138 2139 return ret; 2140 } 2141 2142 static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe) 2143 { 2144 struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe; 2145 union l5cm_specific_data l5_data; 2146 int ret; 2147 2148 memset(&l5_data, 0, sizeof(l5_data)); 2149 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE, 2150 req->cid, ISCSI_CONNECTION_TYPE, &l5_data); 2151 return ret; 2152 } 2153 2154 static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe) 2155 { 2156 struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe; 2157 union l5cm_specific_data l5_data; 2158 int ret; 2159 2160 memset(&l5_data, 0, sizeof(l5_data)); 2161 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT, 2162 req->cid, ISCSI_CONNECTION_TYPE, &l5_data); 2163 return ret; 2164 } 2165 static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe) 2166 { 2167 struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe; 2168 struct l4_kcq kcqe; 2169 struct kcqe *cqes[1]; 2170 2171 memset(&kcqe, 0, sizeof(kcqe)); 2172 kcqe.pg_host_opaque = req->host_opaque; 2173 kcqe.pg_cid = req->host_opaque; 2174 kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG; 2175 cqes[0] = (struct kcqe *) &kcqe; 2176 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); 2177 return 0; 2178 } 2179 2180 static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe) 2181 { 2182 struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe; 2183 struct l4_kcq kcqe; 2184 struct kcqe *cqes[1]; 2185 2186 memset(&kcqe, 0, sizeof(kcqe)); 2187 kcqe.pg_host_opaque = req->pg_host_opaque; 2188 kcqe.pg_cid = req->pg_cid; 2189 kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG; 2190 cqes[0] = (struct kcqe *) &kcqe; 2191 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); 2192 return 0; 2193 } 2194 2195 static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe) 2196 { 2197 struct fcoe_kwqe_stat *req; 2198 struct fcoe_stat_ramrod_params *fcoe_stat; 2199 union l5cm_specific_data l5_data; 2200 struct cnic_local *cp = dev->cnic_priv; 2201 int ret; 2202 u32 cid; 2203 2204 req = (struct fcoe_kwqe_stat *) kwqe; 2205 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); 2206 2207 fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data); 2208 if (!fcoe_stat) 2209 return -ENOMEM; 2210 2211 memset(fcoe_stat, 0, sizeof(*fcoe_stat)); 2212 memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req)); 2213 2214 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid, 2215 FCOE_CONNECTION_TYPE, &l5_data); 2216 return ret; 2217 } 2218 2219 static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[], 2220 u32 num, int *work) 2221 { 2222 int ret; 2223 struct cnic_local *cp = dev->cnic_priv; 2224 u32 cid; 2225 struct fcoe_init_ramrod_params *fcoe_init; 
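	/* fcoe_init collects the three linked INIT KWQEs into one ramrod
	 * parameter block carved out of the per-connection KWQE-16 data
	 * area (cnic_get_kwqe_16_data() below) before the INIT_FUNC ramrod
	 * is submitted.
	 */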
2226 struct fcoe_kwqe_init1 *req1; 2227 struct fcoe_kwqe_init2 *req2; 2228 struct fcoe_kwqe_init3 *req3; 2229 union l5cm_specific_data l5_data; 2230 2231 if (num < 3) { 2232 *work = num; 2233 return -EINVAL; 2234 } 2235 req1 = (struct fcoe_kwqe_init1 *) wqes[0]; 2236 req2 = (struct fcoe_kwqe_init2 *) wqes[1]; 2237 req3 = (struct fcoe_kwqe_init3 *) wqes[2]; 2238 if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) { 2239 *work = 1; 2240 return -EINVAL; 2241 } 2242 if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) { 2243 *work = 2; 2244 return -EINVAL; 2245 } 2246 2247 if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) { 2248 netdev_err(dev->netdev, "fcoe_init size too big\n"); 2249 return -ENOMEM; 2250 } 2251 fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data); 2252 if (!fcoe_init) 2253 return -ENOMEM; 2254 2255 memset(fcoe_init, 0, sizeof(*fcoe_init)); 2256 memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1)); 2257 memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2)); 2258 memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3)); 2259 fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff; 2260 fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32; 2261 fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages; 2262 2263 fcoe_init->sb_num = cp->status_blk_num; 2264 fcoe_init->eq_prod = MAX_KCQ_IDX; 2265 fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS; 2266 cp->kcq2.sw_prod_idx = 0; 2267 2268 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); 2269 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid, 2270 FCOE_CONNECTION_TYPE, &l5_data); 2271 *work = 3; 2272 return ret; 2273 } 2274 2275 static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], 2276 u32 num, int *work) 2277 { 2278 int ret = 0; 2279 u32 cid = -1, l5_cid; 2280 struct cnic_local *cp = dev->cnic_priv; 2281 struct fcoe_kwqe_conn_offload1 *req1; 2282 struct fcoe_kwqe_conn_offload2 *req2; 2283 struct fcoe_kwqe_conn_offload3 *req3; 2284 struct fcoe_kwqe_conn_offload4 *req4; 2285 struct fcoe_conn_offload_ramrod_params *fcoe_offload; 2286 struct cnic_context *ctx; 2287 struct fcoe_context *fctx; 2288 struct regpair ctx_addr; 2289 union l5cm_specific_data l5_data; 2290 struct fcoe_kcqe kcqe; 2291 struct kcqe *cqes[1]; 2292 2293 if (num < 4) { 2294 *work = num; 2295 return -EINVAL; 2296 } 2297 req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0]; 2298 req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1]; 2299 req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2]; 2300 req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3]; 2301 2302 *work = 4; 2303 2304 l5_cid = req1->fcoe_conn_id; 2305 if (l5_cid >= dev->max_fcoe_conn) 2306 goto err_reply; 2307 2308 l5_cid += BNX2X_FCOE_L5_CID_BASE; 2309 2310 ctx = &cp->ctx_tbl[l5_cid]; 2311 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) 2312 goto err_reply; 2313 2314 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid); 2315 if (ret) { 2316 ret = 0; 2317 goto err_reply; 2318 } 2319 cid = ctx->cid; 2320 2321 fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr); 2322 if (fctx) { 2323 u32 hw_cid = BNX2X_HW_CID(cp, cid); 2324 u32 val; 2325 2326 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG, 2327 FCOE_CONNECTION_TYPE); 2328 fctx->xstorm_ag_context.cdu_reserved = val; 2329 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG, 2330 FCOE_CONNECTION_TYPE); 2331 fctx->ustorm_ag_context.cdu_usage = val; 2332 } 2333 if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) { 2334 netdev_err(dev->netdev, "fcoe_offload size too big\n"); 2335 goto err_reply; 2336 } 2337 fcoe_offload = 
cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 2338 if (!fcoe_offload) 2339 goto err_reply; 2340 2341 memset(fcoe_offload, 0, sizeof(*fcoe_offload)); 2342 memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1)); 2343 memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2)); 2344 memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3)); 2345 memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4)); 2346 2347 cid = BNX2X_HW_CID(cp, cid); 2348 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid, 2349 FCOE_CONNECTION_TYPE, &l5_data); 2350 if (!ret) 2351 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); 2352 2353 return ret; 2354 2355 err_reply: 2356 if (cid != -1) 2357 cnic_free_bnx2x_conn_resc(dev, l5_cid); 2358 2359 memset(&kcqe, 0, sizeof(kcqe)); 2360 kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN; 2361 kcqe.fcoe_conn_id = req1->fcoe_conn_id; 2362 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE; 2363 2364 cqes[0] = (struct kcqe *) &kcqe; 2365 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1); 2366 return ret; 2367 } 2368 2369 static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe) 2370 { 2371 struct fcoe_kwqe_conn_enable_disable *req; 2372 struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable; 2373 union l5cm_specific_data l5_data; 2374 int ret; 2375 u32 cid, l5_cid; 2376 struct cnic_local *cp = dev->cnic_priv; 2377 2378 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe; 2379 cid = req->context_id; 2380 l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE; 2381 2382 if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) { 2383 netdev_err(dev->netdev, "fcoe_enable size too big\n"); 2384 return -ENOMEM; 2385 } 2386 fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 2387 if (!fcoe_enable) 2388 return -ENOMEM; 2389 2390 memset(fcoe_enable, 0, sizeof(*fcoe_enable)); 2391 memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req)); 2392 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid, 2393 FCOE_CONNECTION_TYPE, &l5_data); 2394 return ret; 2395 } 2396 2397 static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe) 2398 { 2399 struct fcoe_kwqe_conn_enable_disable *req; 2400 struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable; 2401 union l5cm_specific_data l5_data; 2402 int ret; 2403 u32 cid, l5_cid; 2404 struct cnic_local *cp = dev->cnic_priv; 2405 2406 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe; 2407 cid = req->context_id; 2408 l5_cid = req->conn_id; 2409 if (l5_cid >= dev->max_fcoe_conn) 2410 return -EINVAL; 2411 2412 l5_cid += BNX2X_FCOE_L5_CID_BASE; 2413 2414 if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) { 2415 netdev_err(dev->netdev, "fcoe_disable size too big\n"); 2416 return -ENOMEM; 2417 } 2418 fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 2419 if (!fcoe_disable) 2420 return -ENOMEM; 2421 2422 memset(fcoe_disable, 0, sizeof(*fcoe_disable)); 2423 memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req)); 2424 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid, 2425 FCOE_CONNECTION_TYPE, &l5_data); 2426 return ret; 2427 } 2428 2429 static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe) 2430 { 2431 struct fcoe_kwqe_conn_destroy *req; 2432 union l5cm_specific_data l5_data; 2433 int ret; 2434 u32 cid, l5_cid; 2435 struct cnic_local *cp = dev->cnic_priv; 2436 struct cnic_context *ctx; 2437 struct fcoe_kcqe kcqe; 2438 struct kcqe *cqes[1]; 2439 2440 req = (struct fcoe_kwqe_conn_destroy *) kwqe; 2441 cid = req->context_id; 2442 
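	/* Tear-down path: validate conn_id, fire the TERMINATE_CONN ramrod,
	 * wait (bounded by CNIC_RAMROD_TMO) for the completion to set
	 * ctx->wait_cond, then hand the CID to delete_task for the delayed
	 * CFC delete and reply with a DESTROY_CONN KCQE.
	 */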
l5_cid = req->conn_id; 2443 if (l5_cid >= dev->max_fcoe_conn) 2444 return -EINVAL; 2445 2446 l5_cid += BNX2X_FCOE_L5_CID_BASE; 2447 2448 ctx = &cp->ctx_tbl[l5_cid]; 2449 2450 init_waitqueue_head(&ctx->waitq); 2451 ctx->wait_cond = 0; 2452 2453 memset(&kcqe, 0, sizeof(kcqe)); 2454 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR; 2455 memset(&l5_data, 0, sizeof(l5_data)); 2456 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid, 2457 FCOE_CONNECTION_TYPE, &l5_data); 2458 if (ret == 0) { 2459 wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO); 2460 if (ctx->wait_cond) 2461 kcqe.completion_status = 0; 2462 } 2463 2464 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags); 2465 queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000)); 2466 2467 kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN; 2468 kcqe.fcoe_conn_id = req->conn_id; 2469 kcqe.fcoe_conn_context_id = cid; 2470 2471 cqes[0] = (struct kcqe *) &kcqe; 2472 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1); 2473 return ret; 2474 } 2475 2476 static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid) 2477 { 2478 struct cnic_local *cp = dev->cnic_priv; 2479 u32 i; 2480 2481 for (i = start_cid; i < cp->max_cid_space; i++) { 2482 struct cnic_context *ctx = &cp->ctx_tbl[i]; 2483 int j; 2484 2485 while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) 2486 msleep(10); 2487 2488 for (j = 0; j < 5; j++) { 2489 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) 2490 break; 2491 msleep(20); 2492 } 2493 2494 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) 2495 netdev_warn(dev->netdev, "CID %x not deleted\n", 2496 ctx->cid); 2497 } 2498 } 2499 2500 static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe) 2501 { 2502 struct fcoe_kwqe_destroy *req; 2503 union l5cm_specific_data l5_data; 2504 struct cnic_local *cp = dev->cnic_priv; 2505 int ret; 2506 u32 cid; 2507 2508 cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ); 2509 2510 req = (struct fcoe_kwqe_destroy *) kwqe; 2511 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); 2512 2513 memset(&l5_data, 0, sizeof(l5_data)); 2514 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid, 2515 FCOE_CONNECTION_TYPE, &l5_data); 2516 return ret; 2517 } 2518 2519 static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe) 2520 { 2521 struct cnic_local *cp = dev->cnic_priv; 2522 struct kcqe kcqe; 2523 struct kcqe *cqes[1]; 2524 u32 cid; 2525 u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); 2526 u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK; 2527 u32 kcqe_op; 2528 int ulp_type; 2529 2530 cid = kwqe->kwqe_info0; 2531 memset(&kcqe, 0, sizeof(kcqe)); 2532 2533 if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) { 2534 u32 l5_cid = 0; 2535 2536 ulp_type = CNIC_ULP_FCOE; 2537 if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) { 2538 struct fcoe_kwqe_conn_enable_disable *req; 2539 2540 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe; 2541 kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN; 2542 cid = req->context_id; 2543 l5_cid = req->conn_id; 2544 } else if (opcode == FCOE_KWQE_OPCODE_DESTROY) { 2545 kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC; 2546 } else { 2547 return; 2548 } 2549 kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT; 2550 kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE; 2551 kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR; 2552 kcqe.kcqe_info2 = cid; 2553 kcqe.kcqe_info0 = l5_cid; 2554 2555 } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) { 2556 ulp_type = CNIC_ULP_ISCSI; 2557 if (opcode == 
ISCSI_KWQE_OPCODE_UPDATE_CONN) 2558 cid = kwqe->kwqe_info1; 2559 2560 kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT; 2561 kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI; 2562 kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR; 2563 kcqe.kcqe_info2 = cid; 2564 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0); 2565 2566 } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) { 2567 struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe; 2568 2569 ulp_type = CNIC_ULP_L4; 2570 if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1) 2571 kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE; 2572 else if (opcode == L4_KWQE_OPCODE_VALUE_RESET) 2573 kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP; 2574 else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE) 2575 kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; 2576 else 2577 return; 2578 2579 kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) | 2580 KCQE_FLAGS_LAYER_MASK_L4; 2581 l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR; 2582 l4kcqe->cid = cid; 2583 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id); 2584 } else { 2585 return; 2586 } 2587 2588 cqes[0] = (struct kcqe *) &kcqe; 2589 cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1); 2590 } 2591 2592 static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev, 2593 struct kwqe *wqes[], u32 num_wqes) 2594 { 2595 int i, work, ret; 2596 u32 opcode; 2597 struct kwqe *kwqe; 2598 2599 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 2600 return -EAGAIN; /* bnx2 is down */ 2601 2602 for (i = 0; i < num_wqes; ) { 2603 kwqe = wqes[i]; 2604 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); 2605 work = 1; 2606 2607 switch (opcode) { 2608 case ISCSI_KWQE_OPCODE_INIT1: 2609 ret = cnic_bnx2x_iscsi_init1(dev, kwqe); 2610 break; 2611 case ISCSI_KWQE_OPCODE_INIT2: 2612 ret = cnic_bnx2x_iscsi_init2(dev, kwqe); 2613 break; 2614 case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1: 2615 ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i], 2616 num_wqes - i, &work); 2617 break; 2618 case ISCSI_KWQE_OPCODE_UPDATE_CONN: 2619 ret = cnic_bnx2x_iscsi_update(dev, kwqe); 2620 break; 2621 case ISCSI_KWQE_OPCODE_DESTROY_CONN: 2622 ret = cnic_bnx2x_iscsi_destroy(dev, kwqe); 2623 break; 2624 case L4_KWQE_OPCODE_VALUE_CONNECT1: 2625 ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i, 2626 &work); 2627 break; 2628 case L4_KWQE_OPCODE_VALUE_CLOSE: 2629 ret = cnic_bnx2x_close(dev, kwqe); 2630 break; 2631 case L4_KWQE_OPCODE_VALUE_RESET: 2632 ret = cnic_bnx2x_reset(dev, kwqe); 2633 break; 2634 case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG: 2635 ret = cnic_bnx2x_offload_pg(dev, kwqe); 2636 break; 2637 case L4_KWQE_OPCODE_VALUE_UPDATE_PG: 2638 ret = cnic_bnx2x_update_pg(dev, kwqe); 2639 break; 2640 case L4_KWQE_OPCODE_VALUE_UPLOAD_PG: 2641 ret = 0; 2642 break; 2643 default: 2644 ret = 0; 2645 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n", 2646 opcode); 2647 break; 2648 } 2649 if (ret < 0) { 2650 netdev_err(dev->netdev, "KWQE(0x%x) failed\n", 2651 opcode); 2652 2653 /* Possibly bnx2x parity error, send completion 2654 * to ulp drivers with error code to speed up 2655 * cleanup and reset recovery. 
2656 */ 2657 if (ret == -EIO || ret == -EAGAIN) 2658 cnic_bnx2x_kwqe_err(dev, kwqe); 2659 } 2660 i += work; 2661 } 2662 return 0; 2663 } 2664 2665 static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev, 2666 struct kwqe *wqes[], u32 num_wqes) 2667 { 2668 struct cnic_local *cp = dev->cnic_priv; 2669 int i, work, ret; 2670 u32 opcode; 2671 struct kwqe *kwqe; 2672 2673 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 2674 return -EAGAIN; /* bnx2 is down */ 2675 2676 if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) 2677 return -EINVAL; 2678 2679 for (i = 0; i < num_wqes; ) { 2680 kwqe = wqes[i]; 2681 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); 2682 work = 1; 2683 2684 switch (opcode) { 2685 case FCOE_KWQE_OPCODE_INIT1: 2686 ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i], 2687 num_wqes - i, &work); 2688 break; 2689 case FCOE_KWQE_OPCODE_OFFLOAD_CONN1: 2690 ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i], 2691 num_wqes - i, &work); 2692 break; 2693 case FCOE_KWQE_OPCODE_ENABLE_CONN: 2694 ret = cnic_bnx2x_fcoe_enable(dev, kwqe); 2695 break; 2696 case FCOE_KWQE_OPCODE_DISABLE_CONN: 2697 ret = cnic_bnx2x_fcoe_disable(dev, kwqe); 2698 break; 2699 case FCOE_KWQE_OPCODE_DESTROY_CONN: 2700 ret = cnic_bnx2x_fcoe_destroy(dev, kwqe); 2701 break; 2702 case FCOE_KWQE_OPCODE_DESTROY: 2703 ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe); 2704 break; 2705 case FCOE_KWQE_OPCODE_STAT: 2706 ret = cnic_bnx2x_fcoe_stat(dev, kwqe); 2707 break; 2708 default: 2709 ret = 0; 2710 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n", 2711 opcode); 2712 break; 2713 } 2714 if (ret < 0) { 2715 netdev_err(dev->netdev, "KWQE(0x%x) failed\n", 2716 opcode); 2717 2718 /* Possibly bnx2x parity error, send completion 2719 * to ulp drivers with error code to speed up 2720 * cleanup and reset recovery. 2721 */ 2722 if (ret == -EIO || ret == -EAGAIN) 2723 cnic_bnx2x_kwqe_err(dev, kwqe); 2724 } 2725 i += work; 2726 } 2727 return 0; 2728 } 2729 2730 static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], 2731 u32 num_wqes) 2732 { 2733 int ret = -EINVAL; 2734 u32 layer_code; 2735 2736 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 2737 return -EAGAIN; /* bnx2x is down */ 2738 2739 if (!num_wqes) 2740 return 0; 2741 2742 layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK; 2743 switch (layer_code) { 2744 case KWQE_FLAGS_LAYER_MASK_L5_ISCSI: 2745 case KWQE_FLAGS_LAYER_MASK_L4: 2746 case KWQE_FLAGS_LAYER_MASK_L2: 2747 ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes); 2748 break; 2749 2750 case KWQE_FLAGS_LAYER_MASK_L5_FCOE: 2751 ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes); 2752 break; 2753 } 2754 return ret; 2755 } 2756 2757 static inline u32 cnic_get_kcqe_layer_mask(u32 opflag) 2758 { 2759 if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN)) 2760 return KCQE_FLAGS_LAYER_MASK_L4; 2761 2762 return opflag & KCQE_FLAGS_LAYER_MASK; 2763 } 2764 2765 static void service_kcqes(struct cnic_dev *dev, int num_cqes) 2766 { 2767 struct cnic_local *cp = dev->cnic_priv; 2768 int i, j, comp = 0; 2769 2770 i = 0; 2771 j = 1; 2772 while (num_cqes) { 2773 struct cnic_ulp_ops *ulp_ops; 2774 int ulp_type; 2775 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag; 2776 u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag); 2777 2778 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION)) 2779 comp++; 2780 2781 while (j < num_cqes) { 2782 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag; 2783 2784 if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer) 2785 break; 2786 2787 if (unlikely(next_op & KCQE_RAMROD_COMPLETION)) 
2788 comp++; 2789 j++; 2790 } 2791 2792 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA) 2793 ulp_type = CNIC_ULP_RDMA; 2794 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI) 2795 ulp_type = CNIC_ULP_ISCSI; 2796 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE) 2797 ulp_type = CNIC_ULP_FCOE; 2798 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4) 2799 ulp_type = CNIC_ULP_L4; 2800 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2) 2801 goto end; 2802 else { 2803 netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n", 2804 kcqe_op_flag); 2805 goto end; 2806 } 2807 2808 rcu_read_lock(); 2809 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); 2810 if (likely(ulp_ops)) { 2811 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], 2812 cp->completed_kcq + i, j); 2813 } 2814 rcu_read_unlock(); 2815 end: 2816 num_cqes -= j; 2817 i += j; 2818 j = 1; 2819 } 2820 if (unlikely(comp)) 2821 cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp); 2822 } 2823 2824 static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info) 2825 { 2826 struct cnic_local *cp = dev->cnic_priv; 2827 u16 i, ri, hw_prod, last; 2828 struct kcqe *kcqe; 2829 int kcqe_cnt = 0, last_cnt = 0; 2830 2831 i = ri = last = info->sw_prod_idx; 2832 ri &= MAX_KCQ_IDX; 2833 hw_prod = *info->hw_prod_idx_ptr; 2834 hw_prod = info->hw_idx(hw_prod); 2835 2836 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) { 2837 kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)]; 2838 cp->completed_kcq[kcqe_cnt++] = kcqe; 2839 i = info->next_idx(i); 2840 ri = i & MAX_KCQ_IDX; 2841 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) { 2842 last_cnt = kcqe_cnt; 2843 last = i; 2844 } 2845 } 2846 2847 info->sw_prod_idx = last; 2848 return last_cnt; 2849 } 2850 2851 static int cnic_l2_completion(struct cnic_local *cp) 2852 { 2853 u16 hw_cons, sw_cons; 2854 struct cnic_uio_dev *udev = cp->udev; 2855 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) 2856 (udev->l2_ring + (2 * BCM_PAGE_SIZE)); 2857 u32 cmd; 2858 int comp = 0; 2859 2860 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags)) 2861 return 0; 2862 2863 hw_cons = *cp->rx_cons_ptr; 2864 if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT) 2865 hw_cons++; 2866 2867 sw_cons = cp->rx_cons; 2868 while (sw_cons != hw_cons) { 2869 u8 cqe_fp_flags; 2870 2871 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT]; 2872 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; 2873 if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) { 2874 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data); 2875 cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT; 2876 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP || 2877 cmd == RAMROD_CMD_ID_ETH_HALT) 2878 comp++; 2879 } 2880 sw_cons = BNX2X_NEXT_RCQE(sw_cons); 2881 } 2882 return comp; 2883 } 2884 2885 static void cnic_chk_pkt_rings(struct cnic_local *cp) 2886 { 2887 u16 rx_cons, tx_cons; 2888 int comp = 0; 2889 2890 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) 2891 return; 2892 2893 rx_cons = *cp->rx_cons_ptr; 2894 tx_cons = *cp->tx_cons_ptr; 2895 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { 2896 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 2897 comp = cnic_l2_completion(cp); 2898 2899 cp->tx_cons = tx_cons; 2900 cp->rx_cons = rx_cons; 2901 2902 if (cp->udev) 2903 uio_event_notify(&cp->udev->cnic_uinfo); 2904 } 2905 if (comp) 2906 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 2907 } 2908 2909 static u32 cnic_service_bnx2_queues(struct cnic_dev *dev) 2910 { 2911 struct cnic_local *cp = dev->cnic_priv; 2912 u32 status_idx = 
(u16) *cp->kcq1.status_idx_ptr; 2913 int kcqe_cnt; 2914 2915 /* status block index must be read before reading other fields */ 2916 rmb(); 2917 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2918 2919 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { 2920 2921 service_kcqes(dev, kcqe_cnt); 2922 2923 /* Tell compiler that status_blk fields can change. */ 2924 barrier(); 2925 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2926 /* status block index must be read first */ 2927 rmb(); 2928 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2929 } 2930 2931 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx); 2932 2933 cnic_chk_pkt_rings(cp); 2934 2935 return status_idx; 2936 } 2937 2938 static int cnic_service_bnx2(void *data, void *status_blk) 2939 { 2940 struct cnic_dev *dev = data; 2941 2942 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) { 2943 struct status_block *sblk = status_blk; 2944 2945 return sblk->status_idx; 2946 } 2947 2948 return cnic_service_bnx2_queues(dev); 2949 } 2950 2951 static void cnic_service_bnx2_msix(unsigned long data) 2952 { 2953 struct cnic_dev *dev = (struct cnic_dev *) data; 2954 struct cnic_local *cp = dev->cnic_priv; 2955 2956 cp->last_status_idx = cnic_service_bnx2_queues(dev); 2957 2958 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 2959 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); 2960 } 2961 2962 static void cnic_doirq(struct cnic_dev *dev) 2963 { 2964 struct cnic_local *cp = dev->cnic_priv; 2965 2966 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) { 2967 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX; 2968 2969 prefetch(cp->status_blk.gen); 2970 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); 2971 2972 tasklet_schedule(&cp->cnic_irq_task); 2973 } 2974 } 2975 2976 static irqreturn_t cnic_irq(int irq, void *dev_instance) 2977 { 2978 struct cnic_dev *dev = dev_instance; 2979 struct cnic_local *cp = dev->cnic_priv; 2980 2981 if (cp->ack_int) 2982 cp->ack_int(dev); 2983 2984 cnic_doirq(dev); 2985 2986 return IRQ_HANDLED; 2987 } 2988 2989 static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm, 2990 u16 index, u8 op, u8 update) 2991 { 2992 struct cnic_local *cp = dev->cnic_priv; 2993 u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 + 2994 COMMAND_REG_INT_ACK); 2995 struct igu_ack_register igu_ack; 2996 2997 igu_ack.status_block_index = index; 2998 igu_ack.sb_id_and_flags = 2999 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | 3000 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | 3001 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | 3002 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); 3003 3004 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack)); 3005 } 3006 3007 static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment, 3008 u16 index, u8 op, u8 update) 3009 { 3010 struct igu_regular cmd_data; 3011 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8; 3012 3013 cmd_data.sb_id_and_flags = 3014 (index << IGU_REGULAR_SB_INDEX_SHIFT) | 3015 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | 3016 (update << IGU_REGULAR_BUPDATE_SHIFT) | 3017 (op << IGU_REGULAR_ENABLE_INT_SHIFT); 3018 3019 3020 CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags); 3021 } 3022 3023 static void cnic_ack_bnx2x_msix(struct cnic_dev *dev) 3024 { 3025 struct cnic_local *cp = dev->cnic_priv; 3026 3027 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0, 3028 IGU_INT_DISABLE, 0); 3029 } 3030 3031 static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev) 3032 { 3033 struct cnic_local *cp = dev->cnic_priv; 3034 3035 
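	/* E2 and newer chips ack through the IGU; the interrupt stays
	 * disabled here and is re-enabled by cnic_service_bnx2x_bh() once
	 * the KCQs have been drained.
	 */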
cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0, 3036 IGU_INT_DISABLE, 0); 3037 } 3038 3039 static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) 3040 { 3041 u32 last_status = *info->status_idx_ptr; 3042 int kcqe_cnt; 3043 3044 /* status block index must be read before reading the KCQ */ 3045 rmb(); 3046 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { 3047 3048 service_kcqes(dev, kcqe_cnt); 3049 3050 /* Tell compiler that sblk fields can change. */ 3051 barrier(); 3052 3053 last_status = *info->status_idx_ptr; 3054 /* status block index must be read before reading the KCQ */ 3055 rmb(); 3056 } 3057 return last_status; 3058 } 3059 3060 static void cnic_service_bnx2x_bh(unsigned long data) 3061 { 3062 struct cnic_dev *dev = (struct cnic_dev *) data; 3063 struct cnic_local *cp = dev->cnic_priv; 3064 u32 status_idx, new_status_idx; 3065 3066 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) 3067 return; 3068 3069 while (1) { 3070 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); 3071 3072 CNIC_WR16(dev, cp->kcq1.io_addr, 3073 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); 3074 3075 if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 3076 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, 3077 status_idx, IGU_INT_ENABLE, 1); 3078 break; 3079 } 3080 3081 new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); 3082 3083 if (new_status_idx != status_idx) 3084 continue; 3085 3086 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + 3087 MAX_KCQ_IDX); 3088 3089 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 3090 status_idx, IGU_INT_ENABLE, 1); 3091 3092 break; 3093 } 3094 } 3095 3096 static int cnic_service_bnx2x(void *data, void *status_blk) 3097 { 3098 struct cnic_dev *dev = data; 3099 struct cnic_local *cp = dev->cnic_priv; 3100 3101 if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) 3102 cnic_doirq(dev); 3103 3104 cnic_chk_pkt_rings(cp); 3105 3106 return 0; 3107 } 3108 3109 static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type) 3110 { 3111 struct cnic_ulp_ops *ulp_ops; 3112 3113 if (if_type == CNIC_ULP_ISCSI) 3114 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); 3115 3116 mutex_lock(&cnic_lock); 3117 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], 3118 lockdep_is_held(&cnic_lock)); 3119 if (!ulp_ops) { 3120 mutex_unlock(&cnic_lock); 3121 return; 3122 } 3123 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 3124 mutex_unlock(&cnic_lock); 3125 3126 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type])) 3127 ulp_ops->cnic_stop(cp->ulp_handle[if_type]); 3128 3129 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 3130 } 3131 3132 static void cnic_ulp_stop(struct cnic_dev *dev) 3133 { 3134 struct cnic_local *cp = dev->cnic_priv; 3135 int if_type; 3136 3137 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) 3138 cnic_ulp_stop_one(cp, if_type); 3139 } 3140 3141 static void cnic_ulp_start(struct cnic_dev *dev) 3142 { 3143 struct cnic_local *cp = dev->cnic_priv; 3144 int if_type; 3145 3146 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 3147 struct cnic_ulp_ops *ulp_ops; 3148 3149 mutex_lock(&cnic_lock); 3150 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], 3151 lockdep_is_held(&cnic_lock)); 3152 if (!ulp_ops || !ulp_ops->cnic_start) { 3153 mutex_unlock(&cnic_lock); 3154 continue; 3155 } 3156 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 3157 mutex_unlock(&cnic_lock); 3158 3159 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type])) 3160 
ulp_ops->cnic_start(cp->ulp_handle[if_type]); 3161 3162 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 3163 } 3164 } 3165 3166 static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type) 3167 { 3168 struct cnic_local *cp = dev->cnic_priv; 3169 struct cnic_ulp_ops *ulp_ops; 3170 int rc; 3171 3172 mutex_lock(&cnic_lock); 3173 ulp_ops = cnic_ulp_tbl_prot(ulp_type); 3174 if (ulp_ops && ulp_ops->cnic_get_stats) 3175 rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]); 3176 else 3177 rc = -ENODEV; 3178 mutex_unlock(&cnic_lock); 3179 return rc; 3180 } 3181 3182 static int cnic_ctl(void *data, struct cnic_ctl_info *info) 3183 { 3184 struct cnic_dev *dev = data; 3185 int ulp_type = CNIC_ULP_ISCSI; 3186 3187 switch (info->cmd) { 3188 case CNIC_CTL_STOP_CMD: 3189 cnic_hold(dev); 3190 3191 cnic_ulp_stop(dev); 3192 cnic_stop_hw(dev); 3193 3194 cnic_put(dev); 3195 break; 3196 case CNIC_CTL_START_CMD: 3197 cnic_hold(dev); 3198 3199 if (!cnic_start_hw(dev)) 3200 cnic_ulp_start(dev); 3201 3202 cnic_put(dev); 3203 break; 3204 case CNIC_CTL_STOP_ISCSI_CMD: { 3205 struct cnic_local *cp = dev->cnic_priv; 3206 set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags); 3207 queue_delayed_work(cnic_wq, &cp->delete_task, 0); 3208 break; 3209 } 3210 case CNIC_CTL_COMPLETION_CMD: { 3211 struct cnic_ctl_completion *comp = &info->data.comp; 3212 u32 cid = BNX2X_SW_CID(comp->cid); 3213 u32 l5_cid; 3214 struct cnic_local *cp = dev->cnic_priv; 3215 3216 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) { 3217 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 3218 3219 if (unlikely(comp->error)) { 3220 set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags); 3221 netdev_err(dev->netdev, 3222 "CID %x CFC delete comp error %x\n", 3223 cid, comp->error); 3224 } 3225 3226 ctx->wait_cond = 1; 3227 wake_up(&ctx->waitq); 3228 } 3229 break; 3230 } 3231 case CNIC_CTL_FCOE_STATS_GET_CMD: 3232 ulp_type = CNIC_ULP_FCOE; 3233 /* fall through */ 3234 case CNIC_CTL_ISCSI_STATS_GET_CMD: 3235 cnic_hold(dev); 3236 cnic_copy_ulp_stats(dev, ulp_type); 3237 cnic_put(dev); 3238 break; 3239 3240 default: 3241 return -EINVAL; 3242 } 3243 return 0; 3244 } 3245 3246 static void cnic_ulp_init(struct cnic_dev *dev) 3247 { 3248 int i; 3249 struct cnic_local *cp = dev->cnic_priv; 3250 3251 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { 3252 struct cnic_ulp_ops *ulp_ops; 3253 3254 mutex_lock(&cnic_lock); 3255 ulp_ops = cnic_ulp_tbl_prot(i); 3256 if (!ulp_ops || !ulp_ops->cnic_init) { 3257 mutex_unlock(&cnic_lock); 3258 continue; 3259 } 3260 ulp_get(ulp_ops); 3261 mutex_unlock(&cnic_lock); 3262 3263 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i])) 3264 ulp_ops->cnic_init(dev); 3265 3266 ulp_put(ulp_ops); 3267 } 3268 } 3269 3270 static void cnic_ulp_exit(struct cnic_dev *dev) 3271 { 3272 int i; 3273 struct cnic_local *cp = dev->cnic_priv; 3274 3275 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { 3276 struct cnic_ulp_ops *ulp_ops; 3277 3278 mutex_lock(&cnic_lock); 3279 ulp_ops = cnic_ulp_tbl_prot(i); 3280 if (!ulp_ops || !ulp_ops->cnic_exit) { 3281 mutex_unlock(&cnic_lock); 3282 continue; 3283 } 3284 ulp_get(ulp_ops); 3285 mutex_unlock(&cnic_lock); 3286 3287 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i])) 3288 ulp_ops->cnic_exit(dev); 3289 3290 ulp_put(ulp_ops); 3291 } 3292 } 3293 3294 static int cnic_cm_offload_pg(struct cnic_sock *csk) 3295 { 3296 struct cnic_dev *dev = csk->dev; 3297 struct l4_kwq_offload_pg *l4kwqe; 3298 struct kwqe *wqes[1]; 3299 3300 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1; 3301 memset(l4kwqe, 0, sizeof(*l4kwqe)); 
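	/* The OFFLOAD_PG KWQE carries the L2 header for this path (dest/src
	 * MAC, EtherType, optional VLAN tag); host_opaque echoes l5_cid so
	 * the OFFLOAD_PG KCQE can be matched back to the csk in
	 * cnic_cm_process_offld_pg().
	 */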
3302 wqes[0] = (struct kwqe *) l4kwqe; 3303 3304 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG; 3305 l4kwqe->flags = 3306 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT; 3307 l4kwqe->l2hdr_nbytes = ETH_HLEN; 3308 3309 l4kwqe->da0 = csk->ha[0]; 3310 l4kwqe->da1 = csk->ha[1]; 3311 l4kwqe->da2 = csk->ha[2]; 3312 l4kwqe->da3 = csk->ha[3]; 3313 l4kwqe->da4 = csk->ha[4]; 3314 l4kwqe->da5 = csk->ha[5]; 3315 3316 l4kwqe->sa0 = dev->mac_addr[0]; 3317 l4kwqe->sa1 = dev->mac_addr[1]; 3318 l4kwqe->sa2 = dev->mac_addr[2]; 3319 l4kwqe->sa3 = dev->mac_addr[3]; 3320 l4kwqe->sa4 = dev->mac_addr[4]; 3321 l4kwqe->sa5 = dev->mac_addr[5]; 3322 3323 l4kwqe->etype = ETH_P_IP; 3324 l4kwqe->ipid_start = DEF_IPID_START; 3325 l4kwqe->host_opaque = csk->l5_cid; 3326 3327 if (csk->vlan_id) { 3328 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING; 3329 l4kwqe->vlan_tag = csk->vlan_id; 3330 l4kwqe->l2hdr_nbytes += 4; 3331 } 3332 3333 return dev->submit_kwqes(dev, wqes, 1); 3334 } 3335 3336 static int cnic_cm_update_pg(struct cnic_sock *csk) 3337 { 3338 struct cnic_dev *dev = csk->dev; 3339 struct l4_kwq_update_pg *l4kwqe; 3340 struct kwqe *wqes[1]; 3341 3342 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1; 3343 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3344 wqes[0] = (struct kwqe *) l4kwqe; 3345 3346 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG; 3347 l4kwqe->flags = 3348 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT; 3349 l4kwqe->pg_cid = csk->pg_cid; 3350 3351 l4kwqe->da0 = csk->ha[0]; 3352 l4kwqe->da1 = csk->ha[1]; 3353 l4kwqe->da2 = csk->ha[2]; 3354 l4kwqe->da3 = csk->ha[3]; 3355 l4kwqe->da4 = csk->ha[4]; 3356 l4kwqe->da5 = csk->ha[5]; 3357 3358 l4kwqe->pg_host_opaque = csk->l5_cid; 3359 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA; 3360 3361 return dev->submit_kwqes(dev, wqes, 1); 3362 } 3363 3364 static int cnic_cm_upload_pg(struct cnic_sock *csk) 3365 { 3366 struct cnic_dev *dev = csk->dev; 3367 struct l4_kwq_upload *l4kwqe; 3368 struct kwqe *wqes[1]; 3369 3370 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1; 3371 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3372 wqes[0] = (struct kwqe *) l4kwqe; 3373 3374 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG; 3375 l4kwqe->flags = 3376 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT; 3377 l4kwqe->cid = csk->pg_cid; 3378 3379 return dev->submit_kwqes(dev, wqes, 1); 3380 } 3381 3382 static int cnic_cm_conn_req(struct cnic_sock *csk) 3383 { 3384 struct cnic_dev *dev = csk->dev; 3385 struct l4_kwq_connect_req1 *l4kwqe1; 3386 struct l4_kwq_connect_req2 *l4kwqe2; 3387 struct l4_kwq_connect_req3 *l4kwqe3; 3388 struct kwqe *wqes[3]; 3389 u8 tcp_flags = 0; 3390 int num_wqes = 2; 3391 3392 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1; 3393 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2; 3394 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3; 3395 memset(l4kwqe1, 0, sizeof(*l4kwqe1)); 3396 memset(l4kwqe2, 0, sizeof(*l4kwqe2)); 3397 memset(l4kwqe3, 0, sizeof(*l4kwqe3)); 3398 3399 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3; 3400 l4kwqe3->flags = 3401 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT; 3402 l4kwqe3->ka_timeout = csk->ka_timeout; 3403 l4kwqe3->ka_interval = csk->ka_interval; 3404 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count; 3405 l4kwqe3->tos = csk->tos; 3406 l4kwqe3->ttl = csk->ttl; 3407 l4kwqe3->snd_seq_scale = csk->snd_seq_scale; 3408 l4kwqe3->pmtu = csk->mtu; 3409 l4kwqe3->rcv_buf = csk->rcv_buf; 3410 l4kwqe3->snd_buf = csk->snd_buf; 3411 l4kwqe3->seed = csk->seed; 3412 3413 wqes[0] = (struct kwqe *) l4kwqe1; 3414 if 
(test_bit(SK_F_IPV6, &csk->flags)) { 3415 wqes[1] = (struct kwqe *) l4kwqe2; 3416 wqes[2] = (struct kwqe *) l4kwqe3; 3417 num_wqes = 3; 3418 3419 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6; 3420 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2; 3421 l4kwqe2->flags = 3422 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT | 3423 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT; 3424 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]); 3425 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]); 3426 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]); 3427 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]); 3428 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]); 3429 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]); 3430 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) - 3431 sizeof(struct tcphdr); 3432 } else { 3433 wqes[1] = (struct kwqe *) l4kwqe3; 3434 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) - 3435 sizeof(struct tcphdr); 3436 } 3437 3438 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1; 3439 l4kwqe1->flags = 3440 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) | 3441 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT; 3442 l4kwqe1->cid = csk->cid; 3443 l4kwqe1->pg_cid = csk->pg_cid; 3444 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]); 3445 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]); 3446 l4kwqe1->src_port = be16_to_cpu(csk->src_port); 3447 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port); 3448 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK) 3449 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK; 3450 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE) 3451 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE; 3452 if (csk->tcp_flags & SK_TCP_NAGLE) 3453 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE; 3454 if (csk->tcp_flags & SK_TCP_TIMESTAMP) 3455 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP; 3456 if (csk->tcp_flags & SK_TCP_SACK) 3457 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK; 3458 if (csk->tcp_flags & SK_TCP_SEG_SCALING) 3459 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING; 3460 3461 l4kwqe1->tcp_flags = tcp_flags; 3462 3463 return dev->submit_kwqes(dev, wqes, num_wqes); 3464 } 3465 3466 static int cnic_cm_close_req(struct cnic_sock *csk) 3467 { 3468 struct cnic_dev *dev = csk->dev; 3469 struct l4_kwq_close_req *l4kwqe; 3470 struct kwqe *wqes[1]; 3471 3472 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2; 3473 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3474 wqes[0] = (struct kwqe *) l4kwqe; 3475 3476 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE; 3477 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT; 3478 l4kwqe->cid = csk->cid; 3479 3480 return dev->submit_kwqes(dev, wqes, 1); 3481 } 3482 3483 static int cnic_cm_abort_req(struct cnic_sock *csk) 3484 { 3485 struct cnic_dev *dev = csk->dev; 3486 struct l4_kwq_reset_req *l4kwqe; 3487 struct kwqe *wqes[1]; 3488 3489 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2; 3490 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3491 wqes[0] = (struct kwqe *) l4kwqe; 3492 3493 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET; 3494 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT; 3495 l4kwqe->cid = csk->cid; 3496 3497 return dev->submit_kwqes(dev, wqes, 1); 3498 } 3499 3500 static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid, 3501 u32 l5_cid, struct cnic_sock **csk, void *context) 3502 { 3503 struct cnic_local *cp = dev->cnic_priv; 3504 struct cnic_sock *csk1; 3505 3506 if (l5_cid >= MAX_CM_SK_TBL_SZ) 3507 return -EINVAL; 3508 3509 if (cp->ctx_tbl) { 3510 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 3511 3512 if (test_bit(CTX_FL_OFFLD_START, 
&ctx->ctx_flags)) 3513 return -EAGAIN; 3514 } 3515 3516 csk1 = &cp->csk_tbl[l5_cid]; 3517 if (atomic_read(&csk1->ref_count)) 3518 return -EAGAIN; 3519 3520 if (test_and_set_bit(SK_F_INUSE, &csk1->flags)) 3521 return -EBUSY; 3522 3523 csk1->dev = dev; 3524 csk1->cid = cid; 3525 csk1->l5_cid = l5_cid; 3526 csk1->ulp_type = ulp_type; 3527 csk1->context = context; 3528 3529 csk1->ka_timeout = DEF_KA_TIMEOUT; 3530 csk1->ka_interval = DEF_KA_INTERVAL; 3531 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT; 3532 csk1->tos = DEF_TOS; 3533 csk1->ttl = DEF_TTL; 3534 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE; 3535 csk1->rcv_buf = DEF_RCV_BUF; 3536 csk1->snd_buf = DEF_SND_BUF; 3537 csk1->seed = DEF_SEED; 3538 3539 *csk = csk1; 3540 return 0; 3541 } 3542 3543 static void cnic_cm_cleanup(struct cnic_sock *csk) 3544 { 3545 if (csk->src_port) { 3546 struct cnic_dev *dev = csk->dev; 3547 struct cnic_local *cp = dev->cnic_priv; 3548 3549 cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port)); 3550 csk->src_port = 0; 3551 } 3552 } 3553 3554 static void cnic_close_conn(struct cnic_sock *csk) 3555 { 3556 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) { 3557 cnic_cm_upload_pg(csk); 3558 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); 3559 } 3560 cnic_cm_cleanup(csk); 3561 } 3562 3563 static int cnic_cm_destroy(struct cnic_sock *csk) 3564 { 3565 if (!cnic_in_use(csk)) 3566 return -EINVAL; 3567 3568 csk_hold(csk); 3569 clear_bit(SK_F_INUSE, &csk->flags); 3570 smp_mb__after_clear_bit(); 3571 while (atomic_read(&csk->ref_count) != 1) 3572 msleep(1); 3573 cnic_cm_cleanup(csk); 3574 3575 csk->flags = 0; 3576 csk_put(csk); 3577 return 0; 3578 } 3579 3580 static inline u16 cnic_get_vlan(struct net_device *dev, 3581 struct net_device **vlan_dev) 3582 { 3583 if (dev->priv_flags & IFF_802_1Q_VLAN) { 3584 *vlan_dev = vlan_dev_real_dev(dev); 3585 return vlan_dev_vlan_id(dev); 3586 } 3587 *vlan_dev = dev; 3588 return 0; 3589 } 3590 3591 static int cnic_get_v4_route(struct sockaddr_in *dst_addr, 3592 struct dst_entry **dst) 3593 { 3594 #if defined(CONFIG_INET) 3595 struct rtable *rt; 3596 3597 rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0); 3598 if (!IS_ERR(rt)) { 3599 *dst = &rt->dst; 3600 return 0; 3601 } 3602 return PTR_ERR(rt); 3603 #else 3604 return -ENETUNREACH; 3605 #endif 3606 } 3607 3608 static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, 3609 struct dst_entry **dst) 3610 { 3611 #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE)) 3612 struct flowi6 fl6; 3613 3614 memset(&fl6, 0, sizeof(fl6)); 3615 fl6.daddr = dst_addr->sin6_addr; 3616 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) 3617 fl6.flowi6_oif = dst_addr->sin6_scope_id; 3618 3619 *dst = ip6_route_output(&init_net, NULL, &fl6); 3620 if ((*dst)->error) { 3621 dst_release(*dst); 3622 *dst = NULL; 3623 return -ENETUNREACH; 3624 } else 3625 return 0; 3626 #endif 3627 3628 return -ENETUNREACH; 3629 } 3630 3631 static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr, 3632 int ulp_type) 3633 { 3634 struct cnic_dev *dev = NULL; 3635 struct dst_entry *dst; 3636 struct net_device *netdev = NULL; 3637 int err = -ENETUNREACH; 3638 3639 if (dst_addr->sin_family == AF_INET) 3640 err = cnic_get_v4_route(dst_addr, &dst); 3641 else if (dst_addr->sin_family == AF_INET6) { 3642 struct sockaddr_in6 *dst_addr6 = 3643 (struct sockaddr_in6 *) dst_addr; 3644 3645 err = cnic_get_v6_route(dst_addr6, &dst); 3646 } else 3647 return NULL; 3648 3649 if (err) 3650 return NULL; 3651 3652 if (!dst->dev) 3653 
goto done; 3654 3655 cnic_get_vlan(dst->dev, &netdev); 3656 3657 dev = cnic_from_netdev(netdev); 3658 3659 done: 3660 dst_release(dst); 3661 if (dev) 3662 cnic_put(dev); 3663 return dev; 3664 } 3665 3666 static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 3667 { 3668 struct cnic_dev *dev = csk->dev; 3669 struct cnic_local *cp = dev->cnic_priv; 3670 3671 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk); 3672 } 3673 3674 static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 3675 { 3676 struct cnic_dev *dev = csk->dev; 3677 struct cnic_local *cp = dev->cnic_priv; 3678 int is_v6, rc = 0; 3679 struct dst_entry *dst = NULL; 3680 struct net_device *realdev; 3681 __be16 local_port; 3682 u32 port_id; 3683 3684 if (saddr->local.v6.sin6_family == AF_INET6 && 3685 saddr->remote.v6.sin6_family == AF_INET6) 3686 is_v6 = 1; 3687 else if (saddr->local.v4.sin_family == AF_INET && 3688 saddr->remote.v4.sin_family == AF_INET) 3689 is_v6 = 0; 3690 else 3691 return -EINVAL; 3692 3693 clear_bit(SK_F_IPV6, &csk->flags); 3694 3695 if (is_v6) { 3696 set_bit(SK_F_IPV6, &csk->flags); 3697 cnic_get_v6_route(&saddr->remote.v6, &dst); 3698 3699 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr, 3700 sizeof(struct in6_addr)); 3701 csk->dst_port = saddr->remote.v6.sin6_port; 3702 local_port = saddr->local.v6.sin6_port; 3703 3704 } else { 3705 cnic_get_v4_route(&saddr->remote.v4, &dst); 3706 3707 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr; 3708 csk->dst_port = saddr->remote.v4.sin_port; 3709 local_port = saddr->local.v4.sin_port; 3710 } 3711 3712 csk->vlan_id = 0; 3713 csk->mtu = dev->netdev->mtu; 3714 if (dst && dst->dev) { 3715 u16 vlan = cnic_get_vlan(dst->dev, &realdev); 3716 if (realdev == dev->netdev) { 3717 csk->vlan_id = vlan; 3718 csk->mtu = dst_mtu(dst); 3719 } 3720 } 3721 3722 port_id = be16_to_cpu(local_port); 3723 if (port_id >= CNIC_LOCAL_PORT_MIN && 3724 port_id < CNIC_LOCAL_PORT_MAX) { 3725 if (cnic_alloc_id(&cp->csk_port_tbl, port_id)) 3726 port_id = 0; 3727 } else 3728 port_id = 0; 3729 3730 if (!port_id) { 3731 port_id = cnic_alloc_new_id(&cp->csk_port_tbl); 3732 if (port_id == -1) { 3733 rc = -ENOMEM; 3734 goto err_out; 3735 } 3736 local_port = cpu_to_be16(port_id); 3737 } 3738 csk->src_port = local_port; 3739 3740 err_out: 3741 dst_release(dst); 3742 return rc; 3743 } 3744 3745 static void cnic_init_csk_state(struct cnic_sock *csk) 3746 { 3747 csk->state = 0; 3748 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3749 clear_bit(SK_F_CLOSING, &csk->flags); 3750 } 3751 3752 static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 3753 { 3754 struct cnic_local *cp = csk->dev->cnic_priv; 3755 int err = 0; 3756 3757 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI) 3758 return -EOPNOTSUPP; 3759 3760 if (!cnic_in_use(csk)) 3761 return -EINVAL; 3762 3763 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags)) 3764 return -EINVAL; 3765 3766 cnic_init_csk_state(csk); 3767 3768 err = cnic_get_route(csk, saddr); 3769 if (err) 3770 goto err_out; 3771 3772 err = cnic_resolve_addr(csk, saddr); 3773 if (!err) 3774 return 0; 3775 3776 err_out: 3777 clear_bit(SK_F_CONNECT_START, &csk->flags); 3778 return err; 3779 } 3780 3781 static int cnic_cm_abort(struct cnic_sock *csk) 3782 { 3783 struct cnic_local *cp = csk->dev->cnic_priv; 3784 u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP; 3785 3786 if (!cnic_in_use(csk)) 3787 return -EINVAL; 3788 3789 if (cnic_abort_prep(csk)) 3790 return cnic_cm_abort_req(csk); 3791 3792 /* Getting here means 
that we haven't started connect, or 3793 * connect was not successful. 3794 */ 3795 3796 cp->close_conn(csk, opcode); 3797 if (csk->state != opcode) 3798 return -EALREADY; 3799 3800 return 0; 3801 } 3802 3803 static int cnic_cm_close(struct cnic_sock *csk) 3804 { 3805 if (!cnic_in_use(csk)) 3806 return -EINVAL; 3807 3808 if (cnic_close_prep(csk)) { 3809 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; 3810 return cnic_cm_close_req(csk); 3811 } else { 3812 return -EALREADY; 3813 } 3814 return 0; 3815 } 3816 3817 static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk, 3818 u8 opcode) 3819 { 3820 struct cnic_ulp_ops *ulp_ops; 3821 int ulp_type = csk->ulp_type; 3822 3823 rcu_read_lock(); 3824 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); 3825 if (ulp_ops) { 3826 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE) 3827 ulp_ops->cm_connect_complete(csk); 3828 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) 3829 ulp_ops->cm_close_complete(csk); 3830 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) 3831 ulp_ops->cm_remote_abort(csk); 3832 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) 3833 ulp_ops->cm_abort_complete(csk); 3834 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED) 3835 ulp_ops->cm_remote_close(csk); 3836 } 3837 rcu_read_unlock(); 3838 } 3839 3840 static int cnic_cm_set_pg(struct cnic_sock *csk) 3841 { 3842 if (cnic_offld_prep(csk)) { 3843 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) 3844 cnic_cm_update_pg(csk); 3845 else 3846 cnic_cm_offload_pg(csk); 3847 } 3848 return 0; 3849 } 3850 3851 static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe) 3852 { 3853 struct cnic_local *cp = dev->cnic_priv; 3854 u32 l5_cid = kcqe->pg_host_opaque; 3855 u8 opcode = kcqe->op_code; 3856 struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; 3857 3858 csk_hold(csk); 3859 if (!cnic_in_use(csk)) 3860 goto done; 3861 3862 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { 3863 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3864 goto done; 3865 } 3866 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */ 3867 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) { 3868 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3869 cnic_cm_upcall(cp, csk, 3870 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); 3871 goto done; 3872 } 3873 3874 csk->pg_cid = kcqe->pg_cid; 3875 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); 3876 cnic_cm_conn_req(csk); 3877 3878 done: 3879 csk_put(csk); 3880 } 3881 3882 static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe) 3883 { 3884 struct cnic_local *cp = dev->cnic_priv; 3885 struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe; 3886 u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE; 3887 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 3888 3889 ctx->timestamp = jiffies; 3890 ctx->wait_cond = 1; 3891 wake_up(&ctx->waitq); 3892 } 3893 3894 static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) 3895 { 3896 struct cnic_local *cp = dev->cnic_priv; 3897 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe; 3898 u8 opcode = l4kcqe->op_code; 3899 u32 l5_cid; 3900 struct cnic_sock *csk; 3901 3902 if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) { 3903 cnic_process_fcoe_term_conn(dev, kcqe); 3904 return; 3905 } 3906 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG || 3907 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { 3908 cnic_cm_process_offld_pg(dev, l4kcqe); 3909 return; 3910 } 3911 3912 l5_cid = l4kcqe->conn_id; 3913 if (opcode & 0x80) 3914 l5_cid = l4kcqe->cid; 3915 if (l5_cid >= 
MAX_CM_SK_TBL_SZ)
		return;

	csk = &cp->csk_tbl[l5_cid];
	csk_hold(csk);

	if (!cnic_in_use(csk)) {
		csk_put(csk);
		return;
	}

	switch (opcode) {
	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
		if (l4kcqe->status != 0) {
			clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
			cnic_cm_upcall(cp, csk,
				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
		}
		break;
	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
		if (l4kcqe->status == 0)
			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
		else if (l4kcqe->status ==
			 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
			set_bit(SK_F_HW_ERR, &csk->flags);

		smp_mb__before_clear_bit();
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
			set_bit(SK_F_HW_ERR, &csk->flags);

		cp->close_conn(csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
		/* after we already sent CLOSE_REQ */
		if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
		    !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
		    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
			cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
		else
			cnic_cm_upcall(cp, csk, opcode);
		break;
	}
	csk_put(csk);
}

static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
{
	struct cnic_dev *dev = data;
	int i;

	for (i = 0; i < num; i++)
		cnic_cm_process_kcqe(dev, kcqe[i]);
}

static struct cnic_ulp_ops cm_ulp_ops = {
	.indicate_kcqes = cnic_cm_indicate_kcqe,
};

static void cnic_cm_free_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	kfree(cp->csk_tbl);
	cp->csk_tbl = NULL;
	cnic_free_id_tbl(&cp->csk_port_tbl);
}

static int cnic_cm_alloc_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 port_id;

	cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
			      GFP_KERNEL);
	if (!cp->csk_tbl)
		return -ENOMEM;

	port_id = random32();
	port_id %= CNIC_LOCAL_PORT_RANGE;
	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
			     CNIC_LOCAL_PORT_MIN, port_id)) {
		cnic_cm_free_mem(dev);
		return -ENOMEM;
	}
	return 0;
}

static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
{
	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		/* Unsolicited RESET_COMP or RESET_RECEIVED */
		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
		csk->state = opcode;
	}

	/* 1. If event opcode matches the expected event in csk->state
	 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
	 *    event
	 * 3. If the expected event is 0, meaning the connection was never
	 *    established, we accept the opcode from cm_abort.
4025 */ 4026 if (opcode == csk->state || csk->state == 0 || 4027 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP || 4028 csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) { 4029 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) { 4030 if (csk->state == 0) 4031 csk->state = opcode; 4032 return 1; 4033 } 4034 } 4035 return 0; 4036 } 4037 4038 static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode) 4039 { 4040 struct cnic_dev *dev = csk->dev; 4041 struct cnic_local *cp = dev->cnic_priv; 4042 4043 if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) { 4044 cnic_cm_upcall(cp, csk, opcode); 4045 return; 4046 } 4047 4048 clear_bit(SK_F_CONNECT_START, &csk->flags); 4049 cnic_close_conn(csk); 4050 csk->state = opcode; 4051 cnic_cm_upcall(cp, csk, opcode); 4052 } 4053 4054 static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev) 4055 { 4056 } 4057 4058 static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev) 4059 { 4060 u32 seed; 4061 4062 seed = random32(); 4063 cnic_ctx_wr(dev, 45, 0, seed); 4064 return 0; 4065 } 4066 4067 static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode) 4068 { 4069 struct cnic_dev *dev = csk->dev; 4070 struct cnic_local *cp = dev->cnic_priv; 4071 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid]; 4072 union l5cm_specific_data l5_data; 4073 u32 cmd = 0; 4074 int close_complete = 0; 4075 4076 switch (opcode) { 4077 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: 4078 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: 4079 case L4_KCQE_OPCODE_VALUE_RESET_COMP: 4080 if (cnic_ready_to_close(csk, opcode)) { 4081 if (test_bit(SK_F_HW_ERR, &csk->flags)) 4082 close_complete = 1; 4083 else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) 4084 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE; 4085 else 4086 close_complete = 1; 4087 } 4088 break; 4089 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: 4090 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD; 4091 break; 4092 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: 4093 close_complete = 1; 4094 break; 4095 } 4096 if (cmd) { 4097 memset(&l5_data, 0, sizeof(l5_data)); 4098 4099 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE, 4100 &l5_data); 4101 } else if (close_complete) { 4102 ctx->timestamp = jiffies; 4103 cnic_close_conn(csk); 4104 cnic_cm_upcall(cp, csk, csk->state); 4105 } 4106 } 4107 4108 static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev) 4109 { 4110 struct cnic_local *cp = dev->cnic_priv; 4111 4112 if (!cp->ctx_tbl) 4113 return; 4114 4115 if (!netif_running(dev->netdev)) 4116 return; 4117 4118 cnic_bnx2x_delete_wait(dev, 0); 4119 4120 cancel_delayed_work(&cp->delete_task); 4121 flush_workqueue(cnic_wq); 4122 4123 if (atomic_read(&cp->iscsi_conn) != 0) 4124 netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n", 4125 atomic_read(&cp->iscsi_conn)); 4126 } 4127 4128 static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev) 4129 { 4130 struct cnic_local *cp = dev->cnic_priv; 4131 u32 pfid = cp->pfid; 4132 u32 port = CNIC_PORT(cp); 4133 4134 cnic_init_bnx2x_mac(dev); 4135 cnic_bnx2x_set_tcp_timestamp(dev, 1); 4136 4137 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + 4138 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0); 4139 4140 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 4141 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1); 4142 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 4143 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port), 4144 DEF_MAX_DA_COUNT); 4145 4146 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 4147 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL); 4148 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 4149 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS); 4150 
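/* The next two writes advertise a TCP window scale of 2 and program the
 * default TX SWS (silly window syndrome) timer for offloaded connections.
 */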
CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 4151 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2); 4152 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 4153 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER); 4154 4155 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid), 4156 DEF_MAX_CWND); 4157 return 0; 4158 } 4159 4160 static void cnic_delete_task(struct work_struct *work) 4161 { 4162 struct cnic_local *cp; 4163 struct cnic_dev *dev; 4164 u32 i; 4165 int need_resched = 0; 4166 4167 cp = container_of(work, struct cnic_local, delete_task.work); 4168 dev = cp->dev; 4169 4170 if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) { 4171 struct drv_ctl_info info; 4172 4173 cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI); 4174 4175 info.cmd = DRV_CTL_ISCSI_STOPPED_CMD; 4176 cp->ethdev->drv_ctl(dev->netdev, &info); 4177 } 4178 4179 for (i = 0; i < cp->max_cid_space; i++) { 4180 struct cnic_context *ctx = &cp->ctx_tbl[i]; 4181 int err; 4182 4183 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) || 4184 !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) 4185 continue; 4186 4187 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) { 4188 need_resched = 1; 4189 continue; 4190 } 4191 4192 if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) 4193 continue; 4194 4195 err = cnic_bnx2x_destroy_ramrod(dev, i); 4196 4197 cnic_free_bnx2x_conn_resc(dev, i); 4198 if (!err) { 4199 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) 4200 atomic_dec(&cp->iscsi_conn); 4201 4202 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); 4203 } 4204 } 4205 4206 if (need_resched) 4207 queue_delayed_work(cnic_wq, &cp->delete_task, 4208 msecs_to_jiffies(10)); 4209 4210 } 4211 4212 static int cnic_cm_open(struct cnic_dev *dev) 4213 { 4214 struct cnic_local *cp = dev->cnic_priv; 4215 int err; 4216 4217 err = cnic_cm_alloc_mem(dev); 4218 if (err) 4219 return err; 4220 4221 err = cp->start_cm(dev); 4222 4223 if (err) 4224 goto err_out; 4225 4226 INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task); 4227 4228 dev->cm_create = cnic_cm_create; 4229 dev->cm_destroy = cnic_cm_destroy; 4230 dev->cm_connect = cnic_cm_connect; 4231 dev->cm_abort = cnic_cm_abort; 4232 dev->cm_close = cnic_cm_close; 4233 dev->cm_select_dev = cnic_cm_select_dev; 4234 4235 cp->ulp_handle[CNIC_ULP_L4] = dev; 4236 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops); 4237 return 0; 4238 4239 err_out: 4240 cnic_cm_free_mem(dev); 4241 return err; 4242 } 4243 4244 static int cnic_cm_shutdown(struct cnic_dev *dev) 4245 { 4246 struct cnic_local *cp = dev->cnic_priv; 4247 int i; 4248 4249 cp->stop_cm(dev); 4250 4251 if (!cp->csk_tbl) 4252 return 0; 4253 4254 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) { 4255 struct cnic_sock *csk = &cp->csk_tbl[i]; 4256 4257 clear_bit(SK_F_INUSE, &csk->flags); 4258 cnic_cm_cleanup(csk); 4259 } 4260 cnic_cm_free_mem(dev); 4261 4262 return 0; 4263 } 4264 4265 static void cnic_init_context(struct cnic_dev *dev, u32 cid) 4266 { 4267 u32 cid_addr; 4268 int i; 4269 4270 cid_addr = GET_CID_ADDR(cid); 4271 4272 for (i = 0; i < CTX_SIZE; i += 4) 4273 cnic_ctx_wr(dev, cid_addr, i, 0); 4274 } 4275 4276 static int cnic_setup_5709_context(struct cnic_dev *dev, int valid) 4277 { 4278 struct cnic_local *cp = dev->cnic_priv; 4279 int ret = 0, i; 4280 u32 valid_bit = valid ? 
BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0; 4281 4282 if (CHIP_NUM(cp) != CHIP_NUM_5709) 4283 return 0; 4284 4285 for (i = 0; i < cp->ctx_blks; i++) { 4286 int j; 4287 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk; 4288 u32 val; 4289 4290 memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE); 4291 4292 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0, 4293 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); 4294 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1, 4295 (u64) cp->ctx_arr[i].mapping >> 32); 4296 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx | 4297 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); 4298 for (j = 0; j < 10; j++) { 4299 4300 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL); 4301 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ)) 4302 break; 4303 udelay(5); 4304 } 4305 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) { 4306 ret = -EBUSY; 4307 break; 4308 } 4309 } 4310 return ret; 4311 } 4312 4313 static void cnic_free_irq(struct cnic_dev *dev) 4314 { 4315 struct cnic_local *cp = dev->cnic_priv; 4316 struct cnic_eth_dev *ethdev = cp->ethdev; 4317 4318 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 4319 cp->disable_int_sync(dev); 4320 tasklet_kill(&cp->cnic_irq_task); 4321 free_irq(ethdev->irq_arr[0].vector, dev); 4322 } 4323 } 4324 4325 static int cnic_request_irq(struct cnic_dev *dev) 4326 { 4327 struct cnic_local *cp = dev->cnic_priv; 4328 struct cnic_eth_dev *ethdev = cp->ethdev; 4329 int err; 4330 4331 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev); 4332 if (err) 4333 tasklet_disable(&cp->cnic_irq_task); 4334 4335 return err; 4336 } 4337 4338 static int cnic_init_bnx2_irq(struct cnic_dev *dev) 4339 { 4340 struct cnic_local *cp = dev->cnic_priv; 4341 struct cnic_eth_dev *ethdev = cp->ethdev; 4342 4343 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 4344 int err, i = 0; 4345 int sblk_num = cp->status_blk_num; 4346 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) + 4347 BNX2_HC_SB_CONFIG_1; 4348 4349 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT); 4350 4351 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8); 4352 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220); 4353 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220); 4354 4355 cp->last_status_idx = cp->status_blk.bnx2->status_idx; 4356 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix, 4357 (unsigned long) dev); 4358 err = cnic_request_irq(dev); 4359 if (err) 4360 return err; 4361 4362 while (cp->status_blk.bnx2->status_completion_producer_index && 4363 i < 10) { 4364 CNIC_WR(dev, BNX2_HC_COALESCE_NOW, 4365 1 << (11 + sblk_num)); 4366 udelay(10); 4367 i++; 4368 barrier(); 4369 } 4370 if (cp->status_blk.bnx2->status_completion_producer_index) { 4371 cnic_free_irq(dev); 4372 goto failed; 4373 } 4374 4375 } else { 4376 struct status_block *sblk = cp->status_blk.gen; 4377 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND); 4378 int i = 0; 4379 4380 while (sblk->status_completion_producer_index && i < 10) { 4381 CNIC_WR(dev, BNX2_HC_COMMAND, 4382 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); 4383 udelay(10); 4384 i++; 4385 barrier(); 4386 } 4387 if (sblk->status_completion_producer_index) 4388 goto failed; 4389 4390 } 4391 return 0; 4392 4393 failed: 4394 netdev_err(dev->netdev, "KCQ index not resetting to 0\n"); 4395 return -EBUSY; 4396 } 4397 4398 static void cnic_enable_bnx2_int(struct cnic_dev *dev) 4399 { 4400 struct cnic_local *cp = dev->cnic_priv; 4401 struct cnic_eth_dev *ethdev = cp->ethdev; 4402 4403 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) 4404 return; 4405 4406 
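/* Ack with the last seen status block index; MASK_INT is left clear, so
 * this also re-enables the MSI-X vector used by the CNIC queues.
 */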
CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 4407 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); 4408 } 4409 4410 static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev) 4411 { 4412 struct cnic_local *cp = dev->cnic_priv; 4413 struct cnic_eth_dev *ethdev = cp->ethdev; 4414 4415 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) 4416 return; 4417 4418 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 4419 BNX2_PCICFG_INT_ACK_CMD_MASK_INT); 4420 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD); 4421 synchronize_irq(ethdev->irq_arr[0].vector); 4422 } 4423 4424 static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) 4425 { 4426 struct cnic_local *cp = dev->cnic_priv; 4427 struct cnic_eth_dev *ethdev = cp->ethdev; 4428 struct cnic_uio_dev *udev = cp->udev; 4429 u32 cid_addr, tx_cid, sb_id; 4430 u32 val, offset0, offset1, offset2, offset3; 4431 int i; 4432 struct tx_bd *txbd; 4433 dma_addr_t buf_map, ring_map = udev->l2_ring_map; 4434 struct status_block *s_blk = cp->status_blk.gen; 4435 4436 sb_id = cp->status_blk_num; 4437 tx_cid = 20; 4438 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2; 4439 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 4440 struct status_block_msix *sblk = cp->status_blk.bnx2; 4441 4442 tx_cid = TX_TSS_CID + sb_id - 1; 4443 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) | 4444 (TX_TSS_CID << 7)); 4445 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index; 4446 } 4447 cp->tx_cons = *cp->tx_cons_ptr; 4448 4449 cid_addr = GET_CID_ADDR(tx_cid); 4450 if (CHIP_NUM(cp) == CHIP_NUM_5709) { 4451 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40; 4452 4453 for (i = 0; i < PHY_CTX_SIZE; i += 4) 4454 cnic_ctx_wr(dev, cid_addr2, i, 0); 4455 4456 offset0 = BNX2_L2CTX_TYPE_XI; 4457 offset1 = BNX2_L2CTX_CMD_TYPE_XI; 4458 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; 4459 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; 4460 } else { 4461 cnic_init_context(dev, tx_cid); 4462 cnic_init_context(dev, tx_cid + 1); 4463 4464 offset0 = BNX2_L2CTX_TYPE; 4465 offset1 = BNX2_L2CTX_CMD_TYPE; 4466 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; 4467 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO; 4468 } 4469 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2; 4470 cnic_ctx_wr(dev, cid_addr, offset0, val); 4471 4472 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 4473 cnic_ctx_wr(dev, cid_addr, offset1, val); 4474 4475 txbd = udev->l2_ring; 4476 4477 buf_map = udev->l2_buf_map; 4478 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) { 4479 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32; 4480 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff; 4481 } 4482 val = (u64) ring_map >> 32; 4483 cnic_ctx_wr(dev, cid_addr, offset2, val); 4484 txbd->tx_bd_haddr_hi = val; 4485 4486 val = (u64) ring_map & 0xffffffff; 4487 cnic_ctx_wr(dev, cid_addr, offset3, val); 4488 txbd->tx_bd_haddr_lo = val; 4489 } 4490 4491 static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) 4492 { 4493 struct cnic_local *cp = dev->cnic_priv; 4494 struct cnic_eth_dev *ethdev = cp->ethdev; 4495 struct cnic_uio_dev *udev = cp->udev; 4496 u32 cid_addr, sb_id, val, coal_reg, coal_val; 4497 int i; 4498 struct rx_bd *rxbd; 4499 struct status_block *s_blk = cp->status_blk.gen; 4500 dma_addr_t ring_map = udev->l2_ring_map; 4501 4502 sb_id = cp->status_blk_num; 4503 cnic_init_context(dev, 2); 4504 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2; 4505 coal_reg = BNX2_HC_COMMAND; 4506 coal_val = CNIC_RD(dev, coal_reg); 4507 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 4508 struct status_block_msix *sblk = cp->status_blk.bnx2; 4509 4510 
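/* With MSI-X, the L2 rx consumer index lives in the per-vector status
 * block, and the ring is nudged via the COALESCE_NOW bit for this SB
 * instead of the global HC command register.
 */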
cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index; 4511 coal_reg = BNX2_HC_COALESCE_NOW; 4512 coal_val = 1 << (11 + sb_id); 4513 } 4514 i = 0; 4515 while (!(*cp->rx_cons_ptr != 0) && i < 10) { 4516 CNIC_WR(dev, coal_reg, coal_val); 4517 udelay(10); 4518 i++; 4519 barrier(); 4520 } 4521 cp->rx_cons = *cp->rx_cons_ptr; 4522 4523 cid_addr = GET_CID_ADDR(2); 4524 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | 4525 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8); 4526 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val); 4527 4528 if (sb_id == 0) 4529 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT; 4530 else 4531 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id); 4532 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); 4533 4534 rxbd = udev->l2_ring + BCM_PAGE_SIZE; 4535 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) { 4536 dma_addr_t buf_map; 4537 int n = (i % cp->l2_rx_ring_size) + 1; 4538 4539 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size); 4540 rxbd->rx_bd_len = cp->l2_single_buf_size; 4541 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; 4542 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; 4543 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; 4544 } 4545 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32; 4546 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); 4547 rxbd->rx_bd_haddr_hi = val; 4548 4549 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff; 4550 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); 4551 rxbd->rx_bd_haddr_lo = val; 4552 4553 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD); 4554 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2)); 4555 } 4556 4557 static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev) 4558 { 4559 struct kwqe *wqes[1], l2kwqe; 4560 4561 memset(&l2kwqe, 0, sizeof(l2kwqe)); 4562 wqes[0] = &l2kwqe; 4563 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) | 4564 (L2_KWQE_OPCODE_VALUE_FLUSH << 4565 KWQE_OPCODE_SHIFT) | 2; 4566 dev->submit_kwqes(dev, wqes, 1); 4567 } 4568 4569 static void cnic_set_bnx2_mac(struct cnic_dev *dev) 4570 { 4571 struct cnic_local *cp = dev->cnic_priv; 4572 u32 val; 4573 4574 val = cp->func << 2; 4575 4576 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val); 4577 4578 val = cnic_reg_rd_ind(dev, cp->shmem_base + 4579 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER); 4580 dev->mac_addr[0] = (u8) (val >> 8); 4581 dev->mac_addr[1] = (u8) val; 4582 4583 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val); 4584 4585 val = cnic_reg_rd_ind(dev, cp->shmem_base + 4586 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER); 4587 dev->mac_addr[2] = (u8) (val >> 24); 4588 dev->mac_addr[3] = (u8) (val >> 16); 4589 dev->mac_addr[4] = (u8) (val >> 8); 4590 dev->mac_addr[5] = (u8) val; 4591 4592 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val); 4593 4594 val = 4 | BNX2_RPM_SORT_USER2_BC_EN; 4595 if (CHIP_NUM(cp) != CHIP_NUM_5709) 4596 val |= BNX2_RPM_SORT_USER2_PROM_VLAN; 4597 4598 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0); 4599 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val); 4600 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA); 4601 } 4602 4603 static int cnic_start_bnx2_hw(struct cnic_dev *dev) 4604 { 4605 struct cnic_local *cp = dev->cnic_priv; 4606 struct cnic_eth_dev *ethdev = cp->ethdev; 4607 struct status_block *sblk = cp->status_blk.gen; 4608 u32 val, kcq_cid_addr, kwq_cid_addr; 4609 int err; 4610 4611 cnic_set_bnx2_mac(dev); 4612 4613 val = CNIC_RD(dev, BNX2_MQ_CONFIG); 4614 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; 4615 if (BCM_PAGE_BITS > 12) 4616 val |= (12 - 8) << 4; 4617 else 4618 val |= (BCM_PAGE_BITS - 8) << 4; 4619 
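/* Commit the kernel bypass block size computed above (log2 of the host
 * page size minus 8, capped at the 4K encoding) to the MQ.
 */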
4620 CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4621
4622 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
4623 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
4624 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
4625
4626 err = cnic_setup_5709_context(dev, 1);
4627 if (err)
4628 return err;
4629
4630 cnic_init_context(dev, KWQ_CID);
4631 cnic_init_context(dev, KCQ_CID);
4632
4633 kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
4634 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
4635
4636 cp->max_kwq_idx = MAX_KWQ_IDX;
4637 cp->kwq_prod_idx = 0;
4638 cp->kwq_con_idx = 0;
4639 set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
4640
4641 if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
4642 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
4643 else
4644 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
4645
4646 /* Initialize the kernel work queue context. */
4647 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4648 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4649 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
4650
4651 val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
4652 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4653
4654 val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
4655 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4656
4657 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
4658 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4659
4660 val = (u32) cp->kwq_info.pgtbl_map;
4661 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4662
4663 kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
4664 cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
4665
4666 cp->kcq1.sw_prod_idx = 0;
4667 cp->kcq1.hw_prod_idx_ptr =
4668 (u16 *) &sblk->status_completion_producer_index;
4669
4670 cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
4671
4672 /* Initialize the kernel complete queue context. */
4673 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4674 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4675 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
4676
4677 val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
4678 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4679
4680 val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
4681 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4682
4683 val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
4684 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4685
4686 val = (u32) cp->kcq1.dma.pgtbl_map;
4687 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4688
4689 cp->int_num = 0;
4690 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4691 struct status_block_msix *msblk = cp->status_blk.bnx2;
4692 u32 sb_id = cp->status_blk_num;
4693 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
4694
4695 cp->kcq1.hw_prod_idx_ptr =
4696 (u16 *) &msblk->status_completion_producer_index;
4697 cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
4698 cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
4699 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
4700 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4701 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4702 }
4703
4704 /* Enable Command Scheduler notification when we write to the
4705 * host producer index of the kernel contexts.
*/
4706 CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
4707
4708 /* Enable Command Scheduler notification when we write to either
4709 * the Send Queue or Receive Queue producer indexes of the kernel
4710 * bypass contexts. */
4711 CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
4712 CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
4713
4714 /* Notify COM when the driver posts an application buffer. */
4715 CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
4716
4717 /* Set the CP and COM doorbells. These two processors poll the
4718 * doorbell for a non-zero value before running. This must be done
4719 * after setting up the kernel queue contexts. */
4720 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
4721 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
4722
4723 cnic_init_bnx2_tx_ring(dev);
4724 cnic_init_bnx2_rx_ring(dev);
4725
4726 err = cnic_init_bnx2_irq(dev);
4727 if (err) {
4728 netdev_err(dev->netdev, "cnic_init_irq failed\n");
4729 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4730 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4731 return err;
4732 }
4733
4734 return 0;
4735 }
4736
4737 static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
4738 {
4739 struct cnic_local *cp = dev->cnic_priv;
4740 struct cnic_eth_dev *ethdev = cp->ethdev;
4741 u32 start_offset = ethdev->ctx_tbl_offset;
4742 int i;
4743
4744 for (i = 0; i < cp->ctx_blks; i++) {
4745 struct cnic_ctx *ctx = &cp->ctx_arr[i];
4746 dma_addr_t map = ctx->mapping;
4747
4748 if (cp->ctx_align) {
4749 unsigned long mask = cp->ctx_align - 1;
4750
4751 map = (map + mask) & ~mask;
4752 }
4753
4754 cnic_ctx_tbl_wr(dev, start_offset + i, map);
4755 }
4756 }
4757
4758 static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
4759 {
4760 struct cnic_local *cp = dev->cnic_priv;
4761 struct cnic_eth_dev *ethdev = cp->ethdev;
4762 int err = 0;
4763
4764 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
4765 (unsigned long) dev);
4766 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
4767 err = cnic_request_irq(dev);
4768
4769 return err;
4770 }
4771
4772 static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4773 u16 sb_id, u8 sb_index,
4774 u8 disable)
4775 {
4776
4777 u32 addr = BAR_CSTRORM_INTMEM +
4778 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4779 offsetof(struct hc_status_block_data_e1x, index_data) +
4780 sizeof(struct hc_index_data)*sb_index +
4781 offsetof(struct hc_index_data, flags);
4782 u16 flags = CNIC_RD16(dev, addr);
4783 /* clear and set */
4784 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4785 flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
4786 HC_INDEX_DATA_HC_ENABLED);
4787 CNIC_WR16(dev, addr, flags);
4788 }
4789
4790 static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4791 {
4792 struct cnic_local *cp = dev->cnic_priv;
4793 u8 sb_id = cp->status_blk_num;
4794
4795 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4796 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4797 offsetof(struct hc_status_block_data_e1x, index_data) +
4798 sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
4799 offsetof(struct hc_index_data, timeout), 64 / 4);
4800 cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
4801 }
4802
4803 static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
4804 {
4805 }
4806
4807 static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4808 struct client_init_ramrod_data *data)
4809 {
4810 struct cnic_local *cp = dev->cnic_priv;
4811 struct cnic_uio_dev *udev = cp->udev;
4812 union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
4813 dma_addr_t buf_map, ring_map =
udev->l2_ring_map; 4814 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; 4815 int i; 4816 u32 cli = cp->ethdev->iscsi_l2_client_id; 4817 u32 val; 4818 4819 memset(txbd, 0, BCM_PAGE_SIZE); 4820 4821 buf_map = udev->l2_buf_map; 4822 for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) { 4823 struct eth_tx_start_bd *start_bd = &txbd->start_bd; 4824 struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd); 4825 4826 start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32); 4827 start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); 4828 reg_bd->addr_hi = start_bd->addr_hi; 4829 reg_bd->addr_lo = start_bd->addr_lo + 0x10; 4830 start_bd->nbytes = cpu_to_le16(0x10); 4831 start_bd->nbd = cpu_to_le16(3); 4832 start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 4833 start_bd->general_data = (UNICAST_ADDRESS << 4834 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); 4835 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 4836 4837 } 4838 4839 val = (u64) ring_map >> 32; 4840 txbd->next_bd.addr_hi = cpu_to_le32(val); 4841 4842 data->tx.tx_bd_page_base.hi = cpu_to_le32(val); 4843 4844 val = (u64) ring_map & 0xffffffff; 4845 txbd->next_bd.addr_lo = cpu_to_le32(val); 4846 4847 data->tx.tx_bd_page_base.lo = cpu_to_le32(val); 4848 4849 /* Other ramrod params */ 4850 data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS; 4851 data->tx.tx_status_block_id = BNX2X_DEF_SB_ID; 4852 4853 /* reset xstorm per client statistics */ 4854 if (cli < MAX_STAT_COUNTER_ID) { 4855 data->general.statistics_zero_flg = 1; 4856 data->general.statistics_en_flg = 1; 4857 data->general.statistics_counter_id = cli; 4858 } 4859 4860 cp->tx_cons_ptr = 4861 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS]; 4862 } 4863 4864 static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, 4865 struct client_init_ramrod_data *data) 4866 { 4867 struct cnic_local *cp = dev->cnic_priv; 4868 struct cnic_uio_dev *udev = cp->udev; 4869 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring + 4870 BCM_PAGE_SIZE); 4871 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) 4872 (udev->l2_ring + (2 * BCM_PAGE_SIZE)); 4873 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; 4874 int i; 4875 u32 cli = cp->ethdev->iscsi_l2_client_id; 4876 int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); 4877 u32 val; 4878 dma_addr_t ring_map = udev->l2_ring_map; 4879 4880 /* General data */ 4881 data->general.client_id = cli; 4882 data->general.activate_flg = 1; 4883 data->general.sp_client_id = cli; 4884 data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14); 4885 data->general.func_id = cp->pfid; 4886 4887 for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) { 4888 dma_addr_t buf_map; 4889 int n = (i % cp->l2_rx_ring_size) + 1; 4890 4891 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size); 4892 rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32); 4893 rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); 4894 } 4895 4896 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32; 4897 rxbd->addr_hi = cpu_to_le32(val); 4898 data->rx.bd_page_base.hi = cpu_to_le32(val); 4899 4900 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff; 4901 rxbd->addr_lo = cpu_to_le32(val); 4902 data->rx.bd_page_base.lo = cpu_to_le32(val); 4903 4904 rxcqe += BNX2X_MAX_RCQ_DESC_CNT; 4905 val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32; 4906 rxcqe->addr_hi = cpu_to_le32(val); 4907 data->rx.cqe_page_base.hi = cpu_to_le32(val); 4908 4909 val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff; 4910 rxcqe->addr_lo = cpu_to_le32(val); 4911 
data->rx.cqe_page_base.lo = cpu_to_le32(val); 4912 4913 /* Other ramrod params */ 4914 data->rx.client_qzone_id = cl_qzone_id; 4915 data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS; 4916 data->rx.status_block_id = BNX2X_DEF_SB_ID; 4917 4918 data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT; 4919 4920 data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size); 4921 data->rx.outer_vlan_removal_enable_flg = 1; 4922 data->rx.silent_vlan_removal_flg = 1; 4923 data->rx.silent_vlan_value = 0; 4924 data->rx.silent_vlan_mask = 0xffff; 4925 4926 cp->rx_cons_ptr = 4927 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS]; 4928 cp->rx_cons = *cp->rx_cons_ptr; 4929 } 4930 4931 static void cnic_init_bnx2x_kcq(struct cnic_dev *dev) 4932 { 4933 struct cnic_local *cp = dev->cnic_priv; 4934 u32 pfid = cp->pfid; 4935 4936 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM + 4937 CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0); 4938 cp->kcq1.sw_prod_idx = 0; 4939 4940 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 4941 struct host_hc_status_block_e2 *sb = cp->status_blk.gen; 4942 4943 cp->kcq1.hw_prod_idx_ptr = 4944 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]; 4945 cp->kcq1.status_idx_ptr = 4946 &sb->sb.running_index[SM_RX_ID]; 4947 } else { 4948 struct host_hc_status_block_e1x *sb = cp->status_blk.gen; 4949 4950 cp->kcq1.hw_prod_idx_ptr = 4951 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]; 4952 cp->kcq1.status_idx_ptr = 4953 &sb->sb.running_index[SM_RX_ID]; 4954 } 4955 4956 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 4957 struct host_hc_status_block_e2 *sb = cp->status_blk.gen; 4958 4959 cp->kcq2.io_addr = BAR_USTRORM_INTMEM + 4960 USTORM_FCOE_EQ_PROD_OFFSET(pfid); 4961 cp->kcq2.sw_prod_idx = 0; 4962 cp->kcq2.hw_prod_idx_ptr = 4963 &sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS]; 4964 cp->kcq2.status_idx_ptr = 4965 &sb->sb.running_index[SM_RX_ID]; 4966 } 4967 } 4968 4969 static int cnic_start_bnx2x_hw(struct cnic_dev *dev) 4970 { 4971 struct cnic_local *cp = dev->cnic_priv; 4972 struct cnic_eth_dev *ethdev = cp->ethdev; 4973 int func = CNIC_FUNC(cp), ret; 4974 u32 pfid; 4975 4976 dev->stats_addr = ethdev->addr_drv_info_to_mcp; 4977 cp->port_mode = CHIP_PORT_MODE_NONE; 4978 4979 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 4980 u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR); 4981 4982 if (!(val & 1)) 4983 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN); 4984 else 4985 val = (val >> 1) & 1; 4986 4987 if (val) { 4988 cp->port_mode = CHIP_4_PORT_MODE; 4989 cp->pfid = func >> 1; 4990 } else { 4991 cp->port_mode = CHIP_2_PORT_MODE; 4992 cp->pfid = func & 0x6; 4993 } 4994 } else { 4995 cp->pfid = func; 4996 } 4997 pfid = cp->pfid; 4998 4999 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, 5000 cp->iscsi_start_cid, 0); 5001 5002 if (ret) 5003 return -ENOMEM; 5004 5005 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 5006 ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn, 5007 cp->fcoe_start_cid, 0); 5008 5009 if (ret) 5010 return -ENOMEM; 5011 } 5012 5013 cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2; 5014 5015 cnic_init_bnx2x_kcq(dev); 5016 5017 /* Only 1 EQ */ 5018 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX); 5019 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 5020 CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0); 5021 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 5022 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0), 5023 cp->kcq1.dma.pg_map_arr[1] & 0xffffffff); 5024 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 5025 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4, 5026 (u64) cp->kcq1.dma.pg_map_arr[1] >> 32); 5027 CNIC_WR(dev, 
BAR_CSTRORM_INTMEM + 5028 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0), 5029 cp->kcq1.dma.pg_map_arr[0] & 0xffffffff); 5030 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 5031 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4, 5032 (u64) cp->kcq1.dma.pg_map_arr[0] >> 32); 5033 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 5034 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1); 5035 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 5036 CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num); 5037 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 5038 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0), 5039 HC_INDEX_ISCSI_EQ_CONS); 5040 5041 CNIC_WR(dev, BAR_USTRORM_INTMEM + 5042 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid), 5043 cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff); 5044 CNIC_WR(dev, BAR_USTRORM_INTMEM + 5045 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4, 5046 (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32); 5047 5048 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 5049 TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF); 5050 5051 cnic_setup_bnx2x_context(dev); 5052 5053 ret = cnic_init_bnx2x_irq(dev); 5054 if (ret) 5055 return ret; 5056 5057 return 0; 5058 } 5059 5060 static void cnic_init_rings(struct cnic_dev *dev) 5061 { 5062 struct cnic_local *cp = dev->cnic_priv; 5063 struct cnic_uio_dev *udev = cp->udev; 5064 5065 if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) 5066 return; 5067 5068 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 5069 cnic_init_bnx2_tx_ring(dev); 5070 cnic_init_bnx2_rx_ring(dev); 5071 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); 5072 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 5073 u32 cli = cp->ethdev->iscsi_l2_client_id; 5074 u32 cid = cp->ethdev->iscsi_l2_cid; 5075 u32 cl_qzone_id; 5076 struct client_init_ramrod_data *data; 5077 union l5cm_specific_data l5_data; 5078 struct ustorm_eth_rx_producers rx_prods = {0}; 5079 u32 off, i, *cid_ptr; 5080 5081 rx_prods.bd_prod = 0; 5082 rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT; 5083 barrier(); 5084 5085 cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); 5086 5087 off = BAR_USTRORM_INTMEM + 5088 (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? 
5089 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) : 5090 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli)); 5091 5092 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++) 5093 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]); 5094 5095 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 5096 5097 data = udev->l2_buf; 5098 cid_ptr = udev->l2_buf + 12; 5099 5100 memset(data, 0, sizeof(*data)); 5101 5102 cnic_init_bnx2x_tx_ring(dev, data); 5103 cnic_init_bnx2x_rx_ring(dev, data); 5104 5105 l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff; 5106 l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32; 5107 5108 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); 5109 5110 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP, 5111 cid, ETH_CONNECTION_TYPE, &l5_data); 5112 5113 i = 0; 5114 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && 5115 ++i < 10) 5116 msleep(1); 5117 5118 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 5119 netdev_err(dev->netdev, 5120 "iSCSI CLIENT_SETUP did not complete\n"); 5121 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1); 5122 cnic_ring_ctl(dev, cid, cli, 1); 5123 *cid_ptr = cid; 5124 } 5125 } 5126 5127 static void cnic_shutdown_rings(struct cnic_dev *dev) 5128 { 5129 struct cnic_local *cp = dev->cnic_priv; 5130 struct cnic_uio_dev *udev = cp->udev; 5131 void *rx_ring; 5132 5133 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) 5134 return; 5135 5136 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 5137 cnic_shutdown_bnx2_rx_ring(dev); 5138 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 5139 u32 cli = cp->ethdev->iscsi_l2_client_id; 5140 u32 cid = cp->ethdev->iscsi_l2_cid; 5141 union l5cm_specific_data l5_data; 5142 int i; 5143 5144 cnic_ring_ctl(dev, cid, cli, 0); 5145 5146 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 5147 5148 l5_data.phy_address.lo = cli; 5149 l5_data.phy_address.hi = 0; 5150 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT, 5151 cid, ETH_CONNECTION_TYPE, &l5_data); 5152 i = 0; 5153 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && 5154 ++i < 10) 5155 msleep(1); 5156 5157 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 5158 netdev_err(dev->netdev, 5159 "iSCSI CLIENT_HALT did not complete\n"); 5160 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1); 5161 5162 memset(&l5_data, 0, sizeof(l5_data)); 5163 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL, 5164 cid, NONE_CONNECTION_TYPE, &l5_data); 5165 msleep(10); 5166 } 5167 clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); 5168 rx_ring = udev->l2_ring + BCM_PAGE_SIZE; 5169 memset(rx_ring, 0, BCM_PAGE_SIZE); 5170 } 5171 5172 static int cnic_register_netdev(struct cnic_dev *dev) 5173 { 5174 struct cnic_local *cp = dev->cnic_priv; 5175 struct cnic_eth_dev *ethdev = cp->ethdev; 5176 int err; 5177 5178 if (!ethdev) 5179 return -ENODEV; 5180 5181 if (ethdev->drv_state & CNIC_DRV_STATE_REGD) 5182 return 0; 5183 5184 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev); 5185 if (err) 5186 netdev_err(dev->netdev, "register_cnic failed\n"); 5187 5188 return err; 5189 } 5190 5191 static void cnic_unregister_netdev(struct cnic_dev *dev) 5192 { 5193 struct cnic_local *cp = dev->cnic_priv; 5194 struct cnic_eth_dev *ethdev = cp->ethdev; 5195 5196 if (!ethdev) 5197 return; 5198 5199 ethdev->drv_unregister_cnic(dev->netdev); 5200 } 5201 5202 static int cnic_start_hw(struct cnic_dev *dev) 5203 { 5204 struct cnic_local *cp = dev->cnic_priv; 5205 struct cnic_eth_dev *ethdev = 
cp->ethdev; 5206 int err; 5207 5208 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) 5209 return -EALREADY; 5210 5211 dev->regview = ethdev->io_base; 5212 pci_dev_get(dev->pcidev); 5213 cp->func = PCI_FUNC(dev->pcidev->devfn); 5214 cp->status_blk.gen = ethdev->irq_arr[0].status_blk; 5215 cp->status_blk_num = ethdev->irq_arr[0].status_blk_num; 5216 5217 err = cp->alloc_resc(dev); 5218 if (err) { 5219 netdev_err(dev->netdev, "allocate resource failure\n"); 5220 goto err1; 5221 } 5222 5223 err = cp->start_hw(dev); 5224 if (err) 5225 goto err1; 5226 5227 err = cnic_cm_open(dev); 5228 if (err) 5229 goto err1; 5230 5231 set_bit(CNIC_F_CNIC_UP, &dev->flags); 5232 5233 cp->enable_int(dev); 5234 5235 return 0; 5236 5237 err1: 5238 cp->free_resc(dev); 5239 pci_dev_put(dev->pcidev); 5240 return err; 5241 } 5242 5243 static void cnic_stop_bnx2_hw(struct cnic_dev *dev) 5244 { 5245 cnic_disable_bnx2_int_sync(dev); 5246 5247 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0); 5248 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0); 5249 5250 cnic_init_context(dev, KWQ_CID); 5251 cnic_init_context(dev, KCQ_CID); 5252 5253 cnic_setup_5709_context(dev, 0); 5254 cnic_free_irq(dev); 5255 5256 cnic_free_resc(dev); 5257 } 5258 5259 5260 static void cnic_stop_bnx2x_hw(struct cnic_dev *dev) 5261 { 5262 struct cnic_local *cp = dev->cnic_priv; 5263 5264 cnic_free_irq(dev); 5265 *cp->kcq1.hw_prod_idx_ptr = 0; 5266 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 5267 CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0); 5268 CNIC_WR16(dev, cp->kcq1.io_addr, 0); 5269 cnic_free_resc(dev); 5270 } 5271 5272 static void cnic_stop_hw(struct cnic_dev *dev) 5273 { 5274 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { 5275 struct cnic_local *cp = dev->cnic_priv; 5276 int i = 0; 5277 5278 /* Need to wait for the ring shutdown event to complete 5279 * before clearing the CNIC_UP flag. 
5280 */ 5281 while (cp->udev->uio_dev != -1 && i < 15) { 5282 msleep(100); 5283 i++; 5284 } 5285 cnic_shutdown_rings(dev); 5286 clear_bit(CNIC_F_CNIC_UP, &dev->flags); 5287 RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL); 5288 synchronize_rcu(); 5289 cnic_cm_shutdown(dev); 5290 cp->stop_hw(dev); 5291 pci_dev_put(dev->pcidev); 5292 } 5293 } 5294 5295 static void cnic_free_dev(struct cnic_dev *dev) 5296 { 5297 int i = 0; 5298 5299 while ((atomic_read(&dev->ref_count) != 0) && i < 10) { 5300 msleep(100); 5301 i++; 5302 } 5303 if (atomic_read(&dev->ref_count) != 0) 5304 netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n"); 5305 5306 netdev_info(dev->netdev, "Removed CNIC device\n"); 5307 dev_put(dev->netdev); 5308 kfree(dev); 5309 } 5310 5311 static struct cnic_dev *cnic_alloc_dev(struct net_device *dev, 5312 struct pci_dev *pdev) 5313 { 5314 struct cnic_dev *cdev; 5315 struct cnic_local *cp; 5316 int alloc_size; 5317 5318 alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local); 5319 5320 cdev = kzalloc(alloc_size , GFP_KERNEL); 5321 if (cdev == NULL) { 5322 netdev_err(dev, "allocate dev struct failure\n"); 5323 return NULL; 5324 } 5325 5326 cdev->netdev = dev; 5327 cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev); 5328 cdev->register_device = cnic_register_device; 5329 cdev->unregister_device = cnic_unregister_device; 5330 cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv; 5331 5332 cp = cdev->cnic_priv; 5333 cp->dev = cdev; 5334 cp->l2_single_buf_size = 0x400; 5335 cp->l2_rx_ring_size = 3; 5336 5337 spin_lock_init(&cp->cnic_ulp_lock); 5338 5339 netdev_info(dev, "Added CNIC device\n"); 5340 5341 return cdev; 5342 } 5343 5344 static struct cnic_dev *init_bnx2_cnic(struct net_device *dev) 5345 { 5346 struct pci_dev *pdev; 5347 struct cnic_dev *cdev; 5348 struct cnic_local *cp; 5349 struct cnic_eth_dev *ethdev = NULL; 5350 struct cnic_eth_dev *(*probe)(struct net_device *) = NULL; 5351 5352 probe = symbol_get(bnx2_cnic_probe); 5353 if (probe) { 5354 ethdev = (*probe)(dev); 5355 symbol_put(bnx2_cnic_probe); 5356 } 5357 if (!ethdev) 5358 return NULL; 5359 5360 pdev = ethdev->pdev; 5361 if (!pdev) 5362 return NULL; 5363 5364 dev_hold(dev); 5365 pci_dev_get(pdev); 5366 if ((pdev->device == PCI_DEVICE_ID_NX2_5709 || 5367 pdev->device == PCI_DEVICE_ID_NX2_5709S) && 5368 (pdev->revision < 0x10)) { 5369 pci_dev_put(pdev); 5370 goto cnic_err; 5371 } 5372 pci_dev_put(pdev); 5373 5374 cdev = cnic_alloc_dev(dev, pdev); 5375 if (cdev == NULL) 5376 goto cnic_err; 5377 5378 set_bit(CNIC_F_BNX2_CLASS, &cdev->flags); 5379 cdev->submit_kwqes = cnic_submit_bnx2_kwqes; 5380 5381 cp = cdev->cnic_priv; 5382 cp->ethdev = ethdev; 5383 cdev->pcidev = pdev; 5384 cp->chip_id = ethdev->chip_id; 5385 5386 cdev->max_iscsi_conn = ethdev->max_iscsi_conn; 5387 5388 cp->cnic_ops = &cnic_bnx2_ops; 5389 cp->start_hw = cnic_start_bnx2_hw; 5390 cp->stop_hw = cnic_stop_bnx2_hw; 5391 cp->setup_pgtbl = cnic_setup_page_tbl; 5392 cp->alloc_resc = cnic_alloc_bnx2_resc; 5393 cp->free_resc = cnic_free_resc; 5394 cp->start_cm = cnic_cm_init_bnx2_hw; 5395 cp->stop_cm = cnic_cm_stop_bnx2_hw; 5396 cp->enable_int = cnic_enable_bnx2_int; 5397 cp->disable_int_sync = cnic_disable_bnx2_int_sync; 5398 cp->close_conn = cnic_close_bnx2_conn; 5399 return cdev; 5400 5401 cnic_err: 5402 dev_put(dev); 5403 return NULL; 5404 } 5405 5406 static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) 5407 { 5408 struct pci_dev *pdev; 5409 struct cnic_dev *cdev; 5410 struct cnic_local *cp; 5411 struct cnic_eth_dev *ethdev = 
NULL; 5412 struct cnic_eth_dev *(*probe)(struct net_device *) = NULL; 5413 5414 probe = symbol_get(bnx2x_cnic_probe); 5415 if (probe) { 5416 ethdev = (*probe)(dev); 5417 symbol_put(bnx2x_cnic_probe); 5418 } 5419 if (!ethdev) 5420 return NULL; 5421 5422 pdev = ethdev->pdev; 5423 if (!pdev) 5424 return NULL; 5425 5426 dev_hold(dev); 5427 cdev = cnic_alloc_dev(dev, pdev); 5428 if (cdev == NULL) { 5429 dev_put(dev); 5430 return NULL; 5431 } 5432 5433 set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags); 5434 cdev->submit_kwqes = cnic_submit_bnx2x_kwqes; 5435 5436 cp = cdev->cnic_priv; 5437 cp->ethdev = ethdev; 5438 cdev->pcidev = pdev; 5439 cp->chip_id = ethdev->chip_id; 5440 5441 cdev->stats_addr = ethdev->addr_drv_info_to_mcp; 5442 5443 if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)) 5444 cdev->max_iscsi_conn = ethdev->max_iscsi_conn; 5445 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) && 5446 !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE)) 5447 cdev->max_fcoe_conn = ethdev->max_fcoe_conn; 5448 5449 if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS) 5450 cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS; 5451 5452 memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6); 5453 5454 cp->cnic_ops = &cnic_bnx2x_ops; 5455 cp->start_hw = cnic_start_bnx2x_hw; 5456 cp->stop_hw = cnic_stop_bnx2x_hw; 5457 cp->setup_pgtbl = cnic_setup_page_tbl_le; 5458 cp->alloc_resc = cnic_alloc_bnx2x_resc; 5459 cp->free_resc = cnic_free_resc; 5460 cp->start_cm = cnic_cm_init_bnx2x_hw; 5461 cp->stop_cm = cnic_cm_stop_bnx2x_hw; 5462 cp->enable_int = cnic_enable_bnx2x_int; 5463 cp->disable_int_sync = cnic_disable_bnx2x_int_sync; 5464 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) 5465 cp->ack_int = cnic_ack_bnx2x_e2_msix; 5466 else 5467 cp->ack_int = cnic_ack_bnx2x_msix; 5468 cp->close_conn = cnic_close_bnx2x_conn; 5469 return cdev; 5470 } 5471 5472 static struct cnic_dev *is_cnic_dev(struct net_device *dev) 5473 { 5474 struct ethtool_drvinfo drvinfo; 5475 struct cnic_dev *cdev = NULL; 5476 5477 if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) { 5478 memset(&drvinfo, 0, sizeof(drvinfo)); 5479 dev->ethtool_ops->get_drvinfo(dev, &drvinfo); 5480 5481 if (!strcmp(drvinfo.driver, "bnx2")) 5482 cdev = init_bnx2_cnic(dev); 5483 if (!strcmp(drvinfo.driver, "bnx2x")) 5484 cdev = init_bnx2x_cnic(dev); 5485 if (cdev) { 5486 write_lock(&cnic_dev_lock); 5487 list_add(&cdev->list, &cnic_dev_list); 5488 write_unlock(&cnic_dev_lock); 5489 } 5490 } 5491 return cdev; 5492 } 5493 5494 static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event, 5495 u16 vlan_id) 5496 { 5497 int if_type; 5498 5499 rcu_read_lock(); 5500 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 5501 struct cnic_ulp_ops *ulp_ops; 5502 void *ctx; 5503 5504 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); 5505 if (!ulp_ops || !ulp_ops->indicate_netevent) 5506 continue; 5507 5508 ctx = cp->ulp_handle[if_type]; 5509 5510 ulp_ops->indicate_netevent(ctx, event, vlan_id); 5511 } 5512 rcu_read_unlock(); 5513 } 5514 5515 /** 5516 * netdev event handler 5517 */ 5518 static int cnic_netdev_event(struct notifier_block *this, unsigned long event, 5519 void *ptr) 5520 { 5521 struct net_device *netdev = ptr; 5522 struct cnic_dev *dev; 5523 int new_dev = 0; 5524 5525 dev = cnic_from_netdev(netdev); 5526 5527 if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) { 5528 /* Check for the hot-plug device */ 5529 dev = is_cnic_dev(netdev); 5530 if (dev) { 5531 new_dev = 1; 5532 cnic_hold(dev); 5533 } 5534 } 5535 if (dev) { 5536 struct cnic_local *cp = dev->cnic_priv; 5537 5538 if 
(new_dev) 5539 cnic_ulp_init(dev); 5540 else if (event == NETDEV_UNREGISTER) 5541 cnic_ulp_exit(dev); 5542 5543 if (event == NETDEV_UP || (new_dev && netif_running(netdev))) { 5544 if (cnic_register_netdev(dev) != 0) { 5545 cnic_put(dev); 5546 goto done; 5547 } 5548 if (!cnic_start_hw(dev)) 5549 cnic_ulp_start(dev); 5550 } 5551 5552 cnic_rcv_netevent(cp, event, 0); 5553 5554 if (event == NETDEV_GOING_DOWN) { 5555 cnic_ulp_stop(dev); 5556 cnic_stop_hw(dev); 5557 cnic_unregister_netdev(dev); 5558 } else if (event == NETDEV_UNREGISTER) { 5559 write_lock(&cnic_dev_lock); 5560 list_del_init(&dev->list); 5561 write_unlock(&cnic_dev_lock); 5562 5563 cnic_put(dev); 5564 cnic_free_dev(dev); 5565 goto done; 5566 } 5567 cnic_put(dev); 5568 } else { 5569 struct net_device *realdev; 5570 u16 vid; 5571 5572 vid = cnic_get_vlan(netdev, &realdev); 5573 if (realdev) { 5574 dev = cnic_from_netdev(realdev); 5575 if (dev) { 5576 vid |= VLAN_TAG_PRESENT; 5577 cnic_rcv_netevent(dev->cnic_priv, event, vid); 5578 cnic_put(dev); 5579 } 5580 } 5581 } 5582 done: 5583 return NOTIFY_DONE; 5584 } 5585 5586 static struct notifier_block cnic_netdev_notifier = { 5587 .notifier_call = cnic_netdev_event 5588 }; 5589 5590 static void cnic_release(void) 5591 { 5592 struct cnic_dev *dev; 5593 struct cnic_uio_dev *udev; 5594 5595 while (!list_empty(&cnic_dev_list)) { 5596 dev = list_entry(cnic_dev_list.next, struct cnic_dev, list); 5597 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { 5598 cnic_ulp_stop(dev); 5599 cnic_stop_hw(dev); 5600 } 5601 5602 cnic_ulp_exit(dev); 5603 cnic_unregister_netdev(dev); 5604 list_del_init(&dev->list); 5605 cnic_free_dev(dev); 5606 } 5607 while (!list_empty(&cnic_udev_list)) { 5608 udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev, 5609 list); 5610 cnic_free_uio(udev); 5611 } 5612 } 5613 5614 static int __init cnic_init(void) 5615 { 5616 int rc = 0; 5617 5618 pr_info("%s", version); 5619 5620 rc = register_netdevice_notifier(&cnic_netdev_notifier); 5621 if (rc) { 5622 cnic_release(); 5623 return rc; 5624 } 5625 5626 cnic_wq = create_singlethread_workqueue("cnic_wq"); 5627 if (!cnic_wq) { 5628 cnic_release(); 5629 unregister_netdevice_notifier(&cnic_netdev_notifier); 5630 return -ENOMEM; 5631 } 5632 5633 return 0; 5634 } 5635 5636 static void __exit cnic_exit(void) 5637 { 5638 unregister_netdevice_notifier(&cnic_netdev_notifier); 5639 cnic_release(); 5640 destroy_workqueue(cnic_wq); 5641 } 5642 5643 module_init(cnic_init); 5644 module_exit(cnic_exit); 5645