/*
 * QLogic iSCSI Offload Driver
 * Copyright (c) 2016 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <scsi/iscsi_if.h>
#include <linux/inet.h>
#include <net/arp.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>

#include "qedi.h"
#include "qedi_gbl.h"
#include "qedi_iscsi.h"

static uint qedi_fw_debug;
module_param(qedi_fw_debug, uint, 0644);
MODULE_PARM_DESC(qedi_fw_debug, " Firmware debug level 0(default) to 3");

uint qedi_dbg_log = QEDI_LOG_WARN | QEDI_LOG_SCSI_TM;
module_param(qedi_dbg_log, uint, 0644);
MODULE_PARM_DESC(qedi_dbg_log, " Default debug level");

uint qedi_io_tracing;
module_param(qedi_io_tracing, uint, 0644);
MODULE_PARM_DESC(qedi_io_tracing,
		 " Enable logging of SCSI requests/completions into trace buffer. (default off).");

const struct qed_iscsi_ops *qedi_ops;
static struct scsi_transport_template *qedi_scsi_transport;
static struct pci_driver qedi_pci_driver;
static DEFINE_PER_CPU(struct qedi_percpu_s, qedi_percpu);
static LIST_HEAD(qedi_udev_list);
/* Static function declaration */
static int qedi_alloc_global_queues(struct qedi_ctx *qedi);
static void qedi_free_global_queues(struct qedi_ctx *qedi);
static struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid);
static void qedi_reset_uio_rings(struct qedi_uio_dev *udev);
static void qedi_ll2_free_skbs(struct qedi_ctx *qedi);

static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
{
	struct qedi_ctx *qedi;
	struct qedi_endpoint *qedi_ep;
	struct async_data *data;
	int rval = 0;

	if (!context || !fw_handle) {
		QEDI_ERR(NULL, "Recv event with ctx NULL\n");
		return -EINVAL;
	}

	qedi = (struct qedi_ctx *)context;
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
		  "Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);

	data = (struct async_data *)fw_handle;
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
		  "cid=0x%x tid=0x%x err-code=0x%x fw-dbg-param=0x%x\n",
		  data->cid, data->itid, data->error_code,
		  data->fw_debug_param);

	qedi_ep = qedi->ep_tbl[data->cid];

	if (!qedi_ep) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "Cannot process event, ep already disconnected, cid=0x%x\n",
			  data->cid);
		WARN_ON(1);
		return -ENODEV;
	}

	switch (fw_event_code) {
	case ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE:
		if (qedi_ep->state == EP_STATE_OFLDCONN_START)
			qedi_ep->state = EP_STATE_OFLDCONN_COMPL;

		wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
		break;
	case ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE:
		qedi_ep->state = EP_STATE_DISCONN_COMPL;
		wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
		break;
	case ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR:
		qedi_process_iscsi_error(qedi_ep, data);
		break;
	case ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD:
	case ISCSI_EVENT_TYPE_ASYN_SYN_RCVD:
	case ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME:
	case ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT:
	case ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT:
	case ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2:
	case ISCSI_EVENT_TYPE_TCP_CONN_ERROR:
		qedi_process_tcp_error(qedi_ep, data);
		break;
	default:
		QEDI_ERR(&qedi->dbg_ctx, "Recv Unknown Event %u\n",
			 fw_event_code);
	}

	return rval;
}
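/*
 * UIO interface: the control page, ll2 ring and packet buffers allocated
 * below are exported to the userspace iscsiuio daemon via mmap (see
 * qedi_init_uio()); open/release track whether the daemon currently has
 * the device open.
 */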
static int qedi_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct qedi_uio_dev *udev = uinfo->priv;
	struct qedi_ctx *qedi = udev->qedi;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	udev->uio_dev = iminor(inode);
	qedi_reset_uio_rings(udev);
	set_bit(UIO_DEV_OPENED, &qedi->flags);
	rtnl_unlock();

	return 0;
}

static int qedi_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct qedi_uio_dev *udev = uinfo->priv;
	struct qedi_ctx *qedi = udev->qedi;

	udev->uio_dev = -1;
	clear_bit(UIO_DEV_OPENED, &qedi->flags);
	qedi_ll2_free_skbs(qedi);
	return 0;
}

static void __qedi_free_uio_rings(struct qedi_uio_dev *udev)
{
	if (udev->uctrl) {
		free_page((unsigned long)udev->uctrl);
		udev->uctrl = NULL;
	}

	if (udev->ll2_ring) {
		free_page((unsigned long)udev->ll2_ring);
		udev->ll2_ring = NULL;
	}

	if (udev->ll2_buf) {
		free_pages((unsigned long)udev->ll2_buf, 2);
		udev->ll2_buf = NULL;
	}
}

static void __qedi_free_uio(struct qedi_uio_dev *udev)
{
	uio_unregister_device(&udev->qedi_uinfo);

	__qedi_free_uio_rings(udev);

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void qedi_free_uio(struct qedi_uio_dev *udev)
{
	if (!udev)
		return;

	list_del_init(&udev->list);
	__qedi_free_uio(udev);
}

static void qedi_reset_uio_rings(struct qedi_uio_dev *udev)
{
	struct qedi_ctx *qedi = NULL;
	struct qedi_uio_ctrl *uctrl = NULL;

	qedi = udev->qedi;
	uctrl = udev->uctrl;

	spin_lock_bh(&qedi->ll2_lock);
	uctrl->host_rx_cons = 0;
	uctrl->hw_rx_prod = 0;
	uctrl->hw_rx_bd_prod = 0;
	uctrl->host_rx_bd_cons = 0;

	memset(udev->ll2_ring, 0, udev->ll2_ring_size);
	memset(udev->ll2_buf, 0, udev->ll2_buf_size);
	spin_unlock_bh(&qedi->ll2_lock);
}
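/*
 * Allocate the three regions later exported as UIO maps 0-2 in
 * qedi_init_uio(): one zeroed page of control data, one page for the
 * ll2 ring and an order-2 (four page) block of Tx/Rx packet buffers.
 */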
static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev)
{
	int rc = 0;

	if (udev->ll2_ring || udev->ll2_buf)
		return rc;

	/* Memory for control area. */
	udev->uctrl = (void *)get_zeroed_page(GFP_KERNEL);
	if (!udev->uctrl)
		return -ENOMEM;

	/* Allocating memory for LL2 ring */
	udev->ll2_ring_size = QEDI_PAGE_SIZE;
	udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP);
	if (!udev->ll2_ring) {
		rc = -ENOMEM;
		goto exit_alloc_ring;
	}

	/* Allocating memory for Tx/Rx pkt buffer */
	udev->ll2_buf_size = TX_RX_RING * LL2_SINGLE_BUF_SIZE;
	udev->ll2_buf_size = QEDI_PAGE_ALIGN(udev->ll2_buf_size);
	udev->ll2_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP |
						 __GFP_ZERO, 2);
	if (!udev->ll2_buf) {
		rc = -ENOMEM;
		goto exit_alloc_buf;
	}
	return rc;

exit_alloc_buf:
	free_page((unsigned long)udev->ll2_ring);
	udev->ll2_ring = NULL;
exit_alloc_ring:
	return rc;
}

static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
{
	struct qedi_uio_dev *udev = NULL;
	int rc = 0;

	list_for_each_entry(udev, &qedi_udev_list, list) {
		if (udev->pdev == qedi->pdev) {
			udev->qedi = qedi;
			if (__qedi_alloc_uio_rings(udev)) {
				udev->qedi = NULL;
				return -ENOMEM;
			}
			qedi->udev = udev;
			return 0;
		}
	}

	udev = kzalloc(sizeof(*udev), GFP_KERNEL);
	if (!udev) {
		rc = -ENOMEM;
		goto err_udev;
	}

	udev->uio_dev = -1;

	udev->qedi = qedi;
	udev->pdev = qedi->pdev;

	rc = __qedi_alloc_uio_rings(udev);
	if (rc)
		goto err_uctrl;

	list_add(&udev->list, &qedi_udev_list);

	pci_dev_get(udev->pdev);
	qedi->udev = udev;

	udev->tx_pkt = udev->ll2_buf;
	udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE;
	return 0;

err_uctrl:
	kfree(udev);
err_udev:
	return -ENOMEM;
}

static int qedi_init_uio(struct qedi_ctx *qedi)
{
	struct qedi_uio_dev *udev = qedi->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->qedi_uinfo;

	uinfo->mem[0].addr = (unsigned long)udev->uctrl;
	uinfo->mem[0].size = sizeof(struct qedi_uio_ctrl);
	uinfo->mem[0].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[1].addr = (unsigned long)udev->ll2_ring;
	uinfo->mem[1].size = udev->ll2_ring_size;
	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long)udev->ll2_buf;
	uinfo->mem[2].size = udev->ll2_buf_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->name = "qedi_uio";
	uinfo->version = QEDI_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = qedi_uio_open;
	uinfo->release = qedi_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
			if (ret) {
				QEDI_ERR(&qedi->dbg_ctx,
					 "UIO registration failed\n");
			}
		}
	}

	return ret;
}
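/*
 * A status block is a small DMA-coherent structure through which the
 * firmware publishes producer indices (see the pi_array reads in the
 * fastpath); one status block is set up per MSI-X vector.
 */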
static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
				  struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	int ret;

	sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
				     sizeof(struct status_block), &sb_phys,
				     GFP_KERNEL);
	if (!sb_virt) {
		QEDI_ERR(&qedi->dbg_ctx,
			 "Status block allocation failed for id = %d.\n",
			 sb_id);
		return -ENOMEM;
	}

	ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys,
					sb_id, QED_SB_TYPE_STORAGE);
	if (ret) {
		QEDI_ERR(&qedi->dbg_ctx,
			 "Status block initialization failed for id = %d.\n",
			 sb_id);
		return ret;
	}

	return 0;
}

static void qedi_free_sb(struct qedi_ctx *qedi)
{
	struct qed_sb_info *sb_info;
	int id;

	for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
		sb_info = &qedi->sb_array[id];
		if (sb_info->sb_virt)
			dma_free_coherent(&qedi->pdev->dev,
					  sizeof(*sb_info->sb_virt),
					  (void *)sb_info->sb_virt,
					  sb_info->sb_phys);
	}
}

static void qedi_free_fp(struct qedi_ctx *qedi)
{
	kfree(qedi->fp_array);
	kfree(qedi->sb_array);
}

static void qedi_destroy_fp(struct qedi_ctx *qedi)
{
	qedi_free_sb(qedi);
	qedi_free_fp(qedi);
}

static int qedi_alloc_fp(struct qedi_ctx *qedi)
{
	int ret = 0;

	qedi->fp_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
				 sizeof(struct qedi_fastpath), GFP_KERNEL);
	if (!qedi->fp_array) {
		QEDI_ERR(&qedi->dbg_ctx,
			 "fastpath fp array allocation failed.\n");
		return -ENOMEM;
	}

	qedi->sb_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
				 sizeof(struct qed_sb_info), GFP_KERNEL);
	if (!qedi->sb_array) {
		QEDI_ERR(&qedi->dbg_ctx,
			 "fastpath sb array allocation failed.\n");
		ret = -ENOMEM;
		goto free_fp;
	}

	return ret;

free_fp:
	qedi_free_fp(qedi);
	return ret;
}

static void qedi_int_fp(struct qedi_ctx *qedi)
{
	struct qedi_fastpath *fp;
	int id;

	memset(qedi->fp_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
	       sizeof(*qedi->fp_array));
	memset(qedi->sb_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
	       sizeof(*qedi->sb_array));

	for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
		fp = &qedi->fp_array[id];
		fp->sb_info = &qedi->sb_array[id];
		fp->sb_id = id;
		fp->qedi = qedi;
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 "qedi", id);

		/* fp_array[i] ---- irq cookie
		 * So init data which is needed in int ctx
		 */
	}
}

static int qedi_prepare_fp(struct qedi_ctx *qedi)
{
	struct qedi_fastpath *fp;
	int id, ret = 0;

	ret = qedi_alloc_fp(qedi);
	if (ret)
		goto err;

	qedi_int_fp(qedi);

	for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
		fp = &qedi->fp_array[id];
		ret = qedi_alloc_and_init_sb(qedi, fp->sb_info, fp->sb_id);
		if (ret) {
			QEDI_ERR(&qedi->dbg_ctx,
				 "SB allocation and initialization failed.\n");
			ret = -EIO;
			goto err_init;
		}
	}

	return 0;

err_init:
	qedi_free_sb(qedi);
	qedi_free_fp(qedi);
err:
	return ret;
}
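/*
 * The cid queue is a simple ring of free iSCSI connection IDs: cid_que[]
 * is pre-filled with 0..max_active_conns-1 and handed out/returned via
 * the prod/cons indices, while conn_cid_tbl[] maps an active cid back to
 * its qedi_conn (used by the fastpath in qedi_queue_cqe()).
 */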
static int qedi_setup_cid_que(struct qedi_ctx *qedi)
{
	int i;

	qedi->cid_que.cid_que_base = kmalloc_array(qedi->max_active_conns,
						   sizeof(u32), GFP_KERNEL);
	if (!qedi->cid_que.cid_que_base)
		return -ENOMEM;

	qedi->cid_que.conn_cid_tbl = kmalloc_array(qedi->max_active_conns,
						   sizeof(struct qedi_conn *),
						   GFP_KERNEL);
	if (!qedi->cid_que.conn_cid_tbl) {
		kfree(qedi->cid_que.cid_que_base);
		qedi->cid_que.cid_que_base = NULL;
		return -ENOMEM;
	}

	qedi->cid_que.cid_que = (u32 *)qedi->cid_que.cid_que_base;
	qedi->cid_que.cid_q_prod_idx = 0;
	qedi->cid_que.cid_q_cons_idx = 0;
	qedi->cid_que.cid_q_max_idx = qedi->max_active_conns;
	qedi->cid_que.cid_free_cnt = qedi->max_active_conns;

	for (i = 0; i < qedi->max_active_conns; i++) {
		qedi->cid_que.cid_que[i] = i;
		qedi->cid_que.conn_cid_tbl[i] = NULL;
	}

	return 0;
}

static void qedi_release_cid_que(struct qedi_ctx *qedi)
{
	kfree(qedi->cid_que.cid_que_base);
	qedi->cid_que.cid_que_base = NULL;

	kfree(qedi->cid_que.conn_cid_tbl);
	qedi->cid_que.conn_cid_tbl = NULL;
}

static int qedi_init_id_tbl(struct qedi_portid_tbl *id_tbl, u16 size,
			    u16 start_id, u16 next)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = next;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void qedi_free_id_tbl(struct qedi_portid_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl)
{
	u16 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = QEDI_LOCAL_PORT_INVALID;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = QEDI_LOCAL_PORT_INVALID;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id)
{
	if (id == QEDI_LOCAL_PORT_INVALID)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void qedi_cm_free_mem(struct qedi_ctx *qedi)
{
	kfree(qedi->ep_tbl);
	qedi->ep_tbl = NULL;
	qedi_free_id_tbl(&qedi->lcl_port_tbl);
}

static int qedi_cm_alloc_mem(struct qedi_ctx *qedi)
{
	u16 port_id;

	qedi->ep_tbl = kzalloc((qedi->max_active_conns *
				sizeof(struct qedi_endpoint *)), GFP_KERNEL);
	if (!qedi->ep_tbl)
		return -ENOMEM;
	port_id = prandom_u32() % QEDI_LOCAL_PORT_RANGE;
	if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE,
			     QEDI_LOCAL_PORT_MIN, port_id)) {
		qedi_cm_free_mem(qedi);
		return -ENOMEM;
	}

	return 0;
}

static struct qedi_ctx *qedi_host_alloc(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct qedi_ctx *qedi = NULL;

	shost = iscsi_host_alloc(&qedi_host_template,
				 sizeof(struct qedi_ctx), 0);
	if (!shost) {
		QEDI_ERR(NULL, "Could not allocate shost\n");
		goto exit_setup_shost;
	}

	shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA;
	shost->max_channel = 0;
	shost->max_lun = ~0;
	shost->max_cmd_len = 16;
	shost->transportt = qedi_scsi_transport;

	qedi = iscsi_host_priv(shost);
	memset(qedi, 0, sizeof(*qedi));
	qedi->shost = shost;
	qedi->dbg_ctx.host_no = shost->host_no;
	qedi->pdev = pdev;
	qedi->dbg_ctx.pdev = pdev;
	qedi->max_active_conns = ISCSI_MAX_SESS_PER_HBA;
	qedi->max_sqes = QEDI_SQ_SIZE;

	if (shost_use_blk_mq(shost))
		shost->nr_hw_queues = MIN_NUM_CPUS_MSIX(qedi);

	pci_set_drvdata(pdev, qedi);

exit_setup_shost:
	return qedi;
}
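/*
 * Rx callback for the qed light-L2 channel. Frames are queued on
 * ll2_skb_list and handed to qedi_ll2_recv_thread(), which copies them
 * into the UIO-mapped ring for the iscsiuio daemon.
 */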
static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
{
	struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
	struct qedi_uio_dev *udev;
	struct qedi_uio_ctrl *uctrl;
	struct skb_work_list *work;
	u32 prod;

	if (!qedi) {
		QEDI_ERR(NULL, "qedi is NULL\n");
		return -1;
	}

	if (!test_bit(UIO_DEV_OPENED, &qedi->flags)) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UIO,
			  "UIO DEV is not opened\n");
		kfree_skb(skb);
		return 0;
	}

	udev = qedi->udev;
	uctrl = udev->uctrl;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "Could not allocate work so dropping frame.\n");
		kfree_skb(skb);
		return 0;
	}

	INIT_LIST_HEAD(&work->list);
	work->skb = skb;

	if (skb_vlan_tag_present(skb))
		work->vlan_id = skb_vlan_tag_get(skb);

	if (work->vlan_id)
		__vlan_insert_tag(work->skb, htons(ETH_P_8021Q), work->vlan_id);

	spin_lock_bh(&qedi->ll2_lock);
	list_add_tail(&work->list, &qedi->ll2_skb_list);

	++uctrl->hw_rx_prod_cnt;
	prod = (uctrl->hw_rx_prod + 1) % RX_RING;
	if (prod != uctrl->host_rx_cons) {
		uctrl->hw_rx_prod = prod;
		spin_unlock_bh(&qedi->ll2_lock);
		wake_up_process(qedi->ll2_recv_thread);
		return 0;
	}

	spin_unlock_bh(&qedi->ll2_lock);
	return 0;
}

/* map this skb to iscsiuio mmaped region */
static int qedi_ll2_process_skb(struct qedi_ctx *qedi, struct sk_buff *skb,
				u16 vlan_id)
{
	struct qedi_uio_dev *udev = NULL;
	struct qedi_uio_ctrl *uctrl = NULL;
	struct qedi_rx_bd rxbd;
	struct qedi_rx_bd *p_rxbd;
	u32 rx_bd_prod;
	void *pkt;
	int len = 0;

	if (!qedi) {
		QEDI_ERR(NULL, "qedi is NULL\n");
		return -1;
	}

	udev = qedi->udev;
	uctrl = udev->uctrl;
	pkt = udev->rx_pkt + (uctrl->hw_rx_prod * LL2_SINGLE_BUF_SIZE);
	len = min_t(u32, skb->len, (u32)LL2_SINGLE_BUF_SIZE);
	memcpy(pkt, skb->data, len);

	memset(&rxbd, 0, sizeof(rxbd));
	rxbd.rx_pkt_index = uctrl->hw_rx_prod;
	rxbd.rx_pkt_len = len;
	rxbd.vlan_id = vlan_id;

	uctrl->hw_rx_bd_prod = (uctrl->hw_rx_bd_prod + 1) % QEDI_NUM_RX_BD;
	rx_bd_prod = uctrl->hw_rx_bd_prod;
	p_rxbd = (struct qedi_rx_bd *)udev->ll2_ring;
	p_rxbd += rx_bd_prod;

	memcpy(p_rxbd, &rxbd, sizeof(rxbd));

	/* notify the iscsiuio about new packet */
	uio_event_notify(&udev->qedi_uinfo);

	return 0;
}

static void qedi_ll2_free_skbs(struct qedi_ctx *qedi)
{
	struct skb_work_list *work, *work_tmp;

	spin_lock_bh(&qedi->ll2_lock);
	list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, list) {
		list_del(&work->list);
		if (work->skb)
			kfree_skb(work->skb);
		kfree(work);
	}
	spin_unlock_bh(&qedi->ll2_lock);
}

static int qedi_ll2_recv_thread(void *arg)
{
	struct qedi_ctx *qedi = (struct qedi_ctx *)arg;
	struct skb_work_list *work, *work_tmp;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		spin_lock_bh(&qedi->ll2_lock);
		list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list,
					 list) {
			list_del(&work->list);
			qedi_ll2_process_skb(qedi, work->skb, work->vlan_id);
			kfree_skb(work->skb);
			kfree(work);
		}
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&qedi->ll2_lock);
		schedule();
	}

	__set_current_state(TASK_RUNNING);
	return 0;
}
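/*
 * Fill in the iSCSI PF parameters (connection/task counts, SQ/R2TQ/UHQ
 * ring sizing, BDQ and CQ setup) that are handed to qed via
 * update_pf_params() before the slowpath is started.
 */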
static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
{
	u8 num_sq_pages;
	u32 log_page_size;
	int rval = 0;

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "Min number of MSIX %d\n",
		  MIN_NUM_CPUS_MSIX(qedi));

	num_sq_pages = (MAX_OUSTANDING_TASKS_PER_CON * 8) / PAGE_SIZE;

	qedi->num_queues = MIN_NUM_CPUS_MSIX(qedi);

	memset(&qedi->pf_params.iscsi_pf_params, 0,
	       sizeof(qedi->pf_params.iscsi_pf_params));

	qedi->p_cpuq = pci_alloc_consistent(qedi->pdev,
			qedi->num_queues * sizeof(struct qedi_glbl_q_params),
			&qedi->hw_p_cpuq);
	if (!qedi->p_cpuq) {
		QEDI_ERR(&qedi->dbg_ctx, "pci_alloc_consistent fail\n");
		rval = -1;
		goto err_alloc_mem;
	}

	rval = qedi_alloc_global_queues(qedi);
	if (rval) {
		QEDI_ERR(&qedi->dbg_ctx, "Global queue allocation failed.\n");
		rval = -1;
		goto err_alloc_mem;
	}

	qedi->pf_params.iscsi_pf_params.num_cons = QEDI_MAX_ISCSI_CONNS_PER_HBA;
	qedi->pf_params.iscsi_pf_params.num_tasks = QEDI_MAX_ISCSI_TASK;
	qedi->pf_params.iscsi_pf_params.half_way_close_timeout = 10;
	qedi->pf_params.iscsi_pf_params.num_sq_pages_in_ring = num_sq_pages;
	qedi->pf_params.iscsi_pf_params.num_r2tq_pages_in_ring = num_sq_pages;
	qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages;
	qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues;
	qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug;
	qedi->pf_params.iscsi_pf_params.two_msl_timer = 4000;
	qedi->pf_params.iscsi_pf_params.max_fin_rt = 2;

	for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) {
		if ((1 << log_page_size) == PAGE_SIZE)
			break;
	}
	qedi->pf_params.iscsi_pf_params.log_page_size = log_page_size;

	qedi->pf_params.iscsi_pf_params.glbl_q_params_addr =
							   (u64)qedi->hw_p_cpuq;

	/* RQ BDQ initializations.
	 * rq_num_entries: suggested value for Initiator is 16 (4KB RQ)
	 * rqe_log_size: 8 for 256B RQE
	 */
	qedi->pf_params.iscsi_pf_params.rqe_log_size = 8;
	/* BDQ address and size */
	qedi->pf_params.iscsi_pf_params.bdq_pbl_base_addr[BDQ_ID_RQ] =
							qedi->bdq_pbl_list_dma;
	qedi->pf_params.iscsi_pf_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
						qedi->bdq_pbl_list_num_entries;
	qedi->pf_params.iscsi_pf_params.rq_buffer_size = QEDI_BDQ_BUF_SIZE;

	/* cq_num_entries: num_tasks + rq_num_entries */
	qedi->pf_params.iscsi_pf_params.cq_num_entries = 2048;

	qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
	qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;
	qedi->pf_params.iscsi_pf_params.ooo_enable = 1;

err_alloc_mem:
	return rval;
}

/* Free DMA coherent memory for array of queue pointers we pass to qed */
static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi)
{
	size_t size = 0;

	if (qedi->p_cpuq) {
		size = qedi->num_queues * sizeof(struct qedi_glbl_q_params);
		pci_free_consistent(qedi->pdev, size, qedi->p_cpuq,
				    qedi->hw_p_cpuq);
	}

	qedi_free_global_queues(qedi);

	kfree(qedi->global_queues);
}

static void qedi_link_update(void *dev, struct qed_link_output *link)
{
	struct qedi_ctx *qedi = (struct qedi_ctx *)dev;

	if (link->link_up) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Link Up event.\n");
		atomic_set(&qedi->link_state, QEDI_LINK_UP);
	} else {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
			  "Link Down event.\n");
		atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
	}
}

static struct qed_iscsi_cb_ops qedi_cb_ops = {
	{
		.link_update = qedi_link_update,
	}
};
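/*
 * Queue one CQE for deferred processing by the per-CPU io thread.
 * Solicited completions reuse the work struct embedded in the command
 * (no allocation in the hot path); unsolicited/dummy/cleanup CQEs get a
 * freshly allocated work item that the io thread frees after processing.
 */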
static int qedi_queue_cqe(struct qedi_ctx *qedi, union iscsi_cqe *cqe,
			  u16 que_idx, struct qedi_percpu_s *p)
{
	struct qedi_work *qedi_work;
	struct qedi_conn *q_conn;
	struct iscsi_conn *conn;
	struct qedi_cmd *qedi_cmd;
	u32 iscsi_cid;
	int rc = 0;

	iscsi_cid = cqe->cqe_common.conn_id;
	q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
	if (!q_conn) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "Session no longer exists for cid=0x%x!!\n",
			  iscsi_cid);
		return -1;
	}
	conn = q_conn->cls_conn->dd_data;

	switch (cqe->cqe_common.cqe_type) {
	case ISCSI_CQE_TYPE_SOLICITED:
	case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
		qedi_cmd = qedi_get_cmd_from_tid(qedi, cqe->cqe_solicited.itid);
		if (!qedi_cmd) {
			rc = -1;
			break;
		}
		INIT_LIST_HEAD(&qedi_cmd->cqe_work.list);
		qedi_cmd->cqe_work.qedi = qedi;
		memcpy(&qedi_cmd->cqe_work.cqe, cqe, sizeof(union iscsi_cqe));
		qedi_cmd->cqe_work.que_idx = que_idx;
		qedi_cmd->cqe_work.is_solicited = true;
		list_add_tail(&qedi_cmd->cqe_work.list, &p->work_list);
		break;
	case ISCSI_CQE_TYPE_UNSOLICITED:
	case ISCSI_CQE_TYPE_DUMMY:
	case ISCSI_CQE_TYPE_TASK_CLEANUP:
		qedi_work = kzalloc(sizeof(*qedi_work), GFP_ATOMIC);
		if (!qedi_work) {
			rc = -1;
			break;
		}
		INIT_LIST_HEAD(&qedi_work->list);
		qedi_work->qedi = qedi;
		memcpy(&qedi_work->cqe, cqe, sizeof(union iscsi_cqe));
		qedi_work->que_idx = que_idx;
		qedi_work->is_solicited = false;
		list_add_tail(&qedi_work->list, &p->work_list);
		break;
	default:
		rc = -1;
		QEDI_ERR(&qedi->dbg_ctx, "FW Error cqe.\n");
	}
	return rc;
}

static bool qedi_process_completions(struct qedi_fastpath *fp)
{
	struct qedi_ctx *qedi = fp->qedi;
	struct qed_sb_info *sb_info = fp->sb_info;
	struct status_block *sb = sb_info->sb_virt;
	struct qedi_percpu_s *p = NULL;
	struct global_queue *que;
	u16 prod_idx;
	unsigned long flags;
	union iscsi_cqe *cqe;
	int cpu;
	int ret;

	/* Get the current firmware producer index */
	prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];

	if (prod_idx >= QEDI_CQ_SIZE)
		prod_idx = prod_idx % QEDI_CQ_SIZE;

	que = qedi->global_queues[fp->sb_id];
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
		  "Before: global queue=%p prod_idx=%d cons_idx=%d, sb_id=%d\n",
		  que, prod_idx, que->cq_cons_idx, fp->sb_id);

	qedi->intr_cpu = fp->sb_id;
	cpu = smp_processor_id();
	p = &per_cpu(qedi_percpu, cpu);

	if (unlikely(!p->iothread))
		WARN_ON(1);

	spin_lock_irqsave(&p->p_work_lock, flags);
	while (que->cq_cons_idx != prod_idx) {
		cqe = &que->cq[que->cq_cons_idx];

		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
			  "cqe=%p prod_idx=%d cons_idx=%d.\n",
			  cqe, prod_idx, que->cq_cons_idx);

		ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p);
		/* Bail out instead of retrying: cons_idx was not advanced,
		 * so looping on the same CQE would spin forever under the
		 * work lock.
		 */
		if (ret)
			break;

		que->cq_cons_idx++;
		if (que->cq_cons_idx == QEDI_CQ_SIZE)
			que->cq_cons_idx = 0;
	}
	wake_up_process(p->iothread);
	spin_unlock_irqrestore(&p->p_work_lock, flags);

	return true;
}
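/* True if the firmware CQ producer has advanced past our consumer index. */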
static bool qedi_fp_has_work(struct qedi_fastpath *fp)
{
	struct qedi_ctx *qedi = fp->qedi;
	struct global_queue *que;
	struct qed_sb_info *sb_info = fp->sb_info;
	struct status_block *sb = sb_info->sb_virt;
	u16 prod_idx;

	barrier();

	/* Get the current firmware producer index */
	prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];

	/* Get the pointer to the global CQ this completion is on */
	que = qedi->global_queues[fp->sb_id];

	/* prod idx wrap around uint16 */
	if (prod_idx >= QEDI_CQ_SIZE)
		prod_idx = prod_idx % QEDI_CQ_SIZE;

	return (que->cq_cons_idx != prod_idx);
}

/* MSI-X fastpath handler code */
static irqreturn_t qedi_msix_handler(int irq, void *dev_id)
{
	struct qedi_fastpath *fp = dev_id;
	struct qedi_ctx *qedi = fp->qedi;
	bool wake_io_thread = true;

	qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);

process_again:
	wake_io_thread = qedi_process_completions(fp);
	if (wake_io_thread) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
			  "process already running\n");
	}

	if (qedi_fp_has_work(fp) == 0)
		qed_sb_update_sb_idx(fp->sb_info);

	/* Check for more work */
	rmb();

	if (qedi_fp_has_work(fp) == 0)
		qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
	else
		goto process_again;

	return IRQ_HANDLED;
}

/* simd handler for MSI/INTa */
static void qedi_simd_int_handler(void *cookie)
{
	/* Cookie is qedi_ctx struct */
	struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;

	QEDI_WARN(&qedi->dbg_ctx, "qedi=%p.\n", qedi);
}

#define QEDI_SIMD_HANDLER_NUM		0
static void qedi_sync_free_irqs(struct qedi_ctx *qedi)
{
	int i;

	if (qedi->int_info.msix_cnt) {
		for (i = 0; i < qedi->int_info.used_cnt; i++) {
			synchronize_irq(qedi->int_info.msix[i].vector);
			irq_set_affinity_hint(qedi->int_info.msix[i].vector,
					      NULL);
			free_irq(qedi->int_info.msix[i].vector,
				 &qedi->fp_array[i]);
		}
	} else {
		qedi_ops->common->simd_handler_clean(qedi->cdev,
						     QEDI_SIMD_HANDLER_NUM);
	}

	qedi->int_info.used_cnt = 0;
	qedi_ops->common->set_fp_int(qedi->cdev, 0);
}

static int qedi_request_msix_irq(struct qedi_ctx *qedi)
{
	int i, rc, cpu;

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < MIN_NUM_CPUS_MSIX(qedi); i++) {
		rc = request_irq(qedi->int_info.msix[i].vector,
				 qedi_msix_handler, 0, "qedi",
				 &qedi->fp_array[i]);

		if (rc) {
			QEDI_WARN(&qedi->dbg_ctx, "request_irq failed.\n");
			qedi_sync_free_irqs(qedi);
			return rc;
		}
		qedi->int_info.used_cnt++;
		rc = irq_set_affinity_hint(qedi->int_info.msix[i].vector,
					   get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}

	return 0;
}

static int qedi_setup_int(struct qedi_ctx *qedi)
{
	int rc = 0;

	rc = qedi_ops->common->set_fp_int(qedi->cdev, num_online_cpus());
	rc = qedi_ops->common->get_fp_int(qedi->cdev, &qedi->int_info);
	if (rc)
		goto exit_setup_int;

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
		  "Number of msix_cnt = 0x%x num of cpus = 0x%x\n",
		  qedi->int_info.msix_cnt, num_online_cpus());

	if (qedi->int_info.msix_cnt) {
		rc = qedi_request_msix_irq(qedi);
		goto exit_setup_int;
	} else {
		qedi_ops->common->simd_handler_config(qedi->cdev, &qedi,
						      QEDI_SIMD_HANDLER_NUM,
						      qedi_simd_int_handler);
		qedi->int_info.used_cnt = 1;
	}

exit_setup_int:
	return rc;
}
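/*
 * The BDQ (buffer descriptor queue) holds preallocated Rx buffers that
 * the firmware consumes for unsolicited iSCSI data; it is described to
 * the hardware through a two-level page buffer list (PBL).
 */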
static void qedi_free_bdq(struct qedi_ctx *qedi)
{
	int i;

	if (qedi->bdq_pbl_list)
		dma_free_coherent(&qedi->pdev->dev, PAGE_SIZE,
				  qedi->bdq_pbl_list, qedi->bdq_pbl_list_dma);

	if (qedi->bdq_pbl)
		dma_free_coherent(&qedi->pdev->dev, qedi->bdq_pbl_mem_size,
				  qedi->bdq_pbl, qedi->bdq_pbl_dma);

	for (i = 0; i < QEDI_BDQ_NUM; i++) {
		if (qedi->bdq[i].buf_addr) {
			dma_free_coherent(&qedi->pdev->dev, QEDI_BDQ_BUF_SIZE,
					  qedi->bdq[i].buf_addr,
					  qedi->bdq[i].buf_dma);
		}
	}
}

static void qedi_free_global_queues(struct qedi_ctx *qedi)
{
	int i;
	struct global_queue **gl = qedi->global_queues;

	for (i = 0; i < qedi->num_queues; i++) {
		if (!gl[i])
			continue;

		if (gl[i]->cq)
			dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_mem_size,
					  gl[i]->cq, gl[i]->cq_dma);
		if (gl[i]->cq_pbl)
			dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_pbl_size,
					  gl[i]->cq_pbl, gl[i]->cq_pbl_dma);

		kfree(gl[i]);
	}
	qedi_free_bdq(qedi);
}

static int qedi_alloc_bdq(struct qedi_ctx *qedi)
{
	int i;
	struct scsi_bd *pbl;
	u64 *list;
	dma_addr_t page;

	/* Alloc dma memory for BDQ buffers */
	for (i = 0; i < QEDI_BDQ_NUM; i++) {
		qedi->bdq[i].buf_addr =
				dma_alloc_coherent(&qedi->pdev->dev,
						   QEDI_BDQ_BUF_SIZE,
						   &qedi->bdq[i].buf_dma,
						   GFP_KERNEL);
		if (!qedi->bdq[i].buf_addr) {
			QEDI_ERR(&qedi->dbg_ctx,
				 "Could not allocate BDQ buffer %d.\n", i);
			return -ENOMEM;
		}
	}

	/* Alloc dma memory for BDQ page buffer list */
	qedi->bdq_pbl_mem_size = QEDI_BDQ_NUM * sizeof(struct scsi_bd);
	qedi->bdq_pbl_mem_size = ALIGN(qedi->bdq_pbl_mem_size, PAGE_SIZE);
	qedi->rq_num_entries = qedi->bdq_pbl_mem_size / sizeof(struct scsi_bd);

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "rq_num_entries = %d.\n",
		  qedi->rq_num_entries);

	qedi->bdq_pbl = dma_alloc_coherent(&qedi->pdev->dev,
					   qedi->bdq_pbl_mem_size,
					   &qedi->bdq_pbl_dma, GFP_KERNEL);
	if (!qedi->bdq_pbl) {
		QEDI_ERR(&qedi->dbg_ctx, "Could not allocate BDQ PBL.\n");
		return -ENOMEM;
	}

	/*
	 * Populate BDQ PBL with physical and virtual address of individual
	 * BDQ buffers
	 */
	pbl = (struct scsi_bd *)qedi->bdq_pbl;
	for (i = 0; i < QEDI_BDQ_NUM; i++) {
		pbl->address.hi =
				cpu_to_le32(QEDI_U64_HI(qedi->bdq[i].buf_dma));
		pbl->address.lo =
				cpu_to_le32(QEDI_U64_LO(qedi->bdq[i].buf_dma));
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
			  "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
			  pbl, pbl->address.hi, pbl->address.lo, i);
		pbl->opaque.hi = 0;
		pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(i));
		pbl++;
	}

	/* Allocate list of PBL pages */
	qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev,
						PAGE_SIZE,
						&qedi->bdq_pbl_list_dma,
						GFP_KERNEL);
	if (!qedi->bdq_pbl_list) {
		QEDI_ERR(&qedi->dbg_ctx,
			 "Could not allocate list of PBL pages.\n");
		return -ENOMEM;
	}
	memset(qedi->bdq_pbl_list, 0, PAGE_SIZE);

	/*
	 * Now populate PBL list with pages that contain pointers to the
	 * individual buffers.
	 */
	qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size / PAGE_SIZE;
	list = (u64 *)qedi->bdq_pbl_list;
	page = qedi->bdq_pbl_list_dma;
	for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) {
		*list = qedi->bdq_pbl_dma;
		list++;
		page += PAGE_SIZE;
	}

	return 0;
}
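/*
 * Allocate one completion queue (CQ) plus its PBL per MSI-X vector, and
 * record the PBL DMA addresses in the p_cpuq array whose physical address
 * was handed to the firmware via glbl_q_params_addr.
 */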
static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
{
	u32 *list;
	int i;
	int status = 0, rc;
	u32 *pbl;
	dma_addr_t page;
	int num_pages;

	/*
	 * Number of global queues (CQ / RQ). This should
	 * be <= number of available MSIX vectors for the PF
	 */
	if (!qedi->num_queues) {
		QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n");
		return 1;
	}

	/* Make sure we allocated the PBL that will contain the physical
	 * addresses of our queues
	 */
	if (!qedi->p_cpuq) {
		status = 1;
		goto mem_alloc_failure;
	}

	qedi->global_queues = kzalloc((sizeof(struct global_queue *) *
				       qedi->num_queues), GFP_KERNEL);
	if (!qedi->global_queues) {
		QEDI_ERR(&qedi->dbg_ctx,
			 "Unable to allocate global queues array ptr memory\n");
		return -ENOMEM;
	}
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
		  "qedi->global_queues=%p.\n", qedi->global_queues);

	/* Allocate DMA coherent buffers for BDQ */
	rc = qedi_alloc_bdq(qedi);
	if (rc)
		goto mem_alloc_failure;

	/* Allocate a CQ and an associated PBL for each MSI-X
	 * vector.
	 */
	for (i = 0; i < qedi->num_queues; i++) {
		qedi->global_queues[i] =
					kzalloc(sizeof(*qedi->global_queues[0]),
						GFP_KERNEL);
		if (!qedi->global_queues[i]) {
			QEDI_ERR(&qedi->dbg_ctx,
				 "Unable to allocate global queue %d.\n", i);
			goto mem_alloc_failure;
		}

		qedi->global_queues[i]->cq_mem_size =
		    (QEDI_CQ_SIZE + 8) * sizeof(union iscsi_cqe);
		qedi->global_queues[i]->cq_mem_size =
		    (qedi->global_queues[i]->cq_mem_size +
		    (QEDI_PAGE_SIZE - 1));

		qedi->global_queues[i]->cq_pbl_size =
		    (qedi->global_queues[i]->cq_mem_size /
		    QEDI_PAGE_SIZE) * sizeof(void *);
		qedi->global_queues[i]->cq_pbl_size =
		    (qedi->global_queues[i]->cq_pbl_size +
		    (QEDI_PAGE_SIZE - 1));

		qedi->global_queues[i]->cq =
		    dma_alloc_coherent(&qedi->pdev->dev,
				       qedi->global_queues[i]->cq_mem_size,
				       &qedi->global_queues[i]->cq_dma,
				       GFP_KERNEL);

		if (!qedi->global_queues[i]->cq) {
			QEDI_WARN(&qedi->dbg_ctx,
				  "Could not allocate cq.\n");
			status = -ENOMEM;
			goto mem_alloc_failure;
		}
		memset(qedi->global_queues[i]->cq, 0,
		       qedi->global_queues[i]->cq_mem_size);

		qedi->global_queues[i]->cq_pbl =
		    dma_alloc_coherent(&qedi->pdev->dev,
				       qedi->global_queues[i]->cq_pbl_size,
				       &qedi->global_queues[i]->cq_pbl_dma,
				       GFP_KERNEL);

		if (!qedi->global_queues[i]->cq_pbl) {
			QEDI_WARN(&qedi->dbg_ctx,
				  "Could not allocate cq PBL.\n");
			status = -ENOMEM;
			goto mem_alloc_failure;
		}
		memset(qedi->global_queues[i]->cq_pbl, 0,
		       qedi->global_queues[i]->cq_pbl_size);

		/* Create PBL */
		num_pages = qedi->global_queues[i]->cq_mem_size /
		    QEDI_PAGE_SIZE;
		page = qedi->global_queues[i]->cq_dma;
		pbl = (u32 *)qedi->global_queues[i]->cq_pbl;

		while (num_pages--) {
			*pbl = (u32)page;
			pbl++;
			*pbl = (u32)((u64)page >> 32);
			pbl++;
			page += QEDI_PAGE_SIZE;
		}
	}

	list = (u32 *)qedi->p_cpuq;

	/*
	 * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
	 * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points
	 * to the physical address which contains an array of pointers to the
	 * physical addresses of the specific queue pages.
	 */
	for (i = 0; i < qedi->num_queues; i++) {
		*list = (u32)qedi->global_queues[i]->cq_pbl_dma;
		list++;
		*list = (u32)((u64)qedi->global_queues[i]->cq_pbl_dma >> 32);
		list++;

		*list = (u32)0;
		list++;
		*list = (u32)((u64)0 >> 32);
		list++;
	}

	return 0;

mem_alloc_failure:
	qedi_free_global_queues(qedi);
	return status;
}
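/*
 * Allocate the per-endpoint send queue (one iscsi_wqe per SQ entry) and
 * a PBL describing its pages, in the same low/high 32-bit word layout
 * the CQ PBLs use above.
 */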
int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
{
	int rval = 0;
	u32 *pbl;
	dma_addr_t page;
	int num_pages;

	if (!ep)
		return -EIO;

	/* Calculate appropriate queue and PBL sizes */
	ep->sq_mem_size = QEDI_SQ_SIZE * sizeof(struct iscsi_wqe);
	ep->sq_mem_size += QEDI_PAGE_SIZE - 1;

	ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
	ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;

	ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
				    &ep->sq_dma, GFP_KERNEL);
	if (!ep->sq) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "Could not allocate send queue.\n");
		rval = -ENOMEM;
		goto out;
	}
	memset(ep->sq, 0, ep->sq_mem_size);

	ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
					&ep->sq_pbl_dma, GFP_KERNEL);
	if (!ep->sq_pbl) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "Could not allocate send queue PBL.\n");
		rval = -ENOMEM;
		goto out_free_sq;
	}
	memset(ep->sq_pbl, 0, ep->sq_pbl_size);

	/* Create PBL */
	num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE;
	page = ep->sq_dma;
	pbl = (u32 *)ep->sq_pbl;

	while (num_pages--) {
		*pbl = (u32)page;
		pbl++;
		*pbl = (u32)((u64)page >> 32);
		pbl++;
		page += QEDI_PAGE_SIZE;
	}

	return rval;

out_free_sq:
	dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
			  ep->sq_dma);
out:
	return rval;
}

void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
{
	if (ep->sq_pbl)
		dma_free_coherent(&qedi->pdev->dev, ep->sq_pbl_size, ep->sq_pbl,
				  ep->sq_pbl_dma);
	if (ep->sq)
		dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
				  ep->sq_dma);
}

int qedi_get_task_idx(struct qedi_ctx *qedi)
{
	s16 tmp_idx;

again:
	tmp_idx = find_first_zero_bit(qedi->task_idx_map,
				      MAX_ISCSI_TASK_ENTRIES);

	if (tmp_idx >= MAX_ISCSI_TASK_ENTRIES) {
		QEDI_ERR(&qedi->dbg_ctx, "FW task context pool is full.\n");
		tmp_idx = -1;
		goto err_idx;
	}

	if (test_and_set_bit(tmp_idx, qedi->task_idx_map))
		goto again;

err_idx:
	return tmp_idx;
}

void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx)
{
	if (!test_and_clear_bit(idx, qedi->task_idx_map))
		QEDI_ERR(&qedi->dbg_ctx,
			 "FW task context, already cleared, tid=0x%x\n", idx);
}

void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
			 struct qedi_cmd *cmd)
{
	qedi->itt_map[tid].itt = proto_itt;
	qedi->itt_map[tid].p_cmd = cmd;

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
		  "update itt map tid=0x%x, with proto itt=0x%x\n", tid,
		  qedi->itt_map[tid].itt);
}
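/*
 * The itt_map ties firmware task IDs (tids) to protocol ITTs. The
 * forward lookup (tid -> itt) is a direct array index; the reverse
 * lookup below is a linear scan over all task entries.
 */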
void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, s16 *tid)
{
	u16 i;

	for (i = 0; i < MAX_ISCSI_TASK_ENTRIES; i++) {
		if (qedi->itt_map[i].itt == itt) {
			*tid = i;
			QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
				  "Ref itt=0x%x, found at tid=0x%x\n",
				  itt, *tid);
			return;
		}
	}

	WARN_ON(1);
}

void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt)
{
	*proto_itt = qedi->itt_map[tid].itt;
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
		  "Get itt map tid [0x%x with proto itt[0x%x]",
		  tid, *proto_itt);
}

struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid)
{
	struct qedi_cmd *cmd = NULL;

	/* tid indexes itt_map below, so tid == MAX_ISCSI_TASK_ENTRIES is
	 * out of range as well.
	 */
	if (tid >= MAX_ISCSI_TASK_ENTRIES)
		return NULL;

	cmd = qedi->itt_map[tid].p_cmd;
	if (cmd->task_id != tid)
		return NULL;

	qedi->itt_map[tid].p_cmd = NULL;

	return cmd;
}

static int qedi_alloc_itt(struct qedi_ctx *qedi)
{
	qedi->itt_map = kcalloc(MAX_ISCSI_TASK_ENTRIES,
				sizeof(struct qedi_itt_map), GFP_KERNEL);
	if (!qedi->itt_map) {
		QEDI_ERR(&qedi->dbg_ctx,
			 "Unable to allocate itt map array memory\n");
		return -ENOMEM;
	}
	return 0;
}

static void qedi_free_itt(struct qedi_ctx *qedi)
{
	kfree(qedi->itt_map);
}

static struct qed_ll2_cb_ops qedi_ll2_cb_ops = {
	.rx_cb = qedi_ll2_rx,
	.tx_cb = NULL,
};

static int qedi_percpu_io_thread(void *arg)
{
	struct qedi_percpu_s *p = arg;
	struct qedi_work *work, *tmp;
	unsigned long flags;
	LIST_HEAD(work_list);

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		spin_lock_irqsave(&p->p_work_lock, flags);
		while (!list_empty(&p->work_list)) {
			list_splice_init(&p->work_list, &work_list);
			spin_unlock_irqrestore(&p->p_work_lock, flags);

			list_for_each_entry_safe(work, tmp, &work_list, list) {
				list_del_init(&work->list);
				qedi_fp_process_cqes(work);
				if (!work->is_solicited)
					kfree(work);
			}
			cond_resched();
			spin_lock_irqsave(&p->p_work_lock, flags);
		}
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&p->p_work_lock, flags);
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}

static int qedi_cpu_online(unsigned int cpu)
{
	struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
	struct task_struct *thread;

	thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p,
					cpu_to_node(cpu),
					"qedi_thread/%d", cpu);
	if (IS_ERR(thread))
		return PTR_ERR(thread);

	kthread_bind(thread, cpu);
	p->iothread = thread;
	wake_up_process(thread);
	return 0;
}

static int qedi_cpu_offline(unsigned int cpu)
{
	struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
	struct qedi_work *work, *tmp;
	struct task_struct *thread;

	spin_lock_bh(&p->p_work_lock);
	thread = p->iothread;
	p->iothread = NULL;

	list_for_each_entry_safe(work, tmp, &p->work_list, list) {
		list_del_init(&work->list);
		qedi_fp_process_cqes(work);
		if (!work->is_solicited)
			kfree(work);
	}

	spin_unlock_bh(&p->p_work_lock);
	if (thread)
		kthread_stop(thread);
	return 0;
}
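/*
 * Called on an MTU change: existing offloaded connections are recovered
 * and the ll2 channel is restarted with the new MTU plus the IPv6/TCP
 * header allowance, matching the sizing used in __qedi_probe().
 */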
void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu)
{
	struct qed_ll2_params params;

	qedi_recover_all_conns(qedi);

	qedi_ops->ll2->stop(qedi->cdev);
	qedi_ll2_free_skbs(qedi);

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "old MTU %u, new MTU %u\n",
		  qedi->ll2_mtu, mtu);
	memset(&params, 0, sizeof(params));
	qedi->ll2_mtu = mtu;
	params.mtu = qedi->ll2_mtu + IPV6_HDR_LEN + TCP_HDR_LEN;
	params.drop_ttl0_packets = 0;
	params.rx_vlan_stripping = 1;
	ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);
	qedi_ops->ll2->start(qedi->cdev, &params);
}

static void __qedi_remove(struct pci_dev *pdev, int mode)
{
	struct qedi_ctx *qedi = pci_get_drvdata(pdev);

	if (qedi->tmf_thread) {
		flush_workqueue(qedi->tmf_thread);
		destroy_workqueue(qedi->tmf_thread);
		qedi->tmf_thread = NULL;
	}

	if (qedi->offload_thread) {
		flush_workqueue(qedi->offload_thread);
		destroy_workqueue(qedi->offload_thread);
		qedi->offload_thread = NULL;
	}

#ifdef CONFIG_DEBUG_FS
	qedi_dbg_host_exit(&qedi->dbg_ctx);
#endif
	if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags))
		qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);

	qedi_sync_free_irqs(qedi);

	if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
		qedi_ops->stop(qedi->cdev);
		qedi_ops->ll2->stop(qedi->cdev);
	}

	if (mode == QEDI_MODE_NORMAL)
		qedi_free_iscsi_pf_param(qedi);

	if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
		qedi_ops->common->slowpath_stop(qedi->cdev);
		qedi_ops->common->remove(qedi->cdev);
	}

	qedi_destroy_fp(qedi);

	if (mode == QEDI_MODE_NORMAL) {
		qedi_release_cid_que(qedi);
		qedi_cm_free_mem(qedi);
		qedi_free_uio(qedi->udev);
		qedi_free_itt(qedi);

		iscsi_host_remove(qedi->shost);
		iscsi_host_free(qedi->shost);

		if (qedi->ll2_recv_thread) {
			kthread_stop(qedi->ll2_recv_thread);
			qedi->ll2_recv_thread = NULL;
		}
		qedi_ll2_free_skbs(qedi);
	}
}
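/*
 * Common probe path (also used for recovery): allocate the host, probe
 * the qed core, size the PF, start slowpath and interrupts, bring up the
 * ll2 channel and iSCSI function, then in normal mode register the SCSI
 * host and the auxiliary threads/workqueues.
 */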
static int __qedi_probe(struct pci_dev *pdev, int mode)
{
	struct qedi_ctx *qedi;
	struct qed_ll2_params params;
	u32 dp_module = 0;
	u8 dp_level = 0;
	bool is_vf = false;
	char host_buf[16];
	struct qed_link_params link_params;
	struct qed_slowpath_params sp_params;
	struct qed_probe_params qed_params;
	void *task_start, *task_end;
	int rc;
	u16 tmp;

	if (mode != QEDI_MODE_RECOVERY) {
		qedi = qedi_host_alloc(pdev);
		if (!qedi) {
			rc = -ENOMEM;
			goto exit_probe;
		}
	} else {
		qedi = pci_get_drvdata(pdev);
	}

	memset(&qed_params, 0, sizeof(qed_params));
	qed_params.protocol = QED_PROTOCOL_ISCSI;
	qed_params.dp_module = dp_module;
	qed_params.dp_level = dp_level;
	qed_params.is_vf = is_vf;
	qedi->cdev = qedi_ops->common->probe(pdev, &qed_params);
	if (!qedi->cdev) {
		rc = -ENODEV;
		QEDI_ERR(&qedi->dbg_ctx, "Cannot initialize hardware\n");
		goto free_host;
	}

	qedi->msix_count = MAX_NUM_MSIX_PF;
	atomic_set(&qedi->link_state, QEDI_LINK_DOWN);

	if (mode != QEDI_MODE_RECOVERY) {
		rc = qedi_set_iscsi_pf_param(qedi);
		if (rc) {
			rc = -ENOMEM;
			QEDI_ERR(&qedi->dbg_ctx,
				 "Set iSCSI pf param fail\n");
			goto free_host;
		}
	}

	qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);

	rc = qedi_prepare_fp(qedi);
	if (rc) {
		QEDI_ERR(&qedi->dbg_ctx, "Cannot prepare fastpath.\n");
		goto free_pf_params;
	}

	/* Start the Slowpath-process */
	memset(&sp_params, 0, sizeof(struct qed_slowpath_params));
	sp_params.int_mode = QED_INT_MODE_MSIX;
	sp_params.drv_major = QEDI_DRIVER_MAJOR_VER;
	sp_params.drv_minor = QEDI_DRIVER_MINOR_VER;
	sp_params.drv_rev = QEDI_DRIVER_REV_VER;
	sp_params.drv_eng = QEDI_DRIVER_ENG_VER;
	strlcpy(sp_params.name, "qedi iSCSI", QED_DRV_VER_STR_SIZE);
	rc = qedi_ops->common->slowpath_start(qedi->cdev, &sp_params);
	if (rc) {
		QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath\n");
		goto stop_hw;
	}

	/* update_pf_params needs to be called before and after slowpath
	 * start
	 */
	qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);

	rc = qedi_setup_int(qedi);
	if (rc)
		goto stop_iscsi_func;

	qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);

	/* Learn information crucial for qedi to progress */
	rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info);
	if (rc)
		goto stop_iscsi_func;

	/* Record BDQ producer doorbell addresses */
	qedi->bdq_primary_prod = qedi->dev_info.primary_dbq_rq_addr;
	qedi->bdq_secondary_prod = qedi->dev_info.secondary_bdq_rq_addr;
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
		  "BDQ primary_prod=%p secondary_prod=%p.\n",
		  qedi->bdq_primary_prod,
		  qedi->bdq_secondary_prod);

	/*
	 * We need to write the number of BDs in the BDQ we've preallocated so
	 * the f/w will do a prefetch and we'll get an unsolicited CQE when a
	 * packet arrives.
	 */
	qedi->bdq_prod_idx = QEDI_BDQ_NUM;
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
		  "Writing %d to primary and secondary BDQ doorbell registers.\n",
		  qedi->bdq_prod_idx);
	writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
	tmp = readw(qedi->bdq_primary_prod);
	writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
	tmp = readw(qedi->bdq_secondary_prod);

	ether_addr_copy(qedi->mac, qedi->dev_info.common.hw_mac);
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "MAC address is %pM.\n",
		  qedi->mac);

	sprintf(host_buf, "host_%d", qedi->shost->host_no);
	qedi_ops->common->set_name(qedi->cdev, host_buf);

	qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi);

	memset(&params, 0, sizeof(params));
	params.mtu = DEF_PATH_MTU + IPV6_HDR_LEN + TCP_HDR_LEN;
	qedi->ll2_mtu = DEF_PATH_MTU;
	params.drop_ttl0_packets = 0;
	params.rx_vlan_stripping = 1;
	ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);

	if (mode != QEDI_MODE_RECOVERY) {
		/* set up rx path */
		INIT_LIST_HEAD(&qedi->ll2_skb_list);
		spin_lock_init(&qedi->ll2_lock);
		/* start qedi context */
		spin_lock_init(&qedi->hba_lock);
		spin_lock_init(&qedi->task_idx_lock);
	}
	qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi);
	qedi_ops->ll2->start(qedi->cdev, &params);

	if (mode != QEDI_MODE_RECOVERY) {
		qedi->ll2_recv_thread = kthread_run(qedi_ll2_recv_thread,
						    (void *)qedi,
						    "qedi_ll2_thread");
	}

	rc = qedi_ops->start(qedi->cdev, &qedi->tasks,
			     qedi, qedi_iscsi_event_cb);
	if (rc) {
		rc = -ENODEV;
		QEDI_ERR(&qedi->dbg_ctx, "Cannot start iSCSI function\n");
		goto stop_slowpath;
	}

	task_start = qedi_get_task_mem(&qedi->tasks, 0);
	task_end = qedi_get_task_mem(&qedi->tasks, MAX_TID_BLOCKS_ISCSI - 1);
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
		  "Task context start=%p, end=%p block_size=%u.\n",
		  task_start, task_end, qedi->tasks.size);

	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	rc = qedi_ops->common->set_link(qedi->cdev, &link_params);
	if (rc) {
		QEDI_WARN(&qedi->dbg_ctx, "Link set up failed.\n");
		atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
	}

#ifdef CONFIG_DEBUG_FS
	qedi_dbg_host_init(&qedi->dbg_ctx, &qedi_debugfs_ops,
			   &qedi_dbg_fops);
#endif
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
		  "QLogic FastLinQ iSCSI Module qedi %s, FW %d.%d.%d.%d\n",
		  QEDI_MODULE_VERSION, FW_MAJOR_VERSION, FW_MINOR_VERSION,
		  FW_REVISION_VERSION, FW_ENGINEERING_VERSION);

	if (mode == QEDI_MODE_NORMAL) {
		if (iscsi_host_add(qedi->shost, &pdev->dev)) {
			QEDI_ERR(&qedi->dbg_ctx,
				 "Could not add iscsi host\n");
			rc = -ENOMEM;
			goto remove_host;
		}

		/* Allocate uio buffers */
		rc = qedi_alloc_uio_rings(qedi);
		if (rc) {
			QEDI_ERR(&qedi->dbg_ctx,
				 "UIO alloc ring failed err=%d\n", rc);
			goto remove_host;
		}

		rc = qedi_init_uio(qedi);
		if (rc) {
			QEDI_ERR(&qedi->dbg_ctx,
				 "UIO init failed, err=%d\n", rc);
			goto free_uio;
		}

		/* host the array on iscsi_conn */
		rc = qedi_setup_cid_que(qedi);
		if (rc) {
			QEDI_ERR(&qedi->dbg_ctx,
				 "Could not setup cid que\n");
			goto free_uio;
		}

		rc = qedi_cm_alloc_mem(qedi);
		if (rc) {
			QEDI_ERR(&qedi->dbg_ctx,
				 "Could not alloc cm memory\n");
			goto free_cid_que;
		}

		rc = qedi_alloc_itt(qedi);
		if (rc) {
			QEDI_ERR(&qedi->dbg_ctx,
				 "Could not alloc itt memory\n");
			goto free_cid_que;
		}

		sprintf(host_buf, "host_%d", qedi->shost->host_no);
		qedi->tmf_thread = create_singlethread_workqueue(host_buf);
		if (!qedi->tmf_thread) {
			QEDI_ERR(&qedi->dbg_ctx,
				 "Unable to start tmf thread!\n");
			rc = -ENODEV;
			goto free_cid_que;
		}

		sprintf(host_buf, "qedi_ofld%d", qedi->shost->host_no);
		qedi->offload_thread = create_workqueue(host_buf);
		if (!qedi->offload_thread) {
			QEDI_ERR(&qedi->dbg_ctx,
				 "Unable to start offload thread!\n");
			rc = -ENODEV;
			goto free_cid_que;
		}

		/* F/w needs 1st task context memory entry for performance */
		set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map);
		atomic_set(&qedi->num_offloads, 0);
	}

	return 0;

free_cid_que:
	qedi_release_cid_que(qedi);
free_uio:
	qedi_free_uio(qedi->udev);
remove_host:
#ifdef CONFIG_DEBUG_FS
	qedi_dbg_host_exit(&qedi->dbg_ctx);
#endif
	iscsi_host_remove(qedi->shost);
stop_iscsi_func:
	qedi_ops->stop(qedi->cdev);
stop_slowpath:
	qedi_ops->common->slowpath_stop(qedi->cdev);
stop_hw:
	qedi_ops->common->remove(qedi->cdev);
free_pf_params:
	qedi_free_iscsi_pf_param(qedi);
free_host:
	iscsi_host_free(qedi->shost);
exit_probe:
	return rc;
}

static int qedi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return __qedi_probe(pdev, QEDI_MODE_NORMAL);
}

static void qedi_remove(struct pci_dev *pdev)
{
	__qedi_remove(pdev, QEDI_MODE_NORMAL);
}

static struct pci_device_id qedi_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8084) },
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);

static enum cpuhp_state qedi_cpuhp_state;

static struct pci_driver qedi_pci_driver = {
	.name = QEDI_MODULE_NAME,
	.id_table = qedi_pci_tbl,
	.probe = qedi_probe,
	.remove = qedi_remove,
};
static int __init qedi_init(void)
{
	struct qedi_percpu_s *p;
	int cpu, rc = 0;

	qedi_ops = qed_get_iscsi_ops();
	if (!qedi_ops) {
		QEDI_ERR(NULL, "Failed to get qed iSCSI operations\n");
		return -EINVAL;
	}

#ifdef CONFIG_DEBUG_FS
	qedi_dbg_init("qedi");
#endif

	qedi_scsi_transport = iscsi_register_transport(&qedi_iscsi_transport);
	if (!qedi_scsi_transport) {
		QEDI_ERR(NULL, "Could not register qedi transport");
		rc = -ENOMEM;
		goto exit_qedi_init_1;
	}

	for_each_possible_cpu(cpu) {
		p = &per_cpu(qedi_percpu, cpu);
		INIT_LIST_HEAD(&p->work_list);
		spin_lock_init(&p->p_work_lock);
		p->iothread = NULL;
	}

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/qedi:online",
			       qedi_cpu_online, qedi_cpu_offline);
	if (rc < 0)
		goto exit_qedi_init_2;
	qedi_cpuhp_state = rc;

	rc = pci_register_driver(&qedi_pci_driver);
	if (rc) {
		QEDI_ERR(NULL, "Failed to register driver\n");
		goto exit_qedi_hp;
	}

	return 0;

exit_qedi_hp:
	cpuhp_remove_state(qedi_cpuhp_state);
exit_qedi_init_2:
	iscsi_unregister_transport(&qedi_iscsi_transport);
exit_qedi_init_1:
#ifdef CONFIG_DEBUG_FS
	qedi_dbg_exit();
#endif
	qed_put_iscsi_ops();
	return rc;
}

static void __exit qedi_cleanup(void)
{
	pci_unregister_driver(&qedi_pci_driver);
	cpuhp_remove_state(qedi_cpuhp_state);
	iscsi_unregister_transport(&qedi_iscsi_transport);

#ifdef CONFIG_DEBUG_FS
	qedi_dbg_exit();
#endif
	qed_put_iscsi_ops();
}

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx iSCSI Module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("QLogic Corporation");
MODULE_VERSION(QEDI_MODULE_VERSION);
module_init(qedi_init);
module_exit(qedi_cleanup);