1 /** 2 * Copyright (C) 2005 - 2011 Emulex 3 * All rights reserved. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License version 2 7 * as published by the Free Software Foundation. The full GNU General 8 * Public License is included in this distribution in the file called COPYING. 9 * 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) 11 * 12 * Contact Information: 13 * linux-drivers@emulex.com 14 * 15 * Emulex 16 * 3333 Susan Street 17 * Costa Mesa, CA 92626 18 */ 19 20 #include <linux/reboot.h> 21 #include <linux/delay.h> 22 #include <linux/slab.h> 23 #include <linux/interrupt.h> 24 #include <linux/blkdev.h> 25 #include <linux/pci.h> 26 #include <linux/string.h> 27 #include <linux/kernel.h> 28 #include <linux/semaphore.h> 29 #include <linux/iscsi_boot_sysfs.h> 30 #include <linux/module.h> 31 #include <linux/bsg-lib.h> 32 33 #include <scsi/libiscsi.h> 34 #include <scsi/scsi_bsg_iscsi.h> 35 #include <scsi/scsi_netlink.h> 36 #include <scsi/scsi_transport_iscsi.h> 37 #include <scsi/scsi_transport.h> 38 #include <scsi/scsi_cmnd.h> 39 #include <scsi/scsi_device.h> 40 #include <scsi/scsi_host.h> 41 #include <scsi/scsi.h> 42 #include "be_main.h" 43 #include "be_iscsi.h" 44 #include "be_mgmt.h" 45 #include "be_cmds.h" 46 47 static unsigned int be_iopoll_budget = 10; 48 static unsigned int be_max_phys_size = 64; 49 static unsigned int enable_msix = 1; 50 static unsigned int gcrashmode = 0; 51 static unsigned int num_hba = 0; 52 53 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); 54 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); 55 MODULE_VERSION(BUILD_STR); 56 MODULE_AUTHOR("Emulex Corporation"); 57 MODULE_LICENSE("GPL"); 58 module_param(be_iopoll_budget, int, 0); 59 module_param(enable_msix, int, 0); 60 module_param(be_max_phys_size, uint, S_IRUGO); 61 MODULE_PARM_DESC(be_max_phys_size, 62 "Maximum Size (In Kilobytes) of physically contiguous " 63 "memory that can be allocated. Range is 16 - 128"); 64 65 #define beiscsi_disp_param(_name)\ 66 ssize_t \ 67 beiscsi_##_name##_disp(struct device *dev,\ 68 struct device_attribute *attrib, char *buf) \ 69 { \ 70 struct Scsi_Host *shost = class_to_shost(dev);\ 71 struct beiscsi_hba *phba = iscsi_host_priv(shost); \ 72 uint32_t param_val = 0; \ 73 param_val = phba->attr_##_name;\ 74 return snprintf(buf, PAGE_SIZE, "%d\n",\ 75 phba->attr_##_name);\ 76 } 77 78 #define beiscsi_change_param(_name, _minval, _maxval, _defaval)\ 79 int \ 80 beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\ 81 {\ 82 if (val >= _minval && val <= _maxval) {\ 83 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\ 84 "BA_%d : beiscsi_"#_name" updated "\ 85 "from 0x%x ==> 0x%x\n",\ 86 phba->attr_##_name, val); \ 87 phba->attr_##_name = val;\ 88 return 0;\ 89 } \ 90 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \ 91 "BA_%d beiscsi_"#_name" attribute "\ 92 "cannot be updated to 0x%x, "\ 93 "range allowed is ["#_minval" - "#_maxval"]\n", val);\ 94 return -EINVAL;\ 95 } 96 97 #define beiscsi_store_param(_name) \ 98 ssize_t \ 99 beiscsi_##_name##_store(struct device *dev,\ 100 struct device_attribute *attr, const char *buf,\ 101 size_t count) \ 102 { \ 103 struct Scsi_Host *shost = class_to_shost(dev);\ 104 struct beiscsi_hba *phba = iscsi_host_priv(shost);\ 105 uint32_t param_val = 0;\ 106 if (!isdigit(buf[0]))\ 107 return -EINVAL;\ 108 if (sscanf(buf, "%i", ¶m_val) != 1)\ 109 return -EINVAL;\ 110 if (beiscsi_##_name##_change(phba, param_val) == 0) \ 111 return strlen(buf);\ 112 else \ 113 return -EINVAL;\ 114 } 115 116 #define beiscsi_init_param(_name, _minval, _maxval, _defval) \ 117 int \ 118 beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \ 119 { \ 120 if (val >= _minval && val <= _maxval) {\ 121 phba->attr_##_name = val;\ 122 return 0;\ 123 } \ 124 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\ 125 "BA_%d beiscsi_"#_name" attribute " \ 126 "cannot be updated to 0x%x, "\ 127 "range allowed is ["#_minval" - "#_maxval"]\n", val);\ 128 phba->attr_##_name = _defval;\ 129 return -EINVAL;\ 130 } 131 132 #define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \ 133 static uint beiscsi_##_name = _defval;\ 134 module_param(beiscsi_##_name, uint, S_IRUGO);\ 135 MODULE_PARM_DESC(beiscsi_##_name, _descp);\ 136 beiscsi_disp_param(_name)\ 137 beiscsi_change_param(_name, _minval, _maxval, _defval)\ 138 beiscsi_store_param(_name)\ 139 beiscsi_init_param(_name, _minval, _maxval, _defval)\ 140 DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\ 141 beiscsi_##_name##_disp, beiscsi_##_name##_store) 142 143 /* 144 * When new log level added update the 145 * the MAX allowed value for log_enable 146 */ 147 BEISCSI_RW_ATTR(log_enable, 0x00, 148 0xFF, 0x00, "Enable logging Bit Mask\n" 149 "\t\t\t\tInitialization Events : 0x01\n" 150 "\t\t\t\tMailbox Events : 0x02\n" 151 "\t\t\t\tMiscellaneous Events : 0x04\n" 152 "\t\t\t\tError Handling : 0x08\n" 153 "\t\t\t\tIO Path Events : 0x10\n" 154 "\t\t\t\tConfiguration Path : 0x20\n"); 155 156 struct device_attribute *beiscsi_attrs[] = { 157 &dev_attr_beiscsi_log_enable, 158 NULL, 159 }; 160 161 static int beiscsi_slave_configure(struct scsi_device *sdev) 162 { 163 blk_queue_max_segment_size(sdev->request_queue, 65536); 164 return 0; 165 } 166 167 static int beiscsi_eh_abort(struct scsi_cmnd *sc) 168 { 169 struct iscsi_cls_session *cls_session; 170 struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr; 171 struct beiscsi_io_task *aborted_io_task; 172 struct iscsi_conn *conn; 173 struct beiscsi_conn *beiscsi_conn; 174 struct beiscsi_hba *phba; 175 struct iscsi_session *session; 176 struct invalidate_command_table *inv_tbl; 177 struct be_dma_mem nonemb_cmd; 178 unsigned int cid, tag, num_invalidate; 179 180 cls_session = starget_to_session(scsi_target(sc->device)); 181 session = cls_session->dd_data; 182 183 spin_lock_bh(&session->lock); 184 if (!aborted_task || !aborted_task->sc) { 185 /* we raced */ 186 spin_unlock_bh(&session->lock); 187 return SUCCESS; 188 } 189 190 aborted_io_task = aborted_task->dd_data; 191 if (!aborted_io_task->scsi_cmnd) { 192 /* raced or invalid command */ 193 spin_unlock_bh(&session->lock); 194 return SUCCESS; 195 } 196 spin_unlock_bh(&session->lock); 197 conn = aborted_task->conn; 198 beiscsi_conn = conn->dd_data; 199 phba = beiscsi_conn->phba; 200 201 /* invalidate iocb */ 202 cid = beiscsi_conn->beiscsi_conn_cid; 203 inv_tbl = phba->inv_tbl; 204 memset(inv_tbl, 0x0, sizeof(*inv_tbl)); 205 inv_tbl->cid = cid; 206 inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index; 207 num_invalidate = 1; 208 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 209 sizeof(struct invalidate_commands_params_in), 210 &nonemb_cmd.dma); 211 if (nonemb_cmd.va == NULL) { 212 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, 213 "BM_%d : Failed to allocate memory for" 214 "mgmt_invalidate_icds\n"); 215 return FAILED; 216 } 217 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); 218 219 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, 220 cid, &nonemb_cmd); 221 if (!tag) { 222 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, 223 "BM_%d : mgmt_invalidate_icds could not be" 224 "submitted\n"); 225 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 226 nonemb_cmd.va, nonemb_cmd.dma); 227 228 return FAILED; 229 } else { 230 wait_event_interruptible(phba->ctrl.mcc_wait[tag], 231 phba->ctrl.mcc_numtag[tag]); 232 free_mcc_tag(&phba->ctrl, tag); 233 } 234 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 235 nonemb_cmd.va, nonemb_cmd.dma); 236 return iscsi_eh_abort(sc); 237 } 238 239 static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) 240 { 241 struct iscsi_task *abrt_task; 242 struct beiscsi_io_task *abrt_io_task; 243 struct iscsi_conn *conn; 244 struct beiscsi_conn *beiscsi_conn; 245 struct beiscsi_hba *phba; 246 struct iscsi_session *session; 247 struct iscsi_cls_session *cls_session; 248 struct invalidate_command_table *inv_tbl; 249 struct be_dma_mem nonemb_cmd; 250 unsigned int cid, tag, i, num_invalidate; 251 252 /* invalidate iocbs */ 253 cls_session = starget_to_session(scsi_target(sc->device)); 254 session = cls_session->dd_data; 255 spin_lock_bh(&session->lock); 256 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) { 257 spin_unlock_bh(&session->lock); 258 return FAILED; 259 } 260 conn = session->leadconn; 261 beiscsi_conn = conn->dd_data; 262 phba = beiscsi_conn->phba; 263 cid = beiscsi_conn->beiscsi_conn_cid; 264 inv_tbl = phba->inv_tbl; 265 memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN); 266 num_invalidate = 0; 267 for (i = 0; i < conn->session->cmds_max; i++) { 268 abrt_task = conn->session->cmds[i]; 269 abrt_io_task = abrt_task->dd_data; 270 if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE) 271 continue; 272 273 if (abrt_task->sc->device->lun != abrt_task->sc->device->lun) 274 continue; 275 276 inv_tbl->cid = cid; 277 inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index; 278 num_invalidate++; 279 inv_tbl++; 280 } 281 spin_unlock_bh(&session->lock); 282 inv_tbl = phba->inv_tbl; 283 284 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 285 sizeof(struct invalidate_commands_params_in), 286 &nonemb_cmd.dma); 287 if (nonemb_cmd.va == NULL) { 288 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, 289 "BM_%d : Failed to allocate memory for" 290 "mgmt_invalidate_icds\n"); 291 return FAILED; 292 } 293 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); 294 memset(nonemb_cmd.va, 0, nonemb_cmd.size); 295 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, 296 cid, &nonemb_cmd); 297 if (!tag) { 298 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, 299 "BM_%d : mgmt_invalidate_icds could not be" 300 " submitted\n"); 301 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 302 nonemb_cmd.va, nonemb_cmd.dma); 303 return FAILED; 304 } else { 305 wait_event_interruptible(phba->ctrl.mcc_wait[tag], 306 phba->ctrl.mcc_numtag[tag]); 307 free_mcc_tag(&phba->ctrl, tag); 308 } 309 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 310 nonemb_cmd.va, nonemb_cmd.dma); 311 return iscsi_eh_device_reset(sc); 312 } 313 314 static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf) 315 { 316 struct beiscsi_hba *phba = data; 317 struct mgmt_session_info *boot_sess = &phba->boot_sess; 318 struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0]; 319 char *str = buf; 320 int rc; 321 322 switch (type) { 323 case ISCSI_BOOT_TGT_NAME: 324 rc = sprintf(buf, "%.*s\n", 325 (int)strlen(boot_sess->target_name), 326 (char *)&boot_sess->target_name); 327 break; 328 case ISCSI_BOOT_TGT_IP_ADDR: 329 if (boot_conn->dest_ipaddr.ip_type == 0x1) 330 rc = sprintf(buf, "%pI4\n", 331 (char *)&boot_conn->dest_ipaddr.addr); 332 else 333 rc = sprintf(str, "%pI6\n", 334 (char *)&boot_conn->dest_ipaddr.addr); 335 break; 336 case ISCSI_BOOT_TGT_PORT: 337 rc = sprintf(str, "%d\n", boot_conn->dest_port); 338 break; 339 340 case ISCSI_BOOT_TGT_CHAP_NAME: 341 rc = sprintf(str, "%.*s\n", 342 boot_conn->negotiated_login_options.auth_data.chap. 343 target_chap_name_length, 344 (char *)&boot_conn->negotiated_login_options. 345 auth_data.chap.target_chap_name); 346 break; 347 case ISCSI_BOOT_TGT_CHAP_SECRET: 348 rc = sprintf(str, "%.*s\n", 349 boot_conn->negotiated_login_options.auth_data.chap. 350 target_secret_length, 351 (char *)&boot_conn->negotiated_login_options. 352 auth_data.chap.target_secret); 353 break; 354 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 355 rc = sprintf(str, "%.*s\n", 356 boot_conn->negotiated_login_options.auth_data.chap. 357 intr_chap_name_length, 358 (char *)&boot_conn->negotiated_login_options. 359 auth_data.chap.intr_chap_name); 360 break; 361 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 362 rc = sprintf(str, "%.*s\n", 363 boot_conn->negotiated_login_options.auth_data.chap. 364 intr_secret_length, 365 (char *)&boot_conn->negotiated_login_options. 366 auth_data.chap.intr_secret); 367 break; 368 case ISCSI_BOOT_TGT_FLAGS: 369 rc = sprintf(str, "2\n"); 370 break; 371 case ISCSI_BOOT_TGT_NIC_ASSOC: 372 rc = sprintf(str, "0\n"); 373 break; 374 default: 375 rc = -ENOSYS; 376 break; 377 } 378 return rc; 379 } 380 381 static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf) 382 { 383 struct beiscsi_hba *phba = data; 384 char *str = buf; 385 int rc; 386 387 switch (type) { 388 case ISCSI_BOOT_INI_INITIATOR_NAME: 389 rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname); 390 break; 391 default: 392 rc = -ENOSYS; 393 break; 394 } 395 return rc; 396 } 397 398 static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf) 399 { 400 struct beiscsi_hba *phba = data; 401 char *str = buf; 402 int rc; 403 404 switch (type) { 405 case ISCSI_BOOT_ETH_FLAGS: 406 rc = sprintf(str, "2\n"); 407 break; 408 case ISCSI_BOOT_ETH_INDEX: 409 rc = sprintf(str, "0\n"); 410 break; 411 case ISCSI_BOOT_ETH_MAC: 412 rc = beiscsi_get_macaddr(str, phba); 413 break; 414 default: 415 rc = -ENOSYS; 416 break; 417 } 418 return rc; 419 } 420 421 422 static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type) 423 { 424 umode_t rc; 425 426 switch (type) { 427 case ISCSI_BOOT_TGT_NAME: 428 case ISCSI_BOOT_TGT_IP_ADDR: 429 case ISCSI_BOOT_TGT_PORT: 430 case ISCSI_BOOT_TGT_CHAP_NAME: 431 case ISCSI_BOOT_TGT_CHAP_SECRET: 432 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 433 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 434 case ISCSI_BOOT_TGT_NIC_ASSOC: 435 case ISCSI_BOOT_TGT_FLAGS: 436 rc = S_IRUGO; 437 break; 438 default: 439 rc = 0; 440 break; 441 } 442 return rc; 443 } 444 445 static umode_t beiscsi_ini_get_attr_visibility(void *data, int type) 446 { 447 umode_t rc; 448 449 switch (type) { 450 case ISCSI_BOOT_INI_INITIATOR_NAME: 451 rc = S_IRUGO; 452 break; 453 default: 454 rc = 0; 455 break; 456 } 457 return rc; 458 } 459 460 461 static umode_t beiscsi_eth_get_attr_visibility(void *data, int type) 462 { 463 umode_t rc; 464 465 switch (type) { 466 case ISCSI_BOOT_ETH_FLAGS: 467 case ISCSI_BOOT_ETH_MAC: 468 case ISCSI_BOOT_ETH_INDEX: 469 rc = S_IRUGO; 470 break; 471 default: 472 rc = 0; 473 break; 474 } 475 return rc; 476 } 477 478 /*------------------- PCI Driver operations and data ----------------- */ 479 static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = { 480 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, 481 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, 482 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, 483 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, 484 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) }, 485 { 0 } 486 }; 487 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); 488 489 490 static struct scsi_host_template beiscsi_sht = { 491 .module = THIS_MODULE, 492 .name = "Emulex 10Gbe open-iscsi Initiator Driver", 493 .proc_name = DRV_NAME, 494 .queuecommand = iscsi_queuecommand, 495 .change_queue_depth = iscsi_change_queue_depth, 496 .slave_configure = beiscsi_slave_configure, 497 .target_alloc = iscsi_target_alloc, 498 .eh_abort_handler = beiscsi_eh_abort, 499 .eh_device_reset_handler = beiscsi_eh_device_reset, 500 .eh_target_reset_handler = iscsi_eh_session_reset, 501 .shost_attrs = beiscsi_attrs, 502 .sg_tablesize = BEISCSI_SGLIST_ELEMENTS, 503 .can_queue = BE2_IO_DEPTH, 504 .this_id = -1, 505 .max_sectors = BEISCSI_MAX_SECTORS, 506 .cmd_per_lun = BEISCSI_CMD_PER_LUN, 507 .use_clustering = ENABLE_CLUSTERING, 508 .vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID, 509 510 }; 511 512 static struct scsi_transport_template *beiscsi_scsi_transport; 513 514 static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) 515 { 516 struct beiscsi_hba *phba; 517 struct Scsi_Host *shost; 518 519 shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0); 520 if (!shost) { 521 dev_err(&pcidev->dev, 522 "beiscsi_hba_alloc - iscsi_host_alloc failed\n"); 523 return NULL; 524 } 525 shost->dma_boundary = pcidev->dma_mask; 526 shost->max_id = BE2_MAX_SESSIONS; 527 shost->max_channel = 0; 528 shost->max_cmd_len = BEISCSI_MAX_CMD_LEN; 529 shost->max_lun = BEISCSI_NUM_MAX_LUN; 530 shost->transportt = beiscsi_scsi_transport; 531 phba = iscsi_host_priv(shost); 532 memset(phba, 0, sizeof(*phba)); 533 phba->shost = shost; 534 phba->pcidev = pci_dev_get(pcidev); 535 pci_set_drvdata(pcidev, phba); 536 phba->interface_handle = 0xFFFFFFFF; 537 538 if (iscsi_host_add(shost, &phba->pcidev->dev)) 539 goto free_devices; 540 541 return phba; 542 543 free_devices: 544 pci_dev_put(phba->pcidev); 545 iscsi_host_free(phba->shost); 546 return NULL; 547 } 548 549 static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba) 550 { 551 if (phba->csr_va) { 552 iounmap(phba->csr_va); 553 phba->csr_va = NULL; 554 } 555 if (phba->db_va) { 556 iounmap(phba->db_va); 557 phba->db_va = NULL; 558 } 559 if (phba->pci_va) { 560 iounmap(phba->pci_va); 561 phba->pci_va = NULL; 562 } 563 } 564 565 static int beiscsi_map_pci_bars(struct beiscsi_hba *phba, 566 struct pci_dev *pcidev) 567 { 568 u8 __iomem *addr; 569 int pcicfg_reg; 570 571 addr = ioremap_nocache(pci_resource_start(pcidev, 2), 572 pci_resource_len(pcidev, 2)); 573 if (addr == NULL) 574 return -ENOMEM; 575 phba->ctrl.csr = addr; 576 phba->csr_va = addr; 577 phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2); 578 579 addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024); 580 if (addr == NULL) 581 goto pci_map_err; 582 phba->ctrl.db = addr; 583 phba->db_va = addr; 584 phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4); 585 586 if (phba->generation == BE_GEN2) 587 pcicfg_reg = 1; 588 else 589 pcicfg_reg = 0; 590 591 addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg), 592 pci_resource_len(pcidev, pcicfg_reg)); 593 594 if (addr == NULL) 595 goto pci_map_err; 596 phba->ctrl.pcicfg = addr; 597 phba->pci_va = addr; 598 phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg); 599 return 0; 600 601 pci_map_err: 602 beiscsi_unmap_pci_function(phba); 603 return -ENOMEM; 604 } 605 606 static int beiscsi_enable_pci(struct pci_dev *pcidev) 607 { 608 int ret; 609 610 ret = pci_enable_device(pcidev); 611 if (ret) { 612 dev_err(&pcidev->dev, 613 "beiscsi_enable_pci - enable device failed\n"); 614 return ret; 615 } 616 617 pci_set_master(pcidev); 618 if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) { 619 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)); 620 if (ret) { 621 dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); 622 pci_disable_device(pcidev); 623 return ret; 624 } 625 } 626 return 0; 627 } 628 629 static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev) 630 { 631 struct be_ctrl_info *ctrl = &phba->ctrl; 632 struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced; 633 struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem; 634 int status = 0; 635 636 ctrl->pdev = pdev; 637 status = beiscsi_map_pci_bars(phba, pdev); 638 if (status) 639 return status; 640 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; 641 mbox_mem_alloc->va = pci_alloc_consistent(pdev, 642 mbox_mem_alloc->size, 643 &mbox_mem_alloc->dma); 644 if (!mbox_mem_alloc->va) { 645 beiscsi_unmap_pci_function(phba); 646 return -ENOMEM; 647 } 648 649 mbox_mem_align->size = sizeof(struct be_mcc_mailbox); 650 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); 651 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); 652 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); 653 spin_lock_init(&ctrl->mbox_lock); 654 spin_lock_init(&phba->ctrl.mcc_lock); 655 spin_lock_init(&phba->ctrl.mcc_cq_lock); 656 657 return status; 658 } 659 660 static void beiscsi_get_params(struct beiscsi_hba *phba) 661 { 662 phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count 663 - (phba->fw_config.iscsi_cid_count 664 + BE2_TMFS 665 + BE2_NOPOUT_REQ)); 666 phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count; 667 phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2; 668 phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count; 669 phba->params.num_sge_per_io = BE2_SGE; 670 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; 671 phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ; 672 phba->params.eq_timer = 64; 673 phba->params.num_eq_entries = 674 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2 675 + BE2_TMFS) / 512) + 1) * 512; 676 phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024) 677 ? 1024 : phba->params.num_eq_entries; 678 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 679 "BM_%d : phba->params.num_eq_entries=%d\n", 680 phba->params.num_eq_entries); 681 phba->params.num_cq_entries = 682 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2 683 + BE2_TMFS) / 512) + 1) * 512; 684 phba->params.wrbs_per_cxn = 256; 685 } 686 687 static void hwi_ring_eq_db(struct beiscsi_hba *phba, 688 unsigned int id, unsigned int clr_interrupt, 689 unsigned int num_processed, 690 unsigned char rearm, unsigned char event) 691 { 692 u32 val = 0; 693 val |= id & DB_EQ_RING_ID_MASK; 694 if (rearm) 695 val |= 1 << DB_EQ_REARM_SHIFT; 696 if (clr_interrupt) 697 val |= 1 << DB_EQ_CLR_SHIFT; 698 if (event) 699 val |= 1 << DB_EQ_EVNT_SHIFT; 700 val |= num_processed << DB_EQ_NUM_POPPED_SHIFT; 701 iowrite32(val, phba->db_va + DB_EQ_OFFSET); 702 } 703 704 /** 705 * be_isr_mcc - The isr routine of the driver. 706 * @irq: Not used 707 * @dev_id: Pointer to host adapter structure 708 */ 709 static irqreturn_t be_isr_mcc(int irq, void *dev_id) 710 { 711 struct beiscsi_hba *phba; 712 struct be_eq_entry *eqe = NULL; 713 struct be_queue_info *eq; 714 struct be_queue_info *mcc; 715 unsigned int num_eq_processed; 716 struct be_eq_obj *pbe_eq; 717 unsigned long flags; 718 719 pbe_eq = dev_id; 720 eq = &pbe_eq->q; 721 phba = pbe_eq->phba; 722 mcc = &phba->ctrl.mcc_obj.cq; 723 eqe = queue_tail_node(eq); 724 725 num_eq_processed = 0; 726 727 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 728 & EQE_VALID_MASK) { 729 if (((eqe->dw[offsetof(struct amap_eq_entry, 730 resource_id) / 32] & 731 EQE_RESID_MASK) >> 16) == mcc->id) { 732 spin_lock_irqsave(&phba->isr_lock, flags); 733 phba->todo_mcc_cq = 1; 734 spin_unlock_irqrestore(&phba->isr_lock, flags); 735 } 736 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 737 queue_tail_inc(eq); 738 eqe = queue_tail_node(eq); 739 num_eq_processed++; 740 } 741 if (phba->todo_mcc_cq) 742 queue_work(phba->wq, &phba->work_cqs); 743 if (num_eq_processed) 744 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1); 745 746 return IRQ_HANDLED; 747 } 748 749 /** 750 * be_isr_msix - The isr routine of the driver. 751 * @irq: Not used 752 * @dev_id: Pointer to host adapter structure 753 */ 754 static irqreturn_t be_isr_msix(int irq, void *dev_id) 755 { 756 struct beiscsi_hba *phba; 757 struct be_eq_entry *eqe = NULL; 758 struct be_queue_info *eq; 759 struct be_queue_info *cq; 760 unsigned int num_eq_processed; 761 struct be_eq_obj *pbe_eq; 762 unsigned long flags; 763 764 pbe_eq = dev_id; 765 eq = &pbe_eq->q; 766 cq = pbe_eq->cq; 767 eqe = queue_tail_node(eq); 768 769 phba = pbe_eq->phba; 770 num_eq_processed = 0; 771 if (blk_iopoll_enabled) { 772 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 773 & EQE_VALID_MASK) { 774 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) 775 blk_iopoll_sched(&pbe_eq->iopoll); 776 777 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 778 queue_tail_inc(eq); 779 eqe = queue_tail_node(eq); 780 num_eq_processed++; 781 } 782 if (num_eq_processed) 783 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1); 784 785 return IRQ_HANDLED; 786 } else { 787 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 788 & EQE_VALID_MASK) { 789 spin_lock_irqsave(&phba->isr_lock, flags); 790 phba->todo_cq = 1; 791 spin_unlock_irqrestore(&phba->isr_lock, flags); 792 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 793 queue_tail_inc(eq); 794 eqe = queue_tail_node(eq); 795 num_eq_processed++; 796 } 797 if (phba->todo_cq) 798 queue_work(phba->wq, &phba->work_cqs); 799 800 if (num_eq_processed) 801 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1); 802 803 return IRQ_HANDLED; 804 } 805 } 806 807 /** 808 * be_isr - The isr routine of the driver. 809 * @irq: Not used 810 * @dev_id: Pointer to host adapter structure 811 */ 812 static irqreturn_t be_isr(int irq, void *dev_id) 813 { 814 struct beiscsi_hba *phba; 815 struct hwi_controller *phwi_ctrlr; 816 struct hwi_context_memory *phwi_context; 817 struct be_eq_entry *eqe = NULL; 818 struct be_queue_info *eq; 819 struct be_queue_info *cq; 820 struct be_queue_info *mcc; 821 unsigned long flags, index; 822 unsigned int num_mcceq_processed, num_ioeq_processed; 823 struct be_ctrl_info *ctrl; 824 struct be_eq_obj *pbe_eq; 825 int isr; 826 827 phba = dev_id; 828 ctrl = &phba->ctrl; 829 isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET + 830 (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE)); 831 if (!isr) 832 return IRQ_NONE; 833 834 phwi_ctrlr = phba->phwi_ctrlr; 835 phwi_context = phwi_ctrlr->phwi_ctxt; 836 pbe_eq = &phwi_context->be_eq[0]; 837 838 eq = &phwi_context->be_eq[0].q; 839 mcc = &phba->ctrl.mcc_obj.cq; 840 index = 0; 841 eqe = queue_tail_node(eq); 842 843 num_ioeq_processed = 0; 844 num_mcceq_processed = 0; 845 if (blk_iopoll_enabled) { 846 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 847 & EQE_VALID_MASK) { 848 if (((eqe->dw[offsetof(struct amap_eq_entry, 849 resource_id) / 32] & 850 EQE_RESID_MASK) >> 16) == mcc->id) { 851 spin_lock_irqsave(&phba->isr_lock, flags); 852 phba->todo_mcc_cq = 1; 853 spin_unlock_irqrestore(&phba->isr_lock, flags); 854 num_mcceq_processed++; 855 } else { 856 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) 857 blk_iopoll_sched(&pbe_eq->iopoll); 858 num_ioeq_processed++; 859 } 860 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 861 queue_tail_inc(eq); 862 eqe = queue_tail_node(eq); 863 } 864 if (num_ioeq_processed || num_mcceq_processed) { 865 if (phba->todo_mcc_cq) 866 queue_work(phba->wq, &phba->work_cqs); 867 868 if ((num_mcceq_processed) && (!num_ioeq_processed)) 869 hwi_ring_eq_db(phba, eq->id, 0, 870 (num_ioeq_processed + 871 num_mcceq_processed) , 1, 1); 872 else 873 hwi_ring_eq_db(phba, eq->id, 0, 874 (num_ioeq_processed + 875 num_mcceq_processed), 0, 1); 876 877 return IRQ_HANDLED; 878 } else 879 return IRQ_NONE; 880 } else { 881 cq = &phwi_context->be_cq[0]; 882 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 883 & EQE_VALID_MASK) { 884 885 if (((eqe->dw[offsetof(struct amap_eq_entry, 886 resource_id) / 32] & 887 EQE_RESID_MASK) >> 16) != cq->id) { 888 spin_lock_irqsave(&phba->isr_lock, flags); 889 phba->todo_mcc_cq = 1; 890 spin_unlock_irqrestore(&phba->isr_lock, flags); 891 } else { 892 spin_lock_irqsave(&phba->isr_lock, flags); 893 phba->todo_cq = 1; 894 spin_unlock_irqrestore(&phba->isr_lock, flags); 895 } 896 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 897 queue_tail_inc(eq); 898 eqe = queue_tail_node(eq); 899 num_ioeq_processed++; 900 } 901 if (phba->todo_cq || phba->todo_mcc_cq) 902 queue_work(phba->wq, &phba->work_cqs); 903 904 if (num_ioeq_processed) { 905 hwi_ring_eq_db(phba, eq->id, 0, 906 num_ioeq_processed, 1, 1); 907 return IRQ_HANDLED; 908 } else 909 return IRQ_NONE; 910 } 911 } 912 913 static int beiscsi_init_irqs(struct beiscsi_hba *phba) 914 { 915 struct pci_dev *pcidev = phba->pcidev; 916 struct hwi_controller *phwi_ctrlr; 917 struct hwi_context_memory *phwi_context; 918 int ret, msix_vec, i, j; 919 920 phwi_ctrlr = phba->phwi_ctrlr; 921 phwi_context = phwi_ctrlr->phwi_ctxt; 922 923 if (phba->msix_enabled) { 924 for (i = 0; i < phba->num_cpus; i++) { 925 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, 926 GFP_KERNEL); 927 if (!phba->msi_name[i]) { 928 ret = -ENOMEM; 929 goto free_msix_irqs; 930 } 931 932 sprintf(phba->msi_name[i], "beiscsi_%02x_%02x", 933 phba->shost->host_no, i); 934 msix_vec = phba->msix_entries[i].vector; 935 ret = request_irq(msix_vec, be_isr_msix, 0, 936 phba->msi_name[i], 937 &phwi_context->be_eq[i]); 938 if (ret) { 939 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 940 "BM_%d : beiscsi_init_irqs-Failed to" 941 "register msix for i = %d\n", 942 i); 943 kfree(phba->msi_name[i]); 944 goto free_msix_irqs; 945 } 946 } 947 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL); 948 if (!phba->msi_name[i]) { 949 ret = -ENOMEM; 950 goto free_msix_irqs; 951 } 952 sprintf(phba->msi_name[i], "beiscsi_mcc_%02x", 953 phba->shost->host_no); 954 msix_vec = phba->msix_entries[i].vector; 955 ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i], 956 &phwi_context->be_eq[i]); 957 if (ret) { 958 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT , 959 "BM_%d : beiscsi_init_irqs-" 960 "Failed to register beiscsi_msix_mcc\n"); 961 kfree(phba->msi_name[i]); 962 goto free_msix_irqs; 963 } 964 965 } else { 966 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED, 967 "beiscsi", phba); 968 if (ret) { 969 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 970 "BM_%d : beiscsi_init_irqs-" 971 "Failed to register irq\\n"); 972 return ret; 973 } 974 } 975 return 0; 976 free_msix_irqs: 977 for (j = i - 1; j >= 0; j--) { 978 kfree(phba->msi_name[j]); 979 msix_vec = phba->msix_entries[j].vector; 980 free_irq(msix_vec, &phwi_context->be_eq[j]); 981 } 982 return ret; 983 } 984 985 static void hwi_ring_cq_db(struct beiscsi_hba *phba, 986 unsigned int id, unsigned int num_processed, 987 unsigned char rearm, unsigned char event) 988 { 989 u32 val = 0; 990 val |= id & DB_CQ_RING_ID_MASK; 991 if (rearm) 992 val |= 1 << DB_CQ_REARM_SHIFT; 993 val |= num_processed << DB_CQ_NUM_POPPED_SHIFT; 994 iowrite32(val, phba->db_va + DB_CQ_OFFSET); 995 } 996 997 static unsigned int 998 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, 999 struct beiscsi_hba *phba, 1000 unsigned short cid, 1001 struct pdu_base *ppdu, 1002 unsigned long pdu_len, 1003 void *pbuffer, unsigned long buf_len) 1004 { 1005 struct iscsi_conn *conn = beiscsi_conn->conn; 1006 struct iscsi_session *session = conn->session; 1007 struct iscsi_task *task; 1008 struct beiscsi_io_task *io_task; 1009 struct iscsi_hdr *login_hdr; 1010 1011 switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] & 1012 PDUBASE_OPCODE_MASK) { 1013 case ISCSI_OP_NOOP_IN: 1014 pbuffer = NULL; 1015 buf_len = 0; 1016 break; 1017 case ISCSI_OP_ASYNC_EVENT: 1018 break; 1019 case ISCSI_OP_REJECT: 1020 WARN_ON(!pbuffer); 1021 WARN_ON(!(buf_len == 48)); 1022 beiscsi_log(phba, KERN_ERR, 1023 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1024 "BM_%d : In ISCSI_OP_REJECT\n"); 1025 break; 1026 case ISCSI_OP_LOGIN_RSP: 1027 case ISCSI_OP_TEXT_RSP: 1028 task = conn->login_task; 1029 io_task = task->dd_data; 1030 login_hdr = (struct iscsi_hdr *)ppdu; 1031 login_hdr->itt = io_task->libiscsi_itt; 1032 break; 1033 default: 1034 beiscsi_log(phba, KERN_WARNING, 1035 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1036 "BM_%d : Unrecognized opcode 0x%x in async msg\n", 1037 (ppdu-> 1038 dw[offsetof(struct amap_pdu_base, opcode) / 32] 1039 & PDUBASE_OPCODE_MASK)); 1040 return 1; 1041 } 1042 1043 spin_lock_bh(&session->lock); 1044 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len); 1045 spin_unlock_bh(&session->lock); 1046 return 0; 1047 } 1048 1049 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba) 1050 { 1051 struct sgl_handle *psgl_handle; 1052 1053 if (phba->io_sgl_hndl_avbl) { 1054 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, 1055 "BM_%d : In alloc_io_sgl_handle," 1056 " io_sgl_alloc_index=%d\n", 1057 phba->io_sgl_alloc_index); 1058 1059 psgl_handle = phba->io_sgl_hndl_base[phba-> 1060 io_sgl_alloc_index]; 1061 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL; 1062 phba->io_sgl_hndl_avbl--; 1063 if (phba->io_sgl_alloc_index == (phba->params. 1064 ios_per_ctrl - 1)) 1065 phba->io_sgl_alloc_index = 0; 1066 else 1067 phba->io_sgl_alloc_index++; 1068 } else 1069 psgl_handle = NULL; 1070 return psgl_handle; 1071 } 1072 1073 static void 1074 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) 1075 { 1076 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, 1077 "BM_%d : In free_,io_sgl_free_index=%d\n", 1078 phba->io_sgl_free_index); 1079 1080 if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) { 1081 /* 1082 * this can happen if clean_task is called on a task that 1083 * failed in xmit_task or alloc_pdu. 1084 */ 1085 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, 1086 "BM_%d : Double Free in IO SGL io_sgl_free_index=%d," 1087 "value there=%p\n", phba->io_sgl_free_index, 1088 phba->io_sgl_hndl_base 1089 [phba->io_sgl_free_index]); 1090 return; 1091 } 1092 phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle; 1093 phba->io_sgl_hndl_avbl++; 1094 if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1)) 1095 phba->io_sgl_free_index = 0; 1096 else 1097 phba->io_sgl_free_index++; 1098 } 1099 1100 /** 1101 * alloc_wrb_handle - To allocate a wrb handle 1102 * @phba: The hba pointer 1103 * @cid: The cid to use for allocation 1104 * 1105 * This happens under session_lock until submission to chip 1106 */ 1107 struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid) 1108 { 1109 struct hwi_wrb_context *pwrb_context; 1110 struct hwi_controller *phwi_ctrlr; 1111 struct wrb_handle *pwrb_handle, *pwrb_handle_tmp; 1112 1113 phwi_ctrlr = phba->phwi_ctrlr; 1114 pwrb_context = &phwi_ctrlr->wrb_context[cid]; 1115 if (pwrb_context->wrb_handles_available >= 2) { 1116 pwrb_handle = pwrb_context->pwrb_handle_base[ 1117 pwrb_context->alloc_index]; 1118 pwrb_context->wrb_handles_available--; 1119 if (pwrb_context->alloc_index == 1120 (phba->params.wrbs_per_cxn - 1)) 1121 pwrb_context->alloc_index = 0; 1122 else 1123 pwrb_context->alloc_index++; 1124 pwrb_handle_tmp = pwrb_context->pwrb_handle_base[ 1125 pwrb_context->alloc_index]; 1126 pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index; 1127 } else 1128 pwrb_handle = NULL; 1129 return pwrb_handle; 1130 } 1131 1132 /** 1133 * free_wrb_handle - To free the wrb handle back to pool 1134 * @phba: The hba pointer 1135 * @pwrb_context: The context to free from 1136 * @pwrb_handle: The wrb_handle to free 1137 * 1138 * This happens under session_lock until submission to chip 1139 */ 1140 static void 1141 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context, 1142 struct wrb_handle *pwrb_handle) 1143 { 1144 pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle; 1145 pwrb_context->wrb_handles_available++; 1146 if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1)) 1147 pwrb_context->free_index = 0; 1148 else 1149 pwrb_context->free_index++; 1150 1151 beiscsi_log(phba, KERN_INFO, 1152 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1153 "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x" 1154 "wrb_handles_available=%d\n", 1155 pwrb_handle, pwrb_context->free_index, 1156 pwrb_context->wrb_handles_available); 1157 } 1158 1159 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba) 1160 { 1161 struct sgl_handle *psgl_handle; 1162 1163 if (phba->eh_sgl_hndl_avbl) { 1164 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index]; 1165 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL; 1166 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 1167 "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n", 1168 phba->eh_sgl_alloc_index, 1169 phba->eh_sgl_alloc_index); 1170 1171 phba->eh_sgl_hndl_avbl--; 1172 if (phba->eh_sgl_alloc_index == 1173 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1174 1)) 1175 phba->eh_sgl_alloc_index = 0; 1176 else 1177 phba->eh_sgl_alloc_index++; 1178 } else 1179 psgl_handle = NULL; 1180 return psgl_handle; 1181 } 1182 1183 void 1184 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) 1185 { 1186 1187 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 1188 "BM_%d : In free_mgmt_sgl_handle," 1189 "eh_sgl_free_index=%d\n", 1190 phba->eh_sgl_free_index); 1191 1192 if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) { 1193 /* 1194 * this can happen if clean_task is called on a task that 1195 * failed in xmit_task or alloc_pdu. 1196 */ 1197 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, 1198 "BM_%d : Double Free in eh SGL ," 1199 "eh_sgl_free_index=%d\n", 1200 phba->eh_sgl_free_index); 1201 return; 1202 } 1203 phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle; 1204 phba->eh_sgl_hndl_avbl++; 1205 if (phba->eh_sgl_free_index == 1206 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1)) 1207 phba->eh_sgl_free_index = 0; 1208 else 1209 phba->eh_sgl_free_index++; 1210 } 1211 1212 static void 1213 be_complete_io(struct beiscsi_conn *beiscsi_conn, 1214 struct iscsi_task *task, struct sol_cqe *psol) 1215 { 1216 struct beiscsi_io_task *io_task = task->dd_data; 1217 struct be_status_bhs *sts_bhs = 1218 (struct be_status_bhs *)io_task->cmd_bhs; 1219 struct iscsi_conn *conn = beiscsi_conn->conn; 1220 unsigned char *sense; 1221 u32 resid = 0, exp_cmdsn, max_cmdsn; 1222 u8 rsp, status, flags; 1223 1224 exp_cmdsn = (psol-> 1225 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32] 1226 & SOL_EXP_CMD_SN_MASK); 1227 max_cmdsn = ((psol-> 1228 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32] 1229 & SOL_EXP_CMD_SN_MASK) + 1230 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) 1231 / 32] & SOL_CMD_WND_MASK) >> 24) - 1); 1232 rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32] 1233 & SOL_RESP_MASK) >> 16); 1234 status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32] 1235 & SOL_STS_MASK) >> 8); 1236 flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] 1237 & SOL_FLAGS_MASK) >> 24) | 0x80; 1238 if (!task->sc) { 1239 if (io_task->scsi_cmnd) 1240 scsi_dma_unmap(io_task->scsi_cmnd); 1241 1242 return; 1243 } 1244 task->sc->result = (DID_OK << 16) | status; 1245 if (rsp != ISCSI_STATUS_CMD_COMPLETED) { 1246 task->sc->result = DID_ERROR << 16; 1247 goto unmap; 1248 } 1249 1250 /* bidi not initially supported */ 1251 if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) { 1252 resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 1253 32] & SOL_RES_CNT_MASK); 1254 1255 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW)) 1256 task->sc->result = DID_ERROR << 16; 1257 1258 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) { 1259 scsi_set_resid(task->sc, resid); 1260 if (!status && (scsi_bufflen(task->sc) - resid < 1261 task->sc->underflow)) 1262 task->sc->result = DID_ERROR << 16; 1263 } 1264 } 1265 1266 if (status == SAM_STAT_CHECK_CONDITION) { 1267 u16 sense_len; 1268 unsigned short *slen = (unsigned short *)sts_bhs->sense_info; 1269 1270 sense = sts_bhs->sense_info + sizeof(unsigned short); 1271 sense_len = be16_to_cpu(*slen); 1272 memcpy(task->sc->sense_buffer, sense, 1273 min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE)); 1274 } 1275 1276 if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) { 1277 if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32] 1278 & SOL_RES_CNT_MASK) 1279 conn->rxdata_octets += (psol-> 1280 dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32] 1281 & SOL_RES_CNT_MASK); 1282 } 1283 unmap: 1284 scsi_dma_unmap(io_task->scsi_cmnd); 1285 iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn); 1286 } 1287 1288 static void 1289 be_complete_logout(struct beiscsi_conn *beiscsi_conn, 1290 struct iscsi_task *task, struct sol_cqe *psol) 1291 { 1292 struct iscsi_logout_rsp *hdr; 1293 struct beiscsi_io_task *io_task = task->dd_data; 1294 struct iscsi_conn *conn = beiscsi_conn->conn; 1295 1296 hdr = (struct iscsi_logout_rsp *)task->hdr; 1297 hdr->opcode = ISCSI_OP_LOGOUT_RSP; 1298 hdr->t2wait = 5; 1299 hdr->t2retain = 0; 1300 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] 1301 & SOL_FLAGS_MASK) >> 24) | 0x80; 1302 hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 1303 32] & SOL_RESP_MASK); 1304 hdr->exp_cmdsn = cpu_to_be32(psol-> 1305 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32] 1306 & SOL_EXP_CMD_SN_MASK); 1307 hdr->max_cmdsn = be32_to_cpu((psol-> 1308 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32] 1309 & SOL_EXP_CMD_SN_MASK) + 1310 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) 1311 / 32] & SOL_CMD_WND_MASK) >> 24) - 1); 1312 hdr->dlength[0] = 0; 1313 hdr->dlength[1] = 0; 1314 hdr->dlength[2] = 0; 1315 hdr->hlength = 0; 1316 hdr->itt = io_task->libiscsi_itt; 1317 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1318 } 1319 1320 static void 1321 be_complete_tmf(struct beiscsi_conn *beiscsi_conn, 1322 struct iscsi_task *task, struct sol_cqe *psol) 1323 { 1324 struct iscsi_tm_rsp *hdr; 1325 struct iscsi_conn *conn = beiscsi_conn->conn; 1326 struct beiscsi_io_task *io_task = task->dd_data; 1327 1328 hdr = (struct iscsi_tm_rsp *)task->hdr; 1329 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; 1330 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] 1331 & SOL_FLAGS_MASK) >> 24) | 0x80; 1332 hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 1333 32] & SOL_RESP_MASK); 1334 hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe, 1335 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK); 1336 hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe, 1337 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) + 1338 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) 1339 / 32] & SOL_CMD_WND_MASK) >> 24) - 1); 1340 hdr->itt = io_task->libiscsi_itt; 1341 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1342 } 1343 1344 static void 1345 hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, 1346 struct beiscsi_hba *phba, struct sol_cqe *psol) 1347 { 1348 struct hwi_wrb_context *pwrb_context; 1349 struct wrb_handle *pwrb_handle = NULL; 1350 struct hwi_controller *phwi_ctrlr; 1351 struct iscsi_task *task; 1352 struct beiscsi_io_task *io_task; 1353 struct iscsi_conn *conn = beiscsi_conn->conn; 1354 struct iscsi_session *session = conn->session; 1355 1356 phwi_ctrlr = phba->phwi_ctrlr; 1357 pwrb_context = &phwi_ctrlr->wrb_context[((psol-> 1358 dw[offsetof(struct amap_sol_cqe, cid) / 32] & 1359 SOL_CID_MASK) >> 6) - 1360 phba->fw_config.iscsi_cid_start]; 1361 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> 1362 dw[offsetof(struct amap_sol_cqe, wrb_index) / 1363 32] & SOL_WRB_INDEX_MASK) >> 16)]; 1364 task = pwrb_handle->pio_handle; 1365 1366 io_task = task->dd_data; 1367 spin_lock_bh(&phba->mgmt_sgl_lock); 1368 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 1369 spin_unlock_bh(&phba->mgmt_sgl_lock); 1370 spin_lock_bh(&session->lock); 1371 free_wrb_handle(phba, pwrb_context, pwrb_handle); 1372 spin_unlock_bh(&session->lock); 1373 } 1374 1375 static void 1376 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn, 1377 struct iscsi_task *task, struct sol_cqe *psol) 1378 { 1379 struct iscsi_nopin *hdr; 1380 struct iscsi_conn *conn = beiscsi_conn->conn; 1381 struct beiscsi_io_task *io_task = task->dd_data; 1382 1383 hdr = (struct iscsi_nopin *)task->hdr; 1384 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] 1385 & SOL_FLAGS_MASK) >> 24) | 0x80; 1386 hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe, 1387 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK); 1388 hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe, 1389 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) + 1390 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) 1391 / 32] & SOL_CMD_WND_MASK) >> 24) - 1); 1392 hdr->opcode = ISCSI_OP_NOOP_IN; 1393 hdr->itt = io_task->libiscsi_itt; 1394 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1395 } 1396 1397 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, 1398 struct beiscsi_hba *phba, struct sol_cqe *psol) 1399 { 1400 struct hwi_wrb_context *pwrb_context; 1401 struct wrb_handle *pwrb_handle; 1402 struct iscsi_wrb *pwrb = NULL; 1403 struct hwi_controller *phwi_ctrlr; 1404 struct iscsi_task *task; 1405 unsigned int type; 1406 struct iscsi_conn *conn = beiscsi_conn->conn; 1407 struct iscsi_session *session = conn->session; 1408 1409 phwi_ctrlr = phba->phwi_ctrlr; 1410 pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof 1411 (struct amap_sol_cqe, cid) / 32] 1412 & SOL_CID_MASK) >> 6) - 1413 phba->fw_config.iscsi_cid_start]; 1414 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> 1415 dw[offsetof(struct amap_sol_cqe, wrb_index) / 1416 32] & SOL_WRB_INDEX_MASK) >> 16)]; 1417 task = pwrb_handle->pio_handle; 1418 pwrb = pwrb_handle->pwrb; 1419 type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] & 1420 WRB_TYPE_MASK) >> 28; 1421 1422 spin_lock_bh(&session->lock); 1423 switch (type) { 1424 case HWH_TYPE_IO: 1425 case HWH_TYPE_IO_RD: 1426 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == 1427 ISCSI_OP_NOOP_OUT) 1428 be_complete_nopin_resp(beiscsi_conn, task, psol); 1429 else 1430 be_complete_io(beiscsi_conn, task, psol); 1431 break; 1432 1433 case HWH_TYPE_LOGOUT: 1434 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) 1435 be_complete_logout(beiscsi_conn, task, psol); 1436 else 1437 be_complete_tmf(beiscsi_conn, task, psol); 1438 1439 break; 1440 1441 case HWH_TYPE_LOGIN: 1442 beiscsi_log(phba, KERN_ERR, 1443 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1444 "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in" 1445 " hwi_complete_cmd- Solicited path\n"); 1446 break; 1447 1448 case HWH_TYPE_NOP: 1449 be_complete_nopin_resp(beiscsi_conn, task, psol); 1450 break; 1451 1452 default: 1453 beiscsi_log(phba, KERN_WARNING, 1454 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1455 "BM_%d : In hwi_complete_cmd, unknown type = %d" 1456 "wrb_index 0x%x CID 0x%x\n", type, 1457 ((psol->dw[offsetof(struct amap_iscsi_wrb, 1458 type) / 32] & SOL_WRB_INDEX_MASK) >> 16), 1459 ((psol->dw[offsetof(struct amap_sol_cqe, 1460 cid) / 32] & SOL_CID_MASK) >> 6)); 1461 break; 1462 } 1463 1464 spin_unlock_bh(&session->lock); 1465 } 1466 1467 static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context 1468 *pasync_ctx, unsigned int is_header, 1469 unsigned int host_write_ptr) 1470 { 1471 if (is_header) 1472 return &pasync_ctx->async_entry[host_write_ptr]. 1473 header_busy_list; 1474 else 1475 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list; 1476 } 1477 1478 static struct async_pdu_handle * 1479 hwi_get_async_handle(struct beiscsi_hba *phba, 1480 struct beiscsi_conn *beiscsi_conn, 1481 struct hwi_async_pdu_context *pasync_ctx, 1482 struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index) 1483 { 1484 struct be_bus_address phys_addr; 1485 struct list_head *pbusy_list; 1486 struct async_pdu_handle *pasync_handle = NULL; 1487 unsigned char is_header = 0; 1488 1489 phys_addr.u.a32.address_lo = 1490 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] - 1491 ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32] 1492 & PDUCQE_DPL_MASK) >> 16); 1493 phys_addr.u.a32.address_hi = 1494 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32]; 1495 1496 phys_addr.u.a64.address = 1497 *((unsigned long long *)(&phys_addr.u.a64.address)); 1498 1499 switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32] 1500 & PDUCQE_CODE_MASK) { 1501 case UNSOL_HDR_NOTIFY: 1502 is_header = 1; 1503 1504 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1, 1505 (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, 1506 index) / 32] & PDUCQE_INDEX_MASK)); 1507 break; 1508 case UNSOL_DATA_NOTIFY: 1509 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe-> 1510 dw[offsetof(struct amap_i_t_dpdu_cqe, 1511 index) / 32] & PDUCQE_INDEX_MASK)); 1512 break; 1513 default: 1514 pbusy_list = NULL; 1515 beiscsi_log(phba, KERN_WARNING, 1516 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1517 "BM_%d : Unexpected code=%d\n", 1518 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, 1519 code) / 32] & PDUCQE_CODE_MASK); 1520 return NULL; 1521 } 1522 1523 WARN_ON(list_empty(pbusy_list)); 1524 list_for_each_entry(pasync_handle, pbusy_list, link) { 1525 if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address) 1526 break; 1527 } 1528 1529 WARN_ON(!pasync_handle); 1530 1531 pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid - 1532 phba->fw_config.iscsi_cid_start; 1533 pasync_handle->is_header = is_header; 1534 pasync_handle->buffer_len = ((pdpdu_cqe-> 1535 dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32] 1536 & PDUCQE_DPL_MASK) >> 16); 1537 1538 *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, 1539 index) / 32] & PDUCQE_INDEX_MASK); 1540 return pasync_handle; 1541 } 1542 1543 static unsigned int 1544 hwi_update_async_writables(struct beiscsi_hba *phba, 1545 struct hwi_async_pdu_context *pasync_ctx, 1546 unsigned int is_header, unsigned int cq_index) 1547 { 1548 struct list_head *pbusy_list; 1549 struct async_pdu_handle *pasync_handle; 1550 unsigned int num_entries, writables = 0; 1551 unsigned int *pep_read_ptr, *pwritables; 1552 1553 num_entries = pasync_ctx->num_entries; 1554 if (is_header) { 1555 pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr; 1556 pwritables = &pasync_ctx->async_header.writables; 1557 } else { 1558 pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr; 1559 pwritables = &pasync_ctx->async_data.writables; 1560 } 1561 1562 while ((*pep_read_ptr) != cq_index) { 1563 (*pep_read_ptr)++; 1564 *pep_read_ptr = (*pep_read_ptr) % num_entries; 1565 1566 pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header, 1567 *pep_read_ptr); 1568 if (writables == 0) 1569 WARN_ON(list_empty(pbusy_list)); 1570 1571 if (!list_empty(pbusy_list)) { 1572 pasync_handle = list_entry(pbusy_list->next, 1573 struct async_pdu_handle, 1574 link); 1575 WARN_ON(!pasync_handle); 1576 pasync_handle->consumed = 1; 1577 } 1578 1579 writables++; 1580 } 1581 1582 if (!writables) { 1583 beiscsi_log(phba, KERN_ERR, 1584 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1585 "BM_%d : Duplicate notification received - index 0x%x!!\n", 1586 cq_index); 1587 WARN_ON(1); 1588 } 1589 1590 *pwritables = *pwritables + writables; 1591 return 0; 1592 } 1593 1594 static void hwi_free_async_msg(struct beiscsi_hba *phba, 1595 unsigned int cri) 1596 { 1597 struct hwi_controller *phwi_ctrlr; 1598 struct hwi_async_pdu_context *pasync_ctx; 1599 struct async_pdu_handle *pasync_handle, *tmp_handle; 1600 struct list_head *plist; 1601 1602 phwi_ctrlr = phba->phwi_ctrlr; 1603 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); 1604 1605 plist = &pasync_ctx->async_entry[cri].wait_queue.list; 1606 1607 list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) { 1608 list_del(&pasync_handle->link); 1609 1610 if (pasync_handle->is_header) { 1611 list_add_tail(&pasync_handle->link, 1612 &pasync_ctx->async_header.free_list); 1613 pasync_ctx->async_header.free_entries++; 1614 } else { 1615 list_add_tail(&pasync_handle->link, 1616 &pasync_ctx->async_data.free_list); 1617 pasync_ctx->async_data.free_entries++; 1618 } 1619 } 1620 1621 INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list); 1622 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0; 1623 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0; 1624 } 1625 1626 static struct phys_addr * 1627 hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx, 1628 unsigned int is_header, unsigned int host_write_ptr) 1629 { 1630 struct phys_addr *pasync_sge = NULL; 1631 1632 if (is_header) 1633 pasync_sge = pasync_ctx->async_header.ring_base; 1634 else 1635 pasync_sge = pasync_ctx->async_data.ring_base; 1636 1637 return pasync_sge + host_write_ptr; 1638 } 1639 1640 static void hwi_post_async_buffers(struct beiscsi_hba *phba, 1641 unsigned int is_header) 1642 { 1643 struct hwi_controller *phwi_ctrlr; 1644 struct hwi_async_pdu_context *pasync_ctx; 1645 struct async_pdu_handle *pasync_handle; 1646 struct list_head *pfree_link, *pbusy_list; 1647 struct phys_addr *pasync_sge; 1648 unsigned int ring_id, num_entries; 1649 unsigned int host_write_num; 1650 unsigned int writables; 1651 unsigned int i = 0; 1652 u32 doorbell = 0; 1653 1654 phwi_ctrlr = phba->phwi_ctrlr; 1655 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); 1656 num_entries = pasync_ctx->num_entries; 1657 1658 if (is_header) { 1659 writables = min(pasync_ctx->async_header.writables, 1660 pasync_ctx->async_header.free_entries); 1661 pfree_link = pasync_ctx->async_header.free_list.next; 1662 host_write_num = pasync_ctx->async_header.host_write_ptr; 1663 ring_id = phwi_ctrlr->default_pdu_hdr.id; 1664 } else { 1665 writables = min(pasync_ctx->async_data.writables, 1666 pasync_ctx->async_data.free_entries); 1667 pfree_link = pasync_ctx->async_data.free_list.next; 1668 host_write_num = pasync_ctx->async_data.host_write_ptr; 1669 ring_id = phwi_ctrlr->default_pdu_data.id; 1670 } 1671 1672 writables = (writables / 8) * 8; 1673 if (writables) { 1674 for (i = 0; i < writables; i++) { 1675 pbusy_list = 1676 hwi_get_async_busy_list(pasync_ctx, is_header, 1677 host_write_num); 1678 pasync_handle = 1679 list_entry(pfree_link, struct async_pdu_handle, 1680 link); 1681 WARN_ON(!pasync_handle); 1682 pasync_handle->consumed = 0; 1683 1684 pfree_link = pfree_link->next; 1685 1686 pasync_sge = hwi_get_ring_address(pasync_ctx, 1687 is_header, host_write_num); 1688 1689 pasync_sge->hi = pasync_handle->pa.u.a32.address_lo; 1690 pasync_sge->lo = pasync_handle->pa.u.a32.address_hi; 1691 1692 list_move(&pasync_handle->link, pbusy_list); 1693 1694 host_write_num++; 1695 host_write_num = host_write_num % num_entries; 1696 } 1697 1698 if (is_header) { 1699 pasync_ctx->async_header.host_write_ptr = 1700 host_write_num; 1701 pasync_ctx->async_header.free_entries -= writables; 1702 pasync_ctx->async_header.writables -= writables; 1703 pasync_ctx->async_header.busy_entries += writables; 1704 } else { 1705 pasync_ctx->async_data.host_write_ptr = host_write_num; 1706 pasync_ctx->async_data.free_entries -= writables; 1707 pasync_ctx->async_data.writables -= writables; 1708 pasync_ctx->async_data.busy_entries += writables; 1709 } 1710 1711 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK; 1712 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT; 1713 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT; 1714 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK) 1715 << DB_DEF_PDU_CQPROC_SHIFT; 1716 1717 iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET); 1718 } 1719 } 1720 1721 static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba, 1722 struct beiscsi_conn *beiscsi_conn, 1723 struct i_t_dpdu_cqe *pdpdu_cqe) 1724 { 1725 struct hwi_controller *phwi_ctrlr; 1726 struct hwi_async_pdu_context *pasync_ctx; 1727 struct async_pdu_handle *pasync_handle = NULL; 1728 unsigned int cq_index = -1; 1729 1730 phwi_ctrlr = phba->phwi_ctrlr; 1731 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); 1732 1733 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx, 1734 pdpdu_cqe, &cq_index); 1735 BUG_ON(pasync_handle->is_header != 0); 1736 if (pasync_handle->consumed == 0) 1737 hwi_update_async_writables(phba, pasync_ctx, 1738 pasync_handle->is_header, cq_index); 1739 1740 hwi_free_async_msg(phba, pasync_handle->cri); 1741 hwi_post_async_buffers(phba, pasync_handle->is_header); 1742 } 1743 1744 static unsigned int 1745 hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn, 1746 struct beiscsi_hba *phba, 1747 struct hwi_async_pdu_context *pasync_ctx, unsigned short cri) 1748 { 1749 struct list_head *plist; 1750 struct async_pdu_handle *pasync_handle; 1751 void *phdr = NULL; 1752 unsigned int hdr_len = 0, buf_len = 0; 1753 unsigned int status, index = 0, offset = 0; 1754 void *pfirst_buffer = NULL; 1755 unsigned int num_buf = 0; 1756 1757 plist = &pasync_ctx->async_entry[cri].wait_queue.list; 1758 1759 list_for_each_entry(pasync_handle, plist, link) { 1760 if (index == 0) { 1761 phdr = pasync_handle->pbuffer; 1762 hdr_len = pasync_handle->buffer_len; 1763 } else { 1764 buf_len = pasync_handle->buffer_len; 1765 if (!num_buf) { 1766 pfirst_buffer = pasync_handle->pbuffer; 1767 num_buf++; 1768 } 1769 memcpy(pfirst_buffer + offset, 1770 pasync_handle->pbuffer, buf_len); 1771 offset += buf_len; 1772 } 1773 index++; 1774 } 1775 1776 status = beiscsi_process_async_pdu(beiscsi_conn, phba, 1777 (beiscsi_conn->beiscsi_conn_cid - 1778 phba->fw_config.iscsi_cid_start), 1779 phdr, hdr_len, pfirst_buffer, 1780 offset); 1781 1782 hwi_free_async_msg(phba, cri); 1783 return 0; 1784 } 1785 1786 static unsigned int 1787 hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn, 1788 struct beiscsi_hba *phba, 1789 struct async_pdu_handle *pasync_handle) 1790 { 1791 struct hwi_async_pdu_context *pasync_ctx; 1792 struct hwi_controller *phwi_ctrlr; 1793 unsigned int bytes_needed = 0, status = 0; 1794 unsigned short cri = pasync_handle->cri; 1795 struct pdu_base *ppdu; 1796 1797 phwi_ctrlr = phba->phwi_ctrlr; 1798 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); 1799 1800 list_del(&pasync_handle->link); 1801 if (pasync_handle->is_header) { 1802 pasync_ctx->async_header.busy_entries--; 1803 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) { 1804 hwi_free_async_msg(phba, cri); 1805 BUG(); 1806 } 1807 1808 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0; 1809 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1; 1810 pasync_ctx->async_entry[cri].wait_queue.hdr_len = 1811 (unsigned short)pasync_handle->buffer_len; 1812 list_add_tail(&pasync_handle->link, 1813 &pasync_ctx->async_entry[cri].wait_queue.list); 1814 1815 ppdu = pasync_handle->pbuffer; 1816 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base, 1817 data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) & 1818 0xFFFF0000) | ((be16_to_cpu((ppdu-> 1819 dw[offsetof(struct amap_pdu_base, data_len_lo) / 32] 1820 & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF)); 1821 1822 if (status == 0) { 1823 pasync_ctx->async_entry[cri].wait_queue.bytes_needed = 1824 bytes_needed; 1825 1826 if (bytes_needed == 0) 1827 status = hwi_fwd_async_msg(beiscsi_conn, phba, 1828 pasync_ctx, cri); 1829 } 1830 } else { 1831 pasync_ctx->async_data.busy_entries--; 1832 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) { 1833 list_add_tail(&pasync_handle->link, 1834 &pasync_ctx->async_entry[cri].wait_queue. 1835 list); 1836 pasync_ctx->async_entry[cri].wait_queue. 1837 bytes_received += 1838 (unsigned short)pasync_handle->buffer_len; 1839 1840 if (pasync_ctx->async_entry[cri].wait_queue. 1841 bytes_received >= 1842 pasync_ctx->async_entry[cri].wait_queue. 1843 bytes_needed) 1844 status = hwi_fwd_async_msg(beiscsi_conn, phba, 1845 pasync_ctx, cri); 1846 } 1847 } 1848 return status; 1849 } 1850 1851 static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn, 1852 struct beiscsi_hba *phba, 1853 struct i_t_dpdu_cqe *pdpdu_cqe) 1854 { 1855 struct hwi_controller *phwi_ctrlr; 1856 struct hwi_async_pdu_context *pasync_ctx; 1857 struct async_pdu_handle *pasync_handle = NULL; 1858 unsigned int cq_index = -1; 1859 1860 phwi_ctrlr = phba->phwi_ctrlr; 1861 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); 1862 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx, 1863 pdpdu_cqe, &cq_index); 1864 1865 if (pasync_handle->consumed == 0) 1866 hwi_update_async_writables(phba, pasync_ctx, 1867 pasync_handle->is_header, cq_index); 1868 1869 hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle); 1870 hwi_post_async_buffers(phba, pasync_handle->is_header); 1871 } 1872 1873 static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba) 1874 { 1875 struct be_queue_info *mcc_cq; 1876 struct be_mcc_compl *mcc_compl; 1877 unsigned int num_processed = 0; 1878 1879 mcc_cq = &phba->ctrl.mcc_obj.cq; 1880 mcc_compl = queue_tail_node(mcc_cq); 1881 mcc_compl->flags = le32_to_cpu(mcc_compl->flags); 1882 while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) { 1883 1884 if (num_processed >= 32) { 1885 hwi_ring_cq_db(phba, mcc_cq->id, 1886 num_processed, 0, 0); 1887 num_processed = 0; 1888 } 1889 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) { 1890 /* Interpret flags as an async trailer */ 1891 if (is_link_state_evt(mcc_compl->flags)) 1892 /* Interpret compl as a async link evt */ 1893 beiscsi_async_link_state_process(phba, 1894 (struct be_async_event_link_state *) mcc_compl); 1895 else 1896 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX, 1897 "BM_%d : Unsupported Async Event, flags" 1898 " = 0x%08x\n", 1899 mcc_compl->flags); 1900 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) { 1901 be_mcc_compl_process_isr(&phba->ctrl, mcc_compl); 1902 atomic_dec(&phba->ctrl.mcc_obj.q.used); 1903 } 1904 1905 mcc_compl->flags = 0; 1906 queue_tail_inc(mcc_cq); 1907 mcc_compl = queue_tail_node(mcc_cq); 1908 mcc_compl->flags = le32_to_cpu(mcc_compl->flags); 1909 num_processed++; 1910 } 1911 1912 if (num_processed > 0) 1913 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0); 1914 1915 } 1916 1917 static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) 1918 { 1919 struct be_queue_info *cq; 1920 struct sol_cqe *sol; 1921 struct dmsg_cqe *dmsg; 1922 unsigned int num_processed = 0; 1923 unsigned int tot_nump = 0; 1924 unsigned short code = 0, cid = 0; 1925 struct beiscsi_conn *beiscsi_conn; 1926 struct beiscsi_endpoint *beiscsi_ep; 1927 struct iscsi_endpoint *ep; 1928 struct beiscsi_hba *phba; 1929 1930 cq = pbe_eq->cq; 1931 sol = queue_tail_node(cq); 1932 phba = pbe_eq->phba; 1933 1934 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] & 1935 CQE_VALID_MASK) { 1936 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe)); 1937 1938 cid = ((sol->dw[offsetof(struct amap_sol_cqe, cid)/32] & 1939 CQE_CID_MASK) >> 6); 1940 code = (sol->dw[offsetof(struct amap_sol_cqe, code)/32] & 1941 CQE_CODE_MASK); 1942 ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start]; 1943 1944 beiscsi_ep = ep->dd_data; 1945 beiscsi_conn = beiscsi_ep->conn; 1946 1947 if (num_processed >= 32) { 1948 hwi_ring_cq_db(phba, cq->id, 1949 num_processed, 0, 0); 1950 tot_nump += num_processed; 1951 num_processed = 0; 1952 } 1953 1954 switch (code) { 1955 case SOL_CMD_COMPLETE: 1956 hwi_complete_cmd(beiscsi_conn, phba, sol); 1957 break; 1958 case DRIVERMSG_NOTIFY: 1959 beiscsi_log(phba, KERN_INFO, 1960 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1961 "BM_%d : Received DRIVERMSG_NOTIFY\n"); 1962 1963 dmsg = (struct dmsg_cqe *)sol; 1964 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol); 1965 break; 1966 case UNSOL_HDR_NOTIFY: 1967 beiscsi_log(phba, KERN_INFO, 1968 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1969 "BM_%d : Received UNSOL_HDR_ NOTIFY\n"); 1970 1971 hwi_process_default_pdu_ring(beiscsi_conn, phba, 1972 (struct i_t_dpdu_cqe *)sol); 1973 break; 1974 case UNSOL_DATA_NOTIFY: 1975 beiscsi_log(phba, KERN_INFO, 1976 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1977 "BM_%d : Received UNSOL_DATA_NOTIFY\n"); 1978 1979 hwi_process_default_pdu_ring(beiscsi_conn, phba, 1980 (struct i_t_dpdu_cqe *)sol); 1981 break; 1982 case CXN_INVALIDATE_INDEX_NOTIFY: 1983 case CMD_INVALIDATED_NOTIFY: 1984 case CXN_INVALIDATE_NOTIFY: 1985 beiscsi_log(phba, KERN_ERR, 1986 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1987 "BM_%d : Ignoring CQ Error notification for" 1988 " cmd/cxn invalidate\n"); 1989 break; 1990 case SOL_CMD_KILLED_DATA_DIGEST_ERR: 1991 case CMD_KILLED_INVALID_STATSN_RCVD: 1992 case CMD_KILLED_INVALID_R2T_RCVD: 1993 case CMD_CXN_KILLED_LUN_INVALID: 1994 case CMD_CXN_KILLED_ICD_INVALID: 1995 case CMD_CXN_KILLED_ITT_INVALID: 1996 case CMD_CXN_KILLED_SEQ_OUTOFORDER: 1997 case CMD_CXN_KILLED_INVALID_DATASN_RCVD: 1998 beiscsi_log(phba, KERN_ERR, 1999 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 2000 "BM_%d : CQ Error notification for cmd.. " 2001 "code %d cid 0x%x\n", code, cid); 2002 break; 2003 case UNSOL_DATA_DIGEST_ERROR_NOTIFY: 2004 beiscsi_log(phba, KERN_ERR, 2005 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2006 "BM_%d : Digest error on def pdu ring," 2007 " dropping..\n"); 2008 hwi_flush_default_pdu_buffer(phba, beiscsi_conn, 2009 (struct i_t_dpdu_cqe *) sol); 2010 break; 2011 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL: 2012 case CXN_KILLED_BURST_LEN_MISMATCH: 2013 case CXN_KILLED_AHS_RCVD: 2014 case CXN_KILLED_HDR_DIGEST_ERR: 2015 case CXN_KILLED_UNKNOWN_HDR: 2016 case CXN_KILLED_STALE_ITT_TTT_RCVD: 2017 case CXN_KILLED_INVALID_ITT_TTT_RCVD: 2018 case CXN_KILLED_TIMED_OUT: 2019 case CXN_KILLED_FIN_RCVD: 2020 case CXN_KILLED_BAD_UNSOL_PDU_RCVD: 2021 case CXN_KILLED_BAD_WRB_INDEX_ERROR: 2022 case CXN_KILLED_OVER_RUN_RESIDUAL: 2023 case CXN_KILLED_UNDER_RUN_RESIDUAL: 2024 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN: 2025 beiscsi_log(phba, KERN_ERR, 2026 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2027 "BM_%d : CQ Error %d, reset CID 0x%x...\n", 2028 code, cid); 2029 if (beiscsi_conn) 2030 iscsi_conn_failure(beiscsi_conn->conn, 2031 ISCSI_ERR_CONN_FAILED); 2032 break; 2033 case CXN_KILLED_RST_SENT: 2034 case CXN_KILLED_RST_RCVD: 2035 beiscsi_log(phba, KERN_ERR, 2036 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2037 "BM_%d : CQ Error %d, reset" 2038 "received/sent on CID 0x%x...\n", 2039 code, cid); 2040 if (beiscsi_conn) 2041 iscsi_conn_failure(beiscsi_conn->conn, 2042 ISCSI_ERR_CONN_FAILED); 2043 break; 2044 default: 2045 beiscsi_log(phba, KERN_ERR, 2046 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2047 "BM_%d : CQ Error Invalid code= %d " 2048 "received on CID 0x%x...\n", 2049 code, cid); 2050 break; 2051 } 2052 2053 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0); 2054 queue_tail_inc(cq); 2055 sol = queue_tail_node(cq); 2056 num_processed++; 2057 } 2058 2059 if (num_processed > 0) { 2060 tot_nump += num_processed; 2061 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0); 2062 } 2063 return tot_nump; 2064 } 2065 2066 void beiscsi_process_all_cqs(struct work_struct *work) 2067 { 2068 unsigned long flags; 2069 struct hwi_controller *phwi_ctrlr; 2070 struct hwi_context_memory *phwi_context; 2071 struct be_eq_obj *pbe_eq; 2072 struct beiscsi_hba *phba = 2073 container_of(work, struct beiscsi_hba, work_cqs); 2074 2075 phwi_ctrlr = phba->phwi_ctrlr; 2076 phwi_context = phwi_ctrlr->phwi_ctxt; 2077 if (phba->msix_enabled) 2078 pbe_eq = &phwi_context->be_eq[phba->num_cpus]; 2079 else 2080 pbe_eq = &phwi_context->be_eq[0]; 2081 2082 if (phba->todo_mcc_cq) { 2083 spin_lock_irqsave(&phba->isr_lock, flags); 2084 phba->todo_mcc_cq = 0; 2085 spin_unlock_irqrestore(&phba->isr_lock, flags); 2086 beiscsi_process_mcc_isr(phba); 2087 } 2088 2089 if (phba->todo_cq) { 2090 spin_lock_irqsave(&phba->isr_lock, flags); 2091 phba->todo_cq = 0; 2092 spin_unlock_irqrestore(&phba->isr_lock, flags); 2093 beiscsi_process_cq(pbe_eq); 2094 } 2095 } 2096 2097 static int be_iopoll(struct blk_iopoll *iop, int budget) 2098 { 2099 static unsigned int ret; 2100 struct beiscsi_hba *phba; 2101 struct be_eq_obj *pbe_eq; 2102 2103 pbe_eq = container_of(iop, struct be_eq_obj, iopoll); 2104 ret = beiscsi_process_cq(pbe_eq); 2105 if (ret < budget) { 2106 phba = pbe_eq->phba; 2107 blk_iopoll_complete(iop); 2108 beiscsi_log(phba, KERN_INFO, 2109 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 2110 "BM_%d : rearm pbe_eq->q.id =%d\n", 2111 pbe_eq->q.id); 2112 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); 2113 } 2114 return ret; 2115 } 2116 2117 static void 2118 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg, 2119 unsigned int num_sg, struct beiscsi_io_task *io_task) 2120 { 2121 struct iscsi_sge *psgl; 2122 unsigned int sg_len, index; 2123 unsigned int sge_len = 0; 2124 unsigned long long addr; 2125 struct scatterlist *l_sg; 2126 unsigned int offset; 2127 2128 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, 2129 io_task->bhs_pa.u.a32.address_lo); 2130 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, 2131 io_task->bhs_pa.u.a32.address_hi); 2132 2133 l_sg = sg; 2134 for (index = 0; (index < num_sg) && (index < 2); index++, 2135 sg = sg_next(sg)) { 2136 if (index == 0) { 2137 sg_len = sg_dma_len(sg); 2138 addr = (u64) sg_dma_address(sg); 2139 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 2140 ((u32)(addr & 0xFFFFFFFF))); 2141 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 2142 ((u32)(addr >> 32))); 2143 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 2144 sg_len); 2145 sge_len = sg_len; 2146 } else { 2147 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset, 2148 pwrb, sge_len); 2149 sg_len = sg_dma_len(sg); 2150 addr = (u64) sg_dma_address(sg); 2151 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb, 2152 ((u32)(addr & 0xFFFFFFFF))); 2153 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb, 2154 ((u32)(addr >> 32))); 2155 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb, 2156 sg_len); 2157 } 2158 } 2159 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2160 memset(psgl, 0, sizeof(*psgl) * BE2_SGE); 2161 2162 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); 2163 2164 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2165 io_task->bhs_pa.u.a32.address_hi); 2166 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2167 io_task->bhs_pa.u.a32.address_lo); 2168 2169 if (num_sg == 1) { 2170 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2171 1); 2172 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2173 0); 2174 } else if (num_sg == 2) { 2175 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2176 0); 2177 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2178 1); 2179 } else { 2180 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2181 0); 2182 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2183 0); 2184 } 2185 sg = l_sg; 2186 psgl++; 2187 psgl++; 2188 offset = 0; 2189 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { 2190 sg_len = sg_dma_len(sg); 2191 addr = (u64) sg_dma_address(sg); 2192 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2193 (addr & 0xFFFFFFFF)); 2194 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2195 (addr >> 32)); 2196 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); 2197 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); 2198 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2199 offset += sg_len; 2200 } 2201 psgl--; 2202 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2203 } 2204 2205 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) 2206 { 2207 struct iscsi_sge *psgl; 2208 unsigned long long addr; 2209 struct beiscsi_io_task *io_task = task->dd_data; 2210 struct beiscsi_conn *beiscsi_conn = io_task->conn; 2211 struct beiscsi_hba *phba = beiscsi_conn->phba; 2212 2213 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2; 2214 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, 2215 io_task->bhs_pa.u.a32.address_lo); 2216 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, 2217 io_task->bhs_pa.u.a32.address_hi); 2218 2219 if (task->data) { 2220 if (task->data_count) { 2221 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); 2222 addr = (u64) pci_map_single(phba->pcidev, 2223 task->data, 2224 task->data_count, 1); 2225 } else { 2226 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 2227 addr = 0; 2228 } 2229 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 2230 ((u32)(addr & 0xFFFFFFFF))); 2231 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 2232 ((u32)(addr >> 32))); 2233 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 2234 task->data_count); 2235 2236 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1); 2237 } else { 2238 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 2239 addr = 0; 2240 } 2241 2242 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2243 2244 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len); 2245 2246 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2247 io_task->bhs_pa.u.a32.address_hi); 2248 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2249 io_task->bhs_pa.u.a32.address_lo); 2250 if (task->data) { 2251 psgl++; 2252 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0); 2253 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0); 2254 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0); 2255 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0); 2256 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0); 2257 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2258 2259 psgl++; 2260 if (task->data) { 2261 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2262 ((u32)(addr & 0xFFFFFFFF))); 2263 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2264 ((u32)(addr >> 32))); 2265 } 2266 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106); 2267 } 2268 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2269 } 2270 2271 static void beiscsi_find_mem_req(struct beiscsi_hba *phba) 2272 { 2273 unsigned int num_cq_pages, num_async_pdu_buf_pages; 2274 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; 2275 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; 2276 2277 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ 2278 sizeof(struct sol_cqe)); 2279 num_async_pdu_buf_pages = 2280 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ 2281 phba->params.defpdu_hdr_sz); 2282 num_async_pdu_buf_sgl_pages = 2283 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ 2284 sizeof(struct phys_addr)); 2285 num_async_pdu_data_pages = 2286 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ 2287 phba->params.defpdu_data_sz); 2288 num_async_pdu_data_sgl_pages = 2289 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ 2290 sizeof(struct phys_addr)); 2291 2292 phba->params.hwi_ws_sz = sizeof(struct hwi_controller); 2293 2294 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 * 2295 BE_ISCSI_PDU_HEADER_SIZE; 2296 phba->mem_req[HWI_MEM_ADDN_CONTEXT] = 2297 sizeof(struct hwi_context_memory); 2298 2299 2300 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb) 2301 * (phba->params.wrbs_per_cxn) 2302 * phba->params.cxns_per_ctrl; 2303 wrb_sz_per_cxn = sizeof(struct wrb_handle) * 2304 (phba->params.wrbs_per_cxn); 2305 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) * 2306 phba->params.cxns_per_ctrl); 2307 2308 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) * 2309 phba->params.icds_per_ctrl; 2310 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) * 2311 phba->params.num_sge_per_io * phba->params.icds_per_ctrl; 2312 2313 phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] = 2314 num_async_pdu_buf_pages * PAGE_SIZE; 2315 phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] = 2316 num_async_pdu_data_pages * PAGE_SIZE; 2317 phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] = 2318 num_async_pdu_buf_sgl_pages * PAGE_SIZE; 2319 phba->mem_req[HWI_MEM_ASYNC_DATA_RING] = 2320 num_async_pdu_data_sgl_pages * PAGE_SIZE; 2321 phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] = 2322 phba->params.asyncpdus_per_ctrl * 2323 sizeof(struct async_pdu_handle); 2324 phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] = 2325 phba->params.asyncpdus_per_ctrl * 2326 sizeof(struct async_pdu_handle); 2327 phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] = 2328 sizeof(struct hwi_async_pdu_context) + 2329 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry)); 2330 } 2331 2332 static int beiscsi_alloc_mem(struct beiscsi_hba *phba) 2333 { 2334 struct be_mem_descriptor *mem_descr; 2335 dma_addr_t bus_add; 2336 struct mem_array *mem_arr, *mem_arr_orig; 2337 unsigned int i, j, alloc_size, curr_alloc_size; 2338 2339 phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL); 2340 if (!phba->phwi_ctrlr) 2341 return -ENOMEM; 2342 2343 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr), 2344 GFP_KERNEL); 2345 if (!phba->init_mem) { 2346 kfree(phba->phwi_ctrlr); 2347 return -ENOMEM; 2348 } 2349 2350 mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT, 2351 GFP_KERNEL); 2352 if (!mem_arr_orig) { 2353 kfree(phba->init_mem); 2354 kfree(phba->phwi_ctrlr); 2355 return -ENOMEM; 2356 } 2357 2358 mem_descr = phba->init_mem; 2359 for (i = 0; i < SE_MEM_MAX; i++) { 2360 j = 0; 2361 mem_arr = mem_arr_orig; 2362 alloc_size = phba->mem_req[i]; 2363 memset(mem_arr, 0, sizeof(struct mem_array) * 2364 BEISCSI_MAX_FRAGS_INIT); 2365 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size); 2366 do { 2367 mem_arr->virtual_address = pci_alloc_consistent( 2368 phba->pcidev, 2369 curr_alloc_size, 2370 &bus_add); 2371 if (!mem_arr->virtual_address) { 2372 if (curr_alloc_size <= BE_MIN_MEM_SIZE) 2373 goto free_mem; 2374 if (curr_alloc_size - 2375 rounddown_pow_of_two(curr_alloc_size)) 2376 curr_alloc_size = rounddown_pow_of_two 2377 (curr_alloc_size); 2378 else 2379 curr_alloc_size = curr_alloc_size / 2; 2380 } else { 2381 mem_arr->bus_address.u. 2382 a64.address = (__u64) bus_add; 2383 mem_arr->size = curr_alloc_size; 2384 alloc_size -= curr_alloc_size; 2385 curr_alloc_size = min(be_max_phys_size * 2386 1024, alloc_size); 2387 j++; 2388 mem_arr++; 2389 } 2390 } while (alloc_size); 2391 mem_descr->num_elements = j; 2392 mem_descr->size_in_bytes = phba->mem_req[i]; 2393 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j, 2394 GFP_KERNEL); 2395 if (!mem_descr->mem_array) 2396 goto free_mem; 2397 2398 memcpy(mem_descr->mem_array, mem_arr_orig, 2399 sizeof(struct mem_array) * j); 2400 mem_descr++; 2401 } 2402 kfree(mem_arr_orig); 2403 return 0; 2404 free_mem: 2405 mem_descr->num_elements = j; 2406 while ((i) || (j)) { 2407 for (j = mem_descr->num_elements; j > 0; j--) { 2408 pci_free_consistent(phba->pcidev, 2409 mem_descr->mem_array[j - 1].size, 2410 mem_descr->mem_array[j - 1]. 2411 virtual_address, 2412 (unsigned long)mem_descr-> 2413 mem_array[j - 1]. 2414 bus_address.u.a64.address); 2415 } 2416 if (i) { 2417 i--; 2418 kfree(mem_descr->mem_array); 2419 mem_descr--; 2420 } 2421 } 2422 kfree(mem_arr_orig); 2423 kfree(phba->init_mem); 2424 kfree(phba->phwi_ctrlr); 2425 return -ENOMEM; 2426 } 2427 2428 static int beiscsi_get_memory(struct beiscsi_hba *phba) 2429 { 2430 beiscsi_find_mem_req(phba); 2431 return beiscsi_alloc_mem(phba); 2432 } 2433 2434 static void iscsi_init_global_templates(struct beiscsi_hba *phba) 2435 { 2436 struct pdu_data_out *pdata_out; 2437 struct pdu_nop_out *pnop_out; 2438 struct be_mem_descriptor *mem_descr; 2439 2440 mem_descr = phba->init_mem; 2441 mem_descr += ISCSI_MEM_GLOBAL_HEADER; 2442 pdata_out = 2443 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address; 2444 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2445 2446 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out, 2447 IIOC_SCSI_DATA); 2448 2449 pnop_out = 2450 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0]. 2451 virtual_address + BE_ISCSI_PDU_HEADER_SIZE); 2452 2453 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2454 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF); 2455 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1); 2456 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0); 2457 } 2458 2459 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) 2460 { 2461 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb; 2462 struct wrb_handle *pwrb_handle = NULL; 2463 struct hwi_controller *phwi_ctrlr; 2464 struct hwi_wrb_context *pwrb_context; 2465 struct iscsi_wrb *pwrb = NULL; 2466 unsigned int num_cxn_wrbh = 0; 2467 unsigned int num_cxn_wrb = 0, j, idx = 0, index; 2468 2469 mem_descr_wrbh = phba->init_mem; 2470 mem_descr_wrbh += HWI_MEM_WRBH; 2471 2472 mem_descr_wrb = phba->init_mem; 2473 mem_descr_wrb += HWI_MEM_WRB; 2474 phwi_ctrlr = phba->phwi_ctrlr; 2475 2476 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { 2477 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2478 pwrb_context->pwrb_handle_base = 2479 kzalloc(sizeof(struct wrb_handle *) * 2480 phba->params.wrbs_per_cxn, GFP_KERNEL); 2481 if (!pwrb_context->pwrb_handle_base) { 2482 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2483 "BM_%d : Mem Alloc Failed. Failing to load\n"); 2484 goto init_wrb_hndl_failed; 2485 } 2486 pwrb_context->pwrb_handle_basestd = 2487 kzalloc(sizeof(struct wrb_handle *) * 2488 phba->params.wrbs_per_cxn, GFP_KERNEL); 2489 if (!pwrb_context->pwrb_handle_basestd) { 2490 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2491 "BM_%d : Mem Alloc Failed. Failing to load\n"); 2492 goto init_wrb_hndl_failed; 2493 } 2494 if (!num_cxn_wrbh) { 2495 pwrb_handle = 2496 mem_descr_wrbh->mem_array[idx].virtual_address; 2497 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) / 2498 ((sizeof(struct wrb_handle)) * 2499 phba->params.wrbs_per_cxn)); 2500 idx++; 2501 } 2502 pwrb_context->alloc_index = 0; 2503 pwrb_context->wrb_handles_available = 0; 2504 pwrb_context->free_index = 0; 2505 2506 if (num_cxn_wrbh) { 2507 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2508 pwrb_context->pwrb_handle_base[j] = pwrb_handle; 2509 pwrb_context->pwrb_handle_basestd[j] = 2510 pwrb_handle; 2511 pwrb_context->wrb_handles_available++; 2512 pwrb_handle->wrb_index = j; 2513 pwrb_handle++; 2514 } 2515 num_cxn_wrbh--; 2516 } 2517 } 2518 idx = 0; 2519 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { 2520 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2521 if (!num_cxn_wrb) { 2522 pwrb = mem_descr_wrb->mem_array[idx].virtual_address; 2523 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) / 2524 ((sizeof(struct iscsi_wrb) * 2525 phba->params.wrbs_per_cxn)); 2526 idx++; 2527 } 2528 2529 if (num_cxn_wrb) { 2530 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2531 pwrb_handle = pwrb_context->pwrb_handle_base[j]; 2532 pwrb_handle->pwrb = pwrb; 2533 pwrb++; 2534 } 2535 num_cxn_wrb--; 2536 } 2537 } 2538 return 0; 2539 init_wrb_hndl_failed: 2540 for (j = index; j > 0; j--) { 2541 pwrb_context = &phwi_ctrlr->wrb_context[j]; 2542 kfree(pwrb_context->pwrb_handle_base); 2543 kfree(pwrb_context->pwrb_handle_basestd); 2544 } 2545 return -ENOMEM; 2546 } 2547 2548 static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) 2549 { 2550 struct hwi_controller *phwi_ctrlr; 2551 struct hba_parameters *p = &phba->params; 2552 struct hwi_async_pdu_context *pasync_ctx; 2553 struct async_pdu_handle *pasync_header_h, *pasync_data_h; 2554 unsigned int index, idx, num_per_mem, num_async_data; 2555 struct be_mem_descriptor *mem_descr; 2556 2557 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2558 mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT; 2559 2560 phwi_ctrlr = phba->phwi_ctrlr; 2561 phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *) 2562 mem_descr->mem_array[0].virtual_address; 2563 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx; 2564 memset(pasync_ctx, 0, sizeof(*pasync_ctx)); 2565 2566 pasync_ctx->num_entries = p->asyncpdus_per_ctrl; 2567 pasync_ctx->buffer_size = p->defpdu_hdr_sz; 2568 2569 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2570 mem_descr += HWI_MEM_ASYNC_HEADER_BUF; 2571 if (mem_descr->mem_array[0].virtual_address) { 2572 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2573 "BM_%d : hwi_init_async_pdu_ctx" 2574 " HWI_MEM_ASYNC_HEADER_BUF va=%p\n", 2575 mem_descr->mem_array[0].virtual_address); 2576 } else 2577 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 2578 "BM_%d : No Virtual address\n"); 2579 2580 pasync_ctx->async_header.va_base = 2581 mem_descr->mem_array[0].virtual_address; 2582 2583 pasync_ctx->async_header.pa_base.u.a64.address = 2584 mem_descr->mem_array[0].bus_address.u.a64.address; 2585 2586 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2587 mem_descr += HWI_MEM_ASYNC_HEADER_RING; 2588 if (mem_descr->mem_array[0].virtual_address) { 2589 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2590 "BM_%d : hwi_init_async_pdu_ctx" 2591 " HWI_MEM_ASYNC_HEADER_RING va=%p\n", 2592 mem_descr->mem_array[0].virtual_address); 2593 } else 2594 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 2595 "BM_%d : No Virtual address\n"); 2596 2597 pasync_ctx->async_header.ring_base = 2598 mem_descr->mem_array[0].virtual_address; 2599 2600 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2601 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE; 2602 if (mem_descr->mem_array[0].virtual_address) { 2603 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2604 "BM_%d : hwi_init_async_pdu_ctx" 2605 " HWI_MEM_ASYNC_HEADER_HANDLE va=%p\n", 2606 mem_descr->mem_array[0].virtual_address); 2607 } else 2608 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 2609 "BM_%d : No Virtual address\n"); 2610 2611 pasync_ctx->async_header.handle_base = 2612 mem_descr->mem_array[0].virtual_address; 2613 pasync_ctx->async_header.writables = 0; 2614 INIT_LIST_HEAD(&pasync_ctx->async_header.free_list); 2615 2616 2617 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2618 mem_descr += HWI_MEM_ASYNC_DATA_RING; 2619 if (mem_descr->mem_array[0].virtual_address) { 2620 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2621 "BM_%d : hwi_init_async_pdu_ctx" 2622 " HWI_MEM_ASYNC_DATA_RING va=%p\n", 2623 mem_descr->mem_array[0].virtual_address); 2624 } else 2625 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 2626 "BM_%d : No Virtual address\n"); 2627 2628 pasync_ctx->async_data.ring_base = 2629 mem_descr->mem_array[0].virtual_address; 2630 2631 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2632 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE; 2633 if (!mem_descr->mem_array[0].virtual_address) 2634 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 2635 "BM_%d : No Virtual address\n"); 2636 2637 pasync_ctx->async_data.handle_base = 2638 mem_descr->mem_array[0].virtual_address; 2639 pasync_ctx->async_data.writables = 0; 2640 INIT_LIST_HEAD(&pasync_ctx->async_data.free_list); 2641 2642 pasync_header_h = 2643 (struct async_pdu_handle *)pasync_ctx->async_header.handle_base; 2644 pasync_data_h = 2645 (struct async_pdu_handle *)pasync_ctx->async_data.handle_base; 2646 2647 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2648 mem_descr += HWI_MEM_ASYNC_DATA_BUF; 2649 if (mem_descr->mem_array[0].virtual_address) { 2650 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2651 "BM_%d : hwi_init_async_pdu_ctx" 2652 " HWI_MEM_ASYNC_DATA_BUF va=%p\n", 2653 mem_descr->mem_array[0].virtual_address); 2654 } else 2655 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 2656 "BM_%d : No Virtual address\n"); 2657 2658 idx = 0; 2659 pasync_ctx->async_data.va_base = 2660 mem_descr->mem_array[idx].virtual_address; 2661 pasync_ctx->async_data.pa_base.u.a64.address = 2662 mem_descr->mem_array[idx].bus_address.u.a64.address; 2663 2664 num_async_data = ((mem_descr->mem_array[idx].size) / 2665 phba->params.defpdu_data_sz); 2666 num_per_mem = 0; 2667 2668 for (index = 0; index < p->asyncpdus_per_ctrl; index++) { 2669 pasync_header_h->cri = -1; 2670 pasync_header_h->index = (char)index; 2671 INIT_LIST_HEAD(&pasync_header_h->link); 2672 pasync_header_h->pbuffer = 2673 (void *)((unsigned long) 2674 (pasync_ctx->async_header.va_base) + 2675 (p->defpdu_hdr_sz * index)); 2676 2677 pasync_header_h->pa.u.a64.address = 2678 pasync_ctx->async_header.pa_base.u.a64.address + 2679 (p->defpdu_hdr_sz * index); 2680 2681 list_add_tail(&pasync_header_h->link, 2682 &pasync_ctx->async_header.free_list); 2683 pasync_header_h++; 2684 pasync_ctx->async_header.free_entries++; 2685 pasync_ctx->async_header.writables++; 2686 2687 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list); 2688 INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 2689 header_busy_list); 2690 pasync_data_h->cri = -1; 2691 pasync_data_h->index = (char)index; 2692 INIT_LIST_HEAD(&pasync_data_h->link); 2693 2694 if (!num_async_data) { 2695 num_per_mem = 0; 2696 idx++; 2697 pasync_ctx->async_data.va_base = 2698 mem_descr->mem_array[idx].virtual_address; 2699 pasync_ctx->async_data.pa_base.u.a64.address = 2700 mem_descr->mem_array[idx]. 2701 bus_address.u.a64.address; 2702 2703 num_async_data = ((mem_descr->mem_array[idx].size) / 2704 phba->params.defpdu_data_sz); 2705 } 2706 pasync_data_h->pbuffer = 2707 (void *)((unsigned long) 2708 (pasync_ctx->async_data.va_base) + 2709 (p->defpdu_data_sz * num_per_mem)); 2710 2711 pasync_data_h->pa.u.a64.address = 2712 pasync_ctx->async_data.pa_base.u.a64.address + 2713 (p->defpdu_data_sz * num_per_mem); 2714 num_per_mem++; 2715 num_async_data--; 2716 2717 list_add_tail(&pasync_data_h->link, 2718 &pasync_ctx->async_data.free_list); 2719 pasync_data_h++; 2720 pasync_ctx->async_data.free_entries++; 2721 pasync_ctx->async_data.writables++; 2722 2723 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list); 2724 } 2725 2726 pasync_ctx->async_header.host_write_ptr = 0; 2727 pasync_ctx->async_header.ep_read_ptr = -1; 2728 pasync_ctx->async_data.host_write_ptr = 0; 2729 pasync_ctx->async_data.ep_read_ptr = -1; 2730 } 2731 2732 static int 2733 be_sgl_create_contiguous(void *virtual_address, 2734 u64 physical_address, u32 length, 2735 struct be_dma_mem *sgl) 2736 { 2737 WARN_ON(!virtual_address); 2738 WARN_ON(!physical_address); 2739 WARN_ON(!length > 0); 2740 WARN_ON(!sgl); 2741 2742 sgl->va = virtual_address; 2743 sgl->dma = (unsigned long)physical_address; 2744 sgl->size = length; 2745 2746 return 0; 2747 } 2748 2749 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl) 2750 { 2751 memset(sgl, 0, sizeof(*sgl)); 2752 } 2753 2754 static void 2755 hwi_build_be_sgl_arr(struct beiscsi_hba *phba, 2756 struct mem_array *pmem, struct be_dma_mem *sgl) 2757 { 2758 if (sgl->va) 2759 be_sgl_destroy_contiguous(sgl); 2760 2761 be_sgl_create_contiguous(pmem->virtual_address, 2762 pmem->bus_address.u.a64.address, 2763 pmem->size, sgl); 2764 } 2765 2766 static void 2767 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba, 2768 struct mem_array *pmem, struct be_dma_mem *sgl) 2769 { 2770 if (sgl->va) 2771 be_sgl_destroy_contiguous(sgl); 2772 2773 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address, 2774 pmem->bus_address.u.a64.address, 2775 pmem->size, sgl); 2776 } 2777 2778 static int be_fill_queue(struct be_queue_info *q, 2779 u16 len, u16 entry_size, void *vaddress) 2780 { 2781 struct be_dma_mem *mem = &q->dma_mem; 2782 2783 memset(q, 0, sizeof(*q)); 2784 q->len = len; 2785 q->entry_size = entry_size; 2786 mem->size = len * entry_size; 2787 mem->va = vaddress; 2788 if (!mem->va) 2789 return -ENOMEM; 2790 memset(mem->va, 0, mem->size); 2791 return 0; 2792 } 2793 2794 static int beiscsi_create_eqs(struct beiscsi_hba *phba, 2795 struct hwi_context_memory *phwi_context) 2796 { 2797 unsigned int i, num_eq_pages; 2798 int ret = 0, eq_for_mcc; 2799 struct be_queue_info *eq; 2800 struct be_dma_mem *mem; 2801 void *eq_vaddress; 2802 dma_addr_t paddr; 2803 2804 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \ 2805 sizeof(struct be_eq_entry)); 2806 2807 if (phba->msix_enabled) 2808 eq_for_mcc = 1; 2809 else 2810 eq_for_mcc = 0; 2811 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 2812 eq = &phwi_context->be_eq[i].q; 2813 mem = &eq->dma_mem; 2814 phwi_context->be_eq[i].phba = phba; 2815 eq_vaddress = pci_alloc_consistent(phba->pcidev, 2816 num_eq_pages * PAGE_SIZE, 2817 &paddr); 2818 if (!eq_vaddress) 2819 goto create_eq_error; 2820 2821 mem->va = eq_vaddress; 2822 ret = be_fill_queue(eq, phba->params.num_eq_entries, 2823 sizeof(struct be_eq_entry), eq_vaddress); 2824 if (ret) { 2825 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2826 "BM_%d : be_fill_queue Failed for EQ\n"); 2827 goto create_eq_error; 2828 } 2829 2830 mem->dma = paddr; 2831 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq, 2832 phwi_context->cur_eqd); 2833 if (ret) { 2834 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2835 "BM_%d : beiscsi_cmd_eq_create" 2836 "Failed for EQ\n"); 2837 goto create_eq_error; 2838 } 2839 2840 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2841 "BM_%d : eqid = %d\n", 2842 phwi_context->be_eq[i].q.id); 2843 } 2844 return 0; 2845 create_eq_error: 2846 for (i = 0; i < (phba->num_cpus + 1); i++) { 2847 eq = &phwi_context->be_eq[i].q; 2848 mem = &eq->dma_mem; 2849 if (mem->va) 2850 pci_free_consistent(phba->pcidev, num_eq_pages 2851 * PAGE_SIZE, 2852 mem->va, mem->dma); 2853 } 2854 return ret; 2855 } 2856 2857 static int beiscsi_create_cqs(struct beiscsi_hba *phba, 2858 struct hwi_context_memory *phwi_context) 2859 { 2860 unsigned int i, num_cq_pages; 2861 int ret = 0; 2862 struct be_queue_info *cq, *eq; 2863 struct be_dma_mem *mem; 2864 struct be_eq_obj *pbe_eq; 2865 void *cq_vaddress; 2866 dma_addr_t paddr; 2867 2868 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ 2869 sizeof(struct sol_cqe)); 2870 2871 for (i = 0; i < phba->num_cpus; i++) { 2872 cq = &phwi_context->be_cq[i]; 2873 eq = &phwi_context->be_eq[i].q; 2874 pbe_eq = &phwi_context->be_eq[i]; 2875 pbe_eq->cq = cq; 2876 pbe_eq->phba = phba; 2877 mem = &cq->dma_mem; 2878 cq_vaddress = pci_alloc_consistent(phba->pcidev, 2879 num_cq_pages * PAGE_SIZE, 2880 &paddr); 2881 if (!cq_vaddress) 2882 goto create_cq_error; 2883 ret = be_fill_queue(cq, phba->params.num_cq_entries, 2884 sizeof(struct sol_cqe), cq_vaddress); 2885 if (ret) { 2886 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2887 "BM_%d : be_fill_queue Failed " 2888 "for ISCSI CQ\n"); 2889 goto create_cq_error; 2890 } 2891 2892 mem->dma = paddr; 2893 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, 2894 false, 0); 2895 if (ret) { 2896 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2897 "BM_%d : beiscsi_cmd_eq_create" 2898 "Failed for ISCSI CQ\n"); 2899 goto create_cq_error; 2900 } 2901 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2902 "BM_%d : iscsi cq_id is %d for eq_id %d\n" 2903 "iSCSI CQ CREATED\n", cq->id, eq->id); 2904 } 2905 return 0; 2906 2907 create_cq_error: 2908 for (i = 0; i < phba->num_cpus; i++) { 2909 cq = &phwi_context->be_cq[i]; 2910 mem = &cq->dma_mem; 2911 if (mem->va) 2912 pci_free_consistent(phba->pcidev, num_cq_pages 2913 * PAGE_SIZE, 2914 mem->va, mem->dma); 2915 } 2916 return ret; 2917 2918 } 2919 2920 static int 2921 beiscsi_create_def_hdr(struct beiscsi_hba *phba, 2922 struct hwi_context_memory *phwi_context, 2923 struct hwi_controller *phwi_ctrlr, 2924 unsigned int def_pdu_ring_sz) 2925 { 2926 unsigned int idx; 2927 int ret; 2928 struct be_queue_info *dq, *cq; 2929 struct be_dma_mem *mem; 2930 struct be_mem_descriptor *mem_descr; 2931 void *dq_vaddress; 2932 2933 idx = 0; 2934 dq = &phwi_context->be_def_hdrq; 2935 cq = &phwi_context->be_cq[0]; 2936 mem = &dq->dma_mem; 2937 mem_descr = phba->init_mem; 2938 mem_descr += HWI_MEM_ASYNC_HEADER_RING; 2939 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 2940 ret = be_fill_queue(dq, mem_descr->mem_array[0].size / 2941 sizeof(struct phys_addr), 2942 sizeof(struct phys_addr), dq_vaddress); 2943 if (ret) { 2944 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2945 "BM_%d : be_fill_queue Failed for DEF PDU HDR\n"); 2946 return ret; 2947 } 2948 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 2949 bus_address.u.a64.address; 2950 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq, 2951 def_pdu_ring_sz, 2952 phba->params.defpdu_hdr_sz); 2953 if (ret) { 2954 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2955 "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR\n"); 2956 return ret; 2957 } 2958 phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id; 2959 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2960 "BM_%d : iscsi def pdu id is %d\n", 2961 phwi_context->be_def_hdrq.id); 2962 2963 hwi_post_async_buffers(phba, 1); 2964 return 0; 2965 } 2966 2967 static int 2968 beiscsi_create_def_data(struct beiscsi_hba *phba, 2969 struct hwi_context_memory *phwi_context, 2970 struct hwi_controller *phwi_ctrlr, 2971 unsigned int def_pdu_ring_sz) 2972 { 2973 unsigned int idx; 2974 int ret; 2975 struct be_queue_info *dataq, *cq; 2976 struct be_dma_mem *mem; 2977 struct be_mem_descriptor *mem_descr; 2978 void *dq_vaddress; 2979 2980 idx = 0; 2981 dataq = &phwi_context->be_def_dataq; 2982 cq = &phwi_context->be_cq[0]; 2983 mem = &dataq->dma_mem; 2984 mem_descr = phba->init_mem; 2985 mem_descr += HWI_MEM_ASYNC_DATA_RING; 2986 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 2987 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size / 2988 sizeof(struct phys_addr), 2989 sizeof(struct phys_addr), dq_vaddress); 2990 if (ret) { 2991 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2992 "BM_%d : be_fill_queue Failed for DEF PDU DATA\n"); 2993 return ret; 2994 } 2995 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 2996 bus_address.u.a64.address; 2997 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq, 2998 def_pdu_ring_sz, 2999 phba->params.defpdu_data_sz); 3000 if (ret) { 3001 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3002 "BM_%d be_cmd_create_default_pdu_queue" 3003 " Failed for DEF PDU DATA\n"); 3004 return ret; 3005 } 3006 phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id; 3007 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3008 "BM_%d : iscsi def data id is %d\n", 3009 phwi_context->be_def_dataq.id); 3010 3011 hwi_post_async_buffers(phba, 0); 3012 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3013 "BM_%d : DEFAULT PDU DATA RING CREATED\n"); 3014 3015 return 0; 3016 } 3017 3018 static int 3019 beiscsi_post_pages(struct beiscsi_hba *phba) 3020 { 3021 struct be_mem_descriptor *mem_descr; 3022 struct mem_array *pm_arr; 3023 unsigned int page_offset, i; 3024 struct be_dma_mem sgl; 3025 int status; 3026 3027 mem_descr = phba->init_mem; 3028 mem_descr += HWI_MEM_SGE; 3029 pm_arr = mem_descr->mem_array; 3030 3031 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * 3032 phba->fw_config.iscsi_icd_start) / PAGE_SIZE; 3033 for (i = 0; i < mem_descr->num_elements; i++) { 3034 hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 3035 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl, 3036 page_offset, 3037 (pm_arr->size / PAGE_SIZE)); 3038 page_offset += pm_arr->size / PAGE_SIZE; 3039 if (status != 0) { 3040 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3041 "BM_%d : post sgl failed.\n"); 3042 return status; 3043 } 3044 pm_arr++; 3045 } 3046 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3047 "BM_%d : POSTED PAGES\n"); 3048 return 0; 3049 } 3050 3051 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q) 3052 { 3053 struct be_dma_mem *mem = &q->dma_mem; 3054 if (mem->va) { 3055 pci_free_consistent(phba->pcidev, mem->size, 3056 mem->va, mem->dma); 3057 mem->va = NULL; 3058 } 3059 } 3060 3061 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q, 3062 u16 len, u16 entry_size) 3063 { 3064 struct be_dma_mem *mem = &q->dma_mem; 3065 3066 memset(q, 0, sizeof(*q)); 3067 q->len = len; 3068 q->entry_size = entry_size; 3069 mem->size = len * entry_size; 3070 mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma); 3071 if (!mem->va) 3072 return -ENOMEM; 3073 memset(mem->va, 0, mem->size); 3074 return 0; 3075 } 3076 3077 static int 3078 beiscsi_create_wrb_rings(struct beiscsi_hba *phba, 3079 struct hwi_context_memory *phwi_context, 3080 struct hwi_controller *phwi_ctrlr) 3081 { 3082 unsigned int wrb_mem_index, offset, size, num_wrb_rings; 3083 u64 pa_addr_lo; 3084 unsigned int idx, num, i; 3085 struct mem_array *pwrb_arr; 3086 void *wrb_vaddr; 3087 struct be_dma_mem sgl; 3088 struct be_mem_descriptor *mem_descr; 3089 int status; 3090 3091 idx = 0; 3092 mem_descr = phba->init_mem; 3093 mem_descr += HWI_MEM_WRB; 3094 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl, 3095 GFP_KERNEL); 3096 if (!pwrb_arr) { 3097 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3098 "BM_%d : Memory alloc failed in create wrb ring.\n"); 3099 return -ENOMEM; 3100 } 3101 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 3102 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address; 3103 num_wrb_rings = mem_descr->mem_array[idx].size / 3104 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb)); 3105 3106 for (num = 0; num < phba->params.cxns_per_ctrl; num++) { 3107 if (num_wrb_rings) { 3108 pwrb_arr[num].virtual_address = wrb_vaddr; 3109 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo; 3110 pwrb_arr[num].size = phba->params.wrbs_per_cxn * 3111 sizeof(struct iscsi_wrb); 3112 wrb_vaddr += pwrb_arr[num].size; 3113 pa_addr_lo += pwrb_arr[num].size; 3114 num_wrb_rings--; 3115 } else { 3116 idx++; 3117 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 3118 pa_addr_lo = mem_descr->mem_array[idx].\ 3119 bus_address.u.a64.address; 3120 num_wrb_rings = mem_descr->mem_array[idx].size / 3121 (phba->params.wrbs_per_cxn * 3122 sizeof(struct iscsi_wrb)); 3123 pwrb_arr[num].virtual_address = wrb_vaddr; 3124 pwrb_arr[num].bus_address.u.a64.address\ 3125 = pa_addr_lo; 3126 pwrb_arr[num].size = phba->params.wrbs_per_cxn * 3127 sizeof(struct iscsi_wrb); 3128 wrb_vaddr += pwrb_arr[num].size; 3129 pa_addr_lo += pwrb_arr[num].size; 3130 num_wrb_rings--; 3131 } 3132 } 3133 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3134 wrb_mem_index = 0; 3135 offset = 0; 3136 size = 0; 3137 3138 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl); 3139 status = be_cmd_wrbq_create(&phba->ctrl, &sgl, 3140 &phwi_context->be_wrbq[i]); 3141 if (status != 0) { 3142 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3143 "BM_%d : wrbq create failed."); 3144 kfree(pwrb_arr); 3145 return status; 3146 } 3147 phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i]. 3148 id; 3149 } 3150 kfree(pwrb_arr); 3151 return 0; 3152 } 3153 3154 static void free_wrb_handles(struct beiscsi_hba *phba) 3155 { 3156 unsigned int index; 3157 struct hwi_controller *phwi_ctrlr; 3158 struct hwi_wrb_context *pwrb_context; 3159 3160 phwi_ctrlr = phba->phwi_ctrlr; 3161 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { 3162 pwrb_context = &phwi_ctrlr->wrb_context[index]; 3163 kfree(pwrb_context->pwrb_handle_base); 3164 kfree(pwrb_context->pwrb_handle_basestd); 3165 } 3166 } 3167 3168 static void be_mcc_queues_destroy(struct beiscsi_hba *phba) 3169 { 3170 struct be_queue_info *q; 3171 struct be_ctrl_info *ctrl = &phba->ctrl; 3172 3173 q = &phba->ctrl.mcc_obj.q; 3174 if (q->created) 3175 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ); 3176 be_queue_free(phba, q); 3177 3178 q = &phba->ctrl.mcc_obj.cq; 3179 if (q->created) 3180 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); 3181 be_queue_free(phba, q); 3182 } 3183 3184 static void hwi_cleanup(struct beiscsi_hba *phba) 3185 { 3186 struct be_queue_info *q; 3187 struct be_ctrl_info *ctrl = &phba->ctrl; 3188 struct hwi_controller *phwi_ctrlr; 3189 struct hwi_context_memory *phwi_context; 3190 int i, eq_num; 3191 3192 phwi_ctrlr = phba->phwi_ctrlr; 3193 phwi_context = phwi_ctrlr->phwi_ctxt; 3194 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3195 q = &phwi_context->be_wrbq[i]; 3196 if (q->created) 3197 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); 3198 } 3199 free_wrb_handles(phba); 3200 3201 q = &phwi_context->be_def_hdrq; 3202 if (q->created) 3203 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3204 3205 q = &phwi_context->be_def_dataq; 3206 if (q->created) 3207 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3208 3209 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); 3210 3211 for (i = 0; i < (phba->num_cpus); i++) { 3212 q = &phwi_context->be_cq[i]; 3213 if (q->created) 3214 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); 3215 } 3216 if (phba->msix_enabled) 3217 eq_num = 1; 3218 else 3219 eq_num = 0; 3220 for (i = 0; i < (phba->num_cpus + eq_num); i++) { 3221 q = &phwi_context->be_eq[i].q; 3222 if (q->created) 3223 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); 3224 } 3225 be_mcc_queues_destroy(phba); 3226 } 3227 3228 static int be_mcc_queues_create(struct beiscsi_hba *phba, 3229 struct hwi_context_memory *phwi_context) 3230 { 3231 struct be_queue_info *q, *cq; 3232 struct be_ctrl_info *ctrl = &phba->ctrl; 3233 3234 /* Alloc MCC compl queue */ 3235 cq = &phba->ctrl.mcc_obj.cq; 3236 if (be_queue_alloc(phba, cq, MCC_CQ_LEN, 3237 sizeof(struct be_mcc_compl))) 3238 goto err; 3239 /* Ask BE to create MCC compl queue; */ 3240 if (phba->msix_enabled) { 3241 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq 3242 [phba->num_cpus].q, false, true, 0)) 3243 goto mcc_cq_free; 3244 } else { 3245 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q, 3246 false, true, 0)) 3247 goto mcc_cq_free; 3248 } 3249 3250 /* Alloc MCC queue */ 3251 q = &phba->ctrl.mcc_obj.q; 3252 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb))) 3253 goto mcc_cq_destroy; 3254 3255 /* Ask BE to create MCC queue */ 3256 if (beiscsi_cmd_mccq_create(phba, q, cq)) 3257 goto mcc_q_free; 3258 3259 return 0; 3260 3261 mcc_q_free: 3262 be_queue_free(phba, q); 3263 mcc_cq_destroy: 3264 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ); 3265 mcc_cq_free: 3266 be_queue_free(phba, cq); 3267 err: 3268 return -ENOMEM; 3269 } 3270 3271 static int find_num_cpus(void) 3272 { 3273 int num_cpus = 0; 3274 3275 num_cpus = num_online_cpus(); 3276 if (num_cpus >= MAX_CPUS) 3277 num_cpus = MAX_CPUS - 1; 3278 3279 return num_cpus; 3280 } 3281 3282 static int hwi_init_port(struct beiscsi_hba *phba) 3283 { 3284 struct hwi_controller *phwi_ctrlr; 3285 struct hwi_context_memory *phwi_context; 3286 unsigned int def_pdu_ring_sz; 3287 struct be_ctrl_info *ctrl = &phba->ctrl; 3288 int status; 3289 3290 def_pdu_ring_sz = 3291 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr); 3292 phwi_ctrlr = phba->phwi_ctrlr; 3293 phwi_context = phwi_ctrlr->phwi_ctxt; 3294 phwi_context->max_eqd = 0; 3295 phwi_context->min_eqd = 0; 3296 phwi_context->cur_eqd = 64; 3297 be_cmd_fw_initialize(&phba->ctrl); 3298 3299 status = beiscsi_create_eqs(phba, phwi_context); 3300 if (status != 0) { 3301 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3302 "BM_%d : EQ not created\n"); 3303 goto error; 3304 } 3305 3306 status = be_mcc_queues_create(phba, phwi_context); 3307 if (status != 0) 3308 goto error; 3309 3310 status = mgmt_check_supported_fw(ctrl, phba); 3311 if (status != 0) { 3312 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3313 "BM_%d : Unsupported fw version\n"); 3314 goto error; 3315 } 3316 3317 status = beiscsi_create_cqs(phba, phwi_context); 3318 if (status != 0) { 3319 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3320 "BM_%d : CQ not created\n"); 3321 goto error; 3322 } 3323 3324 status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr, 3325 def_pdu_ring_sz); 3326 if (status != 0) { 3327 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3328 "BM_%d : Default Header not created\n"); 3329 goto error; 3330 } 3331 3332 status = beiscsi_create_def_data(phba, phwi_context, 3333 phwi_ctrlr, def_pdu_ring_sz); 3334 if (status != 0) { 3335 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3336 "BM_%d : Default Data not created\n"); 3337 goto error; 3338 } 3339 3340 status = beiscsi_post_pages(phba); 3341 if (status != 0) { 3342 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3343 "BM_%d : Post SGL Pages Failed\n"); 3344 goto error; 3345 } 3346 3347 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr); 3348 if (status != 0) { 3349 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3350 "BM_%d : WRB Rings not created\n"); 3351 goto error; 3352 } 3353 3354 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3355 "BM_%d : hwi_init_port success\n"); 3356 return 0; 3357 3358 error: 3359 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3360 "BM_%d : hwi_init_port failed"); 3361 hwi_cleanup(phba); 3362 return status; 3363 } 3364 3365 static int hwi_init_controller(struct beiscsi_hba *phba) 3366 { 3367 struct hwi_controller *phwi_ctrlr; 3368 3369 phwi_ctrlr = phba->phwi_ctrlr; 3370 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) { 3371 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba-> 3372 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address; 3373 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3374 "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n", 3375 phwi_ctrlr->phwi_ctxt); 3376 } else { 3377 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3378 "BM_%d : HWI_MEM_ADDN_CONTEXT is more " 3379 "than one element.Failing to load\n"); 3380 return -ENOMEM; 3381 } 3382 3383 iscsi_init_global_templates(phba); 3384 if (beiscsi_init_wrb_handle(phba)) 3385 return -ENOMEM; 3386 3387 hwi_init_async_pdu_ctx(phba); 3388 if (hwi_init_port(phba) != 0) { 3389 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3390 "BM_%d : hwi_init_controller failed\n"); 3391 3392 return -ENOMEM; 3393 } 3394 return 0; 3395 } 3396 3397 static void beiscsi_free_mem(struct beiscsi_hba *phba) 3398 { 3399 struct be_mem_descriptor *mem_descr; 3400 int i, j; 3401 3402 mem_descr = phba->init_mem; 3403 i = 0; 3404 j = 0; 3405 for (i = 0; i < SE_MEM_MAX; i++) { 3406 for (j = mem_descr->num_elements; j > 0; j--) { 3407 pci_free_consistent(phba->pcidev, 3408 mem_descr->mem_array[j - 1].size, 3409 mem_descr->mem_array[j - 1].virtual_address, 3410 (unsigned long)mem_descr->mem_array[j - 1]. 3411 bus_address.u.a64.address); 3412 } 3413 kfree(mem_descr->mem_array); 3414 mem_descr++; 3415 } 3416 kfree(phba->init_mem); 3417 kfree(phba->phwi_ctrlr); 3418 } 3419 3420 static int beiscsi_init_controller(struct beiscsi_hba *phba) 3421 { 3422 int ret = -ENOMEM; 3423 3424 ret = beiscsi_get_memory(phba); 3425 if (ret < 0) { 3426 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3427 "BM_%d : beiscsi_dev_probe -" 3428 "Failed in beiscsi_alloc_memory\n"); 3429 return ret; 3430 } 3431 3432 ret = hwi_init_controller(phba); 3433 if (ret) 3434 goto free_init; 3435 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3436 "BM_%d : Return success from beiscsi_init_controller"); 3437 3438 return 0; 3439 3440 free_init: 3441 beiscsi_free_mem(phba); 3442 return ret; 3443 } 3444 3445 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) 3446 { 3447 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg; 3448 struct sgl_handle *psgl_handle; 3449 struct iscsi_sge *pfrag; 3450 unsigned int arr_index, i, idx; 3451 3452 phba->io_sgl_hndl_avbl = 0; 3453 phba->eh_sgl_hndl_avbl = 0; 3454 3455 mem_descr_sglh = phba->init_mem; 3456 mem_descr_sglh += HWI_MEM_SGLH; 3457 if (1 == mem_descr_sglh->num_elements) { 3458 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) * 3459 phba->params.ios_per_ctrl, 3460 GFP_KERNEL); 3461 if (!phba->io_sgl_hndl_base) { 3462 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3463 "BM_%d : Mem Alloc Failed. Failing to load\n"); 3464 return -ENOMEM; 3465 } 3466 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) * 3467 (phba->params.icds_per_ctrl - 3468 phba->params.ios_per_ctrl), 3469 GFP_KERNEL); 3470 if (!phba->eh_sgl_hndl_base) { 3471 kfree(phba->io_sgl_hndl_base); 3472 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3473 "BM_%d : Mem Alloc Failed. Failing to load\n"); 3474 return -ENOMEM; 3475 } 3476 } else { 3477 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3478 "BM_%d : HWI_MEM_SGLH is more than one element." 3479 "Failing to load\n"); 3480 return -ENOMEM; 3481 } 3482 3483 arr_index = 0; 3484 idx = 0; 3485 while (idx < mem_descr_sglh->num_elements) { 3486 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address; 3487 3488 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size / 3489 sizeof(struct sgl_handle)); i++) { 3490 if (arr_index < phba->params.ios_per_ctrl) { 3491 phba->io_sgl_hndl_base[arr_index] = psgl_handle; 3492 phba->io_sgl_hndl_avbl++; 3493 arr_index++; 3494 } else { 3495 phba->eh_sgl_hndl_base[arr_index - 3496 phba->params.ios_per_ctrl] = 3497 psgl_handle; 3498 arr_index++; 3499 phba->eh_sgl_hndl_avbl++; 3500 } 3501 psgl_handle++; 3502 } 3503 idx++; 3504 } 3505 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3506 "BM_%d : phba->io_sgl_hndl_avbl=%d" 3507 "phba->eh_sgl_hndl_avbl=%d\n", 3508 phba->io_sgl_hndl_avbl, 3509 phba->eh_sgl_hndl_avbl); 3510 3511 mem_descr_sg = phba->init_mem; 3512 mem_descr_sg += HWI_MEM_SGE; 3513 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3514 "\n BM_%d : mem_descr_sg->num_elements=%d\n", 3515 mem_descr_sg->num_elements); 3516 3517 arr_index = 0; 3518 idx = 0; 3519 while (idx < mem_descr_sg->num_elements) { 3520 pfrag = mem_descr_sg->mem_array[idx].virtual_address; 3521 3522 for (i = 0; 3523 i < (mem_descr_sg->mem_array[idx].size) / 3524 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io); 3525 i++) { 3526 if (arr_index < phba->params.ios_per_ctrl) 3527 psgl_handle = phba->io_sgl_hndl_base[arr_index]; 3528 else 3529 psgl_handle = phba->eh_sgl_hndl_base[arr_index - 3530 phba->params.ios_per_ctrl]; 3531 psgl_handle->pfrag = pfrag; 3532 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0); 3533 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0); 3534 pfrag += phba->params.num_sge_per_io; 3535 psgl_handle->sgl_index = 3536 phba->fw_config.iscsi_icd_start + arr_index++; 3537 } 3538 idx++; 3539 } 3540 phba->io_sgl_free_index = 0; 3541 phba->io_sgl_alloc_index = 0; 3542 phba->eh_sgl_free_index = 0; 3543 phba->eh_sgl_alloc_index = 0; 3544 return 0; 3545 } 3546 3547 static int hba_setup_cid_tbls(struct beiscsi_hba *phba) 3548 { 3549 int i, new_cid; 3550 3551 phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl, 3552 GFP_KERNEL); 3553 if (!phba->cid_array) { 3554 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3555 "BM_%d : Failed to allocate memory in " 3556 "hba_setup_cid_tbls\n"); 3557 return -ENOMEM; 3558 } 3559 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) * 3560 phba->params.cxns_per_ctrl * 2, GFP_KERNEL); 3561 if (!phba->ep_array) { 3562 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3563 "BM_%d : Failed to allocate memory in " 3564 "hba_setup_cid_tbls\n"); 3565 kfree(phba->cid_array); 3566 return -ENOMEM; 3567 } 3568 new_cid = phba->fw_config.iscsi_cid_start; 3569 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3570 phba->cid_array[i] = new_cid; 3571 new_cid += 2; 3572 } 3573 phba->avlbl_cids = phba->params.cxns_per_ctrl; 3574 return 0; 3575 } 3576 3577 static void hwi_enable_intr(struct beiscsi_hba *phba) 3578 { 3579 struct be_ctrl_info *ctrl = &phba->ctrl; 3580 struct hwi_controller *phwi_ctrlr; 3581 struct hwi_context_memory *phwi_context; 3582 struct be_queue_info *eq; 3583 u8 __iomem *addr; 3584 u32 reg, i; 3585 u32 enabled; 3586 3587 phwi_ctrlr = phba->phwi_ctrlr; 3588 phwi_context = phwi_ctrlr->phwi_ctxt; 3589 3590 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg + 3591 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); 3592 reg = ioread32(addr); 3593 3594 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 3595 if (!enabled) { 3596 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 3597 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3598 "BM_%d : reg =x%08x addr=%p\n", reg, addr); 3599 iowrite32(reg, addr); 3600 } 3601 3602 if (!phba->msix_enabled) { 3603 eq = &phwi_context->be_eq[0].q; 3604 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3605 "BM_%d : eq->id=%d\n", eq->id); 3606 3607 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 3608 } else { 3609 for (i = 0; i <= phba->num_cpus; i++) { 3610 eq = &phwi_context->be_eq[i].q; 3611 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3612 "BM_%d : eq->id=%d\n", eq->id); 3613 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 3614 } 3615 } 3616 } 3617 3618 static void hwi_disable_intr(struct beiscsi_hba *phba) 3619 { 3620 struct be_ctrl_info *ctrl = &phba->ctrl; 3621 3622 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET; 3623 u32 reg = ioread32(addr); 3624 3625 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 3626 if (enabled) { 3627 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 3628 iowrite32(reg, addr); 3629 } else 3630 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 3631 "BM_%d : In hwi_disable_intr, Already Disabled\n"); 3632 } 3633 3634 /** 3635 * beiscsi_get_boot_info()- Get the boot session info 3636 * @phba: The device priv structure instance 3637 * 3638 * Get the boot target info and store in driver priv structure 3639 * 3640 * return values 3641 * Success: 0 3642 * Failure: Non-Zero Value 3643 **/ 3644 static int beiscsi_get_boot_info(struct beiscsi_hba *phba) 3645 { 3646 struct be_cmd_get_session_resp *session_resp; 3647 struct be_mcc_wrb *wrb; 3648 struct be_dma_mem nonemb_cmd; 3649 unsigned int tag, wrb_num; 3650 unsigned short status, extd_status; 3651 unsigned int s_handle; 3652 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; 3653 int ret = -ENOMEM; 3654 3655 /* Get the session handle of the boot target */ 3656 ret = be_mgmt_get_boot_shandle(phba, &s_handle); 3657 if (ret) { 3658 beiscsi_log(phba, KERN_ERR, 3659 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 3660 "BM_%d : No boot session\n"); 3661 return ret; 3662 } 3663 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 3664 sizeof(*session_resp), 3665 &nonemb_cmd.dma); 3666 if (nonemb_cmd.va == NULL) { 3667 beiscsi_log(phba, KERN_ERR, 3668 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 3669 "BM_%d : Failed to allocate memory for" 3670 "beiscsi_get_session_info\n"); 3671 3672 return -ENOMEM; 3673 } 3674 3675 memset(nonemb_cmd.va, 0, sizeof(*session_resp)); 3676 tag = mgmt_get_session_info(phba, s_handle, 3677 &nonemb_cmd); 3678 if (!tag) { 3679 beiscsi_log(phba, KERN_ERR, 3680 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 3681 "BM_%d : beiscsi_get_session_info" 3682 " Failed\n"); 3683 3684 goto boot_freemem; 3685 } else 3686 wait_event_interruptible(phba->ctrl.mcc_wait[tag], 3687 phba->ctrl.mcc_numtag[tag]); 3688 3689 wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16; 3690 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8; 3691 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF; 3692 if (status || extd_status) { 3693 beiscsi_log(phba, KERN_ERR, 3694 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 3695 "BM_%d : beiscsi_get_session_info Failed" 3696 " status = %d extd_status = %d\n", 3697 status, extd_status); 3698 3699 free_mcc_tag(&phba->ctrl, tag); 3700 goto boot_freemem; 3701 } 3702 wrb = queue_get_wrb(mccq, wrb_num); 3703 free_mcc_tag(&phba->ctrl, tag); 3704 session_resp = nonemb_cmd.va ; 3705 3706 memcpy(&phba->boot_sess, &session_resp->session_info, 3707 sizeof(struct mgmt_session_info)); 3708 ret = 0; 3709 3710 boot_freemem: 3711 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 3712 nonemb_cmd.va, nonemb_cmd.dma); 3713 return ret; 3714 } 3715 3716 static void beiscsi_boot_release(void *data) 3717 { 3718 struct beiscsi_hba *phba = data; 3719 3720 scsi_host_put(phba->shost); 3721 } 3722 3723 static int beiscsi_setup_boot_info(struct beiscsi_hba *phba) 3724 { 3725 struct iscsi_boot_kobj *boot_kobj; 3726 3727 /* get boot info using mgmt cmd */ 3728 if (beiscsi_get_boot_info(phba)) 3729 /* Try to see if we can carry on without this */ 3730 return 0; 3731 3732 phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no); 3733 if (!phba->boot_kset) 3734 return -ENOMEM; 3735 3736 /* get a ref because the show function will ref the phba */ 3737 if (!scsi_host_get(phba->shost)) 3738 goto free_kset; 3739 boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba, 3740 beiscsi_show_boot_tgt_info, 3741 beiscsi_tgt_get_attr_visibility, 3742 beiscsi_boot_release); 3743 if (!boot_kobj) 3744 goto put_shost; 3745 3746 if (!scsi_host_get(phba->shost)) 3747 goto free_kset; 3748 boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba, 3749 beiscsi_show_boot_ini_info, 3750 beiscsi_ini_get_attr_visibility, 3751 beiscsi_boot_release); 3752 if (!boot_kobj) 3753 goto put_shost; 3754 3755 if (!scsi_host_get(phba->shost)) 3756 goto free_kset; 3757 boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba, 3758 beiscsi_show_boot_eth_info, 3759 beiscsi_eth_get_attr_visibility, 3760 beiscsi_boot_release); 3761 if (!boot_kobj) 3762 goto put_shost; 3763 return 0; 3764 3765 put_shost: 3766 scsi_host_put(phba->shost); 3767 free_kset: 3768 iscsi_boot_destroy_kset(phba->boot_kset); 3769 return -ENOMEM; 3770 } 3771 3772 static int beiscsi_init_port(struct beiscsi_hba *phba) 3773 { 3774 int ret; 3775 3776 ret = beiscsi_init_controller(phba); 3777 if (ret < 0) { 3778 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3779 "BM_%d : beiscsi_dev_probe - Failed in" 3780 "beiscsi_init_controller\n"); 3781 return ret; 3782 } 3783 ret = beiscsi_init_sgl_handle(phba); 3784 if (ret < 0) { 3785 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3786 "BM_%d : beiscsi_dev_probe - Failed in" 3787 "beiscsi_init_sgl_handle\n"); 3788 goto do_cleanup_ctrlr; 3789 } 3790 3791 if (hba_setup_cid_tbls(phba)) { 3792 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3793 "BM_%d : Failed in hba_setup_cid_tbls\n"); 3794 kfree(phba->io_sgl_hndl_base); 3795 kfree(phba->eh_sgl_hndl_base); 3796 goto do_cleanup_ctrlr; 3797 } 3798 3799 return ret; 3800 3801 do_cleanup_ctrlr: 3802 hwi_cleanup(phba); 3803 return ret; 3804 } 3805 3806 static void hwi_purge_eq(struct beiscsi_hba *phba) 3807 { 3808 struct hwi_controller *phwi_ctrlr; 3809 struct hwi_context_memory *phwi_context; 3810 struct be_queue_info *eq; 3811 struct be_eq_entry *eqe = NULL; 3812 int i, eq_msix; 3813 unsigned int num_processed; 3814 3815 phwi_ctrlr = phba->phwi_ctrlr; 3816 phwi_context = phwi_ctrlr->phwi_ctxt; 3817 if (phba->msix_enabled) 3818 eq_msix = 1; 3819 else 3820 eq_msix = 0; 3821 3822 for (i = 0; i < (phba->num_cpus + eq_msix); i++) { 3823 eq = &phwi_context->be_eq[i].q; 3824 eqe = queue_tail_node(eq); 3825 num_processed = 0; 3826 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 3827 & EQE_VALID_MASK) { 3828 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 3829 queue_tail_inc(eq); 3830 eqe = queue_tail_node(eq); 3831 num_processed++; 3832 } 3833 3834 if (num_processed) 3835 hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1); 3836 } 3837 } 3838 3839 static void beiscsi_clean_port(struct beiscsi_hba *phba) 3840 { 3841 int mgmt_status; 3842 3843 mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0); 3844 if (mgmt_status) 3845 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 3846 "BM_%d : mgmt_epfw_cleanup FAILED\n"); 3847 3848 hwi_purge_eq(phba); 3849 hwi_cleanup(phba); 3850 kfree(phba->io_sgl_hndl_base); 3851 kfree(phba->eh_sgl_hndl_base); 3852 kfree(phba->cid_array); 3853 kfree(phba->ep_array); 3854 } 3855 3856 static void beiscsi_cleanup_task(struct iscsi_task *task) 3857 { 3858 struct beiscsi_io_task *io_task = task->dd_data; 3859 struct iscsi_conn *conn = task->conn; 3860 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 3861 struct beiscsi_hba *phba = beiscsi_conn->phba; 3862 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 3863 struct hwi_wrb_context *pwrb_context; 3864 struct hwi_controller *phwi_ctrlr; 3865 3866 phwi_ctrlr = phba->phwi_ctrlr; 3867 pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid 3868 - phba->fw_config.iscsi_cid_start]; 3869 3870 if (io_task->cmd_bhs) { 3871 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 3872 io_task->bhs_pa.u.a64.address); 3873 io_task->cmd_bhs = NULL; 3874 } 3875 3876 if (task->sc) { 3877 if (io_task->pwrb_handle) { 3878 free_wrb_handle(phba, pwrb_context, 3879 io_task->pwrb_handle); 3880 io_task->pwrb_handle = NULL; 3881 } 3882 3883 if (io_task->psgl_handle) { 3884 spin_lock(&phba->io_sgl_lock); 3885 free_io_sgl_handle(phba, io_task->psgl_handle); 3886 spin_unlock(&phba->io_sgl_lock); 3887 io_task->psgl_handle = NULL; 3888 } 3889 } else { 3890 if (!beiscsi_conn->login_in_progress) { 3891 if (io_task->pwrb_handle) { 3892 free_wrb_handle(phba, pwrb_context, 3893 io_task->pwrb_handle); 3894 io_task->pwrb_handle = NULL; 3895 } 3896 if (io_task->psgl_handle) { 3897 spin_lock(&phba->mgmt_sgl_lock); 3898 free_mgmt_sgl_handle(phba, 3899 io_task->psgl_handle); 3900 spin_unlock(&phba->mgmt_sgl_lock); 3901 io_task->psgl_handle = NULL; 3902 } 3903 } 3904 } 3905 } 3906 3907 void 3908 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, 3909 struct beiscsi_offload_params *params) 3910 { 3911 struct wrb_handle *pwrb_handle; 3912 struct iscsi_target_context_update_wrb *pwrb = NULL; 3913 struct be_mem_descriptor *mem_descr; 3914 struct beiscsi_hba *phba = beiscsi_conn->phba; 3915 struct iscsi_task *task = beiscsi_conn->task; 3916 struct iscsi_session *session = task->conn->session; 3917 u32 doorbell = 0; 3918 3919 /* 3920 * We can always use 0 here because it is reserved by libiscsi for 3921 * login/startup related tasks. 3922 */ 3923 beiscsi_conn->login_in_progress = 0; 3924 spin_lock_bh(&session->lock); 3925 beiscsi_cleanup_task(task); 3926 spin_unlock_bh(&session->lock); 3927 3928 pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid - 3929 phba->fw_config.iscsi_cid_start)); 3930 pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb; 3931 memset(pwrb, 0, sizeof(*pwrb)); 3932 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, 3933 max_burst_length, pwrb, params->dw[offsetof 3934 (struct amap_beiscsi_offload_params, 3935 max_burst_length) / 32]); 3936 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, 3937 max_send_data_segment_length, pwrb, 3938 params->dw[offsetof(struct amap_beiscsi_offload_params, 3939 max_send_data_segment_length) / 32]); 3940 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, 3941 first_burst_length, 3942 pwrb, 3943 params->dw[offsetof(struct amap_beiscsi_offload_params, 3944 first_burst_length) / 32]); 3945 3946 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb, 3947 (params->dw[offsetof(struct amap_beiscsi_offload_params, 3948 erl) / 32] & OFFLD_PARAMS_ERL)); 3949 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb, 3950 (params->dw[offsetof(struct amap_beiscsi_offload_params, 3951 dde) / 32] & OFFLD_PARAMS_DDE) >> 2); 3952 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb, 3953 (params->dw[offsetof(struct amap_beiscsi_offload_params, 3954 hde) / 32] & OFFLD_PARAMS_HDE) >> 3); 3955 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb, 3956 (params->dw[offsetof(struct amap_beiscsi_offload_params, 3957 ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4); 3958 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb, 3959 (params->dw[offsetof(struct amap_beiscsi_offload_params, 3960 imd) / 32] & OFFLD_PARAMS_IMD) >> 5); 3961 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn, 3962 pwrb, 3963 (params->dw[offsetof(struct amap_beiscsi_offload_params, 3964 exp_statsn) / 32] + 1)); 3965 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb, 3966 0x7); 3967 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx, 3968 pwrb, pwrb_handle->wrb_index); 3969 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb, 3970 pwrb, pwrb_handle->nxt_wrb_index); 3971 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, 3972 session_state, pwrb, 0); 3973 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack, 3974 pwrb, 1); 3975 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq, 3976 pwrb, 0); 3977 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb, 3978 0); 3979 3980 mem_descr = phba->init_mem; 3981 mem_descr += ISCSI_MEM_GLOBAL_HEADER; 3982 3983 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, 3984 pad_buffer_addr_hi, pwrb, 3985 mem_descr->mem_array[0].bus_address.u.a32.address_hi); 3986 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, 3987 pad_buffer_addr_lo, pwrb, 3988 mem_descr->mem_array[0].bus_address.u.a32.address_lo); 3989 3990 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb)); 3991 3992 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 3993 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) 3994 << DB_DEF_PDU_WRB_INDEX_SHIFT; 3995 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 3996 3997 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); 3998 } 3999 4000 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, 4001 int *index, int *age) 4002 { 4003 *index = (int)itt; 4004 if (age) 4005 *age = conn->session->age; 4006 } 4007 4008 /** 4009 * beiscsi_alloc_pdu - allocates pdu and related resources 4010 * @task: libiscsi task 4011 * @opcode: opcode of pdu for task 4012 * 4013 * This is called with the session lock held. It will allocate 4014 * the wrb and sgl if needed for the command. And it will prep 4015 * the pdu's itt. beiscsi_parse_pdu will later translate 4016 * the pdu itt to the libiscsi task itt. 4017 */ 4018 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) 4019 { 4020 struct beiscsi_io_task *io_task = task->dd_data; 4021 struct iscsi_conn *conn = task->conn; 4022 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4023 struct beiscsi_hba *phba = beiscsi_conn->phba; 4024 struct hwi_wrb_context *pwrb_context; 4025 struct hwi_controller *phwi_ctrlr; 4026 itt_t itt; 4027 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4028 dma_addr_t paddr; 4029 4030 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool, 4031 GFP_ATOMIC, &paddr); 4032 if (!io_task->cmd_bhs) 4033 return -ENOMEM; 4034 io_task->bhs_pa.u.a64.address = paddr; 4035 io_task->libiscsi_itt = (itt_t)task->itt; 4036 io_task->conn = beiscsi_conn; 4037 4038 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr; 4039 task->hdr_max = sizeof(struct be_cmd_bhs); 4040 io_task->psgl_handle = NULL; 4041 io_task->pwrb_handle = NULL; 4042 4043 if (task->sc) { 4044 spin_lock(&phba->io_sgl_lock); 4045 io_task->psgl_handle = alloc_io_sgl_handle(phba); 4046 spin_unlock(&phba->io_sgl_lock); 4047 if (!io_task->psgl_handle) 4048 goto free_hndls; 4049 io_task->pwrb_handle = alloc_wrb_handle(phba, 4050 beiscsi_conn->beiscsi_conn_cid - 4051 phba->fw_config.iscsi_cid_start); 4052 if (!io_task->pwrb_handle) 4053 goto free_io_hndls; 4054 } else { 4055 io_task->scsi_cmnd = NULL; 4056 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { 4057 if (!beiscsi_conn->login_in_progress) { 4058 spin_lock(&phba->mgmt_sgl_lock); 4059 io_task->psgl_handle = (struct sgl_handle *) 4060 alloc_mgmt_sgl_handle(phba); 4061 spin_unlock(&phba->mgmt_sgl_lock); 4062 if (!io_task->psgl_handle) 4063 goto free_hndls; 4064 4065 beiscsi_conn->login_in_progress = 1; 4066 beiscsi_conn->plogin_sgl_handle = 4067 io_task->psgl_handle; 4068 io_task->pwrb_handle = 4069 alloc_wrb_handle(phba, 4070 beiscsi_conn->beiscsi_conn_cid - 4071 phba->fw_config.iscsi_cid_start); 4072 if (!io_task->pwrb_handle) 4073 goto free_io_hndls; 4074 beiscsi_conn->plogin_wrb_handle = 4075 io_task->pwrb_handle; 4076 4077 } else { 4078 io_task->psgl_handle = 4079 beiscsi_conn->plogin_sgl_handle; 4080 io_task->pwrb_handle = 4081 beiscsi_conn->plogin_wrb_handle; 4082 } 4083 beiscsi_conn->task = task; 4084 } else { 4085 spin_lock(&phba->mgmt_sgl_lock); 4086 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba); 4087 spin_unlock(&phba->mgmt_sgl_lock); 4088 if (!io_task->psgl_handle) 4089 goto free_hndls; 4090 io_task->pwrb_handle = 4091 alloc_wrb_handle(phba, 4092 beiscsi_conn->beiscsi_conn_cid - 4093 phba->fw_config.iscsi_cid_start); 4094 if (!io_task->pwrb_handle) 4095 goto free_mgmt_hndls; 4096 4097 } 4098 } 4099 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle-> 4100 wrb_index << 16) | (unsigned int) 4101 (io_task->psgl_handle->sgl_index)); 4102 io_task->pwrb_handle->pio_handle = task; 4103 4104 io_task->cmd_bhs->iscsi_hdr.itt = itt; 4105 return 0; 4106 4107 free_io_hndls: 4108 spin_lock(&phba->io_sgl_lock); 4109 free_io_sgl_handle(phba, io_task->psgl_handle); 4110 spin_unlock(&phba->io_sgl_lock); 4111 goto free_hndls; 4112 free_mgmt_hndls: 4113 spin_lock(&phba->mgmt_sgl_lock); 4114 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 4115 spin_unlock(&phba->mgmt_sgl_lock); 4116 free_hndls: 4117 phwi_ctrlr = phba->phwi_ctrlr; 4118 pwrb_context = &phwi_ctrlr->wrb_context[ 4119 beiscsi_conn->beiscsi_conn_cid - 4120 phba->fw_config.iscsi_cid_start]; 4121 if (io_task->pwrb_handle) 4122 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 4123 io_task->pwrb_handle = NULL; 4124 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4125 io_task->bhs_pa.u.a64.address); 4126 io_task->cmd_bhs = NULL; 4127 beiscsi_log(phba, KERN_ERR, 4128 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4129 "BM_%d : Alloc of SGL_ICD Failed\n"); 4130 return -ENOMEM; 4131 } 4132 4133 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, 4134 unsigned int num_sg, unsigned int xferlen, 4135 unsigned int writedir) 4136 { 4137 4138 struct beiscsi_io_task *io_task = task->dd_data; 4139 struct iscsi_conn *conn = task->conn; 4140 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4141 struct beiscsi_hba *phba = beiscsi_conn->phba; 4142 struct iscsi_wrb *pwrb = NULL; 4143 unsigned int doorbell = 0; 4144 4145 pwrb = io_task->pwrb_handle->pwrb; 4146 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0; 4147 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4148 4149 if (writedir) { 4150 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4151 INI_WR_CMD); 4152 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); 4153 } else { 4154 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4155 INI_RD_CMD); 4156 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 4157 } 4158 4159 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb, 4160 cpu_to_be16(*(unsigned short *) 4161 &io_task->cmd_bhs->iscsi_hdr.lun)); 4162 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen); 4163 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4164 io_task->pwrb_handle->wrb_index); 4165 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4166 be32_to_cpu(task->cmdsn)); 4167 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 4168 io_task->psgl_handle->sgl_index); 4169 4170 hwi_write_sgl(pwrb, sg, num_sg, io_task); 4171 4172 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4173 io_task->pwrb_handle->nxt_wrb_index); 4174 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 4175 4176 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4177 doorbell |= (io_task->pwrb_handle->wrb_index & 4178 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 4179 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4180 4181 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); 4182 return 0; 4183 } 4184 4185 static int beiscsi_mtask(struct iscsi_task *task) 4186 { 4187 struct beiscsi_io_task *io_task = task->dd_data; 4188 struct iscsi_conn *conn = task->conn; 4189 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4190 struct beiscsi_hba *phba = beiscsi_conn->phba; 4191 struct iscsi_wrb *pwrb = NULL; 4192 unsigned int doorbell = 0; 4193 unsigned int cid; 4194 4195 cid = beiscsi_conn->beiscsi_conn_cid; 4196 pwrb = io_task->pwrb_handle->pwrb; 4197 memset(pwrb, 0, sizeof(*pwrb)); 4198 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4199 be32_to_cpu(task->cmdsn)); 4200 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4201 io_task->pwrb_handle->wrb_index); 4202 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 4203 io_task->psgl_handle->sgl_index); 4204 4205 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { 4206 case ISCSI_OP_LOGIN: 4207 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4208 TGT_DM_CMD); 4209 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); 4210 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); 4211 hwi_write_buffer(pwrb, task); 4212 break; 4213 case ISCSI_OP_NOOP_OUT: 4214 if (task->hdr->ttt != ISCSI_RESERVED_TAG) { 4215 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4216 TGT_DM_CMD); 4217 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, 4218 pwrb, 0); 4219 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1); 4220 } else { 4221 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4222 INI_RD_CMD); 4223 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); 4224 } 4225 hwi_write_buffer(pwrb, task); 4226 break; 4227 case ISCSI_OP_TEXT: 4228 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4229 TGT_DM_CMD); 4230 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); 4231 hwi_write_buffer(pwrb, task); 4232 break; 4233 case ISCSI_OP_SCSI_TMFUNC: 4234 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4235 INI_TMF_CMD); 4236 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); 4237 hwi_write_buffer(pwrb, task); 4238 break; 4239 case ISCSI_OP_LOGOUT: 4240 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); 4241 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4242 HWH_TYPE_LOGOUT); 4243 hwi_write_buffer(pwrb, task); 4244 break; 4245 4246 default: 4247 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4248 "BM_%d : opcode =%d Not supported\n", 4249 task->hdr->opcode & ISCSI_OPCODE_MASK); 4250 4251 return -EINVAL; 4252 } 4253 4254 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, 4255 task->data_count); 4256 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4257 io_task->pwrb_handle->nxt_wrb_index); 4258 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 4259 4260 doorbell |= cid & DB_WRB_POST_CID_MASK; 4261 doorbell |= (io_task->pwrb_handle->wrb_index & 4262 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 4263 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4264 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); 4265 return 0; 4266 } 4267 4268 static int beiscsi_task_xmit(struct iscsi_task *task) 4269 { 4270 struct beiscsi_io_task *io_task = task->dd_data; 4271 struct scsi_cmnd *sc = task->sc; 4272 struct scatterlist *sg; 4273 int num_sg; 4274 unsigned int writedir = 0, xferlen = 0; 4275 4276 if (!sc) 4277 return beiscsi_mtask(task); 4278 4279 io_task->scsi_cmnd = sc; 4280 num_sg = scsi_dma_map(sc); 4281 if (num_sg < 0) { 4282 struct iscsi_conn *conn = task->conn; 4283 struct beiscsi_hba *phba = NULL; 4284 4285 phba = ((struct beiscsi_conn *)conn->dd_data)->phba; 4286 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO, 4287 "BM_%d : scsi_dma_map Failed\n"); 4288 4289 return num_sg; 4290 } 4291 xferlen = scsi_bufflen(sc); 4292 sg = scsi_sglist(sc); 4293 if (sc->sc_data_direction == DMA_TO_DEVICE) 4294 writedir = 1; 4295 else 4296 writedir = 0; 4297 4298 return beiscsi_iotask(task, sg, num_sg, xferlen, writedir); 4299 } 4300 4301 /** 4302 * beiscsi_bsg_request - handle bsg request from ISCSI transport 4303 * @job: job to handle 4304 */ 4305 static int beiscsi_bsg_request(struct bsg_job *job) 4306 { 4307 struct Scsi_Host *shost; 4308 struct beiscsi_hba *phba; 4309 struct iscsi_bsg_request *bsg_req = job->request; 4310 int rc = -EINVAL; 4311 unsigned int tag; 4312 struct be_dma_mem nonemb_cmd; 4313 struct be_cmd_resp_hdr *resp; 4314 struct iscsi_bsg_reply *bsg_reply = job->reply; 4315 unsigned short status, extd_status; 4316 4317 shost = iscsi_job_to_shost(job); 4318 phba = iscsi_host_priv(shost); 4319 4320 switch (bsg_req->msgcode) { 4321 case ISCSI_BSG_HST_VENDOR: 4322 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 4323 job->request_payload.payload_len, 4324 &nonemb_cmd.dma); 4325 if (nonemb_cmd.va == NULL) { 4326 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4327 "BM_%d : Failed to allocate memory for " 4328 "beiscsi_bsg_request\n"); 4329 return -EIO; 4330 } 4331 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job, 4332 &nonemb_cmd); 4333 if (!tag) { 4334 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4335 "BM_%d : be_cmd_get_mac_addr Failed\n"); 4336 4337 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4338 nonemb_cmd.va, nonemb_cmd.dma); 4339 return -EAGAIN; 4340 } else 4341 wait_event_interruptible(phba->ctrl.mcc_wait[tag], 4342 phba->ctrl.mcc_numtag[tag]); 4343 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8; 4344 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF; 4345 free_mcc_tag(&phba->ctrl, tag); 4346 resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va; 4347 sg_copy_from_buffer(job->reply_payload.sg_list, 4348 job->reply_payload.sg_cnt, 4349 nonemb_cmd.va, (resp->response_length 4350 + sizeof(*resp))); 4351 bsg_reply->reply_payload_rcv_len = resp->response_length; 4352 bsg_reply->result = status; 4353 bsg_job_done(job, bsg_reply->result, 4354 bsg_reply->reply_payload_rcv_len); 4355 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4356 nonemb_cmd.va, nonemb_cmd.dma); 4357 if (status || extd_status) { 4358 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4359 "BM_%d : be_cmd_get_mac_addr Failed" 4360 " status = %d extd_status = %d\n", 4361 status, extd_status); 4362 4363 return -EIO; 4364 } 4365 break; 4366 4367 default: 4368 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4369 "BM_%d : Unsupported bsg command: 0x%x\n", 4370 bsg_req->msgcode); 4371 break; 4372 } 4373 4374 return rc; 4375 } 4376 4377 void beiscsi_hba_attrs_init(struct beiscsi_hba *phba) 4378 { 4379 /* Set the logging parameter */ 4380 beiscsi_log_enable_init(phba, beiscsi_log_enable); 4381 } 4382 4383 static void beiscsi_quiesce(struct beiscsi_hba *phba) 4384 { 4385 struct hwi_controller *phwi_ctrlr; 4386 struct hwi_context_memory *phwi_context; 4387 struct be_eq_obj *pbe_eq; 4388 unsigned int i, msix_vec; 4389 u8 *real_offset = 0; 4390 u32 value = 0; 4391 4392 phwi_ctrlr = phba->phwi_ctrlr; 4393 phwi_context = phwi_ctrlr->phwi_ctxt; 4394 hwi_disable_intr(phba); 4395 if (phba->msix_enabled) { 4396 for (i = 0; i <= phba->num_cpus; i++) { 4397 msix_vec = phba->msix_entries[i].vector; 4398 free_irq(msix_vec, &phwi_context->be_eq[i]); 4399 kfree(phba->msi_name[i]); 4400 } 4401 } else 4402 if (phba->pcidev->irq) 4403 free_irq(phba->pcidev->irq, phba); 4404 pci_disable_msix(phba->pcidev); 4405 destroy_workqueue(phba->wq); 4406 if (blk_iopoll_enabled) 4407 for (i = 0; i < phba->num_cpus; i++) { 4408 pbe_eq = &phwi_context->be_eq[i]; 4409 blk_iopoll_disable(&pbe_eq->iopoll); 4410 } 4411 4412 beiscsi_clean_port(phba); 4413 beiscsi_free_mem(phba); 4414 real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE; 4415 4416 value = readl((void *)real_offset); 4417 4418 if (value & 0x00010000) { 4419 value &= 0xfffeffff; 4420 writel(value, (void *)real_offset); 4421 } 4422 beiscsi_unmap_pci_function(phba); 4423 pci_free_consistent(phba->pcidev, 4424 phba->ctrl.mbox_mem_alloced.size, 4425 phba->ctrl.mbox_mem_alloced.va, 4426 phba->ctrl.mbox_mem_alloced.dma); 4427 } 4428 4429 static void beiscsi_remove(struct pci_dev *pcidev) 4430 { 4431 4432 struct beiscsi_hba *phba = NULL; 4433 4434 phba = pci_get_drvdata(pcidev); 4435 if (!phba) { 4436 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n"); 4437 return; 4438 } 4439 4440 beiscsi_destroy_def_ifaces(phba); 4441 beiscsi_quiesce(phba); 4442 iscsi_boot_destroy_kset(phba->boot_kset); 4443 iscsi_host_remove(phba->shost); 4444 pci_dev_put(phba->pcidev); 4445 iscsi_host_free(phba->shost); 4446 pci_disable_device(pcidev); 4447 } 4448 4449 static void beiscsi_shutdown(struct pci_dev *pcidev) 4450 { 4451 4452 struct beiscsi_hba *phba = NULL; 4453 4454 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev); 4455 if (!phba) { 4456 dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n"); 4457 return; 4458 } 4459 4460 beiscsi_quiesce(phba); 4461 pci_disable_device(pcidev); 4462 } 4463 4464 static void beiscsi_msix_enable(struct beiscsi_hba *phba) 4465 { 4466 int i, status; 4467 4468 for (i = 0; i <= phba->num_cpus; i++) 4469 phba->msix_entries[i].entry = i; 4470 4471 status = pci_enable_msix(phba->pcidev, phba->msix_entries, 4472 (phba->num_cpus + 1)); 4473 if (!status) 4474 phba->msix_enabled = true; 4475 4476 return; 4477 } 4478 4479 static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, 4480 const struct pci_device_id *id) 4481 { 4482 struct beiscsi_hba *phba = NULL; 4483 struct hwi_controller *phwi_ctrlr; 4484 struct hwi_context_memory *phwi_context; 4485 struct be_eq_obj *pbe_eq; 4486 int ret, num_cpus, i; 4487 u8 *real_offset = 0; 4488 u32 value = 0; 4489 4490 ret = beiscsi_enable_pci(pcidev); 4491 if (ret < 0) { 4492 dev_err(&pcidev->dev, 4493 "beiscsi_dev_probe - Failed to enable pci device\n"); 4494 return ret; 4495 } 4496 4497 phba = beiscsi_hba_alloc(pcidev); 4498 if (!phba) { 4499 dev_err(&pcidev->dev, 4500 "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n"); 4501 goto disable_pci; 4502 } 4503 4504 /* Initialize Driver configuration Paramters */ 4505 beiscsi_hba_attrs_init(phba); 4506 4507 switch (pcidev->device) { 4508 case BE_DEVICE_ID1: 4509 case OC_DEVICE_ID1: 4510 case OC_DEVICE_ID2: 4511 phba->generation = BE_GEN2; 4512 break; 4513 case BE_DEVICE_ID2: 4514 case OC_DEVICE_ID3: 4515 phba->generation = BE_GEN3; 4516 break; 4517 default: 4518 phba->generation = 0; 4519 } 4520 4521 if (enable_msix) 4522 num_cpus = find_num_cpus(); 4523 else 4524 num_cpus = 1; 4525 phba->num_cpus = num_cpus; 4526 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4527 "BM_%d : num_cpus = %d\n", 4528 phba->num_cpus); 4529 4530 if (enable_msix) { 4531 beiscsi_msix_enable(phba); 4532 if (!phba->msix_enabled) 4533 phba->num_cpus = 1; 4534 } 4535 ret = be_ctrl_init(phba, pcidev); 4536 if (ret) { 4537 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4538 "BM_%d : beiscsi_dev_probe-" 4539 "Failed in be_ctrl_init\n"); 4540 goto hba_free; 4541 } 4542 4543 if (!num_hba) { 4544 real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE; 4545 value = readl((void *)real_offset); 4546 if (value & 0x00010000) { 4547 gcrashmode++; 4548 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4549 "BM_%d : Loading Driver in crashdump mode\n"); 4550 ret = beiscsi_cmd_reset_function(phba); 4551 if (ret) { 4552 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4553 "BM_%d : Reset Failed. Aborting Crashdump\n"); 4554 goto hba_free; 4555 } 4556 ret = be_chk_reset_complete(phba); 4557 if (ret) { 4558 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4559 "BM_%d : Failed to get out of reset." 4560 "Aborting Crashdump\n"); 4561 goto hba_free; 4562 } 4563 } else { 4564 value |= 0x00010000; 4565 writel(value, (void *)real_offset); 4566 num_hba++; 4567 } 4568 } 4569 4570 spin_lock_init(&phba->io_sgl_lock); 4571 spin_lock_init(&phba->mgmt_sgl_lock); 4572 spin_lock_init(&phba->isr_lock); 4573 ret = mgmt_get_fw_config(&phba->ctrl, phba); 4574 if (ret != 0) { 4575 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4576 "BM_%d : Error getting fw config\n"); 4577 goto free_port; 4578 } 4579 phba->shost->max_id = phba->fw_config.iscsi_cid_count; 4580 beiscsi_get_params(phba); 4581 phba->shost->can_queue = phba->params.ios_per_ctrl; 4582 ret = beiscsi_init_port(phba); 4583 if (ret < 0) { 4584 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4585 "BM_%d : beiscsi_dev_probe-" 4586 "Failed in beiscsi_init_port\n"); 4587 goto free_port; 4588 } 4589 4590 for (i = 0; i < MAX_MCC_CMD ; i++) { 4591 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); 4592 phba->ctrl.mcc_tag[i] = i + 1; 4593 phba->ctrl.mcc_numtag[i + 1] = 0; 4594 phba->ctrl.mcc_tag_available++; 4595 } 4596 4597 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; 4598 4599 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u", 4600 phba->shost->host_no); 4601 phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1); 4602 if (!phba->wq) { 4603 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4604 "BM_%d : beiscsi_dev_probe-" 4605 "Failed to allocate work queue\n"); 4606 goto free_twq; 4607 } 4608 4609 INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs); 4610 4611 phwi_ctrlr = phba->phwi_ctrlr; 4612 phwi_context = phwi_ctrlr->phwi_ctxt; 4613 if (blk_iopoll_enabled) { 4614 for (i = 0; i < phba->num_cpus; i++) { 4615 pbe_eq = &phwi_context->be_eq[i]; 4616 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, 4617 be_iopoll); 4618 blk_iopoll_enable(&pbe_eq->iopoll); 4619 } 4620 } 4621 ret = beiscsi_init_irqs(phba); 4622 if (ret < 0) { 4623 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4624 "BM_%d : beiscsi_dev_probe-" 4625 "Failed to beiscsi_init_irqs\n"); 4626 goto free_blkenbld; 4627 } 4628 hwi_enable_intr(phba); 4629 4630 if (beiscsi_setup_boot_info(phba)) 4631 /* 4632 * log error but continue, because we may not be using 4633 * iscsi boot. 4634 */ 4635 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4636 "BM_%d : Could not set up " 4637 "iSCSI boot info.\n"); 4638 4639 beiscsi_create_def_ifaces(phba); 4640 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4641 "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n"); 4642 return 0; 4643 4644 free_blkenbld: 4645 destroy_workqueue(phba->wq); 4646 if (blk_iopoll_enabled) 4647 for (i = 0; i < phba->num_cpus; i++) { 4648 pbe_eq = &phwi_context->be_eq[i]; 4649 blk_iopoll_disable(&pbe_eq->iopoll); 4650 } 4651 free_twq: 4652 beiscsi_clean_port(phba); 4653 beiscsi_free_mem(phba); 4654 free_port: 4655 real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE; 4656 4657 value = readl((void *)real_offset); 4658 4659 if (value & 0x00010000) { 4660 value &= 0xfffeffff; 4661 writel(value, (void *)real_offset); 4662 } 4663 4664 pci_free_consistent(phba->pcidev, 4665 phba->ctrl.mbox_mem_alloced.size, 4666 phba->ctrl.mbox_mem_alloced.va, 4667 phba->ctrl.mbox_mem_alloced.dma); 4668 beiscsi_unmap_pci_function(phba); 4669 hba_free: 4670 if (phba->msix_enabled) 4671 pci_disable_msix(phba->pcidev); 4672 iscsi_host_remove(phba->shost); 4673 pci_dev_put(phba->pcidev); 4674 iscsi_host_free(phba->shost); 4675 disable_pci: 4676 pci_disable_device(pcidev); 4677 return ret; 4678 } 4679 4680 struct iscsi_transport beiscsi_iscsi_transport = { 4681 .owner = THIS_MODULE, 4682 .name = DRV_NAME, 4683 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO | 4684 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD, 4685 .create_session = beiscsi_session_create, 4686 .destroy_session = beiscsi_session_destroy, 4687 .create_conn = beiscsi_conn_create, 4688 .bind_conn = beiscsi_conn_bind, 4689 .destroy_conn = iscsi_conn_teardown, 4690 .attr_is_visible = be2iscsi_attr_is_visible, 4691 .set_iface_param = be2iscsi_iface_set_param, 4692 .get_iface_param = be2iscsi_iface_get_param, 4693 .set_param = beiscsi_set_param, 4694 .get_conn_param = iscsi_conn_get_param, 4695 .get_session_param = iscsi_session_get_param, 4696 .get_host_param = beiscsi_get_host_param, 4697 .start_conn = beiscsi_conn_start, 4698 .stop_conn = iscsi_conn_stop, 4699 .send_pdu = iscsi_conn_send_pdu, 4700 .xmit_task = beiscsi_task_xmit, 4701 .cleanup_task = beiscsi_cleanup_task, 4702 .alloc_pdu = beiscsi_alloc_pdu, 4703 .parse_pdu_itt = beiscsi_parse_pdu, 4704 .get_stats = beiscsi_conn_get_stats, 4705 .get_ep_param = beiscsi_ep_get_param, 4706 .ep_connect = beiscsi_ep_connect, 4707 .ep_poll = beiscsi_ep_poll, 4708 .ep_disconnect = beiscsi_ep_disconnect, 4709 .session_recovery_timedout = iscsi_session_recovery_timedout, 4710 .bsg_request = beiscsi_bsg_request, 4711 }; 4712 4713 static struct pci_driver beiscsi_pci_driver = { 4714 .name = DRV_NAME, 4715 .probe = beiscsi_dev_probe, 4716 .remove = beiscsi_remove, 4717 .shutdown = beiscsi_shutdown, 4718 .id_table = beiscsi_pci_id_table 4719 }; 4720 4721 4722 static int __init beiscsi_module_init(void) 4723 { 4724 int ret; 4725 4726 beiscsi_scsi_transport = 4727 iscsi_register_transport(&beiscsi_iscsi_transport); 4728 if (!beiscsi_scsi_transport) { 4729 printk(KERN_ERR 4730 "beiscsi_module_init - Unable to register beiscsi transport.\n"); 4731 return -ENOMEM; 4732 } 4733 printk(KERN_INFO "In beiscsi_module_init, tt=%p\n", 4734 &beiscsi_iscsi_transport); 4735 4736 ret = pci_register_driver(&beiscsi_pci_driver); 4737 if (ret) { 4738 printk(KERN_ERR 4739 "beiscsi_module_init - Unable to register beiscsi pci driver.\n"); 4740 goto unregister_iscsi_transport; 4741 } 4742 return 0; 4743 4744 unregister_iscsi_transport: 4745 iscsi_unregister_transport(&beiscsi_iscsi_transport); 4746 return ret; 4747 } 4748 4749 static void __exit beiscsi_module_exit(void) 4750 { 4751 pci_unregister_driver(&beiscsi_pci_driver); 4752 iscsi_unregister_transport(&beiscsi_iscsi_transport); 4753 } 4754 4755 module_init(beiscsi_module_init); 4756 module_exit(beiscsi_module_exit); 4757