1 /** 2 * Copyright (C) 2005 - 2015 Emulex 3 * All rights reserved. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License version 2 7 * as published by the Free Software Foundation. The full GNU General 8 * Public License is included in this distribution in the file called COPYING. 9 * 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) 11 * 12 * Contact Information: 13 * linux-drivers@avagotech.com 14 * 15 * Emulex 16 * 3333 Susan Street 17 * Costa Mesa, CA 92626 18 */ 19 20 #include <linux/reboot.h> 21 #include <linux/delay.h> 22 #include <linux/slab.h> 23 #include <linux/interrupt.h> 24 #include <linux/blkdev.h> 25 #include <linux/pci.h> 26 #include <linux/string.h> 27 #include <linux/kernel.h> 28 #include <linux/semaphore.h> 29 #include <linux/iscsi_boot_sysfs.h> 30 #include <linux/module.h> 31 #include <linux/bsg-lib.h> 32 33 #include <scsi/libiscsi.h> 34 #include <scsi/scsi_bsg_iscsi.h> 35 #include <scsi/scsi_netlink.h> 36 #include <scsi/scsi_transport_iscsi.h> 37 #include <scsi/scsi_transport.h> 38 #include <scsi/scsi_cmnd.h> 39 #include <scsi/scsi_device.h> 40 #include <scsi/scsi_host.h> 41 #include <scsi/scsi.h> 42 #include "be_main.h" 43 #include "be_iscsi.h" 44 #include "be_mgmt.h" 45 #include "be_cmds.h" 46 47 static unsigned int be_iopoll_budget = 10; 48 static unsigned int be_max_phys_size = 64; 49 static unsigned int enable_msix = 1; 50 51 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); 52 MODULE_VERSION(BUILD_STR); 53 MODULE_AUTHOR("Emulex Corporation"); 54 MODULE_LICENSE("GPL"); 55 module_param(be_iopoll_budget, int, 0); 56 module_param(enable_msix, int, 0); 57 module_param(be_max_phys_size, uint, S_IRUGO); 58 MODULE_PARM_DESC(be_max_phys_size, 59 "Maximum Size (In Kilobytes) of physically contiguous " 60 "memory that can be allocated. 
Range is 16 - 128"); 61 62 #define beiscsi_disp_param(_name)\ 63 ssize_t \ 64 beiscsi_##_name##_disp(struct device *dev,\ 65 struct device_attribute *attrib, char *buf) \ 66 { \ 67 struct Scsi_Host *shost = class_to_shost(dev);\ 68 struct beiscsi_hba *phba = iscsi_host_priv(shost); \ 69 uint32_t param_val = 0; \ 70 param_val = phba->attr_##_name;\ 71 return snprintf(buf, PAGE_SIZE, "%d\n",\ 72 phba->attr_##_name);\ 73 } 74 75 #define beiscsi_change_param(_name, _minval, _maxval, _defaval)\ 76 int \ 77 beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\ 78 {\ 79 if (val >= _minval && val <= _maxval) {\ 80 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\ 81 "BA_%d : beiscsi_"#_name" updated "\ 82 "from 0x%x ==> 0x%x\n",\ 83 phba->attr_##_name, val); \ 84 phba->attr_##_name = val;\ 85 return 0;\ 86 } \ 87 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \ 88 "BA_%d beiscsi_"#_name" attribute "\ 89 "cannot be updated to 0x%x, "\ 90 "range allowed is ["#_minval" - "#_maxval"]\n", val);\ 91 return -EINVAL;\ 92 } 93 94 #define beiscsi_store_param(_name) \ 95 ssize_t \ 96 beiscsi_##_name##_store(struct device *dev,\ 97 struct device_attribute *attr, const char *buf,\ 98 size_t count) \ 99 { \ 100 struct Scsi_Host *shost = class_to_shost(dev);\ 101 struct beiscsi_hba *phba = iscsi_host_priv(shost);\ 102 uint32_t param_val = 0;\ 103 if (!isdigit(buf[0]))\ 104 return -EINVAL;\ 105 if (sscanf(buf, "%i", ¶m_val) != 1)\ 106 return -EINVAL;\ 107 if (beiscsi_##_name##_change(phba, param_val) == 0) \ 108 return strlen(buf);\ 109 else \ 110 return -EINVAL;\ 111 } 112 113 #define beiscsi_init_param(_name, _minval, _maxval, _defval) \ 114 int \ 115 beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \ 116 { \ 117 if (val >= _minval && val <= _maxval) {\ 118 phba->attr_##_name = val;\ 119 return 0;\ 120 } \ 121 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\ 122 "BA_%d beiscsi_"#_name" attribute " \ 123 "cannot be updated to 0x%x, "\ 124 "range allowed is ["#_minval" 
- "#_maxval"]\n", val);\ 125 phba->attr_##_name = _defval;\ 126 return -EINVAL;\ 127 } 128 129 #define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \ 130 static uint beiscsi_##_name = _defval;\ 131 module_param(beiscsi_##_name, uint, S_IRUGO);\ 132 MODULE_PARM_DESC(beiscsi_##_name, _descp);\ 133 beiscsi_disp_param(_name)\ 134 beiscsi_change_param(_name, _minval, _maxval, _defval)\ 135 beiscsi_store_param(_name)\ 136 beiscsi_init_param(_name, _minval, _maxval, _defval)\ 137 DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\ 138 beiscsi_##_name##_disp, beiscsi_##_name##_store) 139 140 /* 141 * When new log level added update the 142 * the MAX allowed value for log_enable 143 */ 144 BEISCSI_RW_ATTR(log_enable, 0x00, 145 0xFF, 0x00, "Enable logging Bit Mask\n" 146 "\t\t\t\tInitialization Events : 0x01\n" 147 "\t\t\t\tMailbox Events : 0x02\n" 148 "\t\t\t\tMiscellaneous Events : 0x04\n" 149 "\t\t\t\tError Handling : 0x08\n" 150 "\t\t\t\tIO Path Events : 0x10\n" 151 "\t\t\t\tConfiguration Path : 0x20\n" 152 "\t\t\t\tiSCSI Protocol : 0x40\n"); 153 154 DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL); 155 DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL); 156 DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL); 157 DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL); 158 DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO, 159 beiscsi_active_session_disp, NULL); 160 DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO, 161 beiscsi_free_session_disp, NULL); 162 struct device_attribute *beiscsi_attrs[] = { 163 &dev_attr_beiscsi_log_enable, 164 &dev_attr_beiscsi_drvr_ver, 165 &dev_attr_beiscsi_adapter_family, 166 &dev_attr_beiscsi_fw_ver, 167 &dev_attr_beiscsi_active_session_count, 168 &dev_attr_beiscsi_free_session_count, 169 &dev_attr_beiscsi_phys_port, 170 NULL, 171 }; 172 173 static char const *cqe_desc[] = { 174 "RESERVED_DESC", 175 "SOL_CMD_COMPLETE", 176 "SOL_CMD_KILLED_DATA_DIGEST_ERR", 177 
"CXN_KILLED_PDU_SIZE_EXCEEDS_DSL", 178 "CXN_KILLED_BURST_LEN_MISMATCH", 179 "CXN_KILLED_AHS_RCVD", 180 "CXN_KILLED_HDR_DIGEST_ERR", 181 "CXN_KILLED_UNKNOWN_HDR", 182 "CXN_KILLED_STALE_ITT_TTT_RCVD", 183 "CXN_KILLED_INVALID_ITT_TTT_RCVD", 184 "CXN_KILLED_RST_RCVD", 185 "CXN_KILLED_TIMED_OUT", 186 "CXN_KILLED_RST_SENT", 187 "CXN_KILLED_FIN_RCVD", 188 "CXN_KILLED_BAD_UNSOL_PDU_RCVD", 189 "CXN_KILLED_BAD_WRB_INDEX_ERROR", 190 "CXN_KILLED_OVER_RUN_RESIDUAL", 191 "CXN_KILLED_UNDER_RUN_RESIDUAL", 192 "CMD_KILLED_INVALID_STATSN_RCVD", 193 "CMD_KILLED_INVALID_R2T_RCVD", 194 "CMD_CXN_KILLED_LUN_INVALID", 195 "CMD_CXN_KILLED_ICD_INVALID", 196 "CMD_CXN_KILLED_ITT_INVALID", 197 "CMD_CXN_KILLED_SEQ_OUTOFORDER", 198 "CMD_CXN_KILLED_INVALID_DATASN_RCVD", 199 "CXN_INVALIDATE_NOTIFY", 200 "CXN_INVALIDATE_INDEX_NOTIFY", 201 "CMD_INVALIDATED_NOTIFY", 202 "UNSOL_HDR_NOTIFY", 203 "UNSOL_DATA_NOTIFY", 204 "UNSOL_DATA_DIGEST_ERROR_NOTIFY", 205 "DRIVERMSG_NOTIFY", 206 "CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN", 207 "SOL_CMD_KILLED_DIF_ERR", 208 "CXN_KILLED_SYN_RCVD", 209 "CXN_KILLED_IMM_DATA_RCVD" 210 }; 211 212 static int beiscsi_slave_configure(struct scsi_device *sdev) 213 { 214 blk_queue_max_segment_size(sdev->request_queue, 65536); 215 return 0; 216 } 217 218 static int beiscsi_eh_abort(struct scsi_cmnd *sc) 219 { 220 struct iscsi_cls_session *cls_session; 221 struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr; 222 struct beiscsi_io_task *aborted_io_task; 223 struct iscsi_conn *conn; 224 struct beiscsi_conn *beiscsi_conn; 225 struct beiscsi_hba *phba; 226 struct iscsi_session *session; 227 struct invalidate_command_table *inv_tbl; 228 struct be_dma_mem nonemb_cmd; 229 unsigned int cid, tag, num_invalidate; 230 int rc; 231 232 cls_session = starget_to_session(scsi_target(sc->device)); 233 session = cls_session->dd_data; 234 235 spin_lock_bh(&session->frwd_lock); 236 if (!aborted_task || !aborted_task->sc) { 237 /* we raced */ 238 spin_unlock_bh(&session->frwd_lock); 239 
return SUCCESS; 240 } 241 242 aborted_io_task = aborted_task->dd_data; 243 if (!aborted_io_task->scsi_cmnd) { 244 /* raced or invalid command */ 245 spin_unlock_bh(&session->frwd_lock); 246 return SUCCESS; 247 } 248 spin_unlock_bh(&session->frwd_lock); 249 /* Invalidate WRB Posted for this Task */ 250 AMAP_SET_BITS(struct amap_iscsi_wrb, invld, 251 aborted_io_task->pwrb_handle->pwrb, 252 1); 253 254 conn = aborted_task->conn; 255 beiscsi_conn = conn->dd_data; 256 phba = beiscsi_conn->phba; 257 258 /* invalidate iocb */ 259 cid = beiscsi_conn->beiscsi_conn_cid; 260 inv_tbl = phba->inv_tbl; 261 memset(inv_tbl, 0x0, sizeof(*inv_tbl)); 262 inv_tbl->cid = cid; 263 inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index; 264 num_invalidate = 1; 265 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 266 sizeof(struct invalidate_commands_params_in), 267 &nonemb_cmd.dma); 268 if (nonemb_cmd.va == NULL) { 269 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, 270 "BM_%d : Failed to allocate memory for" 271 "mgmt_invalidate_icds\n"); 272 return FAILED; 273 } 274 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); 275 276 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, 277 cid, &nonemb_cmd); 278 if (!tag) { 279 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, 280 "BM_%d : mgmt_invalidate_icds could not be" 281 "submitted\n"); 282 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 283 nonemb_cmd.va, nonemb_cmd.dma); 284 285 return FAILED; 286 } 287 288 rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); 289 if (rc != -EBUSY) 290 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 291 nonemb_cmd.va, nonemb_cmd.dma); 292 293 return iscsi_eh_abort(sc); 294 } 295 296 static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) 297 { 298 struct iscsi_task *abrt_task; 299 struct beiscsi_io_task *abrt_io_task; 300 struct iscsi_conn *conn; 301 struct beiscsi_conn *beiscsi_conn; 302 struct beiscsi_hba *phba; 303 struct iscsi_session *session; 304 struct 
iscsi_cls_session *cls_session; 305 struct invalidate_command_table *inv_tbl; 306 struct be_dma_mem nonemb_cmd; 307 unsigned int cid, tag, i, num_invalidate; 308 int rc; 309 310 /* invalidate iocbs */ 311 cls_session = starget_to_session(scsi_target(sc->device)); 312 session = cls_session->dd_data; 313 spin_lock_bh(&session->frwd_lock); 314 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) { 315 spin_unlock_bh(&session->frwd_lock); 316 return FAILED; 317 } 318 conn = session->leadconn; 319 beiscsi_conn = conn->dd_data; 320 phba = beiscsi_conn->phba; 321 cid = beiscsi_conn->beiscsi_conn_cid; 322 inv_tbl = phba->inv_tbl; 323 memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN); 324 num_invalidate = 0; 325 for (i = 0; i < conn->session->cmds_max; i++) { 326 abrt_task = conn->session->cmds[i]; 327 abrt_io_task = abrt_task->dd_data; 328 if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE) 329 continue; 330 331 if (sc->device->lun != abrt_task->sc->device->lun) 332 continue; 333 334 /* Invalidate WRB Posted for this Task */ 335 AMAP_SET_BITS(struct amap_iscsi_wrb, invld, 336 abrt_io_task->pwrb_handle->pwrb, 337 1); 338 339 inv_tbl->cid = cid; 340 inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index; 341 num_invalidate++; 342 inv_tbl++; 343 } 344 spin_unlock_bh(&session->frwd_lock); 345 inv_tbl = phba->inv_tbl; 346 347 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 348 sizeof(struct invalidate_commands_params_in), 349 &nonemb_cmd.dma); 350 if (nonemb_cmd.va == NULL) { 351 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, 352 "BM_%d : Failed to allocate memory for" 353 "mgmt_invalidate_icds\n"); 354 return FAILED; 355 } 356 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); 357 memset(nonemb_cmd.va, 0, nonemb_cmd.size); 358 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, 359 cid, &nonemb_cmd); 360 if (!tag) { 361 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, 362 "BM_%d : mgmt_invalidate_icds could not be" 363 " 
submitted\n"); 364 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 365 nonemb_cmd.va, nonemb_cmd.dma); 366 return FAILED; 367 } 368 369 rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); 370 if (rc != -EBUSY) 371 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 372 nonemb_cmd.va, nonemb_cmd.dma); 373 return iscsi_eh_device_reset(sc); 374 } 375 376 static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf) 377 { 378 struct beiscsi_hba *phba = data; 379 struct mgmt_session_info *boot_sess = &phba->boot_sess; 380 struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0]; 381 char *str = buf; 382 int rc; 383 384 switch (type) { 385 case ISCSI_BOOT_TGT_NAME: 386 rc = sprintf(buf, "%.*s\n", 387 (int)strlen(boot_sess->target_name), 388 (char *)&boot_sess->target_name); 389 break; 390 case ISCSI_BOOT_TGT_IP_ADDR: 391 if (boot_conn->dest_ipaddr.ip_type == 0x1) 392 rc = sprintf(buf, "%pI4\n", 393 (char *)&boot_conn->dest_ipaddr.addr); 394 else 395 rc = sprintf(str, "%pI6\n", 396 (char *)&boot_conn->dest_ipaddr.addr); 397 break; 398 case ISCSI_BOOT_TGT_PORT: 399 rc = sprintf(str, "%d\n", boot_conn->dest_port); 400 break; 401 402 case ISCSI_BOOT_TGT_CHAP_NAME: 403 rc = sprintf(str, "%.*s\n", 404 boot_conn->negotiated_login_options.auth_data.chap. 405 target_chap_name_length, 406 (char *)&boot_conn->negotiated_login_options. 407 auth_data.chap.target_chap_name); 408 break; 409 case ISCSI_BOOT_TGT_CHAP_SECRET: 410 rc = sprintf(str, "%.*s\n", 411 boot_conn->negotiated_login_options.auth_data.chap. 412 target_secret_length, 413 (char *)&boot_conn->negotiated_login_options. 414 auth_data.chap.target_secret); 415 break; 416 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 417 rc = sprintf(str, "%.*s\n", 418 boot_conn->negotiated_login_options.auth_data.chap. 419 intr_chap_name_length, 420 (char *)&boot_conn->negotiated_login_options. 
421 auth_data.chap.intr_chap_name); 422 break; 423 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 424 rc = sprintf(str, "%.*s\n", 425 boot_conn->negotiated_login_options.auth_data.chap. 426 intr_secret_length, 427 (char *)&boot_conn->negotiated_login_options. 428 auth_data.chap.intr_secret); 429 break; 430 case ISCSI_BOOT_TGT_FLAGS: 431 rc = sprintf(str, "2\n"); 432 break; 433 case ISCSI_BOOT_TGT_NIC_ASSOC: 434 rc = sprintf(str, "0\n"); 435 break; 436 default: 437 rc = -ENOSYS; 438 break; 439 } 440 return rc; 441 } 442 443 static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf) 444 { 445 struct beiscsi_hba *phba = data; 446 char *str = buf; 447 int rc; 448 449 switch (type) { 450 case ISCSI_BOOT_INI_INITIATOR_NAME: 451 rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname); 452 break; 453 default: 454 rc = -ENOSYS; 455 break; 456 } 457 return rc; 458 } 459 460 static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf) 461 { 462 struct beiscsi_hba *phba = data; 463 char *str = buf; 464 int rc; 465 466 switch (type) { 467 case ISCSI_BOOT_ETH_FLAGS: 468 rc = sprintf(str, "2\n"); 469 break; 470 case ISCSI_BOOT_ETH_INDEX: 471 rc = sprintf(str, "0\n"); 472 break; 473 case ISCSI_BOOT_ETH_MAC: 474 rc = beiscsi_get_macaddr(str, phba); 475 break; 476 default: 477 rc = -ENOSYS; 478 break; 479 } 480 return rc; 481 } 482 483 484 static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type) 485 { 486 umode_t rc; 487 488 switch (type) { 489 case ISCSI_BOOT_TGT_NAME: 490 case ISCSI_BOOT_TGT_IP_ADDR: 491 case ISCSI_BOOT_TGT_PORT: 492 case ISCSI_BOOT_TGT_CHAP_NAME: 493 case ISCSI_BOOT_TGT_CHAP_SECRET: 494 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 495 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 496 case ISCSI_BOOT_TGT_NIC_ASSOC: 497 case ISCSI_BOOT_TGT_FLAGS: 498 rc = S_IRUGO; 499 break; 500 default: 501 rc = 0; 502 break; 503 } 504 return rc; 505 } 506 507 static umode_t beiscsi_ini_get_attr_visibility(void *data, int type) 508 { 509 umode_t rc; 510 511 
switch (type) { 512 case ISCSI_BOOT_INI_INITIATOR_NAME: 513 rc = S_IRUGO; 514 break; 515 default: 516 rc = 0; 517 break; 518 } 519 return rc; 520 } 521 522 523 static umode_t beiscsi_eth_get_attr_visibility(void *data, int type) 524 { 525 umode_t rc; 526 527 switch (type) { 528 case ISCSI_BOOT_ETH_FLAGS: 529 case ISCSI_BOOT_ETH_MAC: 530 case ISCSI_BOOT_ETH_INDEX: 531 rc = S_IRUGO; 532 break; 533 default: 534 rc = 0; 535 break; 536 } 537 return rc; 538 } 539 540 /*------------------- PCI Driver operations and data ----------------- */ 541 static const struct pci_device_id beiscsi_pci_id_table[] = { 542 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, 543 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, 544 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, 545 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, 546 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) }, 547 { PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) }, 548 { 0 } 549 }; 550 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); 551 552 553 static struct scsi_host_template beiscsi_sht = { 554 .module = THIS_MODULE, 555 .name = "Emulex 10Gbe open-iscsi Initiator Driver", 556 .proc_name = DRV_NAME, 557 .queuecommand = iscsi_queuecommand, 558 .change_queue_depth = scsi_change_queue_depth, 559 .slave_configure = beiscsi_slave_configure, 560 .target_alloc = iscsi_target_alloc, 561 .eh_abort_handler = beiscsi_eh_abort, 562 .eh_device_reset_handler = beiscsi_eh_device_reset, 563 .eh_target_reset_handler = iscsi_eh_session_reset, 564 .shost_attrs = beiscsi_attrs, 565 .sg_tablesize = BEISCSI_SGLIST_ELEMENTS, 566 .can_queue = BE2_IO_DEPTH, 567 .this_id = -1, 568 .max_sectors = BEISCSI_MAX_SECTORS, 569 .cmd_per_lun = BEISCSI_CMD_PER_LUN, 570 .use_clustering = ENABLE_CLUSTERING, 571 .vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID, 572 .track_queue_depth = 1, 573 }; 574 575 static struct scsi_transport_template *beiscsi_scsi_transport; 576 577 static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) 578 { 579 struct beiscsi_hba *phba; 580 
struct Scsi_Host *shost; 581 582 shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0); 583 if (!shost) { 584 dev_err(&pcidev->dev, 585 "beiscsi_hba_alloc - iscsi_host_alloc failed\n"); 586 return NULL; 587 } 588 shost->max_id = BE2_MAX_SESSIONS; 589 shost->max_channel = 0; 590 shost->max_cmd_len = BEISCSI_MAX_CMD_LEN; 591 shost->max_lun = BEISCSI_NUM_MAX_LUN; 592 shost->transportt = beiscsi_scsi_transport; 593 phba = iscsi_host_priv(shost); 594 memset(phba, 0, sizeof(*phba)); 595 phba->shost = shost; 596 phba->pcidev = pci_dev_get(pcidev); 597 pci_set_drvdata(pcidev, phba); 598 phba->interface_handle = 0xFFFFFFFF; 599 600 return phba; 601 } 602 603 static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba) 604 { 605 if (phba->csr_va) { 606 iounmap(phba->csr_va); 607 phba->csr_va = NULL; 608 } 609 if (phba->db_va) { 610 iounmap(phba->db_va); 611 phba->db_va = NULL; 612 } 613 if (phba->pci_va) { 614 iounmap(phba->pci_va); 615 phba->pci_va = NULL; 616 } 617 } 618 619 static int beiscsi_map_pci_bars(struct beiscsi_hba *phba, 620 struct pci_dev *pcidev) 621 { 622 u8 __iomem *addr; 623 int pcicfg_reg; 624 625 addr = ioremap_nocache(pci_resource_start(pcidev, 2), 626 pci_resource_len(pcidev, 2)); 627 if (addr == NULL) 628 return -ENOMEM; 629 phba->ctrl.csr = addr; 630 phba->csr_va = addr; 631 phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2); 632 633 addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024); 634 if (addr == NULL) 635 goto pci_map_err; 636 phba->ctrl.db = addr; 637 phba->db_va = addr; 638 phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4); 639 640 if (phba->generation == BE_GEN2) 641 pcicfg_reg = 1; 642 else 643 pcicfg_reg = 0; 644 645 addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg), 646 pci_resource_len(pcidev, pcicfg_reg)); 647 648 if (addr == NULL) 649 goto pci_map_err; 650 phba->ctrl.pcicfg = addr; 651 phba->pci_va = addr; 652 phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg); 653 return 
0; 654 655 pci_map_err: 656 beiscsi_unmap_pci_function(phba); 657 return -ENOMEM; 658 } 659 660 static int beiscsi_enable_pci(struct pci_dev *pcidev) 661 { 662 int ret; 663 664 ret = pci_enable_device(pcidev); 665 if (ret) { 666 dev_err(&pcidev->dev, 667 "beiscsi_enable_pci - enable device failed\n"); 668 return ret; 669 } 670 671 ret = pci_request_regions(pcidev, DRV_NAME); 672 if (ret) { 673 dev_err(&pcidev->dev, 674 "beiscsi_enable_pci - request region failed\n"); 675 goto pci_dev_disable; 676 } 677 678 pci_set_master(pcidev); 679 ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)); 680 if (ret) { 681 ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)); 682 if (ret) { 683 dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); 684 goto pci_region_release; 685 } else { 686 ret = pci_set_consistent_dma_mask(pcidev, 687 DMA_BIT_MASK(32)); 688 } 689 } else { 690 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64)); 691 if (ret) { 692 dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); 693 goto pci_region_release; 694 } 695 } 696 return 0; 697 698 pci_region_release: 699 pci_release_regions(pcidev); 700 pci_dev_disable: 701 pci_disable_device(pcidev); 702 703 return ret; 704 } 705 706 static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev) 707 { 708 struct be_ctrl_info *ctrl = &phba->ctrl; 709 struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced; 710 struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem; 711 int status = 0; 712 713 ctrl->pdev = pdev; 714 status = beiscsi_map_pci_bars(phba, pdev); 715 if (status) 716 return status; 717 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; 718 mbox_mem_alloc->va = pci_alloc_consistent(pdev, 719 mbox_mem_alloc->size, 720 &mbox_mem_alloc->dma); 721 if (!mbox_mem_alloc->va) { 722 beiscsi_unmap_pci_function(phba); 723 return -ENOMEM; 724 } 725 726 mbox_mem_align->size = sizeof(struct be_mcc_mailbox); 727 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); 728 mbox_mem_align->dma = 
PTR_ALIGN(mbox_mem_alloc->dma, 16); 729 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); 730 spin_lock_init(&ctrl->mbox_lock); 731 spin_lock_init(&phba->ctrl.mcc_lock); 732 spin_lock_init(&phba->ctrl.mcc_cq_lock); 733 734 return status; 735 } 736 737 /** 738 * beiscsi_get_params()- Set the config paramters 739 * @phba: ptr device priv structure 740 **/ 741 static void beiscsi_get_params(struct beiscsi_hba *phba) 742 { 743 uint32_t total_cid_count = 0; 744 uint32_t total_icd_count = 0; 745 uint8_t ulp_num = 0; 746 747 total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) + 748 BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1); 749 750 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 751 uint32_t align_mask = 0; 752 uint32_t icd_post_per_page = 0; 753 uint32_t icd_count_unavailable = 0; 754 uint32_t icd_start = 0, icd_count = 0; 755 uint32_t icd_start_align = 0, icd_count_align = 0; 756 757 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 758 icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; 759 icd_count = phba->fw_config.iscsi_icd_count[ulp_num]; 760 761 /* Get ICD count that can be posted on each page */ 762 icd_post_per_page = (PAGE_SIZE / (BE2_SGE * 763 sizeof(struct iscsi_sge))); 764 align_mask = (icd_post_per_page - 1); 765 766 /* Check if icd_start is aligned ICD per page posting */ 767 if (icd_start % icd_post_per_page) { 768 icd_start_align = ((icd_start + 769 icd_post_per_page) & 770 ~(align_mask)); 771 phba->fw_config. 
772 iscsi_icd_start[ulp_num] = 773 icd_start_align; 774 } 775 776 icd_count_align = (icd_count & ~align_mask); 777 778 /* ICD discarded in the process of alignment */ 779 if (icd_start_align) 780 icd_count_unavailable = ((icd_start_align - 781 icd_start) + 782 (icd_count - 783 icd_count_align)); 784 785 /* Updated ICD count available */ 786 phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count - 787 icd_count_unavailable); 788 789 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 790 "BM_%d : Aligned ICD values\n" 791 "\t ICD Start : %d\n" 792 "\t ICD Count : %d\n" 793 "\t ICD Discarded : %d\n", 794 phba->fw_config. 795 iscsi_icd_start[ulp_num], 796 phba->fw_config. 797 iscsi_icd_count[ulp_num], 798 icd_count_unavailable); 799 break; 800 } 801 } 802 803 total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num]; 804 phba->params.ios_per_ctrl = (total_icd_count - 805 (total_cid_count + 806 BE2_TMFS + BE2_NOPOUT_REQ)); 807 phba->params.cxns_per_ctrl = total_cid_count; 808 phba->params.asyncpdus_per_ctrl = total_cid_count; 809 phba->params.icds_per_ctrl = total_icd_count; 810 phba->params.num_sge_per_io = BE2_SGE; 811 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; 812 phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ; 813 phba->params.eq_timer = 64; 814 phba->params.num_eq_entries = 1024; 815 phba->params.num_cq_entries = 1024; 816 phba->params.wrbs_per_cxn = 256; 817 } 818 819 static void hwi_ring_eq_db(struct beiscsi_hba *phba, 820 unsigned int id, unsigned int clr_interrupt, 821 unsigned int num_processed, 822 unsigned char rearm, unsigned char event) 823 { 824 u32 val = 0; 825 826 if (rearm) 827 val |= 1 << DB_EQ_REARM_SHIFT; 828 if (clr_interrupt) 829 val |= 1 << DB_EQ_CLR_SHIFT; 830 if (event) 831 val |= 1 << DB_EQ_EVNT_SHIFT; 832 833 val |= num_processed << DB_EQ_NUM_POPPED_SHIFT; 834 /* Setting lower order EQ_ID Bits */ 835 val |= (id & DB_EQ_RING_ID_LOW_MASK); 836 837 /* Setting Higher order EQ_ID Bits */ 838 val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) & 839 
DB_EQ_RING_ID_HIGH_MASK) 840 << DB_EQ_HIGH_SET_SHIFT); 841 842 iowrite32(val, phba->db_va + DB_EQ_OFFSET); 843 } 844 845 /** 846 * be_isr_mcc - The isr routine of the driver. 847 * @irq: Not used 848 * @dev_id: Pointer to host adapter structure 849 */ 850 static irqreturn_t be_isr_mcc(int irq, void *dev_id) 851 { 852 struct beiscsi_hba *phba; 853 struct be_eq_entry *eqe = NULL; 854 struct be_queue_info *eq; 855 struct be_queue_info *mcc; 856 unsigned int num_eq_processed; 857 struct be_eq_obj *pbe_eq; 858 unsigned long flags; 859 860 pbe_eq = dev_id; 861 eq = &pbe_eq->q; 862 phba = pbe_eq->phba; 863 mcc = &phba->ctrl.mcc_obj.cq; 864 eqe = queue_tail_node(eq); 865 866 num_eq_processed = 0; 867 868 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 869 & EQE_VALID_MASK) { 870 if (((eqe->dw[offsetof(struct amap_eq_entry, 871 resource_id) / 32] & 872 EQE_RESID_MASK) >> 16) == mcc->id) { 873 spin_lock_irqsave(&phba->isr_lock, flags); 874 pbe_eq->todo_mcc_cq = true; 875 spin_unlock_irqrestore(&phba->isr_lock, flags); 876 } 877 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 878 queue_tail_inc(eq); 879 eqe = queue_tail_node(eq); 880 num_eq_processed++; 881 } 882 if (pbe_eq->todo_mcc_cq) 883 queue_work(phba->wq, &pbe_eq->work_cqs); 884 if (num_eq_processed) 885 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1); 886 887 return IRQ_HANDLED; 888 } 889 890 /** 891 * be_isr_msix - The isr routine of the driver. 
892 * @irq: Not used 893 * @dev_id: Pointer to host adapter structure 894 */ 895 static irqreturn_t be_isr_msix(int irq, void *dev_id) 896 { 897 struct beiscsi_hba *phba; 898 struct be_eq_entry *eqe = NULL; 899 struct be_queue_info *eq; 900 struct be_queue_info *cq; 901 unsigned int num_eq_processed; 902 struct be_eq_obj *pbe_eq; 903 904 pbe_eq = dev_id; 905 eq = &pbe_eq->q; 906 cq = pbe_eq->cq; 907 eqe = queue_tail_node(eq); 908 909 phba = pbe_eq->phba; 910 num_eq_processed = 0; 911 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 912 & EQE_VALID_MASK) { 913 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) 914 blk_iopoll_sched(&pbe_eq->iopoll); 915 916 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 917 queue_tail_inc(eq); 918 eqe = queue_tail_node(eq); 919 num_eq_processed++; 920 } 921 922 if (num_eq_processed) 923 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1); 924 925 return IRQ_HANDLED; 926 } 927 928 /** 929 * be_isr - The isr routine of the driver. 930 * @irq: Not used 931 * @dev_id: Pointer to host adapter structure 932 */ 933 static irqreturn_t be_isr(int irq, void *dev_id) 934 { 935 struct beiscsi_hba *phba; 936 struct hwi_controller *phwi_ctrlr; 937 struct hwi_context_memory *phwi_context; 938 struct be_eq_entry *eqe = NULL; 939 struct be_queue_info *eq; 940 struct be_queue_info *mcc; 941 unsigned long flags, index; 942 unsigned int num_mcceq_processed, num_ioeq_processed; 943 struct be_ctrl_info *ctrl; 944 struct be_eq_obj *pbe_eq; 945 int isr; 946 947 phba = dev_id; 948 ctrl = &phba->ctrl; 949 isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET + 950 (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE)); 951 if (!isr) 952 return IRQ_NONE; 953 954 phwi_ctrlr = phba->phwi_ctrlr; 955 phwi_context = phwi_ctrlr->phwi_ctxt; 956 pbe_eq = &phwi_context->be_eq[0]; 957 958 eq = &phwi_context->be_eq[0].q; 959 mcc = &phba->ctrl.mcc_obj.cq; 960 index = 0; 961 eqe = queue_tail_node(eq); 962 963 num_ioeq_processed = 0; 964 num_mcceq_processed = 0; 965 while 
(eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 966 & EQE_VALID_MASK) { 967 if (((eqe->dw[offsetof(struct amap_eq_entry, 968 resource_id) / 32] & 969 EQE_RESID_MASK) >> 16) == mcc->id) { 970 spin_lock_irqsave(&phba->isr_lock, flags); 971 pbe_eq->todo_mcc_cq = true; 972 spin_unlock_irqrestore(&phba->isr_lock, flags); 973 num_mcceq_processed++; 974 } else { 975 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) 976 blk_iopoll_sched(&pbe_eq->iopoll); 977 num_ioeq_processed++; 978 } 979 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 980 queue_tail_inc(eq); 981 eqe = queue_tail_node(eq); 982 } 983 if (num_ioeq_processed || num_mcceq_processed) { 984 if (pbe_eq->todo_mcc_cq) 985 queue_work(phba->wq, &pbe_eq->work_cqs); 986 987 if ((num_mcceq_processed) && (!num_ioeq_processed)) 988 hwi_ring_eq_db(phba, eq->id, 0, 989 (num_ioeq_processed + 990 num_mcceq_processed) , 1, 1); 991 else 992 hwi_ring_eq_db(phba, eq->id, 0, 993 (num_ioeq_processed + 994 num_mcceq_processed), 0, 1); 995 996 return IRQ_HANDLED; 997 } else 998 return IRQ_NONE; 999 } 1000 1001 static int beiscsi_init_irqs(struct beiscsi_hba *phba) 1002 { 1003 struct pci_dev *pcidev = phba->pcidev; 1004 struct hwi_controller *phwi_ctrlr; 1005 struct hwi_context_memory *phwi_context; 1006 int ret, msix_vec, i, j; 1007 1008 phwi_ctrlr = phba->phwi_ctrlr; 1009 phwi_context = phwi_ctrlr->phwi_ctxt; 1010 1011 if (phba->msix_enabled) { 1012 for (i = 0; i < phba->num_cpus; i++) { 1013 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, 1014 GFP_KERNEL); 1015 if (!phba->msi_name[i]) { 1016 ret = -ENOMEM; 1017 goto free_msix_irqs; 1018 } 1019 1020 sprintf(phba->msi_name[i], "beiscsi_%02x_%02x", 1021 phba->shost->host_no, i); 1022 msix_vec = phba->msix_entries[i].vector; 1023 ret = request_irq(msix_vec, be_isr_msix, 0, 1024 phba->msi_name[i], 1025 &phwi_context->be_eq[i]); 1026 if (ret) { 1027 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 1028 "BM_%d : beiscsi_init_irqs-Failed to" 1029 "register msix for i = %d\n", 1030 i); 1031 
kfree(phba->msi_name[i]); 1032 goto free_msix_irqs; 1033 } 1034 } 1035 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL); 1036 if (!phba->msi_name[i]) { 1037 ret = -ENOMEM; 1038 goto free_msix_irqs; 1039 } 1040 sprintf(phba->msi_name[i], "beiscsi_mcc_%02x", 1041 phba->shost->host_no); 1042 msix_vec = phba->msix_entries[i].vector; 1043 ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i], 1044 &phwi_context->be_eq[i]); 1045 if (ret) { 1046 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT , 1047 "BM_%d : beiscsi_init_irqs-" 1048 "Failed to register beiscsi_msix_mcc\n"); 1049 kfree(phba->msi_name[i]); 1050 goto free_msix_irqs; 1051 } 1052 1053 } else { 1054 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED, 1055 "beiscsi", phba); 1056 if (ret) { 1057 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 1058 "BM_%d : beiscsi_init_irqs-" 1059 "Failed to register irq\\n"); 1060 return ret; 1061 } 1062 } 1063 return 0; 1064 free_msix_irqs: 1065 for (j = i - 1; j >= 0; j--) { 1066 kfree(phba->msi_name[j]); 1067 msix_vec = phba->msix_entries[j].vector; 1068 free_irq(msix_vec, &phwi_context->be_eq[j]); 1069 } 1070 return ret; 1071 } 1072 1073 void hwi_ring_cq_db(struct beiscsi_hba *phba, 1074 unsigned int id, unsigned int num_processed, 1075 unsigned char rearm, unsigned char event) 1076 { 1077 u32 val = 0; 1078 1079 if (rearm) 1080 val |= 1 << DB_CQ_REARM_SHIFT; 1081 1082 val |= num_processed << DB_CQ_NUM_POPPED_SHIFT; 1083 1084 /* Setting lower order CQ_ID Bits */ 1085 val |= (id & DB_CQ_RING_ID_LOW_MASK); 1086 1087 /* Setting Higher order CQ_ID Bits */ 1088 val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) & 1089 DB_CQ_RING_ID_HIGH_MASK) 1090 << DB_CQ_HIGH_SET_SHIFT); 1091 1092 iowrite32(val, phba->db_va + DB_CQ_OFFSET); 1093 } 1094 1095 static unsigned int 1096 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, 1097 struct beiscsi_hba *phba, 1098 struct pdu_base *ppdu, 1099 unsigned long pdu_len, 1100 void *pbuffer, unsigned long buf_len) 1101 { 1102 struct 
iscsi_conn *conn = beiscsi_conn->conn; 1103 struct iscsi_session *session = conn->session; 1104 struct iscsi_task *task; 1105 struct beiscsi_io_task *io_task; 1106 struct iscsi_hdr *login_hdr; 1107 1108 switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] & 1109 PDUBASE_OPCODE_MASK) { 1110 case ISCSI_OP_NOOP_IN: 1111 pbuffer = NULL; 1112 buf_len = 0; 1113 break; 1114 case ISCSI_OP_ASYNC_EVENT: 1115 break; 1116 case ISCSI_OP_REJECT: 1117 WARN_ON(!pbuffer); 1118 WARN_ON(!(buf_len == 48)); 1119 beiscsi_log(phba, KERN_ERR, 1120 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1121 "BM_%d : In ISCSI_OP_REJECT\n"); 1122 break; 1123 case ISCSI_OP_LOGIN_RSP: 1124 case ISCSI_OP_TEXT_RSP: 1125 task = conn->login_task; 1126 io_task = task->dd_data; 1127 login_hdr = (struct iscsi_hdr *)ppdu; 1128 login_hdr->itt = io_task->libiscsi_itt; 1129 break; 1130 default: 1131 beiscsi_log(phba, KERN_WARNING, 1132 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1133 "BM_%d : Unrecognized opcode 0x%x in async msg\n", 1134 (ppdu-> 1135 dw[offsetof(struct amap_pdu_base, opcode) / 32] 1136 & PDUBASE_OPCODE_MASK)); 1137 return 1; 1138 } 1139 1140 spin_lock_bh(&session->back_lock); 1141 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len); 1142 spin_unlock_bh(&session->back_lock); 1143 return 0; 1144 } 1145 1146 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba) 1147 { 1148 struct sgl_handle *psgl_handle; 1149 1150 if (phba->io_sgl_hndl_avbl) { 1151 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, 1152 "BM_%d : In alloc_io_sgl_handle," 1153 " io_sgl_alloc_index=%d\n", 1154 phba->io_sgl_alloc_index); 1155 1156 psgl_handle = phba->io_sgl_hndl_base[phba-> 1157 io_sgl_alloc_index]; 1158 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL; 1159 phba->io_sgl_hndl_avbl--; 1160 if (phba->io_sgl_alloc_index == (phba->params. 
1161 ios_per_ctrl - 1)) 1162 phba->io_sgl_alloc_index = 0; 1163 else 1164 phba->io_sgl_alloc_index++; 1165 } else 1166 psgl_handle = NULL; 1167 return psgl_handle; 1168 } 1169 1170 static void 1171 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) 1172 { 1173 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, 1174 "BM_%d : In free_,io_sgl_free_index=%d\n", 1175 phba->io_sgl_free_index); 1176 1177 if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) { 1178 /* 1179 * this can happen if clean_task is called on a task that 1180 * failed in xmit_task or alloc_pdu. 1181 */ 1182 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, 1183 "BM_%d : Double Free in IO SGL io_sgl_free_index=%d," 1184 "value there=%p\n", phba->io_sgl_free_index, 1185 phba->io_sgl_hndl_base 1186 [phba->io_sgl_free_index]); 1187 return; 1188 } 1189 phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle; 1190 phba->io_sgl_hndl_avbl++; 1191 if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1)) 1192 phba->io_sgl_free_index = 0; 1193 else 1194 phba->io_sgl_free_index++; 1195 } 1196 1197 /** 1198 * alloc_wrb_handle - To allocate a wrb handle 1199 * @phba: The hba pointer 1200 * @cid: The cid to use for allocation 1201 * @pwrb_context: ptr to ptr to wrb context 1202 * 1203 * This happens under session_lock until submission to chip 1204 */ 1205 struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid, 1206 struct hwi_wrb_context **pcontext) 1207 { 1208 struct hwi_wrb_context *pwrb_context; 1209 struct hwi_controller *phwi_ctrlr; 1210 struct wrb_handle *pwrb_handle; 1211 uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); 1212 1213 phwi_ctrlr = phba->phwi_ctrlr; 1214 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 1215 if (pwrb_context->wrb_handles_available >= 2) { 1216 pwrb_handle = pwrb_context->pwrb_handle_base[ 1217 pwrb_context->alloc_index]; 1218 pwrb_context->wrb_handles_available--; 1219 if (pwrb_context->alloc_index == 1220 
(phba->params.wrbs_per_cxn - 1)) 1221 pwrb_context->alloc_index = 0; 1222 else 1223 pwrb_context->alloc_index++; 1224 1225 /* Return the context address */ 1226 *pcontext = pwrb_context; 1227 } else 1228 pwrb_handle = NULL; 1229 return pwrb_handle; 1230 } 1231 1232 /** 1233 * free_wrb_handle - To free the wrb handle back to pool 1234 * @phba: The hba pointer 1235 * @pwrb_context: The context to free from 1236 * @pwrb_handle: The wrb_handle to free 1237 * 1238 * This happens under session_lock until submission to chip 1239 */ 1240 static void 1241 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context, 1242 struct wrb_handle *pwrb_handle) 1243 { 1244 pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle; 1245 pwrb_context->wrb_handles_available++; 1246 if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1)) 1247 pwrb_context->free_index = 0; 1248 else 1249 pwrb_context->free_index++; 1250 1251 beiscsi_log(phba, KERN_INFO, 1252 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1253 "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x" 1254 "wrb_handles_available=%d\n", 1255 pwrb_handle, pwrb_context->free_index, 1256 pwrb_context->wrb_handles_available); 1257 } 1258 1259 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba) 1260 { 1261 struct sgl_handle *psgl_handle; 1262 1263 if (phba->eh_sgl_hndl_avbl) { 1264 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index]; 1265 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL; 1266 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 1267 "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n", 1268 phba->eh_sgl_alloc_index, 1269 phba->eh_sgl_alloc_index); 1270 1271 phba->eh_sgl_hndl_avbl--; 1272 if (phba->eh_sgl_alloc_index == 1273 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1274 1)) 1275 phba->eh_sgl_alloc_index = 0; 1276 else 1277 phba->eh_sgl_alloc_index++; 1278 } else 1279 psgl_handle = NULL; 1280 return psgl_handle; 1281 } 1282 1283 void 1284 
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) 1285 { 1286 1287 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 1288 "BM_%d : In free_mgmt_sgl_handle," 1289 "eh_sgl_free_index=%d\n", 1290 phba->eh_sgl_free_index); 1291 1292 if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) { 1293 /* 1294 * this can happen if clean_task is called on a task that 1295 * failed in xmit_task or alloc_pdu. 1296 */ 1297 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, 1298 "BM_%d : Double Free in eh SGL ," 1299 "eh_sgl_free_index=%d\n", 1300 phba->eh_sgl_free_index); 1301 return; 1302 } 1303 phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle; 1304 phba->eh_sgl_hndl_avbl++; 1305 if (phba->eh_sgl_free_index == 1306 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1)) 1307 phba->eh_sgl_free_index = 0; 1308 else 1309 phba->eh_sgl_free_index++; 1310 } 1311 1312 static void 1313 be_complete_io(struct beiscsi_conn *beiscsi_conn, 1314 struct iscsi_task *task, 1315 struct common_sol_cqe *csol_cqe) 1316 { 1317 struct beiscsi_io_task *io_task = task->dd_data; 1318 struct be_status_bhs *sts_bhs = 1319 (struct be_status_bhs *)io_task->cmd_bhs; 1320 struct iscsi_conn *conn = beiscsi_conn->conn; 1321 unsigned char *sense; 1322 u32 resid = 0, exp_cmdsn, max_cmdsn; 1323 u8 rsp, status, flags; 1324 1325 exp_cmdsn = csol_cqe->exp_cmdsn; 1326 max_cmdsn = (csol_cqe->exp_cmdsn + 1327 csol_cqe->cmd_wnd - 1); 1328 rsp = csol_cqe->i_resp; 1329 status = csol_cqe->i_sts; 1330 flags = csol_cqe->i_flags; 1331 resid = csol_cqe->res_cnt; 1332 1333 if (!task->sc) { 1334 if (io_task->scsi_cmnd) { 1335 scsi_dma_unmap(io_task->scsi_cmnd); 1336 io_task->scsi_cmnd = NULL; 1337 } 1338 1339 return; 1340 } 1341 task->sc->result = (DID_OK << 16) | status; 1342 if (rsp != ISCSI_STATUS_CMD_COMPLETED) { 1343 task->sc->result = DID_ERROR << 16; 1344 goto unmap; 1345 } 1346 1347 /* bidi not initially supported */ 1348 if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | 
ISCSI_FLAG_CMD_OVERFLOW)) { 1349 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW)) 1350 task->sc->result = DID_ERROR << 16; 1351 1352 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) { 1353 scsi_set_resid(task->sc, resid); 1354 if (!status && (scsi_bufflen(task->sc) - resid < 1355 task->sc->underflow)) 1356 task->sc->result = DID_ERROR << 16; 1357 } 1358 } 1359 1360 if (status == SAM_STAT_CHECK_CONDITION) { 1361 u16 sense_len; 1362 unsigned short *slen = (unsigned short *)sts_bhs->sense_info; 1363 1364 sense = sts_bhs->sense_info + sizeof(unsigned short); 1365 sense_len = be16_to_cpu(*slen); 1366 memcpy(task->sc->sense_buffer, sense, 1367 min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE)); 1368 } 1369 1370 if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) 1371 conn->rxdata_octets += resid; 1372 unmap: 1373 if (io_task->scsi_cmnd) { 1374 scsi_dma_unmap(io_task->scsi_cmnd); 1375 io_task->scsi_cmnd = NULL; 1376 } 1377 iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn); 1378 } 1379 1380 static void 1381 be_complete_logout(struct beiscsi_conn *beiscsi_conn, 1382 struct iscsi_task *task, 1383 struct common_sol_cqe *csol_cqe) 1384 { 1385 struct iscsi_logout_rsp *hdr; 1386 struct beiscsi_io_task *io_task = task->dd_data; 1387 struct iscsi_conn *conn = beiscsi_conn->conn; 1388 1389 hdr = (struct iscsi_logout_rsp *)task->hdr; 1390 hdr->opcode = ISCSI_OP_LOGOUT_RSP; 1391 hdr->t2wait = 5; 1392 hdr->t2retain = 0; 1393 hdr->flags = csol_cqe->i_flags; 1394 hdr->response = csol_cqe->i_resp; 1395 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); 1396 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + 1397 csol_cqe->cmd_wnd - 1); 1398 1399 hdr->dlength[0] = 0; 1400 hdr->dlength[1] = 0; 1401 hdr->dlength[2] = 0; 1402 hdr->hlength = 0; 1403 hdr->itt = io_task->libiscsi_itt; 1404 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1405 } 1406 1407 static void 1408 be_complete_tmf(struct beiscsi_conn *beiscsi_conn, 1409 struct iscsi_task *task, 1410 struct common_sol_cqe 
*csol_cqe) 1411 { 1412 struct iscsi_tm_rsp *hdr; 1413 struct iscsi_conn *conn = beiscsi_conn->conn; 1414 struct beiscsi_io_task *io_task = task->dd_data; 1415 1416 hdr = (struct iscsi_tm_rsp *)task->hdr; 1417 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; 1418 hdr->flags = csol_cqe->i_flags; 1419 hdr->response = csol_cqe->i_resp; 1420 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); 1421 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + 1422 csol_cqe->cmd_wnd - 1); 1423 1424 hdr->itt = io_task->libiscsi_itt; 1425 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1426 } 1427 1428 static void 1429 hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, 1430 struct beiscsi_hba *phba, struct sol_cqe *psol) 1431 { 1432 struct hwi_wrb_context *pwrb_context; 1433 struct wrb_handle *pwrb_handle = NULL; 1434 struct hwi_controller *phwi_ctrlr; 1435 struct iscsi_task *task; 1436 struct beiscsi_io_task *io_task; 1437 uint16_t wrb_index, cid, cri_index; 1438 1439 phwi_ctrlr = phba->phwi_ctrlr; 1440 if (is_chip_be2_be3r(phba)) { 1441 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe, 1442 wrb_idx, psol); 1443 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe, 1444 cid, psol); 1445 } else { 1446 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, 1447 wrb_idx, psol); 1448 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, 1449 cid, psol); 1450 } 1451 1452 cri_index = BE_GET_CRI_FROM_CID(cid); 1453 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 1454 pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index]; 1455 task = pwrb_handle->pio_handle; 1456 1457 io_task = task->dd_data; 1458 memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb)); 1459 iscsi_put_task(task); 1460 } 1461 1462 static void 1463 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn, 1464 struct iscsi_task *task, 1465 struct common_sol_cqe *csol_cqe) 1466 { 1467 struct iscsi_nopin *hdr; 1468 struct iscsi_conn *conn = beiscsi_conn->conn; 1469 struct beiscsi_io_task *io_task = task->dd_data; 
1470 1471 hdr = (struct iscsi_nopin *)task->hdr; 1472 hdr->flags = csol_cqe->i_flags; 1473 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); 1474 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + 1475 csol_cqe->cmd_wnd - 1); 1476 1477 hdr->opcode = ISCSI_OP_NOOP_IN; 1478 hdr->itt = io_task->libiscsi_itt; 1479 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1480 } 1481 1482 static void adapter_get_sol_cqe(struct beiscsi_hba *phba, 1483 struct sol_cqe *psol, 1484 struct common_sol_cqe *csol_cqe) 1485 { 1486 if (is_chip_be2_be3r(phba)) { 1487 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe, 1488 i_exp_cmd_sn, psol); 1489 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe, 1490 i_res_cnt, psol); 1491 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, 1492 i_cmd_wnd, psol); 1493 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe, 1494 wrb_index, psol); 1495 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe, 1496 cid, psol); 1497 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe, 1498 hw_sts, psol); 1499 csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe, 1500 i_resp, psol); 1501 csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe, 1502 i_sts, psol); 1503 csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe, 1504 i_flags, psol); 1505 } else { 1506 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1507 i_exp_cmd_sn, psol); 1508 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1509 i_res_cnt, psol); 1510 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1511 wrb_index, psol); 1512 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1513 cid, psol); 1514 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1515 hw_sts, psol); 1516 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1517 i_cmd_wnd, psol); 1518 if (AMAP_GET_BITS(struct amap_sol_cqe_v2, 1519 cmd_cmpl, psol)) 1520 csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1521 i_sts, psol); 1522 else 1523 csol_cqe->i_resp = 
AMAP_GET_BITS(struct amap_sol_cqe_v2, 1524 i_sts, psol); 1525 if (AMAP_GET_BITS(struct amap_sol_cqe_v2, 1526 u, psol)) 1527 csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW; 1528 1529 if (AMAP_GET_BITS(struct amap_sol_cqe_v2, 1530 o, psol)) 1531 csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW; 1532 } 1533 } 1534 1535 1536 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, 1537 struct beiscsi_hba *phba, struct sol_cqe *psol) 1538 { 1539 struct hwi_wrb_context *pwrb_context; 1540 struct wrb_handle *pwrb_handle; 1541 struct iscsi_wrb *pwrb = NULL; 1542 struct hwi_controller *phwi_ctrlr; 1543 struct iscsi_task *task; 1544 unsigned int type; 1545 struct iscsi_conn *conn = beiscsi_conn->conn; 1546 struct iscsi_session *session = conn->session; 1547 struct common_sol_cqe csol_cqe = {0}; 1548 uint16_t cri_index = 0; 1549 1550 phwi_ctrlr = phba->phwi_ctrlr; 1551 1552 /* Copy the elements to a common structure */ 1553 adapter_get_sol_cqe(phba, psol, &csol_cqe); 1554 1555 cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid); 1556 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 1557 1558 pwrb_handle = pwrb_context->pwrb_handle_basestd[ 1559 csol_cqe.wrb_index]; 1560 1561 task = pwrb_handle->pio_handle; 1562 pwrb = pwrb_handle->pwrb; 1563 type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type; 1564 1565 spin_lock_bh(&session->back_lock); 1566 switch (type) { 1567 case HWH_TYPE_IO: 1568 case HWH_TYPE_IO_RD: 1569 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == 1570 ISCSI_OP_NOOP_OUT) 1571 be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe); 1572 else 1573 be_complete_io(beiscsi_conn, task, &csol_cqe); 1574 break; 1575 1576 case HWH_TYPE_LOGOUT: 1577 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) 1578 be_complete_logout(beiscsi_conn, task, &csol_cqe); 1579 else 1580 be_complete_tmf(beiscsi_conn, task, &csol_cqe); 1581 break; 1582 1583 case HWH_TYPE_LOGIN: 1584 beiscsi_log(phba, KERN_ERR, 1585 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1586 "BM_%d :\t\t No 
HWH_TYPE_LOGIN Expected in" 1587 " hwi_complete_cmd- Solicited path\n"); 1588 break; 1589 1590 case HWH_TYPE_NOP: 1591 be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe); 1592 break; 1593 1594 default: 1595 beiscsi_log(phba, KERN_WARNING, 1596 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1597 "BM_%d : In hwi_complete_cmd, unknown type = %d" 1598 "wrb_index 0x%x CID 0x%x\n", type, 1599 csol_cqe.wrb_index, 1600 csol_cqe.cid); 1601 break; 1602 } 1603 1604 spin_unlock_bh(&session->back_lock); 1605 } 1606 1607 static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context 1608 *pasync_ctx, unsigned int is_header, 1609 unsigned int host_write_ptr) 1610 { 1611 if (is_header) 1612 return &pasync_ctx->async_entry[host_write_ptr]. 1613 header_busy_list; 1614 else 1615 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list; 1616 } 1617 1618 static struct async_pdu_handle * 1619 hwi_get_async_handle(struct beiscsi_hba *phba, 1620 struct beiscsi_conn *beiscsi_conn, 1621 struct hwi_async_pdu_context *pasync_ctx, 1622 struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index) 1623 { 1624 struct be_bus_address phys_addr; 1625 struct list_head *pbusy_list; 1626 struct async_pdu_handle *pasync_handle = NULL; 1627 unsigned char is_header = 0; 1628 unsigned int index, dpl; 1629 1630 if (is_chip_be2_be3r(phba)) { 1631 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1632 dpl, pdpdu_cqe); 1633 index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1634 index, pdpdu_cqe); 1635 } else { 1636 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, 1637 dpl, pdpdu_cqe); 1638 index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, 1639 index, pdpdu_cqe); 1640 } 1641 1642 phys_addr.u.a32.address_lo = 1643 (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, 1644 db_addr_lo) / 32] - dpl); 1645 phys_addr.u.a32.address_hi = 1646 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, 1647 db_addr_hi) / 32]; 1648 1649 phys_addr.u.a64.address = 1650 *((unsigned long long *)(&phys_addr.u.a64.address)); 1651 1652 
switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32] 1653 & PDUCQE_CODE_MASK) { 1654 case UNSOL_HDR_NOTIFY: 1655 is_header = 1; 1656 1657 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1658 is_header, index); 1659 break; 1660 case UNSOL_DATA_NOTIFY: 1661 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1662 is_header, index); 1663 break; 1664 default: 1665 pbusy_list = NULL; 1666 beiscsi_log(phba, KERN_WARNING, 1667 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1668 "BM_%d : Unexpected code=%d\n", 1669 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, 1670 code) / 32] & PDUCQE_CODE_MASK); 1671 return NULL; 1672 } 1673 1674 WARN_ON(list_empty(pbusy_list)); 1675 list_for_each_entry(pasync_handle, pbusy_list, link) { 1676 if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address) 1677 break; 1678 } 1679 1680 WARN_ON(!pasync_handle); 1681 1682 pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID( 1683 beiscsi_conn->beiscsi_conn_cid); 1684 pasync_handle->is_header = is_header; 1685 pasync_handle->buffer_len = dpl; 1686 *pcq_index = index; 1687 1688 return pasync_handle; 1689 } 1690 1691 static unsigned int 1692 hwi_update_async_writables(struct beiscsi_hba *phba, 1693 struct hwi_async_pdu_context *pasync_ctx, 1694 unsigned int is_header, unsigned int cq_index) 1695 { 1696 struct list_head *pbusy_list; 1697 struct async_pdu_handle *pasync_handle; 1698 unsigned int num_entries, writables = 0; 1699 unsigned int *pep_read_ptr, *pwritables; 1700 1701 num_entries = pasync_ctx->num_entries; 1702 if (is_header) { 1703 pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr; 1704 pwritables = &pasync_ctx->async_header.writables; 1705 } else { 1706 pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr; 1707 pwritables = &pasync_ctx->async_data.writables; 1708 } 1709 1710 while ((*pep_read_ptr) != cq_index) { 1711 (*pep_read_ptr)++; 1712 *pep_read_ptr = (*pep_read_ptr) % num_entries; 1713 1714 pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header, 1715 *pep_read_ptr); 1716 
if (writables == 0) 1717 WARN_ON(list_empty(pbusy_list)); 1718 1719 if (!list_empty(pbusy_list)) { 1720 pasync_handle = list_entry(pbusy_list->next, 1721 struct async_pdu_handle, 1722 link); 1723 WARN_ON(!pasync_handle); 1724 pasync_handle->consumed = 1; 1725 } 1726 1727 writables++; 1728 } 1729 1730 if (!writables) { 1731 beiscsi_log(phba, KERN_ERR, 1732 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1733 "BM_%d : Duplicate notification received - index 0x%x!!\n", 1734 cq_index); 1735 WARN_ON(1); 1736 } 1737 1738 *pwritables = *pwritables + writables; 1739 return 0; 1740 } 1741 1742 static void hwi_free_async_msg(struct beiscsi_hba *phba, 1743 struct hwi_async_pdu_context *pasync_ctx, 1744 unsigned int cri) 1745 { 1746 struct async_pdu_handle *pasync_handle, *tmp_handle; 1747 struct list_head *plist; 1748 1749 plist = &pasync_ctx->async_entry[cri].wait_queue.list; 1750 list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) { 1751 list_del(&pasync_handle->link); 1752 1753 if (pasync_handle->is_header) { 1754 list_add_tail(&pasync_handle->link, 1755 &pasync_ctx->async_header.free_list); 1756 pasync_ctx->async_header.free_entries++; 1757 } else { 1758 list_add_tail(&pasync_handle->link, 1759 &pasync_ctx->async_data.free_list); 1760 pasync_ctx->async_data.free_entries++; 1761 } 1762 } 1763 1764 INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list); 1765 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0; 1766 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0; 1767 } 1768 1769 static struct phys_addr * 1770 hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx, 1771 unsigned int is_header, unsigned int host_write_ptr) 1772 { 1773 struct phys_addr *pasync_sge = NULL; 1774 1775 if (is_header) 1776 pasync_sge = pasync_ctx->async_header.ring_base; 1777 else 1778 pasync_sge = pasync_ctx->async_data.ring_base; 1779 1780 return pasync_sge + host_write_ptr; 1781 } 1782 1783 static void hwi_post_async_buffers(struct beiscsi_hba *phba, 1784 
unsigned int is_header, uint8_t ulp_num) 1785 { 1786 struct hwi_controller *phwi_ctrlr; 1787 struct hwi_async_pdu_context *pasync_ctx; 1788 struct async_pdu_handle *pasync_handle; 1789 struct list_head *pfree_link, *pbusy_list; 1790 struct phys_addr *pasync_sge; 1791 unsigned int ring_id, num_entries; 1792 unsigned int host_write_num, doorbell_offset; 1793 unsigned int writables; 1794 unsigned int i = 0; 1795 u32 doorbell = 0; 1796 1797 phwi_ctrlr = phba->phwi_ctrlr; 1798 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num); 1799 num_entries = pasync_ctx->num_entries; 1800 1801 if (is_header) { 1802 writables = min(pasync_ctx->async_header.writables, 1803 pasync_ctx->async_header.free_entries); 1804 pfree_link = pasync_ctx->async_header.free_list.next; 1805 host_write_num = pasync_ctx->async_header.host_write_ptr; 1806 ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id; 1807 doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num]. 1808 doorbell_offset; 1809 } else { 1810 writables = min(pasync_ctx->async_data.writables, 1811 pasync_ctx->async_data.free_entries); 1812 pfree_link = pasync_ctx->async_data.free_list.next; 1813 host_write_num = pasync_ctx->async_data.host_write_ptr; 1814 ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id; 1815 doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num]. 
1816 doorbell_offset; 1817 } 1818 1819 writables = (writables / 8) * 8; 1820 if (writables) { 1821 for (i = 0; i < writables; i++) { 1822 pbusy_list = 1823 hwi_get_async_busy_list(pasync_ctx, is_header, 1824 host_write_num); 1825 pasync_handle = 1826 list_entry(pfree_link, struct async_pdu_handle, 1827 link); 1828 WARN_ON(!pasync_handle); 1829 pasync_handle->consumed = 0; 1830 1831 pfree_link = pfree_link->next; 1832 1833 pasync_sge = hwi_get_ring_address(pasync_ctx, 1834 is_header, host_write_num); 1835 1836 pasync_sge->hi = pasync_handle->pa.u.a32.address_lo; 1837 pasync_sge->lo = pasync_handle->pa.u.a32.address_hi; 1838 1839 list_move(&pasync_handle->link, pbusy_list); 1840 1841 host_write_num++; 1842 host_write_num = host_write_num % num_entries; 1843 } 1844 1845 if (is_header) { 1846 pasync_ctx->async_header.host_write_ptr = 1847 host_write_num; 1848 pasync_ctx->async_header.free_entries -= writables; 1849 pasync_ctx->async_header.writables -= writables; 1850 pasync_ctx->async_header.busy_entries += writables; 1851 } else { 1852 pasync_ctx->async_data.host_write_ptr = host_write_num; 1853 pasync_ctx->async_data.free_entries -= writables; 1854 pasync_ctx->async_data.writables -= writables; 1855 pasync_ctx->async_data.busy_entries += writables; 1856 } 1857 1858 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK; 1859 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT; 1860 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT; 1861 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK) 1862 << DB_DEF_PDU_CQPROC_SHIFT; 1863 1864 iowrite32(doorbell, phba->db_va + doorbell_offset); 1865 } 1866 } 1867 1868 static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba, 1869 struct beiscsi_conn *beiscsi_conn, 1870 struct i_t_dpdu_cqe *pdpdu_cqe) 1871 { 1872 struct hwi_controller *phwi_ctrlr; 1873 struct hwi_async_pdu_context *pasync_ctx; 1874 struct async_pdu_handle *pasync_handle = NULL; 1875 unsigned int cq_index = -1; 1876 uint16_t cri_index = BE_GET_CRI_FROM_CID( 1877 
beiscsi_conn->beiscsi_conn_cid); 1878 1879 phwi_ctrlr = phba->phwi_ctrlr; 1880 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, 1881 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, 1882 cri_index)); 1883 1884 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx, 1885 pdpdu_cqe, &cq_index); 1886 BUG_ON(pasync_handle->is_header != 0); 1887 if (pasync_handle->consumed == 0) 1888 hwi_update_async_writables(phba, pasync_ctx, 1889 pasync_handle->is_header, cq_index); 1890 1891 hwi_free_async_msg(phba, pasync_ctx, pasync_handle->cri); 1892 hwi_post_async_buffers(phba, pasync_handle->is_header, 1893 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, 1894 cri_index)); 1895 } 1896 1897 static unsigned int 1898 hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn, 1899 struct beiscsi_hba *phba, 1900 struct hwi_async_pdu_context *pasync_ctx, unsigned short cri) 1901 { 1902 struct list_head *plist; 1903 struct async_pdu_handle *pasync_handle; 1904 void *phdr = NULL; 1905 unsigned int hdr_len = 0, buf_len = 0; 1906 unsigned int status, index = 0, offset = 0; 1907 void *pfirst_buffer = NULL; 1908 unsigned int num_buf = 0; 1909 1910 plist = &pasync_ctx->async_entry[cri].wait_queue.list; 1911 1912 list_for_each_entry(pasync_handle, plist, link) { 1913 if (index == 0) { 1914 phdr = pasync_handle->pbuffer; 1915 hdr_len = pasync_handle->buffer_len; 1916 } else { 1917 buf_len = pasync_handle->buffer_len; 1918 if (!num_buf) { 1919 pfirst_buffer = pasync_handle->pbuffer; 1920 num_buf++; 1921 } 1922 memcpy(pfirst_buffer + offset, 1923 pasync_handle->pbuffer, buf_len); 1924 offset += buf_len; 1925 } 1926 index++; 1927 } 1928 1929 status = beiscsi_process_async_pdu(beiscsi_conn, phba, 1930 phdr, hdr_len, pfirst_buffer, 1931 offset); 1932 1933 hwi_free_async_msg(phba, pasync_ctx, cri); 1934 return 0; 1935 } 1936 1937 static unsigned int 1938 hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn, 1939 struct beiscsi_hba *phba, 1940 struct async_pdu_handle *pasync_handle) 1941 { 1942 struct 
hwi_async_pdu_context *pasync_ctx; 1943 struct hwi_controller *phwi_ctrlr; 1944 unsigned int bytes_needed = 0, status = 0; 1945 unsigned short cri = pasync_handle->cri; 1946 struct pdu_base *ppdu; 1947 1948 phwi_ctrlr = phba->phwi_ctrlr; 1949 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, 1950 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, 1951 BE_GET_CRI_FROM_CID(beiscsi_conn-> 1952 beiscsi_conn_cid))); 1953 1954 list_del(&pasync_handle->link); 1955 if (pasync_handle->is_header) { 1956 pasync_ctx->async_header.busy_entries--; 1957 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) { 1958 hwi_free_async_msg(phba, pasync_ctx, cri); 1959 BUG(); 1960 } 1961 1962 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0; 1963 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1; 1964 pasync_ctx->async_entry[cri].wait_queue.hdr_len = 1965 (unsigned short)pasync_handle->buffer_len; 1966 list_add_tail(&pasync_handle->link, 1967 &pasync_ctx->async_entry[cri].wait_queue.list); 1968 1969 ppdu = pasync_handle->pbuffer; 1970 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base, 1971 data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) & 1972 0xFFFF0000) | ((be16_to_cpu((ppdu-> 1973 dw[offsetof(struct amap_pdu_base, data_len_lo) / 32] 1974 & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF)); 1975 1976 if (status == 0) { 1977 pasync_ctx->async_entry[cri].wait_queue.bytes_needed = 1978 bytes_needed; 1979 1980 if (bytes_needed == 0) 1981 status = hwi_fwd_async_msg(beiscsi_conn, phba, 1982 pasync_ctx, cri); 1983 } 1984 } else { 1985 pasync_ctx->async_data.busy_entries--; 1986 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) { 1987 list_add_tail(&pasync_handle->link, 1988 &pasync_ctx->async_entry[cri].wait_queue. 1989 list); 1990 pasync_ctx->async_entry[cri].wait_queue. 1991 bytes_received += 1992 (unsigned short)pasync_handle->buffer_len; 1993 1994 if (pasync_ctx->async_entry[cri].wait_queue. 1995 bytes_received >= 1996 pasync_ctx->async_entry[cri].wait_queue. 
1997 bytes_needed) 1998 status = hwi_fwd_async_msg(beiscsi_conn, phba, 1999 pasync_ctx, cri); 2000 } 2001 } 2002 return status; 2003 } 2004 2005 static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn, 2006 struct beiscsi_hba *phba, 2007 struct i_t_dpdu_cqe *pdpdu_cqe) 2008 { 2009 struct hwi_controller *phwi_ctrlr; 2010 struct hwi_async_pdu_context *pasync_ctx; 2011 struct async_pdu_handle *pasync_handle = NULL; 2012 unsigned int cq_index = -1; 2013 uint16_t cri_index = BE_GET_CRI_FROM_CID( 2014 beiscsi_conn->beiscsi_conn_cid); 2015 2016 phwi_ctrlr = phba->phwi_ctrlr; 2017 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, 2018 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, 2019 cri_index)); 2020 2021 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx, 2022 pdpdu_cqe, &cq_index); 2023 2024 if (pasync_handle->consumed == 0) 2025 hwi_update_async_writables(phba, pasync_ctx, 2026 pasync_handle->is_header, cq_index); 2027 2028 hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle); 2029 hwi_post_async_buffers(phba, pasync_handle->is_header, 2030 BEISCSI_GET_ULP_FROM_CRI( 2031 phwi_ctrlr, cri_index)); 2032 } 2033 2034 static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba) 2035 { 2036 struct be_queue_info *mcc_cq; 2037 struct be_mcc_compl *mcc_compl; 2038 unsigned int num_processed = 0; 2039 2040 mcc_cq = &phba->ctrl.mcc_obj.cq; 2041 mcc_compl = queue_tail_node(mcc_cq); 2042 mcc_compl->flags = le32_to_cpu(mcc_compl->flags); 2043 while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) { 2044 2045 if (num_processed >= 32) { 2046 hwi_ring_cq_db(phba, mcc_cq->id, 2047 num_processed, 0, 0); 2048 num_processed = 0; 2049 } 2050 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) { 2051 /* Interpret flags as an async trailer */ 2052 if (is_link_state_evt(mcc_compl->flags)) 2053 /* Interpret compl as a async link evt */ 2054 beiscsi_async_link_state_process(phba, 2055 (struct be_async_event_link_state *) mcc_compl); 2056 else { 2057 beiscsi_log(phba, KERN_ERR, 
BEISCSI_LOG_MBOX, 2058 "BM_%d : Unsupported Async Event, flags" 2059 " = 0x%08x\n", 2060 mcc_compl->flags); 2061 if (phba->state & BE_ADAPTER_LINK_UP) { 2062 phba->state |= BE_ADAPTER_CHECK_BOOT; 2063 phba->get_boot = BE_GET_BOOT_RETRIES; 2064 } 2065 } 2066 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) { 2067 be_mcc_compl_process_isr(&phba->ctrl, mcc_compl); 2068 atomic_dec(&phba->ctrl.mcc_obj.q.used); 2069 } 2070 2071 mcc_compl->flags = 0; 2072 queue_tail_inc(mcc_cq); 2073 mcc_compl = queue_tail_node(mcc_cq); 2074 mcc_compl->flags = le32_to_cpu(mcc_compl->flags); 2075 num_processed++; 2076 } 2077 2078 if (num_processed > 0) 2079 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0); 2080 2081 } 2082 2083 /** 2084 * beiscsi_process_cq()- Process the Completion Queue 2085 * @pbe_eq: Event Q on which the Completion has come 2086 * 2087 * return 2088 * Number of Completion Entries processed. 2089 **/ 2090 unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) 2091 { 2092 struct be_queue_info *cq; 2093 struct sol_cqe *sol; 2094 struct dmsg_cqe *dmsg; 2095 unsigned int num_processed = 0; 2096 unsigned int tot_nump = 0; 2097 unsigned short code = 0, cid = 0; 2098 uint16_t cri_index = 0; 2099 struct beiscsi_conn *beiscsi_conn; 2100 struct beiscsi_endpoint *beiscsi_ep; 2101 struct iscsi_endpoint *ep; 2102 struct beiscsi_hba *phba; 2103 2104 cq = pbe_eq->cq; 2105 sol = queue_tail_node(cq); 2106 phba = pbe_eq->phba; 2107 2108 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] & 2109 CQE_VALID_MASK) { 2110 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe)); 2111 2112 code = (sol->dw[offsetof(struct amap_sol_cqe, code) / 2113 32] & CQE_CODE_MASK); 2114 2115 /* Get the CID */ 2116 if (is_chip_be2_be3r(phba)) { 2117 cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol); 2118 } else { 2119 if ((code == DRIVERMSG_NOTIFY) || 2120 (code == UNSOL_HDR_NOTIFY) || 2121 (code == UNSOL_DATA_NOTIFY)) 2122 cid = AMAP_GET_BITS( 2123 struct amap_i_t_dpdu_cqe_v2, 2124 cid, sol); 
2125 else 2126 cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, 2127 cid, sol); 2128 } 2129 2130 cri_index = BE_GET_CRI_FROM_CID(cid); 2131 ep = phba->ep_array[cri_index]; 2132 2133 if (ep == NULL) { 2134 /* connection has already been freed 2135 * just move on to next one 2136 */ 2137 beiscsi_log(phba, KERN_WARNING, 2138 BEISCSI_LOG_INIT, 2139 "BM_%d : proc cqe of disconn ep: cid %d\n", 2140 cid); 2141 goto proc_next_cqe; 2142 } 2143 2144 beiscsi_ep = ep->dd_data; 2145 beiscsi_conn = beiscsi_ep->conn; 2146 2147 if (num_processed >= 32) { 2148 hwi_ring_cq_db(phba, cq->id, 2149 num_processed, 0, 0); 2150 tot_nump += num_processed; 2151 num_processed = 0; 2152 } 2153 2154 switch (code) { 2155 case SOL_CMD_COMPLETE: 2156 hwi_complete_cmd(beiscsi_conn, phba, sol); 2157 break; 2158 case DRIVERMSG_NOTIFY: 2159 beiscsi_log(phba, KERN_INFO, 2160 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2161 "BM_%d : Received %s[%d] on CID : %d\n", 2162 cqe_desc[code], code, cid); 2163 2164 dmsg = (struct dmsg_cqe *)sol; 2165 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol); 2166 break; 2167 case UNSOL_HDR_NOTIFY: 2168 beiscsi_log(phba, KERN_INFO, 2169 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2170 "BM_%d : Received %s[%d] on CID : %d\n", 2171 cqe_desc[code], code, cid); 2172 2173 spin_lock_bh(&phba->async_pdu_lock); 2174 hwi_process_default_pdu_ring(beiscsi_conn, phba, 2175 (struct i_t_dpdu_cqe *)sol); 2176 spin_unlock_bh(&phba->async_pdu_lock); 2177 break; 2178 case UNSOL_DATA_NOTIFY: 2179 beiscsi_log(phba, KERN_INFO, 2180 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 2181 "BM_%d : Received %s[%d] on CID : %d\n", 2182 cqe_desc[code], code, cid); 2183 2184 spin_lock_bh(&phba->async_pdu_lock); 2185 hwi_process_default_pdu_ring(beiscsi_conn, phba, 2186 (struct i_t_dpdu_cqe *)sol); 2187 spin_unlock_bh(&phba->async_pdu_lock); 2188 break; 2189 case CXN_INVALIDATE_INDEX_NOTIFY: 2190 case CMD_INVALIDATED_NOTIFY: 2191 case CXN_INVALIDATE_NOTIFY: 2192 beiscsi_log(phba, KERN_ERR, 2193 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 
2194 "BM_%d : Ignoring %s[%d] on CID : %d\n", 2195 cqe_desc[code], code, cid); 2196 break; 2197 case SOL_CMD_KILLED_DATA_DIGEST_ERR: 2198 case CMD_KILLED_INVALID_STATSN_RCVD: 2199 case CMD_KILLED_INVALID_R2T_RCVD: 2200 case CMD_CXN_KILLED_LUN_INVALID: 2201 case CMD_CXN_KILLED_ICD_INVALID: 2202 case CMD_CXN_KILLED_ITT_INVALID: 2203 case CMD_CXN_KILLED_SEQ_OUTOFORDER: 2204 case CMD_CXN_KILLED_INVALID_DATASN_RCVD: 2205 beiscsi_log(phba, KERN_ERR, 2206 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 2207 "BM_%d : Cmd Notification %s[%d] on CID : %d\n", 2208 cqe_desc[code], code, cid); 2209 break; 2210 case UNSOL_DATA_DIGEST_ERROR_NOTIFY: 2211 beiscsi_log(phba, KERN_ERR, 2212 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2213 "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n", 2214 cqe_desc[code], code, cid); 2215 spin_lock_bh(&phba->async_pdu_lock); 2216 hwi_flush_default_pdu_buffer(phba, beiscsi_conn, 2217 (struct i_t_dpdu_cqe *) sol); 2218 spin_unlock_bh(&phba->async_pdu_lock); 2219 break; 2220 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL: 2221 case CXN_KILLED_BURST_LEN_MISMATCH: 2222 case CXN_KILLED_AHS_RCVD: 2223 case CXN_KILLED_HDR_DIGEST_ERR: 2224 case CXN_KILLED_UNKNOWN_HDR: 2225 case CXN_KILLED_STALE_ITT_TTT_RCVD: 2226 case CXN_KILLED_INVALID_ITT_TTT_RCVD: 2227 case CXN_KILLED_TIMED_OUT: 2228 case CXN_KILLED_FIN_RCVD: 2229 case CXN_KILLED_RST_SENT: 2230 case CXN_KILLED_RST_RCVD: 2231 case CXN_KILLED_BAD_UNSOL_PDU_RCVD: 2232 case CXN_KILLED_BAD_WRB_INDEX_ERROR: 2233 case CXN_KILLED_OVER_RUN_RESIDUAL: 2234 case CXN_KILLED_UNDER_RUN_RESIDUAL: 2235 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN: 2236 beiscsi_log(phba, KERN_ERR, 2237 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2238 "BM_%d : Event %s[%d] received on CID : %d\n", 2239 cqe_desc[code], code, cid); 2240 if (beiscsi_conn) 2241 iscsi_conn_failure(beiscsi_conn->conn, 2242 ISCSI_ERR_CONN_FAILED); 2243 break; 2244 default: 2245 beiscsi_log(phba, KERN_ERR, 2246 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2247 "BM_%d : Invalid CQE Event Received Code : 
%d" 2248 "CID 0x%x...\n", 2249 code, cid); 2250 break; 2251 } 2252 2253 proc_next_cqe: 2254 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0); 2255 queue_tail_inc(cq); 2256 sol = queue_tail_node(cq); 2257 num_processed++; 2258 } 2259 2260 if (num_processed > 0) { 2261 tot_nump += num_processed; 2262 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0); 2263 } 2264 return tot_nump; 2265 } 2266 2267 void beiscsi_process_all_cqs(struct work_struct *work) 2268 { 2269 unsigned long flags; 2270 struct hwi_controller *phwi_ctrlr; 2271 struct hwi_context_memory *phwi_context; 2272 struct beiscsi_hba *phba; 2273 struct be_eq_obj *pbe_eq = 2274 container_of(work, struct be_eq_obj, work_cqs); 2275 2276 phba = pbe_eq->phba; 2277 phwi_ctrlr = phba->phwi_ctrlr; 2278 phwi_context = phwi_ctrlr->phwi_ctxt; 2279 2280 if (pbe_eq->todo_mcc_cq) { 2281 spin_lock_irqsave(&phba->isr_lock, flags); 2282 pbe_eq->todo_mcc_cq = false; 2283 spin_unlock_irqrestore(&phba->isr_lock, flags); 2284 beiscsi_process_mcc_isr(phba); 2285 } 2286 2287 if (pbe_eq->todo_cq) { 2288 spin_lock_irqsave(&phba->isr_lock, flags); 2289 pbe_eq->todo_cq = false; 2290 spin_unlock_irqrestore(&phba->isr_lock, flags); 2291 beiscsi_process_cq(pbe_eq); 2292 } 2293 2294 /* rearm EQ for further interrupts */ 2295 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); 2296 } 2297 2298 static int be_iopoll(struct blk_iopoll *iop, int budget) 2299 { 2300 unsigned int ret; 2301 struct beiscsi_hba *phba; 2302 struct be_eq_obj *pbe_eq; 2303 2304 pbe_eq = container_of(iop, struct be_eq_obj, iopoll); 2305 ret = beiscsi_process_cq(pbe_eq); 2306 pbe_eq->cq_count += ret; 2307 if (ret < budget) { 2308 phba = pbe_eq->phba; 2309 blk_iopoll_complete(iop); 2310 beiscsi_log(phba, KERN_INFO, 2311 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 2312 "BM_%d : rearm pbe_eq->q.id =%d\n", 2313 pbe_eq->q.id); 2314 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); 2315 } 2316 return ret; 2317 } 2318 2319 static void 2320 hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct 
scatterlist *sg, 2321 unsigned int num_sg, struct beiscsi_io_task *io_task) 2322 { 2323 struct iscsi_sge *psgl; 2324 unsigned int sg_len, index; 2325 unsigned int sge_len = 0; 2326 unsigned long long addr; 2327 struct scatterlist *l_sg; 2328 unsigned int offset; 2329 2330 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb, 2331 io_task->bhs_pa.u.a32.address_lo); 2332 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb, 2333 io_task->bhs_pa.u.a32.address_hi); 2334 2335 l_sg = sg; 2336 for (index = 0; (index < num_sg) && (index < 2); index++, 2337 sg = sg_next(sg)) { 2338 if (index == 0) { 2339 sg_len = sg_dma_len(sg); 2340 addr = (u64) sg_dma_address(sg); 2341 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2342 sge0_addr_lo, pwrb, 2343 lower_32_bits(addr)); 2344 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2345 sge0_addr_hi, pwrb, 2346 upper_32_bits(addr)); 2347 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2348 sge0_len, pwrb, 2349 sg_len); 2350 sge_len = sg_len; 2351 } else { 2352 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset, 2353 pwrb, sge_len); 2354 sg_len = sg_dma_len(sg); 2355 addr = (u64) sg_dma_address(sg); 2356 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2357 sge1_addr_lo, pwrb, 2358 lower_32_bits(addr)); 2359 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2360 sge1_addr_hi, pwrb, 2361 upper_32_bits(addr)); 2362 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2363 sge1_len, pwrb, 2364 sg_len); 2365 } 2366 } 2367 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2368 memset(psgl, 0, sizeof(*psgl) * BE2_SGE); 2369 2370 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); 2371 2372 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2373 io_task->bhs_pa.u.a32.address_hi); 2374 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2375 io_task->bhs_pa.u.a32.address_lo); 2376 2377 if (num_sg == 1) { 2378 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2379 1); 2380 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2381 0); 
2382 } else if (num_sg == 2) { 2383 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2384 0); 2385 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2386 1); 2387 } else { 2388 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2389 0); 2390 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2391 0); 2392 } 2393 2394 sg = l_sg; 2395 psgl++; 2396 psgl++; 2397 offset = 0; 2398 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { 2399 sg_len = sg_dma_len(sg); 2400 addr = (u64) sg_dma_address(sg); 2401 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2402 lower_32_bits(addr)); 2403 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2404 upper_32_bits(addr)); 2405 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); 2406 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); 2407 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2408 offset += sg_len; 2409 } 2410 psgl--; 2411 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2412 } 2413 2414 static void 2415 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg, 2416 unsigned int num_sg, struct beiscsi_io_task *io_task) 2417 { 2418 struct iscsi_sge *psgl; 2419 unsigned int sg_len, index; 2420 unsigned int sge_len = 0; 2421 unsigned long long addr; 2422 struct scatterlist *l_sg; 2423 unsigned int offset; 2424 2425 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, 2426 io_task->bhs_pa.u.a32.address_lo); 2427 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, 2428 io_task->bhs_pa.u.a32.address_hi); 2429 2430 l_sg = sg; 2431 for (index = 0; (index < num_sg) && (index < 2); index++, 2432 sg = sg_next(sg)) { 2433 if (index == 0) { 2434 sg_len = sg_dma_len(sg); 2435 addr = (u64) sg_dma_address(sg); 2436 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 2437 ((u32)(addr & 0xFFFFFFFF))); 2438 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 2439 ((u32)(addr >> 32))); 2440 AMAP_SET_BITS(struct amap_iscsi_wrb, 
sge0_len, pwrb, 2441 sg_len); 2442 sge_len = sg_len; 2443 } else { 2444 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset, 2445 pwrb, sge_len); 2446 sg_len = sg_dma_len(sg); 2447 addr = (u64) sg_dma_address(sg); 2448 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb, 2449 ((u32)(addr & 0xFFFFFFFF))); 2450 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb, 2451 ((u32)(addr >> 32))); 2452 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb, 2453 sg_len); 2454 } 2455 } 2456 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2457 memset(psgl, 0, sizeof(*psgl) * BE2_SGE); 2458 2459 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); 2460 2461 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2462 io_task->bhs_pa.u.a32.address_hi); 2463 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2464 io_task->bhs_pa.u.a32.address_lo); 2465 2466 if (num_sg == 1) { 2467 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2468 1); 2469 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2470 0); 2471 } else if (num_sg == 2) { 2472 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2473 0); 2474 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2475 1); 2476 } else { 2477 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2478 0); 2479 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2480 0); 2481 } 2482 sg = l_sg; 2483 psgl++; 2484 psgl++; 2485 offset = 0; 2486 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { 2487 sg_len = sg_dma_len(sg); 2488 addr = (u64) sg_dma_address(sg); 2489 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2490 (addr & 0xFFFFFFFF)); 2491 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2492 (addr >> 32)); 2493 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); 2494 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); 2495 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2496 offset += sg_len; 2497 } 2498 psgl--; 2499 AMAP_SET_BITS(struct 
amap_iscsi_sge, last_sge, psgl, 1); 2500 } 2501 2502 /** 2503 * hwi_write_buffer()- Populate the WRB with task info 2504 * @pwrb: ptr to the WRB entry 2505 * @task: iscsi task which is to be executed 2506 **/ 2507 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) 2508 { 2509 struct iscsi_sge *psgl; 2510 struct beiscsi_io_task *io_task = task->dd_data; 2511 struct beiscsi_conn *beiscsi_conn = io_task->conn; 2512 struct beiscsi_hba *phba = beiscsi_conn->phba; 2513 uint8_t dsp_value = 0; 2514 2515 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2; 2516 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, 2517 io_task->bhs_pa.u.a32.address_lo); 2518 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, 2519 io_task->bhs_pa.u.a32.address_hi); 2520 2521 if (task->data) { 2522 2523 /* Check for the data_count */ 2524 dsp_value = (task->data_count) ? 1 : 0; 2525 2526 if (is_chip_be2_be3r(phba)) 2527 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, 2528 pwrb, dsp_value); 2529 else 2530 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, 2531 pwrb, dsp_value); 2532 2533 /* Map addr only if there is data_count */ 2534 if (dsp_value) { 2535 io_task->mtask_addr = pci_map_single(phba->pcidev, 2536 task->data, 2537 task->data_count, 2538 PCI_DMA_TODEVICE); 2539 io_task->mtask_data_count = task->data_count; 2540 } else 2541 io_task->mtask_addr = 0; 2542 2543 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 2544 lower_32_bits(io_task->mtask_addr)); 2545 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 2546 upper_32_bits(io_task->mtask_addr)); 2547 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 2548 task->data_count); 2549 2550 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1); 2551 } else { 2552 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 2553 io_task->mtask_addr = 0; 2554 } 2555 2556 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2557 2558 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 
io_task->bhs_len); 2559 2560 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2561 io_task->bhs_pa.u.a32.address_hi); 2562 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2563 io_task->bhs_pa.u.a32.address_lo); 2564 if (task->data) { 2565 psgl++; 2566 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0); 2567 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0); 2568 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0); 2569 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0); 2570 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0); 2571 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2572 2573 psgl++; 2574 if (task->data) { 2575 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2576 lower_32_bits(io_task->mtask_addr)); 2577 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2578 upper_32_bits(io_task->mtask_addr)); 2579 } 2580 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106); 2581 } 2582 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2583 } 2584 2585 /** 2586 * beiscsi_find_mem_req()- Find mem needed 2587 * @phba: ptr to HBA struct 2588 **/ 2589 static void beiscsi_find_mem_req(struct beiscsi_hba *phba) 2590 { 2591 uint8_t mem_descr_index, ulp_num; 2592 unsigned int num_cq_pages, num_async_pdu_buf_pages; 2593 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; 2594 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; 2595 2596 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ 2597 sizeof(struct sol_cqe)); 2598 2599 phba->params.hwi_ws_sz = sizeof(struct hwi_controller); 2600 2601 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 * 2602 BE_ISCSI_PDU_HEADER_SIZE; 2603 phba->mem_req[HWI_MEM_ADDN_CONTEXT] = 2604 sizeof(struct hwi_context_memory); 2605 2606 2607 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb) 2608 * (phba->params.wrbs_per_cxn) 2609 * phba->params.cxns_per_ctrl; 2610 wrb_sz_per_cxn = sizeof(struct wrb_handle) * 2611 (phba->params.wrbs_per_cxn); 2612 phba->mem_req[HWI_MEM_WRBH] 
= roundup_pow_of_two((wrb_sz_per_cxn) * 2613 phba->params.cxns_per_ctrl); 2614 2615 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) * 2616 phba->params.icds_per_ctrl; 2617 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) * 2618 phba->params.num_sge_per_io * phba->params.icds_per_ctrl; 2619 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 2620 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 2621 2622 num_async_pdu_buf_sgl_pages = 2623 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( 2624 phba, ulp_num) * 2625 sizeof(struct phys_addr)); 2626 2627 num_async_pdu_buf_pages = 2628 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( 2629 phba, ulp_num) * 2630 phba->params.defpdu_hdr_sz); 2631 2632 num_async_pdu_data_pages = 2633 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( 2634 phba, ulp_num) * 2635 phba->params.defpdu_data_sz); 2636 2637 num_async_pdu_data_sgl_pages = 2638 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( 2639 phba, ulp_num) * 2640 sizeof(struct phys_addr)); 2641 2642 mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 + 2643 (ulp_num * MEM_DESCR_OFFSET)); 2644 phba->mem_req[mem_descr_index] = 2645 BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2646 BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE; 2647 2648 mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 + 2649 (ulp_num * MEM_DESCR_OFFSET)); 2650 phba->mem_req[mem_descr_index] = 2651 num_async_pdu_buf_pages * 2652 PAGE_SIZE; 2653 2654 mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 + 2655 (ulp_num * MEM_DESCR_OFFSET)); 2656 phba->mem_req[mem_descr_index] = 2657 num_async_pdu_data_pages * 2658 PAGE_SIZE; 2659 2660 mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 + 2661 (ulp_num * MEM_DESCR_OFFSET)); 2662 phba->mem_req[mem_descr_index] = 2663 num_async_pdu_buf_sgl_pages * 2664 PAGE_SIZE; 2665 2666 mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 + 2667 (ulp_num * MEM_DESCR_OFFSET)); 2668 phba->mem_req[mem_descr_index] = 2669 num_async_pdu_data_sgl_pages * 2670 PAGE_SIZE; 2671 2672 mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + 2673 
(ulp_num * MEM_DESCR_OFFSET)); 2674 phba->mem_req[mem_descr_index] = 2675 BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2676 sizeof(struct async_pdu_handle); 2677 2678 mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + 2679 (ulp_num * MEM_DESCR_OFFSET)); 2680 phba->mem_req[mem_descr_index] = 2681 BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2682 sizeof(struct async_pdu_handle); 2683 2684 mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + 2685 (ulp_num * MEM_DESCR_OFFSET)); 2686 phba->mem_req[mem_descr_index] = 2687 sizeof(struct hwi_async_pdu_context) + 2688 (BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2689 sizeof(struct hwi_async_entry)); 2690 } 2691 } 2692 } 2693 2694 static int beiscsi_alloc_mem(struct beiscsi_hba *phba) 2695 { 2696 dma_addr_t bus_add; 2697 struct hwi_controller *phwi_ctrlr; 2698 struct be_mem_descriptor *mem_descr; 2699 struct mem_array *mem_arr, *mem_arr_orig; 2700 unsigned int i, j, alloc_size, curr_alloc_size; 2701 2702 phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL); 2703 if (!phba->phwi_ctrlr) 2704 return -ENOMEM; 2705 2706 /* Allocate memory for wrb_context */ 2707 phwi_ctrlr = phba->phwi_ctrlr; 2708 phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) * 2709 phba->params.cxns_per_ctrl, 2710 GFP_KERNEL); 2711 if (!phwi_ctrlr->wrb_context) 2712 return -ENOMEM; 2713 2714 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr), 2715 GFP_KERNEL); 2716 if (!phba->init_mem) { 2717 kfree(phwi_ctrlr->wrb_context); 2718 kfree(phba->phwi_ctrlr); 2719 return -ENOMEM; 2720 } 2721 2722 mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT, 2723 GFP_KERNEL); 2724 if (!mem_arr_orig) { 2725 kfree(phba->init_mem); 2726 kfree(phwi_ctrlr->wrb_context); 2727 kfree(phba->phwi_ctrlr); 2728 return -ENOMEM; 2729 } 2730 2731 mem_descr = phba->init_mem; 2732 for (i = 0; i < SE_MEM_MAX; i++) { 2733 if (!phba->mem_req[i]) { 2734 mem_descr->mem_array = NULL; 2735 mem_descr++; 2736 continue; 2737 } 2738 2739 j = 0; 2740 mem_arr = 
mem_arr_orig; 2741 alloc_size = phba->mem_req[i]; 2742 memset(mem_arr, 0, sizeof(struct mem_array) * 2743 BEISCSI_MAX_FRAGS_INIT); 2744 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size); 2745 do { 2746 mem_arr->virtual_address = pci_alloc_consistent( 2747 phba->pcidev, 2748 curr_alloc_size, 2749 &bus_add); 2750 if (!mem_arr->virtual_address) { 2751 if (curr_alloc_size <= BE_MIN_MEM_SIZE) 2752 goto free_mem; 2753 if (curr_alloc_size - 2754 rounddown_pow_of_two(curr_alloc_size)) 2755 curr_alloc_size = rounddown_pow_of_two 2756 (curr_alloc_size); 2757 else 2758 curr_alloc_size = curr_alloc_size / 2; 2759 } else { 2760 mem_arr->bus_address.u. 2761 a64.address = (__u64) bus_add; 2762 mem_arr->size = curr_alloc_size; 2763 alloc_size -= curr_alloc_size; 2764 curr_alloc_size = min(be_max_phys_size * 2765 1024, alloc_size); 2766 j++; 2767 mem_arr++; 2768 } 2769 } while (alloc_size); 2770 mem_descr->num_elements = j; 2771 mem_descr->size_in_bytes = phba->mem_req[i]; 2772 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j, 2773 GFP_KERNEL); 2774 if (!mem_descr->mem_array) 2775 goto free_mem; 2776 2777 memcpy(mem_descr->mem_array, mem_arr_orig, 2778 sizeof(struct mem_array) * j); 2779 mem_descr++; 2780 } 2781 kfree(mem_arr_orig); 2782 return 0; 2783 free_mem: 2784 mem_descr->num_elements = j; 2785 while ((i) || (j)) { 2786 for (j = mem_descr->num_elements; j > 0; j--) { 2787 pci_free_consistent(phba->pcidev, 2788 mem_descr->mem_array[j - 1].size, 2789 mem_descr->mem_array[j - 1]. 2790 virtual_address, 2791 (unsigned long)mem_descr-> 2792 mem_array[j - 1]. 
2793 bus_address.u.a64.address); 2794 } 2795 if (i) { 2796 i--; 2797 kfree(mem_descr->mem_array); 2798 mem_descr--; 2799 } 2800 } 2801 kfree(mem_arr_orig); 2802 kfree(phba->init_mem); 2803 kfree(phba->phwi_ctrlr->wrb_context); 2804 kfree(phba->phwi_ctrlr); 2805 return -ENOMEM; 2806 } 2807 2808 static int beiscsi_get_memory(struct beiscsi_hba *phba) 2809 { 2810 beiscsi_find_mem_req(phba); 2811 return beiscsi_alloc_mem(phba); 2812 } 2813 2814 static void iscsi_init_global_templates(struct beiscsi_hba *phba) 2815 { 2816 struct pdu_data_out *pdata_out; 2817 struct pdu_nop_out *pnop_out; 2818 struct be_mem_descriptor *mem_descr; 2819 2820 mem_descr = phba->init_mem; 2821 mem_descr += ISCSI_MEM_GLOBAL_HEADER; 2822 pdata_out = 2823 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address; 2824 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2825 2826 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out, 2827 IIOC_SCSI_DATA); 2828 2829 pnop_out = 2830 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0]. 
2831 virtual_address + BE_ISCSI_PDU_HEADER_SIZE); 2832 2833 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2834 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF); 2835 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1); 2836 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0); 2837 } 2838 2839 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) 2840 { 2841 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb; 2842 struct hwi_context_memory *phwi_ctxt; 2843 struct wrb_handle *pwrb_handle = NULL; 2844 struct hwi_controller *phwi_ctrlr; 2845 struct hwi_wrb_context *pwrb_context; 2846 struct iscsi_wrb *pwrb = NULL; 2847 unsigned int num_cxn_wrbh = 0; 2848 unsigned int num_cxn_wrb = 0, j, idx = 0, index; 2849 2850 mem_descr_wrbh = phba->init_mem; 2851 mem_descr_wrbh += HWI_MEM_WRBH; 2852 2853 mem_descr_wrb = phba->init_mem; 2854 mem_descr_wrb += HWI_MEM_WRB; 2855 phwi_ctrlr = phba->phwi_ctrlr; 2856 2857 /* Allocate memory for WRBQ */ 2858 phwi_ctxt = phwi_ctrlr->phwi_ctxt; 2859 phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) * 2860 phba->params.cxns_per_ctrl, 2861 GFP_KERNEL); 2862 if (!phwi_ctxt->be_wrbq) { 2863 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2864 "BM_%d : WRBQ Mem Alloc Failed\n"); 2865 return -ENOMEM; 2866 } 2867 2868 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 2869 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2870 pwrb_context->pwrb_handle_base = 2871 kzalloc(sizeof(struct wrb_handle *) * 2872 phba->params.wrbs_per_cxn, GFP_KERNEL); 2873 if (!pwrb_context->pwrb_handle_base) { 2874 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2875 "BM_%d : Mem Alloc Failed. Failing to load\n"); 2876 goto init_wrb_hndl_failed; 2877 } 2878 pwrb_context->pwrb_handle_basestd = 2879 kzalloc(sizeof(struct wrb_handle *) * 2880 phba->params.wrbs_per_cxn, GFP_KERNEL); 2881 if (!pwrb_context->pwrb_handle_basestd) { 2882 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2883 "BM_%d : Mem Alloc Failed. 
Failing to load\n"); 2884 goto init_wrb_hndl_failed; 2885 } 2886 if (!num_cxn_wrbh) { 2887 pwrb_handle = 2888 mem_descr_wrbh->mem_array[idx].virtual_address; 2889 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) / 2890 ((sizeof(struct wrb_handle)) * 2891 phba->params.wrbs_per_cxn)); 2892 idx++; 2893 } 2894 pwrb_context->alloc_index = 0; 2895 pwrb_context->wrb_handles_available = 0; 2896 pwrb_context->free_index = 0; 2897 2898 if (num_cxn_wrbh) { 2899 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2900 pwrb_context->pwrb_handle_base[j] = pwrb_handle; 2901 pwrb_context->pwrb_handle_basestd[j] = 2902 pwrb_handle; 2903 pwrb_context->wrb_handles_available++; 2904 pwrb_handle->wrb_index = j; 2905 pwrb_handle++; 2906 } 2907 num_cxn_wrbh--; 2908 } 2909 } 2910 idx = 0; 2911 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 2912 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2913 if (!num_cxn_wrb) { 2914 pwrb = mem_descr_wrb->mem_array[idx].virtual_address; 2915 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) / 2916 ((sizeof(struct iscsi_wrb) * 2917 phba->params.wrbs_per_cxn)); 2918 idx++; 2919 } 2920 2921 if (num_cxn_wrb) { 2922 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2923 pwrb_handle = pwrb_context->pwrb_handle_base[j]; 2924 pwrb_handle->pwrb = pwrb; 2925 pwrb++; 2926 } 2927 num_cxn_wrb--; 2928 } 2929 } 2930 return 0; 2931 init_wrb_hndl_failed: 2932 for (j = index; j > 0; j--) { 2933 pwrb_context = &phwi_ctrlr->wrb_context[j]; 2934 kfree(pwrb_context->pwrb_handle_base); 2935 kfree(pwrb_context->pwrb_handle_basestd); 2936 } 2937 return -ENOMEM; 2938 } 2939 2940 static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) 2941 { 2942 uint8_t ulp_num; 2943 struct hwi_controller *phwi_ctrlr; 2944 struct hba_parameters *p = &phba->params; 2945 struct hwi_async_pdu_context *pasync_ctx; 2946 struct async_pdu_handle *pasync_header_h, *pasync_data_h; 2947 unsigned int index, idx, num_per_mem, num_async_data; 2948 struct be_mem_descriptor 
*mem_descr; 2949 2950 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 2951 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 2952 2953 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2954 mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + 2955 (ulp_num * MEM_DESCR_OFFSET)); 2956 2957 phwi_ctrlr = phba->phwi_ctrlr; 2958 phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] = 2959 (struct hwi_async_pdu_context *) 2960 mem_descr->mem_array[0].virtual_address; 2961 2962 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; 2963 memset(pasync_ctx, 0, sizeof(*pasync_ctx)); 2964 2965 pasync_ctx->async_entry = 2966 (struct hwi_async_entry *) 2967 ((long unsigned int)pasync_ctx + 2968 sizeof(struct hwi_async_pdu_context)); 2969 2970 pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba, 2971 ulp_num); 2972 pasync_ctx->buffer_size = p->defpdu_hdr_sz; 2973 2974 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2975 mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 + 2976 (ulp_num * MEM_DESCR_OFFSET); 2977 if (mem_descr->mem_array[0].virtual_address) { 2978 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2979 "BM_%d : hwi_init_async_pdu_ctx" 2980 " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n", 2981 ulp_num, 2982 mem_descr->mem_array[0]. 2983 virtual_address); 2984 } else 2985 beiscsi_log(phba, KERN_WARNING, 2986 BEISCSI_LOG_INIT, 2987 "BM_%d : No Virtual address for ULP : %d\n", 2988 ulp_num); 2989 2990 pasync_ctx->async_header.va_base = 2991 mem_descr->mem_array[0].virtual_address; 2992 2993 pasync_ctx->async_header.pa_base.u.a64.address = 2994 mem_descr->mem_array[0]. 
2995 bus_address.u.a64.address; 2996 2997 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2998 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + 2999 (ulp_num * MEM_DESCR_OFFSET); 3000 if (mem_descr->mem_array[0].virtual_address) { 3001 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3002 "BM_%d : hwi_init_async_pdu_ctx" 3003 " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n", 3004 ulp_num, 3005 mem_descr->mem_array[0]. 3006 virtual_address); 3007 } else 3008 beiscsi_log(phba, KERN_WARNING, 3009 BEISCSI_LOG_INIT, 3010 "BM_%d : No Virtual address for ULP : %d\n", 3011 ulp_num); 3012 3013 pasync_ctx->async_header.ring_base = 3014 mem_descr->mem_array[0].virtual_address; 3015 3016 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3017 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + 3018 (ulp_num * MEM_DESCR_OFFSET); 3019 if (mem_descr->mem_array[0].virtual_address) { 3020 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3021 "BM_%d : hwi_init_async_pdu_ctx" 3022 " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n", 3023 ulp_num, 3024 mem_descr->mem_array[0]. 3025 virtual_address); 3026 } else 3027 beiscsi_log(phba, KERN_WARNING, 3028 BEISCSI_LOG_INIT, 3029 "BM_%d : No Virtual address for ULP : %d\n", 3030 ulp_num); 3031 3032 pasync_ctx->async_header.handle_base = 3033 mem_descr->mem_array[0].virtual_address; 3034 pasync_ctx->async_header.writables = 0; 3035 INIT_LIST_HEAD(&pasync_ctx->async_header.free_list); 3036 3037 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3038 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + 3039 (ulp_num * MEM_DESCR_OFFSET); 3040 if (mem_descr->mem_array[0].virtual_address) { 3041 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3042 "BM_%d : hwi_init_async_pdu_ctx" 3043 " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n", 3044 ulp_num, 3045 mem_descr->mem_array[0]. 
3046 virtual_address); 3047 } else 3048 beiscsi_log(phba, KERN_WARNING, 3049 BEISCSI_LOG_INIT, 3050 "BM_%d : No Virtual address for ULP : %d\n", 3051 ulp_num); 3052 3053 pasync_ctx->async_data.ring_base = 3054 mem_descr->mem_array[0].virtual_address; 3055 3056 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3057 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + 3058 (ulp_num * MEM_DESCR_OFFSET); 3059 if (!mem_descr->mem_array[0].virtual_address) 3060 beiscsi_log(phba, KERN_WARNING, 3061 BEISCSI_LOG_INIT, 3062 "BM_%d : No Virtual address for ULP : %d\n", 3063 ulp_num); 3064 3065 pasync_ctx->async_data.handle_base = 3066 mem_descr->mem_array[0].virtual_address; 3067 pasync_ctx->async_data.writables = 0; 3068 INIT_LIST_HEAD(&pasync_ctx->async_data.free_list); 3069 3070 pasync_header_h = 3071 (struct async_pdu_handle *) 3072 pasync_ctx->async_header.handle_base; 3073 pasync_data_h = 3074 (struct async_pdu_handle *) 3075 pasync_ctx->async_data.handle_base; 3076 3077 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3078 mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 + 3079 (ulp_num * MEM_DESCR_OFFSET); 3080 if (mem_descr->mem_array[0].virtual_address) { 3081 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3082 "BM_%d : hwi_init_async_pdu_ctx" 3083 " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n", 3084 ulp_num, 3085 mem_descr->mem_array[0]. 3086 virtual_address); 3087 } else 3088 beiscsi_log(phba, KERN_WARNING, 3089 BEISCSI_LOG_INIT, 3090 "BM_%d : No Virtual address for ULP : %d\n", 3091 ulp_num); 3092 3093 idx = 0; 3094 pasync_ctx->async_data.va_base = 3095 mem_descr->mem_array[idx].virtual_address; 3096 pasync_ctx->async_data.pa_base.u.a64.address = 3097 mem_descr->mem_array[idx]. 
3098 bus_address.u.a64.address; 3099 3100 num_async_data = ((mem_descr->mem_array[idx].size) / 3101 phba->params.defpdu_data_sz); 3102 num_per_mem = 0; 3103 3104 for (index = 0; index < BEISCSI_GET_CID_COUNT 3105 (phba, ulp_num); index++) { 3106 pasync_header_h->cri = -1; 3107 pasync_header_h->index = (char)index; 3108 INIT_LIST_HEAD(&pasync_header_h->link); 3109 pasync_header_h->pbuffer = 3110 (void *)((unsigned long) 3111 (pasync_ctx-> 3112 async_header.va_base) + 3113 (p->defpdu_hdr_sz * index)); 3114 3115 pasync_header_h->pa.u.a64.address = 3116 pasync_ctx->async_header.pa_base.u.a64. 3117 address + (p->defpdu_hdr_sz * index); 3118 3119 list_add_tail(&pasync_header_h->link, 3120 &pasync_ctx->async_header. 3121 free_list); 3122 pasync_header_h++; 3123 pasync_ctx->async_header.free_entries++; 3124 pasync_ctx->async_header.writables++; 3125 3126 INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 3127 wait_queue.list); 3128 INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 3129 header_busy_list); 3130 pasync_data_h->cri = -1; 3131 pasync_data_h->index = (char)index; 3132 INIT_LIST_HEAD(&pasync_data_h->link); 3133 3134 if (!num_async_data) { 3135 num_per_mem = 0; 3136 idx++; 3137 pasync_ctx->async_data.va_base = 3138 mem_descr->mem_array[idx]. 3139 virtual_address; 3140 pasync_ctx->async_data.pa_base.u. 3141 a64.address = 3142 mem_descr->mem_array[idx]. 3143 bus_address.u.a64.address; 3144 num_async_data = 3145 ((mem_descr->mem_array[idx]. 3146 size) / 3147 phba->params.defpdu_data_sz); 3148 } 3149 pasync_data_h->pbuffer = 3150 (void *)((unsigned long) 3151 (pasync_ctx->async_data.va_base) + 3152 (p->defpdu_data_sz * num_per_mem)); 3153 3154 pasync_data_h->pa.u.a64.address = 3155 pasync_ctx->async_data.pa_base.u.a64. 3156 address + (p->defpdu_data_sz * 3157 num_per_mem); 3158 num_per_mem++; 3159 num_async_data--; 3160 3161 list_add_tail(&pasync_data_h->link, 3162 &pasync_ctx->async_data. 
3163 free_list); 3164 pasync_data_h++; 3165 pasync_ctx->async_data.free_entries++; 3166 pasync_ctx->async_data.writables++; 3167 3168 INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 3169 data_busy_list); 3170 } 3171 3172 pasync_ctx->async_header.host_write_ptr = 0; 3173 pasync_ctx->async_header.ep_read_ptr = -1; 3174 pasync_ctx->async_data.host_write_ptr = 0; 3175 pasync_ctx->async_data.ep_read_ptr = -1; 3176 } 3177 } 3178 3179 return 0; 3180 } 3181 3182 static int 3183 be_sgl_create_contiguous(void *virtual_address, 3184 u64 physical_address, u32 length, 3185 struct be_dma_mem *sgl) 3186 { 3187 WARN_ON(!virtual_address); 3188 WARN_ON(!physical_address); 3189 WARN_ON(!length); 3190 WARN_ON(!sgl); 3191 3192 sgl->va = virtual_address; 3193 sgl->dma = (unsigned long)physical_address; 3194 sgl->size = length; 3195 3196 return 0; 3197 } 3198 3199 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl) 3200 { 3201 memset(sgl, 0, sizeof(*sgl)); 3202 } 3203 3204 static void 3205 hwi_build_be_sgl_arr(struct beiscsi_hba *phba, 3206 struct mem_array *pmem, struct be_dma_mem *sgl) 3207 { 3208 if (sgl->va) 3209 be_sgl_destroy_contiguous(sgl); 3210 3211 be_sgl_create_contiguous(pmem->virtual_address, 3212 pmem->bus_address.u.a64.address, 3213 pmem->size, sgl); 3214 } 3215 3216 static void 3217 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba, 3218 struct mem_array *pmem, struct be_dma_mem *sgl) 3219 { 3220 if (sgl->va) 3221 be_sgl_destroy_contiguous(sgl); 3222 3223 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address, 3224 pmem->bus_address.u.a64.address, 3225 pmem->size, sgl); 3226 } 3227 3228 static int be_fill_queue(struct be_queue_info *q, 3229 u16 len, u16 entry_size, void *vaddress) 3230 { 3231 struct be_dma_mem *mem = &q->dma_mem; 3232 3233 memset(q, 0, sizeof(*q)); 3234 q->len = len; 3235 q->entry_size = entry_size; 3236 mem->size = len * entry_size; 3237 mem->va = vaddress; 3238 if (!mem->va) 3239 return -ENOMEM; 3240 memset(mem->va, 0, mem->size); 
3241 return 0; 3242 } 3243 3244 static int beiscsi_create_eqs(struct beiscsi_hba *phba, 3245 struct hwi_context_memory *phwi_context) 3246 { 3247 unsigned int i, num_eq_pages; 3248 int ret = 0, eq_for_mcc; 3249 struct be_queue_info *eq; 3250 struct be_dma_mem *mem; 3251 void *eq_vaddress; 3252 dma_addr_t paddr; 3253 3254 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \ 3255 sizeof(struct be_eq_entry)); 3256 3257 if (phba->msix_enabled) 3258 eq_for_mcc = 1; 3259 else 3260 eq_for_mcc = 0; 3261 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3262 eq = &phwi_context->be_eq[i].q; 3263 mem = &eq->dma_mem; 3264 phwi_context->be_eq[i].phba = phba; 3265 eq_vaddress = pci_alloc_consistent(phba->pcidev, 3266 num_eq_pages * PAGE_SIZE, 3267 &paddr); 3268 if (!eq_vaddress) 3269 goto create_eq_error; 3270 3271 mem->va = eq_vaddress; 3272 ret = be_fill_queue(eq, phba->params.num_eq_entries, 3273 sizeof(struct be_eq_entry), eq_vaddress); 3274 if (ret) { 3275 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3276 "BM_%d : be_fill_queue Failed for EQ\n"); 3277 goto create_eq_error; 3278 } 3279 3280 mem->dma = paddr; 3281 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq, 3282 phwi_context->cur_eqd); 3283 if (ret) { 3284 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3285 "BM_%d : beiscsi_cmd_eq_create" 3286 "Failed for EQ\n"); 3287 goto create_eq_error; 3288 } 3289 3290 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3291 "BM_%d : eqid = %d\n", 3292 phwi_context->be_eq[i].q.id); 3293 } 3294 return 0; 3295 create_eq_error: 3296 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3297 eq = &phwi_context->be_eq[i].q; 3298 mem = &eq->dma_mem; 3299 if (mem->va) 3300 pci_free_consistent(phba->pcidev, num_eq_pages 3301 * PAGE_SIZE, 3302 mem->va, mem->dma); 3303 } 3304 return ret; 3305 } 3306 3307 static int beiscsi_create_cqs(struct beiscsi_hba *phba, 3308 struct hwi_context_memory *phwi_context) 3309 { 3310 unsigned int i, num_cq_pages; 3311 int ret = 0; 3312 struct be_queue_info 
*cq, *eq; 3313 struct be_dma_mem *mem; 3314 struct be_eq_obj *pbe_eq; 3315 void *cq_vaddress; 3316 dma_addr_t paddr; 3317 3318 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ 3319 sizeof(struct sol_cqe)); 3320 3321 for (i = 0; i < phba->num_cpus; i++) { 3322 cq = &phwi_context->be_cq[i]; 3323 eq = &phwi_context->be_eq[i].q; 3324 pbe_eq = &phwi_context->be_eq[i]; 3325 pbe_eq->cq = cq; 3326 pbe_eq->phba = phba; 3327 mem = &cq->dma_mem; 3328 cq_vaddress = pci_alloc_consistent(phba->pcidev, 3329 num_cq_pages * PAGE_SIZE, 3330 &paddr); 3331 if (!cq_vaddress) 3332 goto create_cq_error; 3333 ret = be_fill_queue(cq, phba->params.num_cq_entries, 3334 sizeof(struct sol_cqe), cq_vaddress); 3335 if (ret) { 3336 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3337 "BM_%d : be_fill_queue Failed " 3338 "for ISCSI CQ\n"); 3339 goto create_cq_error; 3340 } 3341 3342 mem->dma = paddr; 3343 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, 3344 false, 0); 3345 if (ret) { 3346 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3347 "BM_%d : beiscsi_cmd_eq_create" 3348 "Failed for ISCSI CQ\n"); 3349 goto create_cq_error; 3350 } 3351 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3352 "BM_%d : iscsi cq_id is %d for eq_id %d\n" 3353 "iSCSI CQ CREATED\n", cq->id, eq->id); 3354 } 3355 return 0; 3356 3357 create_cq_error: 3358 for (i = 0; i < phba->num_cpus; i++) { 3359 cq = &phwi_context->be_cq[i]; 3360 mem = &cq->dma_mem; 3361 if (mem->va) 3362 pci_free_consistent(phba->pcidev, num_cq_pages 3363 * PAGE_SIZE, 3364 mem->va, mem->dma); 3365 } 3366 return ret; 3367 3368 } 3369 3370 static int 3371 beiscsi_create_def_hdr(struct beiscsi_hba *phba, 3372 struct hwi_context_memory *phwi_context, 3373 struct hwi_controller *phwi_ctrlr, 3374 unsigned int def_pdu_ring_sz, uint8_t ulp_num) 3375 { 3376 unsigned int idx; 3377 int ret; 3378 struct be_queue_info *dq, *cq; 3379 struct be_dma_mem *mem; 3380 struct be_mem_descriptor *mem_descr; 3381 void *dq_vaddress; 3382 3383 idx = 0; 3384 dq 
= &phwi_context->be_def_hdrq[ulp_num]; 3385 cq = &phwi_context->be_cq[0]; 3386 mem = &dq->dma_mem; 3387 mem_descr = phba->init_mem; 3388 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + 3389 (ulp_num * MEM_DESCR_OFFSET); 3390 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 3391 ret = be_fill_queue(dq, mem_descr->mem_array[0].size / 3392 sizeof(struct phys_addr), 3393 sizeof(struct phys_addr), dq_vaddress); 3394 if (ret) { 3395 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3396 "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n", 3397 ulp_num); 3398 3399 return ret; 3400 } 3401 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 3402 bus_address.u.a64.address; 3403 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq, 3404 def_pdu_ring_sz, 3405 phba->params.defpdu_hdr_sz, 3406 BEISCSI_DEFQ_HDR, ulp_num); 3407 if (ret) { 3408 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3409 "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n", 3410 ulp_num); 3411 3412 return ret; 3413 } 3414 3415 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3416 "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n", 3417 ulp_num, 3418 phwi_context->be_def_hdrq[ulp_num].id); 3419 hwi_post_async_buffers(phba, BEISCSI_DEFQ_HDR, ulp_num); 3420 return 0; 3421 } 3422 3423 static int 3424 beiscsi_create_def_data(struct beiscsi_hba *phba, 3425 struct hwi_context_memory *phwi_context, 3426 struct hwi_controller *phwi_ctrlr, 3427 unsigned int def_pdu_ring_sz, uint8_t ulp_num) 3428 { 3429 unsigned int idx; 3430 int ret; 3431 struct be_queue_info *dataq, *cq; 3432 struct be_dma_mem *mem; 3433 struct be_mem_descriptor *mem_descr; 3434 void *dq_vaddress; 3435 3436 idx = 0; 3437 dataq = &phwi_context->be_def_dataq[ulp_num]; 3438 cq = &phwi_context->be_cq[0]; 3439 mem = &dataq->dma_mem; 3440 mem_descr = phba->init_mem; 3441 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + 3442 (ulp_num * MEM_DESCR_OFFSET); 3443 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 3444 ret = 
be_fill_queue(dataq, mem_descr->mem_array[0].size / 3445 sizeof(struct phys_addr), 3446 sizeof(struct phys_addr), dq_vaddress); 3447 if (ret) { 3448 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3449 "BM_%d : be_fill_queue Failed for DEF PDU " 3450 "DATA on ULP : %d\n", 3451 ulp_num); 3452 3453 return ret; 3454 } 3455 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 3456 bus_address.u.a64.address; 3457 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq, 3458 def_pdu_ring_sz, 3459 phba->params.defpdu_data_sz, 3460 BEISCSI_DEFQ_DATA, ulp_num); 3461 if (ret) { 3462 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3463 "BM_%d be_cmd_create_default_pdu_queue" 3464 " Failed for DEF PDU DATA on ULP : %d\n", 3465 ulp_num); 3466 return ret; 3467 } 3468 3469 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3470 "BM_%d : iscsi def data id on ULP : %d is %d\n", 3471 ulp_num, 3472 phwi_context->be_def_dataq[ulp_num].id); 3473 3474 hwi_post_async_buffers(phba, BEISCSI_DEFQ_DATA, ulp_num); 3475 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3476 "BM_%d : DEFAULT PDU DATA RING CREATED" 3477 "on ULP : %d\n", ulp_num); 3478 3479 return 0; 3480 } 3481 3482 3483 static int 3484 beiscsi_post_template_hdr(struct beiscsi_hba *phba) 3485 { 3486 struct be_mem_descriptor *mem_descr; 3487 struct mem_array *pm_arr; 3488 struct be_dma_mem sgl; 3489 int status, ulp_num; 3490 3491 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3492 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3493 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3494 mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 + 3495 (ulp_num * MEM_DESCR_OFFSET); 3496 pm_arr = mem_descr->mem_array; 3497 3498 hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 3499 status = be_cmd_iscsi_post_template_hdr( 3500 &phba->ctrl, &sgl); 3501 3502 if (status != 0) { 3503 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3504 "BM_%d : Post Template HDR Failed for" 3505 "ULP_%d\n", ulp_num); 3506 return status; 3507 } 3508 3509 
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3510 "BM_%d : Template HDR Pages Posted for" 3511 "ULP_%d\n", ulp_num); 3512 } 3513 } 3514 return 0; 3515 } 3516 3517 static int 3518 beiscsi_post_pages(struct beiscsi_hba *phba) 3519 { 3520 struct be_mem_descriptor *mem_descr; 3521 struct mem_array *pm_arr; 3522 unsigned int page_offset, i; 3523 struct be_dma_mem sgl; 3524 int status, ulp_num = 0; 3525 3526 mem_descr = phba->init_mem; 3527 mem_descr += HWI_MEM_SGE; 3528 pm_arr = mem_descr->mem_array; 3529 3530 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3531 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) 3532 break; 3533 3534 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * 3535 phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE; 3536 for (i = 0; i < mem_descr->num_elements; i++) { 3537 hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 3538 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl, 3539 page_offset, 3540 (pm_arr->size / PAGE_SIZE)); 3541 page_offset += pm_arr->size / PAGE_SIZE; 3542 if (status != 0) { 3543 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3544 "BM_%d : post sgl failed.\n"); 3545 return status; 3546 } 3547 pm_arr++; 3548 } 3549 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3550 "BM_%d : POSTED PAGES\n"); 3551 return 0; 3552 } 3553 3554 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q) 3555 { 3556 struct be_dma_mem *mem = &q->dma_mem; 3557 if (mem->va) { 3558 pci_free_consistent(phba->pcidev, mem->size, 3559 mem->va, mem->dma); 3560 mem->va = NULL; 3561 } 3562 } 3563 3564 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q, 3565 u16 len, u16 entry_size) 3566 { 3567 struct be_dma_mem *mem = &q->dma_mem; 3568 3569 memset(q, 0, sizeof(*q)); 3570 q->len = len; 3571 q->entry_size = entry_size; 3572 mem->size = len * entry_size; 3573 mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma); 3574 if (!mem->va) 3575 return -ENOMEM; 3576 return 0; 
3577 } 3578 3579 static int 3580 beiscsi_create_wrb_rings(struct beiscsi_hba *phba, 3581 struct hwi_context_memory *phwi_context, 3582 struct hwi_controller *phwi_ctrlr) 3583 { 3584 unsigned int wrb_mem_index, offset, size, num_wrb_rings; 3585 u64 pa_addr_lo; 3586 unsigned int idx, num, i, ulp_num; 3587 struct mem_array *pwrb_arr; 3588 void *wrb_vaddr; 3589 struct be_dma_mem sgl; 3590 struct be_mem_descriptor *mem_descr; 3591 struct hwi_wrb_context *pwrb_context; 3592 int status; 3593 uint8_t ulp_count = 0, ulp_base_num = 0; 3594 uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 }; 3595 3596 idx = 0; 3597 mem_descr = phba->init_mem; 3598 mem_descr += HWI_MEM_WRB; 3599 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl, 3600 GFP_KERNEL); 3601 if (!pwrb_arr) { 3602 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3603 "BM_%d : Memory alloc failed in create wrb ring.\n"); 3604 return -ENOMEM; 3605 } 3606 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 3607 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address; 3608 num_wrb_rings = mem_descr->mem_array[idx].size / 3609 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb)); 3610 3611 for (num = 0; num < phba->params.cxns_per_ctrl; num++) { 3612 if (num_wrb_rings) { 3613 pwrb_arr[num].virtual_address = wrb_vaddr; 3614 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo; 3615 pwrb_arr[num].size = phba->params.wrbs_per_cxn * 3616 sizeof(struct iscsi_wrb); 3617 wrb_vaddr += pwrb_arr[num].size; 3618 pa_addr_lo += pwrb_arr[num].size; 3619 num_wrb_rings--; 3620 } else { 3621 idx++; 3622 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 3623 pa_addr_lo = mem_descr->mem_array[idx].\ 3624 bus_address.u.a64.address; 3625 num_wrb_rings = mem_descr->mem_array[idx].size / 3626 (phba->params.wrbs_per_cxn * 3627 sizeof(struct iscsi_wrb)); 3628 pwrb_arr[num].virtual_address = wrb_vaddr; 3629 pwrb_arr[num].bus_address.u.a64.address\ 3630 = pa_addr_lo; 3631 pwrb_arr[num].size = phba->params.wrbs_per_cxn 
* 3632 sizeof(struct iscsi_wrb); 3633 wrb_vaddr += pwrb_arr[num].size; 3634 pa_addr_lo += pwrb_arr[num].size; 3635 num_wrb_rings--; 3636 } 3637 } 3638 3639 /* Get the ULP Count */ 3640 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3641 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3642 ulp_count++; 3643 ulp_base_num = ulp_num; 3644 cid_count_ulp[ulp_num] = 3645 BEISCSI_GET_CID_COUNT(phba, ulp_num); 3646 } 3647 3648 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3649 wrb_mem_index = 0; 3650 offset = 0; 3651 size = 0; 3652 3653 if (ulp_count > 1) { 3654 ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT; 3655 3656 if (!cid_count_ulp[ulp_base_num]) 3657 ulp_base_num = (ulp_base_num + 1) % 3658 BEISCSI_ULP_COUNT; 3659 3660 cid_count_ulp[ulp_base_num]--; 3661 } 3662 3663 3664 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl); 3665 status = be_cmd_wrbq_create(&phba->ctrl, &sgl, 3666 &phwi_context->be_wrbq[i], 3667 &phwi_ctrlr->wrb_context[i], 3668 ulp_base_num); 3669 if (status != 0) { 3670 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3671 "BM_%d : wrbq create failed."); 3672 kfree(pwrb_arr); 3673 return status; 3674 } 3675 pwrb_context = &phwi_ctrlr->wrb_context[i]; 3676 BE_SET_CID_TO_CRI(i, pwrb_context->cid); 3677 } 3678 kfree(pwrb_arr); 3679 return 0; 3680 } 3681 3682 static void free_wrb_handles(struct beiscsi_hba *phba) 3683 { 3684 unsigned int index; 3685 struct hwi_controller *phwi_ctrlr; 3686 struct hwi_wrb_context *pwrb_context; 3687 3688 phwi_ctrlr = phba->phwi_ctrlr; 3689 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 3690 pwrb_context = &phwi_ctrlr->wrb_context[index]; 3691 kfree(pwrb_context->pwrb_handle_base); 3692 kfree(pwrb_context->pwrb_handle_basestd); 3693 } 3694 } 3695 3696 static void be_mcc_queues_destroy(struct beiscsi_hba *phba) 3697 { 3698 struct be_queue_info *q; 3699 struct be_ctrl_info *ctrl = &phba->ctrl; 3700 3701 q = &phba->ctrl.mcc_obj.q; 3702 if (q->created) { 3703 
beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
		be_queue_free(phba, q);
	}

	q = &phba->ctrl.mcc_obj.cq;
	if (q->created) {
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
		be_queue_free(phba, q);
	}
}

/*
 * hwi_cleanup()- Tear down everything hwi_init_port() created, in
 * reverse dependency order: template header, WRB queues, default PDU
 * queues, SGL, CQs, MCC queues, EQs, then tell firmware to uninit.
 */
static void hwi_cleanup(struct beiscsi_hba *phba)
{
	struct be_queue_info *q;
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct hwi_async_pdu_context *pasync_ctx;
	int i, eq_for_mcc, ulp_num;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	be_cmd_iscsi_remove_template_hdr(ctrl);

	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
		q = &phwi_context->be_wrbq[i];
		if (q->created)
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
	}
	kfree(phwi_context->be_wrbq);
	free_wrb_handles(phba);

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {

			q = &phwi_context->be_def_hdrq[ulp_num];
			if (q->created)
				beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);

			q = &phwi_context->be_def_dataq[ulp_num];
			if (q->created)
				beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);

			/* value is fetched but not used further here */
			pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
		}
	}

	beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);

	/* NOTE(review): the ring DMA memory is freed BEFORE the firmware
	 * destroy command for CQs/EQs below — looks inverted (firmware could
	 * still reference the ring); confirm against firmware semantics.
	 */
	for (i = 0; i < (phba->num_cpus); i++) {
		q = &phwi_context->be_cq[i];
		if (q->created) {
			be_queue_free(phba, q);
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
		}
	}

	be_mcc_queues_destroy(phba);
	/* with MSI-X there is one extra EQ dedicated to MCC */
	if (phba->msix_enabled)
		eq_for_mcc = 1;
	else
		eq_for_mcc = 0;
	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
		q = &phwi_context->be_eq[i].q;
		if (q->created) {
			be_queue_free(phba, q);
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
		}
	}
	be_cmd_fw_uninit(ctrl);
}

/*
 * be_mcc_queues_create()- Allocate and create the MCC completion queue
 * and the MCC queue. The MCC CQ is bound to the dedicated MCC EQ when
 * MSI-X is enabled, otherwise to EQ 0. Unwinds via goto chain on error.
 */
static int be_mcc_queues_create(struct beiscsi_hba *phba,
				struct hwi_context_memory *phwi_context)
{
	struct be_queue_info *q, *cq;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	/* Alloc MCC compl queue */
	cq = &phba->ctrl.mcc_obj.cq;
	if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;
	/* Ask BE to create MCC compl queue; */
	if (phba->msix_enabled) {
		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
					 [phba->num_cpus].q, false, true, 0))
			goto mcc_cq_free;
	} else {
		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
					  false, true, 0))
			goto mcc_cq_free;
	}

	/* Alloc MCC queue */
	q = &phba->ctrl.mcc_obj.q;
	if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (beiscsi_cmd_mccq_create(phba, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(phba, q);
mcc_cq_destroy:
	beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(phba, cq);
err:
	return -ENOMEM;
}

/**
 * find_num_cpus()- Get the CPU online count
 * @phba: ptr to priv structure
 *
 * CPU count is used for creating EQ.
 **/
static void find_num_cpus(struct beiscsi_hba *phba)
{
	int num_cpus = 0;

	num_cpus = num_online_cpus();

	switch (phba->generation) {
	case BE_GEN2:
	case BE_GEN3:
		/* cap at the driver's per-adapter maximum */
		phba->num_cpus = (num_cpus > BEISCSI_MAX_NUM_CPUS) ?
				 BEISCSI_MAX_NUM_CPUS : num_cpus;
		break;
	case BE_GEN4:
		/*
		 * If eqid_count == 1 fall back to
		 * INTX mechanism
		 **/
		if (phba->fw_config.eqid_count == 1) {
			enable_msix = 0;
			phba->num_cpus = 1;
			return;
		}

		/* one EQ is reserved, hence eqid_count - 1 usable */
		phba->num_cpus =
			(num_cpus > (phba->fw_config.eqid_count - 1)) ?
			(phba->fw_config.eqid_count - 1) : num_cpus;
		break;
	default:
		phba->num_cpus = 1;
	}
}

/*
 * hwi_init_port()- Bring up the adapter's queue infrastructure in order:
 * firmware init, EQs, MCC queues, firmware version check, CQs, default
 * PDU header/data rings per ULP, SGL page posting, template headers,
 * WRB rings, and finally the CID -> async-CRI map. On any failure,
 * hwi_cleanup() tears down whatever was created.
 *
 * Returns 0 on success, negative status on failure.
 */
static int hwi_init_port(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	unsigned int def_pdu_ring_sz;
	struct be_ctrl_info *ctrl = &phba->ctrl;
	int status, ulp_num;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	/* EQ delay (interrupt moderation) bounds and starting value */
	phwi_context->max_eqd = 128;
	phwi_context->min_eqd = 0;
	phwi_context->cur_eqd = 0;
	be_cmd_fw_initialize(&phba->ctrl);

	status = beiscsi_create_eqs(phba, phwi_context);
	if (status != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : EQ not created\n");
		goto error;
	}

	status = be_mcc_queues_create(phba, phwi_context);
	if (status != 0)
		goto error;

	status = mgmt_check_supported_fw(ctrl, phba);
	if (status != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Unsupported fw version\n");
		goto error;
	}

	status = beiscsi_create_cqs(phba, phwi_context);
	if (status != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : CQ not created\n");
		goto error;
	}

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {

			def_pdu_ring_sz =
				BEISCSI_GET_CID_COUNT(phba, ulp_num) *
				sizeof(struct phys_addr);

			status = beiscsi_create_def_hdr(phba, phwi_context,
							phwi_ctrlr,
							def_pdu_ring_sz,
							ulp_num);
			if (status != 0) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : Default Header not created for ULP : %d\n",
					    ulp_num);
				goto error;
			}

			status = beiscsi_create_def_data(phba, phwi_context,
							 phwi_ctrlr,
							 def_pdu_ring_sz,
							 ulp_num);
			if (status != 0) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : Default Data not created for ULP : %d\n",
					    ulp_num);
				goto error;
			}
		}
	}

	status = beiscsi_post_pages(phba);
	if (status != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Post SGL Pages Failed\n");
		goto error;
	}

	/* template header posting failure is logged but NOT fatal */
	status = beiscsi_post_template_hdr(phba);
	if (status != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Template HDR Posting for CXN Failed\n");
	}

	status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
	if (status != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : WRB Rings not created\n");
		goto error;
	}

	/* map each CID to its per-ULP async CRI index */
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		uint16_t async_arr_idx = 0;

		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			uint16_t cri = 0;
			struct hwi_async_pdu_context *pasync_ctx;

			pasync_ctx = HWI_GET_ASYNC_PDU_CTX(
				     phwi_ctrlr, ulp_num);
			for (cri = 0; cri <
			     phba->params.cxns_per_ctrl; cri++) {
				if (ulp_num == BEISCSI_GET_ULP_FROM_CRI
					       (phwi_ctrlr, cri))
					pasync_ctx->cid_to_async_cri_map[
					phwi_ctrlr->wrb_context[cri].cid] =
					async_arr_idx++;
			}
		}
	}

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : hwi_init_port success\n");
	return 0;

error:
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
		    "BM_%d : hwi_init_port failed");
	hwi_cleanup(phba);
	return status;
}

/*
 * hwi_init_controller()- Locate the HWI context in the preallocated
 * HWI_MEM_ADDN_CONTEXT region, then initialise templates, WRB handles,
 * the async PDU context and finally the port (queues).
 */
static int hwi_init_controller(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;

	phwi_ctrlr = phba->phwi_ctrlr;
	if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
		phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
		    init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n",
phwi_ctrlr->phwi_ctxt);
	} else {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : HWI_MEM_ADDN_CONTEXT is more "
			    "than one element.Failing to load\n");
		return -ENOMEM;
	}

	iscsi_init_global_templates(phba);
	if (beiscsi_init_wrb_handle(phba))
		return -ENOMEM;

	if (hwi_init_async_pdu_ctx(phba)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : hwi_init_async_pdu_ctx failed\n");
		return -ENOMEM;
	}

	if (hwi_init_port(phba) != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : hwi_init_controller failed\n");

		return -ENOMEM;
	}
	return 0;
}

/*
 * beiscsi_free_mem()- Free every DMA region recorded in phba->init_mem
 * (all SE_MEM_MAX descriptors, each element in reverse), then the
 * descriptor arrays and the HWI controller bookkeeping.
 */
static void beiscsi_free_mem(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr;
	int i, j;

	mem_descr = phba->init_mem;
	i = 0;
	j = 0;
	for (i = 0; i < SE_MEM_MAX; i++) {
		for (j = mem_descr->num_elements; j > 0; j--) {
			pci_free_consistent(phba->pcidev,
			  mem_descr->mem_array[j - 1].size,
			  mem_descr->mem_array[j - 1].virtual_address,
			  (unsigned long)mem_descr->mem_array[j - 1].
			  bus_address.u.a64.address);
		}

		kfree(mem_descr->mem_array);
		mem_descr++;
	}
	kfree(phba->init_mem);
	kfree(phba->phwi_ctrlr->wrb_context);
	kfree(phba->phwi_ctrlr);
}

/*
 * beiscsi_init_controller()- Allocate the driver memory regions and run
 * hwi_init_controller(); frees everything again if HWI init fails.
 */
static int beiscsi_init_controller(struct beiscsi_hba *phba)
{
	int ret = -ENOMEM;

	ret = beiscsi_get_memory(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_dev_probe -"
			    "Failed in beiscsi_alloc_memory\n");
		return ret;
	}

	ret = hwi_init_controller(phba);
	if (ret)
		goto free_init;
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : Return success from beiscsi_init_controller");

	return 0;

free_init:
	beiscsi_free_mem(phba);
	return ret;
}

/*
 * beiscsi_init_sgl_handle()- Build the I/O and EH SGL handle pools.
 * Handle structs live in HWI_MEM_SGLH; each handle is then pointed at
 * its iscsi_sge fragment array inside HWI_MEM_SGE and given an absolute
 * sgl_index starting at the first supported ULP's ICD start. The first
 * ios_per_ctrl handles form the I/O pool, the remainder the EH pool.
 */
static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
	struct sgl_handle *psgl_handle;
	struct iscsi_sge *pfrag;
	unsigned int arr_index, i, idx;
	unsigned int ulp_icd_start, ulp_num = 0;

	phba->io_sgl_hndl_avbl = 0;
	phba->eh_sgl_hndl_avbl = 0;

	mem_descr_sglh = phba->init_mem;
	mem_descr_sglh += HWI_MEM_SGLH;
	if (1 == mem_descr_sglh->num_elements) {
		phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
						 phba->params.ios_per_ctrl,
						 GFP_KERNEL);
		if (!phba->io_sgl_hndl_base) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : Mem Alloc Failed. Failing to load\n");
			return -ENOMEM;
		}
		phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
						 (phba->params.icds_per_ctrl -
						  phba->params.ios_per_ctrl),
						 GFP_KERNEL);
		if (!phba->eh_sgl_hndl_base) {
			kfree(phba->io_sgl_hndl_base);
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : Mem Alloc Failed. Failing to load\n");
			return -ENOMEM;
		}
	} else {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : HWI_MEM_SGLH is more than one element."
			    "Failing to load\n");
		return -ENOMEM;
	}

	/* partition the handle structs into the two pools */
	arr_index = 0;
	idx = 0;
	while (idx < mem_descr_sglh->num_elements) {
		psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;

		for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
				 sizeof(struct sgl_handle)); i++) {
			if (arr_index < phba->params.ios_per_ctrl) {
				phba->io_sgl_hndl_base[arr_index] = psgl_handle;
				phba->io_sgl_hndl_avbl++;
				arr_index++;
			} else {
				phba->eh_sgl_hndl_base[arr_index -
					phba->params.ios_per_ctrl] =
					psgl_handle;
				arr_index++;
				phba->eh_sgl_hndl_avbl++;
			}
			psgl_handle++;
		}
		idx++;
	}
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : phba->io_sgl_hndl_avbl=%d"
		    "phba->eh_sgl_hndl_avbl=%d\n",
		    phba->io_sgl_hndl_avbl,
		    phba->eh_sgl_hndl_avbl);

	mem_descr_sg = phba->init_mem;
	mem_descr_sg += HWI_MEM_SGE;
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "\n BM_%d : mem_descr_sg->num_elements=%d\n",
		    mem_descr_sg->num_elements);

	/* sgl_index is absolute: offset by the first supported ULP's ICD */
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
			break;

	ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num];

	arr_index = 0;
	idx = 0;
	while (idx < mem_descr_sg->num_elements) {
		pfrag = mem_descr_sg->mem_array[idx].virtual_address;

		for (i = 0;
		     i < (mem_descr_sg->mem_array[idx].size) /
		     (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
		     i++) {
			if (arr_index < phba->params.ios_per_ctrl)
				psgl_handle = phba->io_sgl_hndl_base[arr_index];
			else
				psgl_handle = phba->eh_sgl_hndl_base[arr_index -
						phba->params.ios_per_ctrl];
			psgl_handle->pfrag = pfrag;
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
			pfrag += phba->params.num_sge_per_io;
			psgl_handle->sgl_index = ulp_icd_start + arr_index++;
		}
		idx++;
	}
	phba->io_sgl_free_index = 0;
	phba->io_sgl_alloc_index = 0;
	phba->eh_sgl_free_index = 0;
	phba->eh_sgl_alloc_index = 0;
	return 0;
}

/*
 * hba_setup_cid_tbls()- Build per-ULP CID arrays plus the global
 * endpoint and connection tables, then fill the CID arrays from the
 * WRB contexts. Frees all per-ULP allocations on any failure.
 */
static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
{
	int ret;
	uint16_t i, ulp_num;
	struct ulp_cid_info *ptr_cid_info = NULL;

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
			ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info),
					       GFP_KERNEL);

			if (!ptr_cid_info) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : Failed to allocate memory"
					    "for ULP_CID_INFO for ULP : %d\n",
					    ulp_num);
				ret = -ENOMEM;
				goto free_memory;

			}

			/* Allocate memory for CID array */
			ptr_cid_info->cid_array = kzalloc(sizeof(void *) *
						  BEISCSI_GET_CID_COUNT(phba,
						  ulp_num), GFP_KERNEL);
			if (!ptr_cid_info->cid_array) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : Failed to allocate memory"
					    "for CID_ARRAY for ULP : %d\n",
					    ulp_num);
				kfree(ptr_cid_info);
				ptr_cid_info = NULL;
				ret = -ENOMEM;

				goto free_memory;
			}
			ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT(
						   phba, ulp_num);

			/* Save the cid_info_array ptr */
			phba->cid_array_info[ulp_num] = ptr_cid_info;
		}
	}
	phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
				 phba->params.cxns_per_ctrl, GFP_KERNEL);
	if (!phba->ep_array) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Failed to allocate memory in "
			    "hba_setup_cid_tbls\n");
		ret = -ENOMEM;

		goto free_memory;
	}

	phba->conn_table =
kzalloc(sizeof(struct beiscsi_conn *) * 4229 phba->params.cxns_per_ctrl, GFP_KERNEL); 4230 if (!phba->conn_table) { 4231 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4232 "BM_%d : Failed to allocate memory in" 4233 "hba_setup_cid_tbls\n"); 4234 4235 kfree(phba->ep_array); 4236 phba->ep_array = NULL; 4237 ret = -ENOMEM; 4238 4239 goto free_memory; 4240 } 4241 4242 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 4243 ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num; 4244 4245 ptr_cid_info = phba->cid_array_info[ulp_num]; 4246 ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] = 4247 phba->phwi_ctrlr->wrb_context[i].cid; 4248 4249 } 4250 4251 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4252 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4253 ptr_cid_info = phba->cid_array_info[ulp_num]; 4254 4255 ptr_cid_info->cid_alloc = 0; 4256 ptr_cid_info->cid_free = 0; 4257 } 4258 } 4259 return 0; 4260 4261 free_memory: 4262 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4263 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4264 ptr_cid_info = phba->cid_array_info[ulp_num]; 4265 4266 if (ptr_cid_info) { 4267 kfree(ptr_cid_info->cid_array); 4268 kfree(ptr_cid_info); 4269 phba->cid_array_info[ulp_num] = NULL; 4270 } 4271 } 4272 } 4273 4274 return ret; 4275 } 4276 4277 static void hwi_enable_intr(struct beiscsi_hba *phba) 4278 { 4279 struct be_ctrl_info *ctrl = &phba->ctrl; 4280 struct hwi_controller *phwi_ctrlr; 4281 struct hwi_context_memory *phwi_context; 4282 struct be_queue_info *eq; 4283 u8 __iomem *addr; 4284 u32 reg, i; 4285 u32 enabled; 4286 4287 phwi_ctrlr = phba->phwi_ctrlr; 4288 phwi_context = phwi_ctrlr->phwi_ctxt; 4289 4290 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg + 4291 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); 4292 reg = ioread32(addr); 4293 4294 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4295 if (!enabled) { 4296 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4297 beiscsi_log(phba, 
KERN_INFO, BEISCSI_LOG_INIT, 4298 "BM_%d : reg =x%08x addr=%p\n", reg, addr); 4299 iowrite32(reg, addr); 4300 } 4301 4302 if (!phba->msix_enabled) { 4303 eq = &phwi_context->be_eq[0].q; 4304 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4305 "BM_%d : eq->id=%d\n", eq->id); 4306 4307 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 4308 } else { 4309 for (i = 0; i <= phba->num_cpus; i++) { 4310 eq = &phwi_context->be_eq[i].q; 4311 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4312 "BM_%d : eq->id=%d\n", eq->id); 4313 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 4314 } 4315 } 4316 } 4317 4318 static void hwi_disable_intr(struct beiscsi_hba *phba) 4319 { 4320 struct be_ctrl_info *ctrl = &phba->ctrl; 4321 4322 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET; 4323 u32 reg = ioread32(addr); 4324 4325 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4326 if (enabled) { 4327 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4328 iowrite32(reg, addr); 4329 } else 4330 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 4331 "BM_%d : In hwi_disable_intr, Already Disabled\n"); 4332 } 4333 4334 /** 4335 * beiscsi_get_boot_info()- Get the boot session info 4336 * @phba: The device priv structure instance 4337 * 4338 * Get the boot target info and store in driver priv structure 4339 * 4340 * return values 4341 * Success: 0 4342 * Failure: Non-Zero Value 4343 **/ 4344 static int beiscsi_get_boot_info(struct beiscsi_hba *phba) 4345 { 4346 struct be_cmd_get_session_resp *session_resp; 4347 struct be_dma_mem nonemb_cmd; 4348 unsigned int tag; 4349 unsigned int s_handle; 4350 int ret = -ENOMEM; 4351 4352 /* Get the session handle of the boot target */ 4353 ret = be_mgmt_get_boot_shandle(phba, &s_handle); 4354 if (ret) { 4355 beiscsi_log(phba, KERN_ERR, 4356 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 4357 "BM_%d : No boot session\n"); 4358 4359 if (ret == -ENXIO) 4360 phba->get_boot = 0; 4361 4362 4363 return ret; 4364 } 4365 phba->get_boot = 0; 4366 nonemb_cmd.va = 
pci_zalloc_consistent(phba->ctrl.pdev, 4367 sizeof(*session_resp), 4368 &nonemb_cmd.dma); 4369 if (nonemb_cmd.va == NULL) { 4370 beiscsi_log(phba, KERN_ERR, 4371 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 4372 "BM_%d : Failed to allocate memory for" 4373 "beiscsi_get_session_info\n"); 4374 4375 return -ENOMEM; 4376 } 4377 4378 tag = mgmt_get_session_info(phba, s_handle, 4379 &nonemb_cmd); 4380 if (!tag) { 4381 beiscsi_log(phba, KERN_ERR, 4382 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 4383 "BM_%d : beiscsi_get_session_info" 4384 " Failed\n"); 4385 4386 goto boot_freemem; 4387 } 4388 4389 ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); 4390 if (ret) { 4391 beiscsi_log(phba, KERN_ERR, 4392 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 4393 "BM_%d : beiscsi_get_session_info Failed"); 4394 4395 if (ret != -EBUSY) 4396 goto boot_freemem; 4397 else 4398 return ret; 4399 } 4400 4401 session_resp = nonemb_cmd.va ; 4402 4403 memcpy(&phba->boot_sess, &session_resp->session_info, 4404 sizeof(struct mgmt_session_info)); 4405 4406 beiscsi_logout_fw_sess(phba, 4407 phba->boot_sess.session_handle); 4408 ret = 0; 4409 4410 boot_freemem: 4411 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4412 nonemb_cmd.va, nonemb_cmd.dma); 4413 return ret; 4414 } 4415 4416 static void beiscsi_boot_release(void *data) 4417 { 4418 struct beiscsi_hba *phba = data; 4419 4420 scsi_host_put(phba->shost); 4421 } 4422 4423 static int beiscsi_setup_boot_info(struct beiscsi_hba *phba) 4424 { 4425 struct iscsi_boot_kobj *boot_kobj; 4426 4427 /* it has been created previously */ 4428 if (phba->boot_kset) 4429 return 0; 4430 4431 /* get boot info using mgmt cmd */ 4432 if (beiscsi_get_boot_info(phba)) 4433 /* Try to see if we can carry on without this */ 4434 return 0; 4435 4436 phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no); 4437 if (!phba->boot_kset) 4438 return -ENOMEM; 4439 4440 /* get a ref because the show function will ref the phba */ 4441 if (!scsi_host_get(phba->shost)) 4442 goto 
free_kset; 4443 boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba, 4444 beiscsi_show_boot_tgt_info, 4445 beiscsi_tgt_get_attr_visibility, 4446 beiscsi_boot_release); 4447 if (!boot_kobj) 4448 goto put_shost; 4449 4450 if (!scsi_host_get(phba->shost)) 4451 goto free_kset; 4452 boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba, 4453 beiscsi_show_boot_ini_info, 4454 beiscsi_ini_get_attr_visibility, 4455 beiscsi_boot_release); 4456 if (!boot_kobj) 4457 goto put_shost; 4458 4459 if (!scsi_host_get(phba->shost)) 4460 goto free_kset; 4461 boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba, 4462 beiscsi_show_boot_eth_info, 4463 beiscsi_eth_get_attr_visibility, 4464 beiscsi_boot_release); 4465 if (!boot_kobj) 4466 goto put_shost; 4467 return 0; 4468 4469 put_shost: 4470 scsi_host_put(phba->shost); 4471 free_kset: 4472 iscsi_boot_destroy_kset(phba->boot_kset); 4473 return -ENOMEM; 4474 } 4475 4476 static int beiscsi_init_port(struct beiscsi_hba *phba) 4477 { 4478 int ret; 4479 4480 ret = beiscsi_init_controller(phba); 4481 if (ret < 0) { 4482 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4483 "BM_%d : beiscsi_dev_probe - Failed in" 4484 "beiscsi_init_controller\n"); 4485 return ret; 4486 } 4487 ret = beiscsi_init_sgl_handle(phba); 4488 if (ret < 0) { 4489 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4490 "BM_%d : beiscsi_dev_probe - Failed in" 4491 "beiscsi_init_sgl_handle\n"); 4492 goto do_cleanup_ctrlr; 4493 } 4494 4495 if (hba_setup_cid_tbls(phba)) { 4496 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4497 "BM_%d : Failed in hba_setup_cid_tbls\n"); 4498 kfree(phba->io_sgl_hndl_base); 4499 kfree(phba->eh_sgl_hndl_base); 4500 goto do_cleanup_ctrlr; 4501 } 4502 4503 return ret; 4504 4505 do_cleanup_ctrlr: 4506 hwi_cleanup(phba); 4507 return ret; 4508 } 4509 4510 static void hwi_purge_eq(struct beiscsi_hba *phba) 4511 { 4512 struct hwi_controller *phwi_ctrlr; 4513 struct hwi_context_memory *phwi_context; 4514 struct be_queue_info *eq; 
/* (fragment) remainder of hwi_purge_eq(): locals and the drain loop. */
	struct be_eq_entry *eqe = NULL;
	int i, eq_msix;
	unsigned int num_processed;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	/* one extra EQ serves the MCC when running with MSI-X */
	if (phba->msix_enabled)
		eq_msix = 1;
	else
		eq_msix = 0;

	for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
		eq = &phwi_context->be_eq[i].q;
		eqe = queue_tail_node(eq);
		num_processed = 0;
		/* consume every still-valid entry on this event queue */
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_processed++;
		}

		/* acknowledge the drained entries to the hardware */
		if (num_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
	}
}

/*
 * Tear down the port: ask the FW to release per-ULP endpoint resources,
 * drain and destroy the HW queues, then free every host-side lookup
 * table allocated during port init.
 */
static void beiscsi_clean_port(struct beiscsi_hba *phba)
{
	int mgmt_status, ulp_num;
	struct ulp_cid_info *ptr_cid_info = NULL;

	/* FW-side endpoint cleanup, per supported ULP (best effort) */
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
			mgmt_status = mgmt_epfw_cleanup(phba, ulp_num);
			if (mgmt_status)
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : mgmt_epfw_cleanup FAILED"
					    " for ULP_%d\n", ulp_num);
		}
	}

	hwi_purge_eq(phba);
	hwi_cleanup(phba);
	kfree(phba->io_sgl_hndl_base);
	kfree(phba->eh_sgl_hndl_base);
	kfree(phba->ep_array);
	kfree(phba->conn_table);

	/* free the per-ULP CID arrays built in hba_setup_cid_tbls() */
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
			ptr_cid_info = phba->cid_array_info[ulp_num];

			if (ptr_cid_info) {
				kfree(ptr_cid_info->cid_array);
				kfree(ptr_cid_info);
				phba->cid_array_info[ulp_num] = NULL;
			}
		}
	}

}

/**
 * beiscsi_free_mgmt_task_handles()- Free driver CXN resources
 * @beiscsi_conn: ptr to the conn to be cleaned up
 * @task: ptr to iscsi_task resource to be freed.
 *
 * Free driver mgmt resources binded to CXN: the WRB handle, the
 * management SGL handle and any single-buffer PCI mapping set up for
 * the task's data.
 **/
void
beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
			       struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	uint16_t cri_index = BE_GET_CRI_FROM_CID(
			     beiscsi_conn->beiscsi_conn_cid);

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];

	io_task = task->dd_data;

	/* scrub the WRB before returning it so stale contents can't post */
	if (io_task->pwrb_handle) {
		memset(io_task->pwrb_handle->pwrb, 0,
		       sizeof(struct iscsi_wrb));
		free_wrb_handle(phba, pwrb_context,
				io_task->pwrb_handle);
		io_task->pwrb_handle = NULL;
	}

	/* return the mgmt SGL handle under its pool lock */
	if (io_task->psgl_handle) {
		spin_lock_bh(&phba->mgmt_sgl_lock);
		free_mgmt_sgl_handle(phba,
				     io_task->psgl_handle);
		io_task->psgl_handle = NULL;
		spin_unlock_bh(&phba->mgmt_sgl_lock);
	}

	/* undo the pci_map_single() done when the mgmt PDU was queued */
	if (io_task->mtask_addr) {
		pci_unmap_single(phba->pcidev,
				 io_task->mtask_addr,
				 io_task->mtask_data_count,
				 PCI_DMA_TODEVICE);
		io_task->mtask_addr = 0;
	}
}

/**
 * beiscsi_cleanup_task()- Free driver resources of the task
 * @task: ptr to the iscsi task
 *
 **/
static void beiscsi_cleanup_task(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	uint16_t cri_index = BE_GET_CRI_FROM_CID(
			     beiscsi_conn->beiscsi_conn_cid);

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];

	if (io_task->cmd_bhs) {
4649 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4650 io_task->bhs_pa.u.a64.address); 4651 io_task->cmd_bhs = NULL; 4652 } 4653 4654 if (task->sc) { 4655 if (io_task->pwrb_handle) { 4656 free_wrb_handle(phba, pwrb_context, 4657 io_task->pwrb_handle); 4658 io_task->pwrb_handle = NULL; 4659 } 4660 4661 if (io_task->psgl_handle) { 4662 spin_lock(&phba->io_sgl_lock); 4663 free_io_sgl_handle(phba, io_task->psgl_handle); 4664 spin_unlock(&phba->io_sgl_lock); 4665 io_task->psgl_handle = NULL; 4666 } 4667 4668 if (io_task->scsi_cmnd) { 4669 scsi_dma_unmap(io_task->scsi_cmnd); 4670 io_task->scsi_cmnd = NULL; 4671 } 4672 } else { 4673 if (!beiscsi_conn->login_in_progress) 4674 beiscsi_free_mgmt_task_handles(beiscsi_conn, task); 4675 } 4676 } 4677 4678 void 4679 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, 4680 struct beiscsi_offload_params *params) 4681 { 4682 struct wrb_handle *pwrb_handle; 4683 struct hwi_wrb_context *pwrb_context = NULL; 4684 struct beiscsi_hba *phba = beiscsi_conn->phba; 4685 struct iscsi_task *task = beiscsi_conn->task; 4686 struct iscsi_session *session = task->conn->session; 4687 u32 doorbell = 0; 4688 4689 /* 4690 * We can always use 0 here because it is reserved by libiscsi for 4691 * login/startup related tasks. 
4692 */ 4693 beiscsi_conn->login_in_progress = 0; 4694 spin_lock_bh(&session->back_lock); 4695 beiscsi_cleanup_task(task); 4696 spin_unlock_bh(&session->back_lock); 4697 4698 pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, 4699 &pwrb_context); 4700 4701 /* Check for the adapter family */ 4702 if (is_chip_be2_be3r(phba)) 4703 beiscsi_offload_cxn_v0(params, pwrb_handle, 4704 phba->init_mem, 4705 pwrb_context); 4706 else 4707 beiscsi_offload_cxn_v2(params, pwrb_handle, 4708 pwrb_context); 4709 4710 be_dws_le_to_cpu(pwrb_handle->pwrb, 4711 sizeof(struct iscsi_target_context_update_wrb)); 4712 4713 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4714 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) 4715 << DB_DEF_PDU_WRB_INDEX_SHIFT; 4716 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4717 iowrite32(doorbell, phba->db_va + 4718 beiscsi_conn->doorbell_offset); 4719 } 4720 4721 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, 4722 int *index, int *age) 4723 { 4724 *index = (int)itt; 4725 if (age) 4726 *age = conn->session->age; 4727 } 4728 4729 /** 4730 * beiscsi_alloc_pdu - allocates pdu and related resources 4731 * @task: libiscsi task 4732 * @opcode: opcode of pdu for task 4733 * 4734 * This is called with the session lock held. It will allocate 4735 * the wrb and sgl if needed for the command. And it will prep 4736 * the pdu's itt. beiscsi_parse_pdu will later translate 4737 * the pdu itt to the libiscsi task itt. 
4738 */ 4739 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) 4740 { 4741 struct beiscsi_io_task *io_task = task->dd_data; 4742 struct iscsi_conn *conn = task->conn; 4743 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4744 struct beiscsi_hba *phba = beiscsi_conn->phba; 4745 struct hwi_wrb_context *pwrb_context; 4746 struct hwi_controller *phwi_ctrlr; 4747 itt_t itt; 4748 uint16_t cri_index = 0; 4749 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4750 dma_addr_t paddr; 4751 4752 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool, 4753 GFP_ATOMIC, &paddr); 4754 if (!io_task->cmd_bhs) 4755 return -ENOMEM; 4756 io_task->bhs_pa.u.a64.address = paddr; 4757 io_task->libiscsi_itt = (itt_t)task->itt; 4758 io_task->conn = beiscsi_conn; 4759 4760 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr; 4761 task->hdr_max = sizeof(struct be_cmd_bhs); 4762 io_task->psgl_handle = NULL; 4763 io_task->pwrb_handle = NULL; 4764 4765 if (task->sc) { 4766 spin_lock(&phba->io_sgl_lock); 4767 io_task->psgl_handle = alloc_io_sgl_handle(phba); 4768 spin_unlock(&phba->io_sgl_lock); 4769 if (!io_task->psgl_handle) { 4770 beiscsi_log(phba, KERN_ERR, 4771 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4772 "BM_%d : Alloc of IO_SGL_ICD Failed" 4773 "for the CID : %d\n", 4774 beiscsi_conn->beiscsi_conn_cid); 4775 goto free_hndls; 4776 } 4777 io_task->pwrb_handle = alloc_wrb_handle(phba, 4778 beiscsi_conn->beiscsi_conn_cid, 4779 &io_task->pwrb_context); 4780 if (!io_task->pwrb_handle) { 4781 beiscsi_log(phba, KERN_ERR, 4782 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4783 "BM_%d : Alloc of WRB_HANDLE Failed" 4784 "for the CID : %d\n", 4785 beiscsi_conn->beiscsi_conn_cid); 4786 goto free_io_hndls; 4787 } 4788 } else { 4789 io_task->scsi_cmnd = NULL; 4790 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { 4791 beiscsi_conn->task = task; 4792 if (!beiscsi_conn->login_in_progress) { 4793 spin_lock(&phba->mgmt_sgl_lock); 4794 io_task->psgl_handle = (struct 
sgl_handle *) 4795 alloc_mgmt_sgl_handle(phba); 4796 spin_unlock(&phba->mgmt_sgl_lock); 4797 if (!io_task->psgl_handle) { 4798 beiscsi_log(phba, KERN_ERR, 4799 BEISCSI_LOG_IO | 4800 BEISCSI_LOG_CONFIG, 4801 "BM_%d : Alloc of MGMT_SGL_ICD Failed" 4802 "for the CID : %d\n", 4803 beiscsi_conn-> 4804 beiscsi_conn_cid); 4805 goto free_hndls; 4806 } 4807 4808 beiscsi_conn->login_in_progress = 1; 4809 beiscsi_conn->plogin_sgl_handle = 4810 io_task->psgl_handle; 4811 io_task->pwrb_handle = 4812 alloc_wrb_handle(phba, 4813 beiscsi_conn->beiscsi_conn_cid, 4814 &io_task->pwrb_context); 4815 if (!io_task->pwrb_handle) { 4816 beiscsi_log(phba, KERN_ERR, 4817 BEISCSI_LOG_IO | 4818 BEISCSI_LOG_CONFIG, 4819 "BM_%d : Alloc of WRB_HANDLE Failed" 4820 "for the CID : %d\n", 4821 beiscsi_conn-> 4822 beiscsi_conn_cid); 4823 goto free_mgmt_hndls; 4824 } 4825 beiscsi_conn->plogin_wrb_handle = 4826 io_task->pwrb_handle; 4827 4828 } else { 4829 io_task->psgl_handle = 4830 beiscsi_conn->plogin_sgl_handle; 4831 io_task->pwrb_handle = 4832 beiscsi_conn->plogin_wrb_handle; 4833 } 4834 } else { 4835 spin_lock(&phba->mgmt_sgl_lock); 4836 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba); 4837 spin_unlock(&phba->mgmt_sgl_lock); 4838 if (!io_task->psgl_handle) { 4839 beiscsi_log(phba, KERN_ERR, 4840 BEISCSI_LOG_IO | 4841 BEISCSI_LOG_CONFIG, 4842 "BM_%d : Alloc of MGMT_SGL_ICD Failed" 4843 "for the CID : %d\n", 4844 beiscsi_conn-> 4845 beiscsi_conn_cid); 4846 goto free_hndls; 4847 } 4848 io_task->pwrb_handle = 4849 alloc_wrb_handle(phba, 4850 beiscsi_conn->beiscsi_conn_cid, 4851 &io_task->pwrb_context); 4852 if (!io_task->pwrb_handle) { 4853 beiscsi_log(phba, KERN_ERR, 4854 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4855 "BM_%d : Alloc of WRB_HANDLE Failed" 4856 "for the CID : %d\n", 4857 beiscsi_conn->beiscsi_conn_cid); 4858 goto free_mgmt_hndls; 4859 } 4860 4861 } 4862 } 4863 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle-> 4864 wrb_index << 16) | (unsigned int) 4865 
(io_task->psgl_handle->sgl_index)); 4866 io_task->pwrb_handle->pio_handle = task; 4867 4868 io_task->cmd_bhs->iscsi_hdr.itt = itt; 4869 return 0; 4870 4871 free_io_hndls: 4872 spin_lock(&phba->io_sgl_lock); 4873 free_io_sgl_handle(phba, io_task->psgl_handle); 4874 spin_unlock(&phba->io_sgl_lock); 4875 goto free_hndls; 4876 free_mgmt_hndls: 4877 spin_lock(&phba->mgmt_sgl_lock); 4878 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 4879 io_task->psgl_handle = NULL; 4880 spin_unlock(&phba->mgmt_sgl_lock); 4881 free_hndls: 4882 phwi_ctrlr = phba->phwi_ctrlr; 4883 cri_index = BE_GET_CRI_FROM_CID( 4884 beiscsi_conn->beiscsi_conn_cid); 4885 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4886 if (io_task->pwrb_handle) 4887 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 4888 io_task->pwrb_handle = NULL; 4889 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4890 io_task->bhs_pa.u.a64.address); 4891 io_task->cmd_bhs = NULL; 4892 return -ENOMEM; 4893 } 4894 int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, 4895 unsigned int num_sg, unsigned int xferlen, 4896 unsigned int writedir) 4897 { 4898 4899 struct beiscsi_io_task *io_task = task->dd_data; 4900 struct iscsi_conn *conn = task->conn; 4901 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4902 struct beiscsi_hba *phba = beiscsi_conn->phba; 4903 struct iscsi_wrb *pwrb = NULL; 4904 unsigned int doorbell = 0; 4905 4906 pwrb = io_task->pwrb_handle->pwrb; 4907 4908 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0; 4909 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4910 4911 if (writedir) { 4912 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, 4913 INI_WR_CMD); 4914 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1); 4915 } else { 4916 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, 4917 INI_RD_CMD); 4918 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0); 4919 } 4920 4921 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2, 4922 type, pwrb); 4923 4924 
AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb, 4925 cpu_to_be16(*(unsigned short *) 4926 &io_task->cmd_bhs->iscsi_hdr.lun)); 4927 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen); 4928 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, 4929 io_task->pwrb_handle->wrb_index); 4930 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, 4931 be32_to_cpu(task->cmdsn)); 4932 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, 4933 io_task->psgl_handle->sgl_index); 4934 4935 hwi_write_sgl_v2(pwrb, sg, num_sg, io_task); 4936 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, 4937 io_task->pwrb_handle->wrb_index); 4938 if (io_task->pwrb_context->plast_wrb) 4939 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, 4940 io_task->pwrb_context->plast_wrb, 4941 io_task->pwrb_handle->wrb_index); 4942 io_task->pwrb_context->plast_wrb = pwrb; 4943 4944 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 4945 4946 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4947 doorbell |= (io_task->pwrb_handle->wrb_index & 4948 DB_DEF_PDU_WRB_INDEX_MASK) << 4949 DB_DEF_PDU_WRB_INDEX_SHIFT; 4950 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4951 iowrite32(doorbell, phba->db_va + 4952 beiscsi_conn->doorbell_offset); 4953 return 0; 4954 } 4955 4956 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, 4957 unsigned int num_sg, unsigned int xferlen, 4958 unsigned int writedir) 4959 { 4960 4961 struct beiscsi_io_task *io_task = task->dd_data; 4962 struct iscsi_conn *conn = task->conn; 4963 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4964 struct beiscsi_hba *phba = beiscsi_conn->phba; 4965 struct iscsi_wrb *pwrb = NULL; 4966 unsigned int doorbell = 0; 4967 4968 pwrb = io_task->pwrb_handle->pwrb; 4969 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0; 4970 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4971 4972 if (writedir) { 4973 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4974 INI_WR_CMD); 4975 AMAP_SET_BITS(struct 
amap_iscsi_wrb, dsp, pwrb, 1); 4976 } else { 4977 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4978 INI_RD_CMD); 4979 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 4980 } 4981 4982 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb, 4983 type, pwrb); 4984 4985 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb, 4986 cpu_to_be16(*(unsigned short *) 4987 &io_task->cmd_bhs->iscsi_hdr.lun)); 4988 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen); 4989 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4990 io_task->pwrb_handle->wrb_index); 4991 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4992 be32_to_cpu(task->cmdsn)); 4993 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 4994 io_task->psgl_handle->sgl_index); 4995 4996 hwi_write_sgl(pwrb, sg, num_sg, io_task); 4997 4998 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4999 io_task->pwrb_handle->wrb_index); 5000 if (io_task->pwrb_context->plast_wrb) 5001 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, 5002 io_task->pwrb_context->plast_wrb, 5003 io_task->pwrb_handle->wrb_index); 5004 io_task->pwrb_context->plast_wrb = pwrb; 5005 5006 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 5007 5008 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 5009 doorbell |= (io_task->pwrb_handle->wrb_index & 5010 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 5011 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 5012 5013 iowrite32(doorbell, phba->db_va + 5014 beiscsi_conn->doorbell_offset); 5015 return 0; 5016 } 5017 5018 static int beiscsi_mtask(struct iscsi_task *task) 5019 { 5020 struct beiscsi_io_task *io_task = task->dd_data; 5021 struct iscsi_conn *conn = task->conn; 5022 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 5023 struct beiscsi_hba *phba = beiscsi_conn->phba; 5024 struct iscsi_wrb *pwrb = NULL; 5025 unsigned int doorbell = 0; 5026 unsigned int cid; 5027 unsigned int pwrb_typeoffset = 0; 5028 5029 cid = beiscsi_conn->beiscsi_conn_cid; 5030 
pwrb = io_task->pwrb_handle->pwrb; 5031 memset(pwrb, 0, sizeof(*pwrb)); 5032 5033 if (is_chip_be2_be3r(phba)) { 5034 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 5035 be32_to_cpu(task->cmdsn)); 5036 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 5037 io_task->pwrb_handle->wrb_index); 5038 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 5039 io_task->psgl_handle->sgl_index); 5040 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, 5041 task->data_count); 5042 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 5043 io_task->pwrb_handle->wrb_index); 5044 if (io_task->pwrb_context->plast_wrb) 5045 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, 5046 io_task->pwrb_context->plast_wrb, 5047 io_task->pwrb_handle->wrb_index); 5048 io_task->pwrb_context->plast_wrb = pwrb; 5049 5050 pwrb_typeoffset = BE_WRB_TYPE_OFFSET; 5051 } else { 5052 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, 5053 be32_to_cpu(task->cmdsn)); 5054 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, 5055 io_task->pwrb_handle->wrb_index); 5056 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, 5057 io_task->psgl_handle->sgl_index); 5058 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, 5059 task->data_count); 5060 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, 5061 io_task->pwrb_handle->wrb_index); 5062 if (io_task->pwrb_context->plast_wrb) 5063 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, 5064 io_task->pwrb_context->plast_wrb, 5065 io_task->pwrb_handle->wrb_index); 5066 io_task->pwrb_context->plast_wrb = pwrb; 5067 5068 pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; 5069 } 5070 5071 5072 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { 5073 case ISCSI_OP_LOGIN: 5074 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); 5075 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 5076 hwi_write_buffer(pwrb, task); 5077 break; 5078 case ISCSI_OP_NOOP_OUT: 5079 if (task->hdr->ttt != ISCSI_RESERVED_TAG) { 5080 
ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 5081 if (is_chip_be2_be3r(phba)) 5082 AMAP_SET_BITS(struct amap_iscsi_wrb, 5083 dmsg, pwrb, 1); 5084 else 5085 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 5086 dmsg, pwrb, 1); 5087 } else { 5088 ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset); 5089 if (is_chip_be2_be3r(phba)) 5090 AMAP_SET_BITS(struct amap_iscsi_wrb, 5091 dmsg, pwrb, 0); 5092 else 5093 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 5094 dmsg, pwrb, 0); 5095 } 5096 hwi_write_buffer(pwrb, task); 5097 break; 5098 case ISCSI_OP_TEXT: 5099 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 5100 hwi_write_buffer(pwrb, task); 5101 break; 5102 case ISCSI_OP_SCSI_TMFUNC: 5103 ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset); 5104 hwi_write_buffer(pwrb, task); 5105 break; 5106 case ISCSI_OP_LOGOUT: 5107 ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset); 5108 hwi_write_buffer(pwrb, task); 5109 break; 5110 5111 default: 5112 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 5113 "BM_%d : opcode =%d Not supported\n", 5114 task->hdr->opcode & ISCSI_OPCODE_MASK); 5115 5116 return -EINVAL; 5117 } 5118 5119 /* Set the task type */ 5120 io_task->wrb_type = (is_chip_be2_be3r(phba)) ? 
5121 AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) : 5122 AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb); 5123 5124 doorbell |= cid & DB_WRB_POST_CID_MASK; 5125 doorbell |= (io_task->pwrb_handle->wrb_index & 5126 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 5127 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 5128 iowrite32(doorbell, phba->db_va + 5129 beiscsi_conn->doorbell_offset); 5130 return 0; 5131 } 5132 5133 static int beiscsi_task_xmit(struct iscsi_task *task) 5134 { 5135 struct beiscsi_io_task *io_task = task->dd_data; 5136 struct scsi_cmnd *sc = task->sc; 5137 struct beiscsi_hba *phba = NULL; 5138 struct scatterlist *sg; 5139 int num_sg; 5140 unsigned int writedir = 0, xferlen = 0; 5141 5142 phba = ((struct beiscsi_conn *)task->conn->dd_data)->phba; 5143 5144 if (!sc) 5145 return beiscsi_mtask(task); 5146 5147 io_task->scsi_cmnd = sc; 5148 num_sg = scsi_dma_map(sc); 5149 if (num_sg < 0) { 5150 struct iscsi_conn *conn = task->conn; 5151 struct beiscsi_hba *phba = NULL; 5152 5153 phba = ((struct beiscsi_conn *)conn->dd_data)->phba; 5154 beiscsi_log(phba, KERN_ERR, 5155 BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI, 5156 "BM_%d : scsi_dma_map Failed " 5157 "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n", 5158 be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt), 5159 io_task->libiscsi_itt, scsi_bufflen(sc)); 5160 5161 return num_sg; 5162 } 5163 xferlen = scsi_bufflen(sc); 5164 sg = scsi_sglist(sc); 5165 if (sc->sc_data_direction == DMA_TO_DEVICE) 5166 writedir = 1; 5167 else 5168 writedir = 0; 5169 5170 return phba->iotask_fn(task, sg, num_sg, xferlen, writedir); 5171 } 5172 5173 /** 5174 * beiscsi_bsg_request - handle bsg request from ISCSI transport 5175 * @job: job to handle 5176 */ 5177 static int beiscsi_bsg_request(struct bsg_job *job) 5178 { 5179 struct Scsi_Host *shost; 5180 struct beiscsi_hba *phba; 5181 struct iscsi_bsg_request *bsg_req = job->request; 5182 int rc = -EINVAL; 5183 unsigned int tag; 5184 struct be_dma_mem nonemb_cmd; 5185 struct 
be_cmd_resp_hdr *resp; 5186 struct iscsi_bsg_reply *bsg_reply = job->reply; 5187 unsigned short status, extd_status; 5188 5189 shost = iscsi_job_to_shost(job); 5190 phba = iscsi_host_priv(shost); 5191 5192 switch (bsg_req->msgcode) { 5193 case ISCSI_BSG_HST_VENDOR: 5194 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 5195 job->request_payload.payload_len, 5196 &nonemb_cmd.dma); 5197 if (nonemb_cmd.va == NULL) { 5198 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 5199 "BM_%d : Failed to allocate memory for " 5200 "beiscsi_bsg_request\n"); 5201 return -ENOMEM; 5202 } 5203 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job, 5204 &nonemb_cmd); 5205 if (!tag) { 5206 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 5207 "BM_%d : MBX Tag Allocation Failed\n"); 5208 5209 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 5210 nonemb_cmd.va, nonemb_cmd.dma); 5211 return -EAGAIN; 5212 } 5213 5214 rc = wait_event_interruptible_timeout( 5215 phba->ctrl.mcc_wait[tag], 5216 phba->ctrl.mcc_numtag[tag], 5217 msecs_to_jiffies( 5218 BEISCSI_HOST_MBX_TIMEOUT)); 5219 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8; 5220 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF; 5221 free_mcc_tag(&phba->ctrl, tag); 5222 resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va; 5223 sg_copy_from_buffer(job->reply_payload.sg_list, 5224 job->reply_payload.sg_cnt, 5225 nonemb_cmd.va, (resp->response_length 5226 + sizeof(*resp))); 5227 bsg_reply->reply_payload_rcv_len = resp->response_length; 5228 bsg_reply->result = status; 5229 bsg_job_done(job, bsg_reply->result, 5230 bsg_reply->reply_payload_rcv_len); 5231 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 5232 nonemb_cmd.va, nonemb_cmd.dma); 5233 if (status || extd_status) { 5234 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 5235 "BM_%d : MBX Cmd Failed" 5236 " status = %d extd_status = %d\n", 5237 status, extd_status); 5238 5239 return -EIO; 5240 } else { 5241 rc = 0; 5242 } 5243 break; 5244 5245 default: 5246 
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 5247 "BM_%d : Unsupported bsg command: 0x%x\n", 5248 bsg_req->msgcode); 5249 break; 5250 } 5251 5252 return rc; 5253 } 5254 5255 void beiscsi_hba_attrs_init(struct beiscsi_hba *phba) 5256 { 5257 /* Set the logging parameter */ 5258 beiscsi_log_enable_init(phba, beiscsi_log_enable); 5259 } 5260 5261 /* 5262 * beiscsi_quiesce()- Cleanup Driver resources 5263 * @phba: Instance Priv structure 5264 * @unload_state:i Clean or EEH unload state 5265 * 5266 * Free the OS and HW resources held by the driver 5267 **/ 5268 static void beiscsi_quiesce(struct beiscsi_hba *phba, 5269 uint32_t unload_state) 5270 { 5271 struct hwi_controller *phwi_ctrlr; 5272 struct hwi_context_memory *phwi_context; 5273 struct be_eq_obj *pbe_eq; 5274 unsigned int i, msix_vec; 5275 5276 phwi_ctrlr = phba->phwi_ctrlr; 5277 phwi_context = phwi_ctrlr->phwi_ctxt; 5278 hwi_disable_intr(phba); 5279 if (phba->msix_enabled) { 5280 for (i = 0; i <= phba->num_cpus; i++) { 5281 msix_vec = phba->msix_entries[i].vector; 5282 synchronize_irq(msix_vec); 5283 free_irq(msix_vec, &phwi_context->be_eq[i]); 5284 kfree(phba->msi_name[i]); 5285 } 5286 } else 5287 if (phba->pcidev->irq) { 5288 synchronize_irq(phba->pcidev->irq); 5289 free_irq(phba->pcidev->irq, phba); 5290 } 5291 pci_disable_msix(phba->pcidev); 5292 cancel_delayed_work_sync(&phba->beiscsi_hw_check_task); 5293 5294 for (i = 0; i < phba->num_cpus; i++) { 5295 pbe_eq = &phwi_context->be_eq[i]; 5296 blk_iopoll_disable(&pbe_eq->iopoll); 5297 } 5298 5299 if (unload_state == BEISCSI_CLEAN_UNLOAD) { 5300 destroy_workqueue(phba->wq); 5301 beiscsi_clean_port(phba); 5302 beiscsi_free_mem(phba); 5303 5304 beiscsi_unmap_pci_function(phba); 5305 pci_free_consistent(phba->pcidev, 5306 phba->ctrl.mbox_mem_alloced.size, 5307 phba->ctrl.mbox_mem_alloced.va, 5308 phba->ctrl.mbox_mem_alloced.dma); 5309 } else { 5310 hwi_purge_eq(phba); 5311 hwi_cleanup(phba); 5312 } 5313 5314 } 5315 5316 static void beiscsi_remove(struct 
pci_dev *pcidev)
{

	struct beiscsi_hba *phba = NULL;

	phba = pci_get_drvdata(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
		return;
	}

	/* Tear down in reverse order of probe: ifaces, HW quiesce, boot
	 * kset, SCSI host, then PCI resources.
	 */
	beiscsi_destroy_def_ifaces(phba);
	beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
	iscsi_boot_destroy_kset(phba->boot_kset);
	iscsi_host_remove(phba->shost);
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	pci_disable_pcie_error_reporting(pcidev);
	pci_set_drvdata(pcidev, NULL);
	pci_release_regions(pcidev);
	pci_disable_device(pcidev);
}

/*
 * beiscsi_shutdown()- PCI shutdown callback.
 * @pcidev: PCI device being shut down.
 *
 * Marks the adapter as shutting down, fails all active iSCSI sessions,
 * quiesces the hardware and disables the PCI device. Unlike remove,
 * no host/memory teardown is performed (system is going down).
 */
static void beiscsi_shutdown(struct pci_dev *pcidev)
{

	struct beiscsi_hba *phba = NULL;

	phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n");
		return;
	}

	phba->state = BE_ADAPTER_STATE_SHUTDOWN;
	iscsi_host_for_each_session(phba->shost, be2iscsi_fail_session);
	beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
	pci_disable_device(pcidev);
}

/*
 * beiscsi_msix_enable()- Try to enable MSI-X for the adapter.
 * @phba: driver private structure.
 *
 * Requests exactly num_cpus + 1 vectors (the extra entry matches the
 * additional event queue used for MCC handling elsewhere in this file).
 * On success sets phba->msix_enabled; on failure the caller falls back
 * to a single CPU / INTx operation.
 */
static void beiscsi_msix_enable(struct beiscsi_hba *phba)
{
	int i, status;

	for (i = 0; i <= phba->num_cpus; i++)
		phba->msix_entries[i].entry = i;

	/* all-or-nothing: min and max vector count are both num_cpus + 1 */
	status = pci_enable_msix_range(phba->pcidev, phba->msix_entries,
				       phba->num_cpus + 1, phba->num_cpus + 1);
	if (status > 0)
		phba->msix_enabled = true;

	return;
}

/*
 * be_eqd_update()- Adapt EQ interrupt delay to the observed event rate.
 * @phba: driver private structure.
 *
 * For every event queue, computes completions-per-second since the last
 * sample and derives a new EQ delay; queues that changed are batched
 * into a single MODIFY_EQ_DELAY mailbox command.
 */
static void be_eqd_update(struct beiscsi_hba *phba)
{
	struct be_set_eqd set_eqd[MAX_CPUS];
	struct be_aic_obj *aic;
	struct be_eq_obj *pbe_eq;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int eqd, i, num = 0;
	ulong now;
	u32 pps, delta;
	unsigned int tag;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	for (i = 0; i <= phba->num_cpus; i++) {
		aic = &phba->aic_obj[i];
		pbe_eq = &phwi_context->be_eq[i];
		now = jiffies;
		/* First sample, jiffies wrap, or cq_count wrap: just
		 * re-seed the baseline and skip this round.
		 */
		if (!aic->jiffs || time_before(now, aic->jiffs) ||
		    pbe_eq->cq_count < aic->eq_prev) {
			aic->jiffs = now;
			aic->eq_prev = pbe_eq->cq_count;
			continue;
		}
		/* NOTE(review): if this runs twice within the same jiffy,
		 * delta is 0 and the division below is a div-by-zero;
		 * callers appear to invoke this once per second — confirm.
		 */
		delta = jiffies_to_msecs(now - aic->jiffs);
		pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta);
		eqd = (pps / 1500) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, phwi_context->max_eqd);
		eqd = max_t(u32, eqd, phwi_context->min_eqd);

		aic->jiffs = now;
		aic->eq_prev = pbe_eq->cq_count;

		/* Only issue a FW update for queues whose delay changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = pbe_eq->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}
	if (num) {
		tag = be_cmd_modify_eq_delay(phba, set_eqd, num);
		if (tag)
			beiscsi_mccq_compl(phba, tag, NULL, NULL);
	}
}

/*
 * be_check_boot_session()- Retry iSCSI boot-info setup from async context.
 * @phba: driver private structure.
 *
 * Failure is only logged; boot info is best-effort.
 */
static void be_check_boot_session(struct beiscsi_hba *phba)
{
	if (beiscsi_setup_boot_info(phba))
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Could not set up "
			    "iSCSI boot info on async event.\n");
}

/*
 * beiscsi_hw_health_check()- Check adapter health
 * @work: work item to check HW health
 *
 * Periodic (1 second) delayed work: refreshes EQ delays, retries boot
 * session setup while BE_ADAPTER_CHECK_BOOT is pending, detects
 * unrecoverable adapter errors, and re-arms itself.
 **/
static void
beiscsi_hw_health_check(struct work_struct *work)
{
	struct beiscsi_hba *phba =
		container_of(work, struct beiscsi_hba,
			     beiscsi_hw_check_task.work);

	be_eqd_update(phba);

	if (phba->state & BE_ADAPTER_CHECK_BOOT) {
		if ((phba->get_boot > 0) && (!phba->boot_kset)) {
			/* retry every BE_GET_BOOT_TO ticks until the
			 * countdown expires or the kset appears
			 */
			phba->get_boot--;
			if (!(phba->get_boot % BE_GET_BOOT_TO))
				be_check_boot_session(phba);
		} else {
			phba->state &= ~BE_ADAPTER_CHECK_BOOT;
			phba->get_boot = 0;
		}
	}

	beiscsi_ue_detect(phba);

	schedule_delayed_work(&phba->beiscsi_hw_check_task,
			      msecs_to_jiffies(1000));
}


/*
 * beiscsi_eeh_err_detected()- PCI EEH error_detected callback.
 * @pdev: PCI device that hit the error.
 * @state: channel state reported by the EEH core.
 *
 * Quiesces the adapter and tells the EEH core whether to reset the slot
 * or disconnect the device.
 *
 * NOTE(review): phba is used without a NULL check, unlike
 * remove/shutdown above — confirm drvdata is always set when EEH
 * callbacks can fire.
 */
static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
		pci_channel_state_t state)
{
	struct beiscsi_hba *phba = NULL;

	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
	phba->state |= BE_ADAPTER_PCI_ERR;

	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
		    "BM_%d : EEH error detected\n");

	beiscsi_quiesce(phba, BEISCSI_EEH_UNLOAD);

	if (state == pci_channel_io_perm_failure) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : EEH : State PERM Failure");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 **/
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * beiscsi_eeh_reset()- PCI EEH slot_reset callback.
 * @pdev: PCI device that was reset.
 *
 * Re-enables the device, restores PCI state and waits for the chip
 * reset to complete before reporting recovery to the EEH core.
 */
static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
{
	struct beiscsi_hba *phba = NULL;
	int status = 0;

	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);

	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
		    "BM_%d : EEH Reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Wait for the CHIP Reset to complete */
	status = be_chk_reset_complete(phba);
	if (!status) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
			    "BM_%d : EEH Reset Completed\n");
	} else {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
			    "BM_%d : EEH Reset Completion Failure\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

/*
 * beiscsi_eeh_resume()- PCI EEH resume callback.
 * @pdev: PCI device being resumed.
 *
 * Re-runs the relevant part of probe-time initialization: MSI-X setup,
 * function reset, controller/queue init, IRQ registration; clears
 * BE_ADAPTER_PCI_ERR on success. Failures only log — the adapter stays
 * marked as errored.
 */
static void beiscsi_eeh_resume(struct pci_dev *pdev)
{
	int ret = 0, i;
	struct be_eq_obj *pbe_eq;
	struct beiscsi_hba *phba = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;

	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
	pci_save_state(pdev);

	if (enable_msix)
		find_num_cpus(phba);
	else
		phba->num_cpus = 1;

	if (enable_msix) {
		beiscsi_msix_enable(phba);
		if (!phba->msix_enabled)
			phba->num_cpus = 1;
	}

	ret = beiscsi_cmd_reset_function(phba);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Reset Failed\n");
		goto ret_err;
	}

	ret = be_chk_reset_complete(phba);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Failed to get out of reset.\n");
		goto ret_err;
	}

beiscsi_get_params(phba); 5566 phba->shost->max_id = phba->params.cxns_per_ctrl; 5567 phba->shost->can_queue = phba->params.ios_per_ctrl; 5568 ret = hwi_init_controller(phba); 5569 5570 for (i = 0; i < MAX_MCC_CMD; i++) { 5571 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); 5572 phba->ctrl.mcc_tag[i] = i + 1; 5573 phba->ctrl.mcc_numtag[i + 1] = 0; 5574 phba->ctrl.mcc_tag_available++; 5575 } 5576 5577 phwi_ctrlr = phba->phwi_ctrlr; 5578 phwi_context = phwi_ctrlr->phwi_ctxt; 5579 5580 for (i = 0; i < phba->num_cpus; i++) { 5581 pbe_eq = &phwi_context->be_eq[i]; 5582 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, 5583 be_iopoll); 5584 blk_iopoll_enable(&pbe_eq->iopoll); 5585 } 5586 5587 i = (phba->msix_enabled) ? i : 0; 5588 /* Work item for MCC handling */ 5589 pbe_eq = &phwi_context->be_eq[i]; 5590 INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs); 5591 5592 ret = beiscsi_init_irqs(phba); 5593 if (ret < 0) { 5594 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5595 "BM_%d : beiscsi_eeh_resume - " 5596 "Failed to beiscsi_init_irqs\n"); 5597 goto ret_err; 5598 } 5599 5600 hwi_enable_intr(phba); 5601 phba->state &= ~BE_ADAPTER_PCI_ERR; 5602 5603 return; 5604 ret_err: 5605 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5606 "BM_%d : AER EEH Resume Failed\n"); 5607 } 5608 5609 static int beiscsi_dev_probe(struct pci_dev *pcidev, 5610 const struct pci_device_id *id) 5611 { 5612 struct beiscsi_hba *phba = NULL; 5613 struct hwi_controller *phwi_ctrlr; 5614 struct hwi_context_memory *phwi_context; 5615 struct be_eq_obj *pbe_eq; 5616 int ret = 0, i; 5617 5618 ret = beiscsi_enable_pci(pcidev); 5619 if (ret < 0) { 5620 dev_err(&pcidev->dev, 5621 "beiscsi_dev_probe - Failed to enable pci device\n"); 5622 return ret; 5623 } 5624 5625 phba = beiscsi_hba_alloc(pcidev); 5626 if (!phba) { 5627 dev_err(&pcidev->dev, 5628 "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n"); 5629 goto disable_pci; 5630 } 5631 5632 /* Enable EEH reporting */ 5633 ret = 
pci_enable_pcie_error_reporting(pcidev); 5634 if (ret) 5635 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5636 "BM_%d : PCIe Error Reporting " 5637 "Enabling Failed\n"); 5638 5639 pci_save_state(pcidev); 5640 5641 /* Initialize Driver configuration Paramters */ 5642 beiscsi_hba_attrs_init(phba); 5643 5644 phba->fw_timeout = false; 5645 phba->mac_addr_set = false; 5646 5647 5648 switch (pcidev->device) { 5649 case BE_DEVICE_ID1: 5650 case OC_DEVICE_ID1: 5651 case OC_DEVICE_ID2: 5652 phba->generation = BE_GEN2; 5653 phba->iotask_fn = beiscsi_iotask; 5654 break; 5655 case BE_DEVICE_ID2: 5656 case OC_DEVICE_ID3: 5657 phba->generation = BE_GEN3; 5658 phba->iotask_fn = beiscsi_iotask; 5659 break; 5660 case OC_SKH_ID1: 5661 phba->generation = BE_GEN4; 5662 phba->iotask_fn = beiscsi_iotask_v2; 5663 break; 5664 default: 5665 phba->generation = 0; 5666 } 5667 5668 ret = be_ctrl_init(phba, pcidev); 5669 if (ret) { 5670 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5671 "BM_%d : beiscsi_dev_probe-" 5672 "Failed in be_ctrl_init\n"); 5673 goto hba_free; 5674 } 5675 5676 ret = beiscsi_cmd_reset_function(phba); 5677 if (ret) { 5678 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5679 "BM_%d : Reset Failed\n"); 5680 goto hba_free; 5681 } 5682 ret = be_chk_reset_complete(phba); 5683 if (ret) { 5684 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5685 "BM_%d : Failed to get out of reset.\n"); 5686 goto hba_free; 5687 } 5688 5689 spin_lock_init(&phba->io_sgl_lock); 5690 spin_lock_init(&phba->mgmt_sgl_lock); 5691 spin_lock_init(&phba->isr_lock); 5692 spin_lock_init(&phba->async_pdu_lock); 5693 ret = mgmt_get_fw_config(&phba->ctrl, phba); 5694 if (ret != 0) { 5695 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5696 "BM_%d : Error getting fw config\n"); 5697 goto free_port; 5698 } 5699 5700 if (enable_msix) 5701 find_num_cpus(phba); 5702 else 5703 phba->num_cpus = 1; 5704 5705 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 5706 "BM_%d : num_cpus = %d\n", 5707 phba->num_cpus); 5708 5709 if 
(enable_msix) { 5710 beiscsi_msix_enable(phba); 5711 if (!phba->msix_enabled) 5712 phba->num_cpus = 1; 5713 } 5714 5715 phba->shost->max_id = phba->params.cxns_per_ctrl; 5716 beiscsi_get_params(phba); 5717 phba->shost->can_queue = phba->params.ios_per_ctrl; 5718 ret = beiscsi_init_port(phba); 5719 if (ret < 0) { 5720 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5721 "BM_%d : beiscsi_dev_probe-" 5722 "Failed in beiscsi_init_port\n"); 5723 goto free_port; 5724 } 5725 5726 for (i = 0; i < MAX_MCC_CMD; i++) { 5727 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); 5728 phba->ctrl.mcc_tag[i] = i + 1; 5729 phba->ctrl.mcc_numtag[i + 1] = 0; 5730 phba->ctrl.mcc_tag_available++; 5731 memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0, 5732 sizeof(struct be_dma_mem)); 5733 } 5734 5735 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; 5736 5737 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq", 5738 phba->shost->host_no); 5739 phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, phba->wq_name); 5740 if (!phba->wq) { 5741 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5742 "BM_%d : beiscsi_dev_probe-" 5743 "Failed to allocate work queue\n"); 5744 goto free_twq; 5745 } 5746 5747 INIT_DELAYED_WORK(&phba->beiscsi_hw_check_task, 5748 beiscsi_hw_health_check); 5749 5750 phwi_ctrlr = phba->phwi_ctrlr; 5751 phwi_context = phwi_ctrlr->phwi_ctxt; 5752 5753 for (i = 0; i < phba->num_cpus; i++) { 5754 pbe_eq = &phwi_context->be_eq[i]; 5755 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, 5756 be_iopoll); 5757 blk_iopoll_enable(&pbe_eq->iopoll); 5758 } 5759 5760 i = (phba->msix_enabled) ? 
i : 0; 5761 /* Work item for MCC handling */ 5762 pbe_eq = &phwi_context->be_eq[i]; 5763 INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs); 5764 5765 ret = beiscsi_init_irqs(phba); 5766 if (ret < 0) { 5767 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5768 "BM_%d : beiscsi_dev_probe-" 5769 "Failed to beiscsi_init_irqs\n"); 5770 goto free_blkenbld; 5771 } 5772 hwi_enable_intr(phba); 5773 5774 if (iscsi_host_add(phba->shost, &phba->pcidev->dev)) 5775 goto free_blkenbld; 5776 5777 if (beiscsi_setup_boot_info(phba)) 5778 /* 5779 * log error but continue, because we may not be using 5780 * iscsi boot. 5781 */ 5782 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5783 "BM_%d : Could not set up " 5784 "iSCSI boot info.\n"); 5785 5786 beiscsi_create_def_ifaces(phba); 5787 schedule_delayed_work(&phba->beiscsi_hw_check_task, 5788 msecs_to_jiffies(1000)); 5789 5790 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 5791 "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n"); 5792 return 0; 5793 5794 free_blkenbld: 5795 destroy_workqueue(phba->wq); 5796 for (i = 0; i < phba->num_cpus; i++) { 5797 pbe_eq = &phwi_context->be_eq[i]; 5798 blk_iopoll_disable(&pbe_eq->iopoll); 5799 } 5800 free_twq: 5801 beiscsi_clean_port(phba); 5802 beiscsi_free_mem(phba); 5803 free_port: 5804 pci_free_consistent(phba->pcidev, 5805 phba->ctrl.mbox_mem_alloced.size, 5806 phba->ctrl.mbox_mem_alloced.va, 5807 phba->ctrl.mbox_mem_alloced.dma); 5808 beiscsi_unmap_pci_function(phba); 5809 hba_free: 5810 if (phba->msix_enabled) 5811 pci_disable_msix(phba->pcidev); 5812 pci_dev_put(phba->pcidev); 5813 iscsi_host_free(phba->shost); 5814 pci_set_drvdata(pcidev, NULL); 5815 disable_pci: 5816 pci_release_regions(pcidev); 5817 pci_disable_device(pcidev); 5818 return ret; 5819 } 5820 5821 static struct pci_error_handlers beiscsi_eeh_handlers = { 5822 .error_detected = beiscsi_eeh_err_detected, 5823 .slot_reset = beiscsi_eeh_reset, 5824 .resume = beiscsi_eeh_resume, 5825 }; 5826 5827 struct iscsi_transport 
beiscsi_iscsi_transport = { 5828 .owner = THIS_MODULE, 5829 .name = DRV_NAME, 5830 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO | 5831 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD, 5832 .create_session = beiscsi_session_create, 5833 .destroy_session = beiscsi_session_destroy, 5834 .create_conn = beiscsi_conn_create, 5835 .bind_conn = beiscsi_conn_bind, 5836 .destroy_conn = iscsi_conn_teardown, 5837 .attr_is_visible = be2iscsi_attr_is_visible, 5838 .set_iface_param = be2iscsi_iface_set_param, 5839 .get_iface_param = be2iscsi_iface_get_param, 5840 .set_param = beiscsi_set_param, 5841 .get_conn_param = iscsi_conn_get_param, 5842 .get_session_param = iscsi_session_get_param, 5843 .get_host_param = beiscsi_get_host_param, 5844 .start_conn = beiscsi_conn_start, 5845 .stop_conn = iscsi_conn_stop, 5846 .send_pdu = iscsi_conn_send_pdu, 5847 .xmit_task = beiscsi_task_xmit, 5848 .cleanup_task = beiscsi_cleanup_task, 5849 .alloc_pdu = beiscsi_alloc_pdu, 5850 .parse_pdu_itt = beiscsi_parse_pdu, 5851 .get_stats = beiscsi_conn_get_stats, 5852 .get_ep_param = beiscsi_ep_get_param, 5853 .ep_connect = beiscsi_ep_connect, 5854 .ep_poll = beiscsi_ep_poll, 5855 .ep_disconnect = beiscsi_ep_disconnect, 5856 .session_recovery_timedout = iscsi_session_recovery_timedout, 5857 .bsg_request = beiscsi_bsg_request, 5858 }; 5859 5860 static struct pci_driver beiscsi_pci_driver = { 5861 .name = DRV_NAME, 5862 .probe = beiscsi_dev_probe, 5863 .remove = beiscsi_remove, 5864 .shutdown = beiscsi_shutdown, 5865 .id_table = beiscsi_pci_id_table, 5866 .err_handler = &beiscsi_eeh_handlers 5867 }; 5868 5869 5870 static int __init beiscsi_module_init(void) 5871 { 5872 int ret; 5873 5874 beiscsi_scsi_transport = 5875 iscsi_register_transport(&beiscsi_iscsi_transport); 5876 if (!beiscsi_scsi_transport) { 5877 printk(KERN_ERR 5878 "beiscsi_module_init - Unable to register beiscsi transport.\n"); 5879 return -ENOMEM; 5880 } 5881 printk(KERN_INFO "In beiscsi_module_init, tt=%p\n", 5882 
&beiscsi_iscsi_transport); 5883 5884 ret = pci_register_driver(&beiscsi_pci_driver); 5885 if (ret) { 5886 printk(KERN_ERR 5887 "beiscsi_module_init - Unable to register beiscsi pci driver.\n"); 5888 goto unregister_iscsi_transport; 5889 } 5890 return 0; 5891 5892 unregister_iscsi_transport: 5893 iscsi_unregister_transport(&beiscsi_iscsi_transport); 5894 return ret; 5895 } 5896 5897 static void __exit beiscsi_module_exit(void) 5898 { 5899 pci_unregister_driver(&beiscsi_pci_driver); 5900 iscsi_unregister_transport(&beiscsi_iscsi_transport); 5901 } 5902 5903 module_init(beiscsi_module_init); 5904 module_exit(beiscsi_module_exit); 5905