/*
 *  linux/drivers/scsi/esas2r/esas2r_int.c
 *      esas2r interrupt handling
 *
 *  Copyright (c) 2001-2013 ATTO Technology, Inc.
 *  (mailto:linuxdrivers@attotech.com)
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  NO WARRANTY
 *  THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 *  CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 *  LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 *  MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.  Each Recipient is
 *  solely responsible for determining the appropriateness of using and
 *  distributing the Program and assumes all risks associated with its
 *  exercise of rights under this Agreement, including but not limited to
 *  the risks and costs of program errors, damage to or loss of data,
 *  programs or equipment, and unavailability or interruption of operations.
 *
 *  DISCLAIMER OF LIABILITY
 *  NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 *  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 *  DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 *  USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 *  HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

#include "esas2r.h"

/* Local function prototypes */
static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell);
static void esas2r_get_outbound_responses(struct esas2r_adapter *a);
static void esas2r_process_bus_reset(struct esas2r_adapter *a);

/*
 * Poll the adapter for interrupts and service them.
 * This function handles both legacy interrupts and MSI.
 */
void esas2r_polled_interrupt(struct esas2r_adapter *a)
{
	u32 intstat;
	u32 doorbell;

	esas2r_disable_chip_interrupts(a);

	intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);

	if (intstat & MU_INTSTAT_POST_OUT) {
		/* clear the interrupt */

		esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
					    MU_OLIS_INT);
		esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);

		esas2r_get_outbound_responses(a);
	}

	if (intstat & MU_INTSTAT_DRBL) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell != 0)
			esas2r_doorbell_interrupt(a, doorbell);
	}

	esas2r_enable_chip_interrupts(a);

	if (atomic_read(&a->disable_cnt) == 0)
		esas2r_do_deferred_processes(a);
}
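
/*
 * Illustrative note (the registration itself lives in the driver's setup
 * code, not in this file): a legacy handler like esas2r_interrupt() below
 * is typically hooked up with request_irq(irq, esas2r_interrupt,
 * IRQF_SHARED, name, a) so the line can be shared, while
 * esas2r_msi_interrupt() is registered without IRQF_SHARED once MSI has
 * been enabled (e.g. via pci_enable_msi()).
 */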
/*
 * Legacy and MSI interrupt handlers.  Note that the legacy interrupt
 * handler schedules a tasklet to process events, whereas the MSI handler
 * just processes interrupt events directly.
 */
irqreturn_t esas2r_interrupt(int irq, void *dev_id)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;

	if (!esas2r_adapter_interrupt_pending(a))
		return IRQ_NONE;

	set_bit(AF2_INT_PENDING, &a->flags2);
	esas2r_schedule_tasklet(a);

	return IRQ_HANDLED;
}

void esas2r_adapter_interrupt(struct esas2r_adapter *a)
{
	u32 doorbell;

	if (likely(a->int_stat & MU_INTSTAT_POST_OUT)) {
		/* clear the interrupt */
		esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
					    MU_OLIS_INT);
		esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
		esas2r_get_outbound_responses(a);
	}

	if (unlikely(a->int_stat & MU_INTSTAT_DRBL)) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell != 0)
			esas2r_doorbell_interrupt(a, doorbell);
	}

	a->int_mask = ESAS2R_INT_STS_MASK;

	esas2r_enable_chip_interrupts(a);

	if (likely(atomic_read(&a->disable_cnt) == 0))
		esas2r_do_deferred_processes(a);
}

irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
	u32 intstat;
	u32 doorbell;

	intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);

	if (likely(intstat & MU_INTSTAT_POST_OUT)) {
		/* clear the interrupt */

		esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
					    MU_OLIS_INT);
		esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);

		esas2r_get_outbound_responses(a);
	}

	if (unlikely(intstat & MU_INTSTAT_DRBL)) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell != 0)
			esas2r_doorbell_interrupt(a, doorbell);
	}

	/*
	 * Work around a chip bug and force a new MSI to be sent if one is
	 * still pending.
	 */
	esas2r_disable_chip_interrupts(a);
	esas2r_enable_chip_interrupts(a);

	if (likely(atomic_read(&a->disable_cnt) == 0))
		esas2r_do_deferred_processes(a);

	esas2r_do_tasklet_tasks(a);

	return IRQ_HANDLED;
}

static void esas2r_handle_outbound_rsp_err(struct esas2r_adapter *a,
					   struct esas2r_request *rq,
					   struct atto_vda_ob_rsp *rsp)
{
	/*
	 * For I/O requests, only copy the response if an error
	 * occurred and set up a callback to do error processing.
	 */
	if (unlikely(rq->req_stat != RS_SUCCESS)) {
		memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp));

		if (rq->req_stat == RS_ABORTED) {
			if (rq->timeout > RQ_MAX_TIMEOUT)
				rq->req_stat = RS_TIMEOUT;
		} else if (rq->req_stat == RS_SCSI_ERROR) {
			u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat;

			esas2r_trace("scsistatus: %x", scsistatus);

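			/*
			 * For reference, the SAM status values tested just
			 * below are GOOD (0x00), CONDITION MET (0x04),
			 * INTERMEDIATE (0x10) and INTERMEDIATE-CONDITION MET
			 * (0x14); none of them represents a failed command.
			 */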
			/* Any of these are a good result. */
			if (scsistatus == SAM_STAT_GOOD || scsistatus ==
			    SAM_STAT_CONDITION_MET || scsistatus ==
			    SAM_STAT_INTERMEDIATE || scsistatus ==
			    SAM_STAT_INTERMEDIATE_CONDITION_MET) {
				rq->req_stat = RS_SUCCESS;
				rq->func_rsp.scsi_rsp.scsi_stat =
					SAM_STAT_GOOD;
			}
		}
	}
}

static void esas2r_get_outbound_responses(struct esas2r_adapter *a)
{
	struct atto_vda_ob_rsp *rsp;
	u32 rspput_ptr;
	u32 rspget_ptr;
	struct esas2r_request *rq;
	u32 handle;
	unsigned long flags;

	LIST_HEAD(comp_list);

	esas2r_trace_enter();

	spin_lock_irqsave(&a->queue_lock, flags);

	/* Get the outbound limit and pointers */
	rspput_ptr = le32_to_cpu(*a->outbound_copy) & MU_OLC_WRT_PTR;
	rspget_ptr = a->last_read;

	esas2r_trace("rspput_ptr: %x, rspget_ptr: %x", rspput_ptr, rspget_ptr);

	/* If we don't have anything to process, get out */
	if (unlikely(rspget_ptr == rspput_ptr)) {
		spin_unlock_irqrestore(&a->queue_lock, flags);
		esas2r_trace_exit();
		return;
	}

	/* Make sure the firmware is healthy */
	if (unlikely(rspput_ptr >= a->list_size)) {
		spin_unlock_irqrestore(&a->queue_lock, flags);
		esas2r_bugon();
		esas2r_local_reset_adapter(a);
		esas2r_trace_exit();
		return;
	}

	do {
		rspget_ptr++;

		if (rspget_ptr >= a->list_size)
			rspget_ptr = 0;

		rsp = (struct atto_vda_ob_rsp *)a->outbound_list_md.virt_addr
		      + rspget_ptr;

		handle = rsp->handle;

		/* Verify the handle range */
		if (unlikely(LOWORD(handle) == 0
			     || LOWORD(handle) > num_requests +
			     num_ae_requests + 1)) {
			esas2r_bugon();
			continue;
		}

		/* Get the request for this handle */
		rq = a->req_table[LOWORD(handle)];

		if (unlikely(rq == NULL || rq->vrq->scsi.handle != handle)) {
			esas2r_bugon();
			continue;
		}

		list_del(&rq->req_list);

		/* Get the completion status */
		rq->req_stat = rsp->req_stat;

		esas2r_trace("handle: %x", handle);
		esas2r_trace("rq: %p", rq);
		esas2r_trace("req_status: %x", rq->req_stat);

		if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
			esas2r_handle_outbound_rsp_err(a, rq, rsp);
		} else {
			/*
			 * Copy the outbound completion struct for non-I/O
			 * requests.
			 */
			memcpy(&rq->func_rsp, &rsp->func_rsp,
			       sizeof(rsp->func_rsp));
		}

		/* Queue the request for completion. */
		list_add_tail(&rq->comp_list, &comp_list);

	} while (rspget_ptr != rspput_ptr);

	a->last_read = rspget_ptr;
	spin_unlock_irqrestore(&a->queue_lock, flags);

	esas2r_comp_list_drain(a, &comp_list);
	esas2r_trace_exit();
}

/*
 * Perform all deferred processes for the adapter.  Deferred
 * processes can only be done while the current interrupt
 * disable_cnt for the adapter is zero.
 */
void esas2r_do_deferred_processes(struct esas2r_adapter *a)
{
	int startreqs = 2;
	struct esas2r_request *rq;
	unsigned long flags;

	/*
	 * startreqs is used to control starting requests
	 * that are on the deferred queue
	 *  = 0 - do not start any requests
	 *  = 1 - can start discovery requests
	 *  = 2 - can start any request
	 */
	if (test_bit(AF_CHPRST_PENDING, &a->flags) ||
	    test_bit(AF_FLASHING, &a->flags))
		startreqs = 0;
	else if (test_bit(AF_DISC_PENDING, &a->flags))
		startreqs = 1;

	atomic_inc(&a->disable_cnt);

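	/*
	 * Note: bumping disable_cnt here keeps anyone who checks the
	 * counter (the interrupt paths above, nested completion paths)
	 * from re-entering deferred processing while we walk the defer
	 * queue; it is dropped again at the bottom of this function.
	 */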
	/* Clear off the completed list to be processed later. */

	if (esas2r_is_tasklet_pending(a)) {
		esas2r_schedule_tasklet(a);

		startreqs = 0;
	}

	/*
	 * If we can start requests then traverse the defer queue
	 * looking for requests to start or complete
	 */
	if (startreqs && !list_empty(&a->defer_list)) {
		LIST_HEAD(comp_list);
		struct list_head *element, *next;

		spin_lock_irqsave(&a->queue_lock, flags);

		list_for_each_safe(element, next, &a->defer_list) {
			rq = list_entry(element, struct esas2r_request,
					req_list);

			if (rq->req_stat != RS_PENDING) {
				list_del(element);
				list_add_tail(&rq->comp_list, &comp_list);
			}
			/*
			 * Process discovery and OS requests separately.  We
			 * can't hold up discovery requests when discovery is
			 * pending.  In general, there may be different sets of
			 * conditions for starting different types of requests.
			 */
			else if (rq->req_type == RT_DISC_REQ) {
				list_del(element);
				esas2r_disc_local_start_request(a, rq);
			} else if (startreqs == 2) {
				list_del(element);
				esas2r_local_start_request(a, rq);

				/*
				 * Flashing could have been set by last local
				 * start
				 */
				if (test_bit(AF_FLASHING, &a->flags))
					break;
			}
		}

		spin_unlock_irqrestore(&a->queue_lock, flags);
		esas2r_comp_list_drain(a, &comp_list);
	}

	atomic_dec(&a->disable_cnt);
}

/*
 * Process an adapter reset (or one that is about to happen)
 * by making sure all outstanding requests are completed that
 * haven't been already.
 */
void esas2r_process_adapter_reset(struct esas2r_adapter *a)
{
	struct esas2r_request *rq = &a->general_req;
	unsigned long flags;
	struct esas2r_disc_context *dc;

	LIST_HEAD(comp_list);
	struct list_head *element;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->queue_lock, flags);

	/* abort the active discovery, if any. */

	if (rq->interrupt_cx) {
		dc = (struct esas2r_disc_context *)rq->interrupt_cx;

		dc->disc_evt = 0;

		clear_bit(AF_DISC_IN_PROG, &a->flags);
	}

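	/*
	 * Here rq is the adapter's dedicated general_req, which discovery
	 * runs on; its interrupt_cx holds the esas2r_disc_context while a
	 * scan is in progress, and both callbacks are detached just below.
	 */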
	/*
	 * Just clear the interrupt callback for now.  It will be dequeued if
	 * and when we find it on the active queue and we don't want the
	 * callback called.  Also set the dummy completion callback in case we
	 * were doing an I/O request.
	 */
	rq->interrupt_cx = NULL;
	rq->interrupt_cb = NULL;

	rq->comp_cb = esas2r_dummy_complete;

	/* Reset the read and write pointers */
	*a->outbound_copy =
		a->last_write =
			a->last_read = a->list_size - 1;

	set_bit(AF_COMM_LIST_TOGGLE, &a->flags);

	/* Kill all the started requests still on the defer list */
	list_for_each(element, &a->defer_list) {
		rq = list_entry(element, struct esas2r_request, req_list);

		if (rq->req_stat == RS_STARTED)
			if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
				list_add_tail(&rq->comp_list, &comp_list);
	}

	spin_unlock_irqrestore(&a->queue_lock, flags);
	esas2r_comp_list_drain(a, &comp_list);
	esas2r_process_bus_reset(a);
	esas2r_trace_exit();
}

static void esas2r_process_bus_reset(struct esas2r_adapter *a)
{
	struct esas2r_request *rq;
	struct list_head *element;
	unsigned long flags;

	LIST_HEAD(comp_list);

	esas2r_trace_enter();

	esas2r_hdebug("reset detected");

	spin_lock_irqsave(&a->queue_lock, flags);

	/* kill all the requests on the deferred queue */
	list_for_each(element, &a->defer_list) {
		rq = list_entry(element, struct esas2r_request, req_list);
		if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
			list_add_tail(&rq->comp_list, &comp_list);
	}

	spin_unlock_irqrestore(&a->queue_lock, flags);

	esas2r_comp_list_drain(a, &comp_list);

	if (atomic_read(&a->disable_cnt) == 0)
		esas2r_do_deferred_processes(a);

	clear_bit(AF_OS_RESET, &a->flags);

	esas2r_trace_exit();
}

static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
{
	clear_bit(AF_CHPRST_NEEDED, &a->flags);
	clear_bit(AF_BUSRST_NEEDED, &a->flags);
	clear_bit(AF_BUSRST_DETECTED, &a->flags);
	clear_bit(AF_BUSRST_PENDING, &a->flags);

	/*
	 * Make sure we don't attempt more than 3 resets
	 * when the uptime between resets does not exceed one
	 * minute.  This catches any situation where there is
	 * really something wrong with the hardware.  The way
	 * this works is that we start with uptime ticks at 0.
	 * Each time we do a reset, we add 20 seconds worth to
	 * the count.  Each time a timer tick occurs, as long
	 * as a chip reset is not pending, we decrement the
	 * tick count.  If the uptime ticks ever get to 60
	 * seconds worth, we disable the adapter from that
	 * point forward.  Three strikes, you're out.
	 */
	if (!esas2r_is_adapter_present(a) || (a->chip_uptime >=
					      ESAS2R_CHP_UPTIME_MAX)) {
		esas2r_hdebug("*** adapter disabled ***");

		/*
		 * OK, some kind of hard failure.  Make sure we
		 * exit this loop with chip interrupts
		 * permanently disabled so we don't lock up the
		 * entire system.  Also flag degraded mode to
		 * prevent the heartbeat from trying to recover.
		 */
		set_bit(AF_DEGRADED_MODE, &a->flags);
		set_bit(AF_DISABLED, &a->flags);
		clear_bit(AF_CHPRST_PENDING, &a->flags);
		clear_bit(AF_DISC_PENDING, &a->flags);

		esas2r_disable_chip_interrupts(a);
		a->int_mask = 0;
		esas2r_process_adapter_reset(a);

		esas2r_log(ESAS2R_LOG_CRIT,
			   "Adapter disabled because of hardware failure");
	} else {
		bool alrdyrst = test_and_set_bit(AF_CHPRST_STARTED, &a->flags);

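		/*
		 * test_and_set_bit() atomically sets AF_CHPRST_STARTED and
		 * returns its previous value, so alrdyrst is true only when
		 * an earlier pass already started this chip reset.
		 */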
		if (!alrdyrst)
			/*
			 * Only disable interrupts if this is
			 * the first reset attempt.
			 */
			esas2r_disable_chip_interrupts(a);

		if ((test_bit(AF_POWER_MGT, &a->flags)) &&
		    !test_bit(AF_FIRST_INIT, &a->flags) && !alrdyrst) {
			/*
			 * Don't reset the chip on the first
			 * deferred power up attempt.
			 */
		} else {
			esas2r_hdebug("*** resetting chip ***");
			esas2r_reset_chip(a);
		}

		/* Kick off the reinitialization */
		a->chip_uptime += ESAS2R_CHP_UPTIME_CNT;
		a->chip_init_time = jiffies_to_msecs(jiffies);
		if (!test_bit(AF_POWER_MGT, &a->flags)) {
			esas2r_process_adapter_reset(a);

			if (!alrdyrst) {
				/* Remove devices now that I/O is cleaned up. */
				a->prev_dev_cnt =
					esas2r_targ_db_get_tgt_cnt(a);
				esas2r_targ_db_remove_all(a, false);
			}
		}

		a->int_mask = 0;
	}
}

static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
{
	while (test_bit(AF_CHPRST_DETECTED, &a->flags)) {
		/*
		 * Balance the enable in esas2r_init_adapter_hw();
		 * esas2r_power_down() already took care of it for power
		 * management.
		 */
		if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
		    !test_bit(AF_POWER_MGT, &a->flags))
			esas2r_disable_chip_interrupts(a);

		/* Reinitialize the chip. */
		esas2r_check_adapter(a);
		esas2r_init_adapter_hw(a, 0);

		if (test_bit(AF_CHPRST_NEEDED, &a->flags))
			break;

		if (test_bit(AF_POWER_MGT, &a->flags)) {
			/* Recovery from power management. */
			if (test_bit(AF_FIRST_INIT, &a->flags)) {
				/* Chip reset during normal power up */
				esas2r_log(ESAS2R_LOG_CRIT,
					   "The firmware was reset during a normal power-up sequence");
			} else {
				/* Deferred power up complete. */
				clear_bit(AF_POWER_MGT, &a->flags);
				esas2r_send_reset_ae(a, true);
			}
		} else {
			/* Recovery from online chip reset. */
			if (test_bit(AF_FIRST_INIT, &a->flags)) {
				/* Chip reset during driver load */
			} else {
				/* Chip reset after driver load */
				esas2r_send_reset_ae(a, false);
			}

			esas2r_log(ESAS2R_LOG_CRIT,
				   "Recovering from a chip reset while the chip was online");
		}

		clear_bit(AF_CHPRST_STARTED, &a->flags);
		esas2r_enable_chip_interrupts(a);

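		/*
		 * The enclosing while loop re-tests AF_CHPRST_DETECTED after
		 * the flag is cleared below, so a reset detected while we
		 * were reinitializing gets another pass through this body.
		 */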
		/*
		 * Clear this flag last!  This indicates that the chip has
		 * been reset already during initialization.
		 */
		clear_bit(AF_CHPRST_DETECTED, &a->flags);
	}
}

/* Perform deferred tasks when chip interrupts are disabled */
void esas2r_do_tasklet_tasks(struct esas2r_adapter *a)
{
	if (test_bit(AF_CHPRST_NEEDED, &a->flags) ||
	    test_bit(AF_CHPRST_DETECTED, &a->flags)) {
		if (test_bit(AF_CHPRST_NEEDED, &a->flags))
			esas2r_chip_rst_needed_during_tasklet(a);

		esas2r_handle_chip_rst_during_tasklet(a);
	}

	if (test_bit(AF_BUSRST_NEEDED, &a->flags)) {
		esas2r_hdebug("hard resetting bus");

		clear_bit(AF_BUSRST_NEEDED, &a->flags);

		if (test_bit(AF_FLASHING, &a->flags))
			set_bit(AF_BUSRST_DETECTED, &a->flags);
		else
			esas2r_write_register_dword(a, MU_DOORBELL_IN,
						    DRBL_RESET_BUS);
	}

	if (test_bit(AF_BUSRST_DETECTED, &a->flags)) {
		esas2r_process_bus_reset(a);

		esas2r_log_dev(ESAS2R_LOG_WARN,
			       &(a->host->shost_gendev),
			       "scsi_report_bus_reset() called");

		scsi_report_bus_reset(a->host, 0);

		clear_bit(AF_BUSRST_DETECTED, &a->flags);
		clear_bit(AF_BUSRST_PENDING, &a->flags);

		esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete");
	}

	if (test_bit(AF_PORT_CHANGE, &a->flags)) {
		clear_bit(AF_PORT_CHANGE, &a->flags);

		esas2r_targ_db_report_changes(a);
	}

	if (atomic_read(&a->disable_cnt) == 0)
		esas2r_do_deferred_processes(a);
}

static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell)
{
	if (!(doorbell & DRBL_FORCE_INT)) {
		esas2r_trace_enter();
		esas2r_trace("doorbell: %x", doorbell);
	}

	/* First clear the doorbell bits */
	esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell);

	if (doorbell & DRBL_RESET_BUS)
		set_bit(AF_BUSRST_DETECTED, &a->flags);

	if (doorbell & DRBL_FORCE_INT)
		clear_bit(AF_HEARTBEAT, &a->flags);

	if (doorbell & DRBL_PANIC_REASON_MASK) {
		esas2r_hdebug("*** Firmware Panic ***");
		esas2r_log(ESAS2R_LOG_CRIT, "The firmware has panicked");
	}

	if (doorbell & DRBL_FW_RESET) {
		set_bit(AF2_COREDUMP_AVAIL, &a->flags2);
		esas2r_local_reset_adapter(a);
	}

	if (!(doorbell & DRBL_FORCE_INT))
		esas2r_trace_exit();
}

void esas2r_force_interrupt(struct esas2r_adapter *a)
{
	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_FORCE_INT |
				    DRBL_DRV_VER);
}

static void esas2r_lun_event(struct esas2r_adapter *a, union atto_vda_ae *ae,
			     u16 target, u32 length)
{
	struct esas2r_target *t = a->targetdb + target;
	u32 cplen = length;
	unsigned long flags;

	if (cplen > sizeof(t->lu_event))
		cplen = sizeof(t->lu_event);

	esas2r_trace("ae->lu.dwevent: %x", ae->lu.dwevent);
	esas2r_trace("ae->lu.bystate: %x", ae->lu.bystate);

	spin_lock_irqsave(&a->mem_lock, flags);

	t->new_target_state = TS_INVALID;

	if (ae->lu.dwevent & VDAAE_LU_LOST) {
		t->new_target_state = TS_NOT_PRESENT;
	} else {
		switch (ae->lu.bystate) {
		case VDAAE_LU_NOT_PRESENT:
		case VDAAE_LU_OFFLINE:
		case VDAAE_LU_DELETED:
		case VDAAE_LU_FACTORY_DISABLED:
			t->new_target_state = TS_NOT_PRESENT;
			break;

		case VDAAE_LU_ONLINE:
		case VDAAE_LU_DEGRADED:
			t->new_target_state = TS_PRESENT;
			break;
		}
	}

	if (t->new_target_state != TS_INVALID) {
		memcpy(&t->lu_event, &ae->lu, cplen);

		esas2r_disc_queue_event(a, DCDE_DEV_CHANGE);
	}

	spin_unlock_irqrestore(&a->mem_lock, flags);
}

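/*
 * Completion callback for asynchronous event (AE) requests.  The firmware
 * packs one or more variable-length events into the response buffer; each
 * event begins with an atto_vda_ae_hdr whose bylength field gives the size
 * of that event, so the loop below walks the buffer header by header until
 * it reaches the end of the reported data.
 */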
void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
{
	union atto_vda_ae *ae =
		(union atto_vda_ae *)rq->vda_rsp_data->ae_data.event_data;
	u32 length = le32_to_cpu(rq->func_rsp.ae_rsp.length);
	union atto_vda_ae *last =
		(union atto_vda_ae *)(rq->vda_rsp_data->ae_data.event_data
				      + length);

	esas2r_trace_enter();
	esas2r_trace("length: %d", length);

	if (length > sizeof(struct atto_vda_ae_data)
	    || (length & 3) != 0
	    || length == 0) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "The AE request response length (%p) is too long: %d",
			   rq, length);

		esas2r_hdebug("aereq->length (0x%x) too long", length);
		esas2r_bugon();

		last = ae;
	}

	while (ae < last) {
		u16 target;

		esas2r_trace("ae: %p", ae);
		esas2r_trace("ae->hdr: %p", &(ae->hdr));

		length = ae->hdr.bylength;

		if (length > (u32)((u8 *)last - (u8 *)ae)
		    || (length & 3) != 0
		    || length == 0) {
			esas2r_log(ESAS2R_LOG_CRIT,
				   "the async event length is invalid (%p): %d",
				   ae, length);

			esas2r_hdebug("ae->hdr.length (0x%x) invalid", length);
			esas2r_bugon();

			break;
		}

		esas2r_nuxi_ae_data(ae);

		esas2r_queue_fw_event(a, fw_event_vda_ae, ae,
				      sizeof(union atto_vda_ae));

		switch (ae->hdr.bytype) {
		case VDAAE_HDR_TYPE_RAID:
			if (ae->raid.dwflags & (VDAAE_GROUP_STATE
						| VDAAE_RBLD_STATE
						| VDAAE_MEMBER_CHG
						| VDAAE_PART_CHG)) {
				esas2r_log(ESAS2R_LOG_INFO,
					   "RAID event received - name:%s rebuild_state:%d group_state:%d",
					   ae->raid.acname,
					   ae->raid.byrebuild_state,
					   ae->raid.bygroup_state);
			}

			break;

		case VDAAE_HDR_TYPE_LU:
			esas2r_log(ESAS2R_LOG_INFO,
				   "LUN event received: event:%d target_id:%d LUN:%d state:%d",
				   ae->lu.dwevent,
				   ae->lu.id.tgtlun.wtarget_id,
				   ae->lu.id.tgtlun.bylun,
				   ae->lu.bystate);

			target = ae->lu.id.tgtlun.wtarget_id;

			if (target < ESAS2R_MAX_TARGETS)
				esas2r_lun_event(a, ae, target, length);

			break;

		case VDAAE_HDR_TYPE_DISK:
			esas2r_log(ESAS2R_LOG_INFO, "Disk event received");
			break;

		default:
			/*
			 * Silently ignore the rest and let the apps deal with
			 * them.
			 */
			break;
		}

		ae = (union atto_vda_ae *)((u8 *)ae + length);
	}

	/* Now requeue it. */
	esas2r_start_ae_request(a, rq);
	esas2r_trace_exit();
}

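/*
 * Note: the reset/power-management AE below is fabricated locally by the
 * driver and pushed through the same esas2r_queue_fw_event() path used for
 * the firmware-generated events above.
 */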
/* Send an asynchronous event for a chip reset or power management. */
void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt)
{
	struct atto_vda_ae_hdr ae;

	if (pwr_mgt)
		ae.bytype = VDAAE_HDR_TYPE_PWRMGT;
	else
		ae.bytype = VDAAE_HDR_TYPE_RESET;

	ae.byversion = VDAAE_HDR_VER_0;
	ae.byflags = 0;
	ae.bylength = (u8)sizeof(struct atto_vda_ae_hdr);

	if (pwr_mgt)
		esas2r_hdebug("*** sending power management AE ***");
	else
		esas2r_hdebug("*** sending reset AE ***");

	esas2r_queue_fw_event(a, fw_event_vda_ae, &ae,
			      sizeof(union atto_vda_ae));
}

void esas2r_dummy_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
{}

static void esas2r_check_req_rsp_sense(struct esas2r_adapter *a,
				       struct esas2r_request *rq)
{
	u8 snslen, snslen2;

	snslen = snslen2 = rq->func_rsp.scsi_rsp.sense_len;

	if (snslen > rq->sense_len)
		snslen = rq->sense_len;

	if (snslen) {
		if (rq->sense_buf)
			memcpy(rq->sense_buf, rq->data_buf, snslen);
		else
			rq->sense_buf = (u8 *)rq->data_buf;

		/* Check for additional sense data (ASC/ASCQ) */
		if (snslen2 > 0x0c) {
			u8 *s = (u8 *)rq->data_buf;

			esas2r_trace_enter();

			/* REPORTED LUNS DATA HAS CHANGED (3Fh/0Eh) */
			if (s[0x0c] == 0x3f && s[0x0d] == 0x0E) {
				esas2r_trace("rq->target_id: %d",
					     rq->target_id);
				esas2r_target_state_changed(a, rq->target_id,
							    TS_LUN_CHANGE);
			}

			esas2r_trace("add_sense_key=%x", s[0x0c]);
			esas2r_trace("add_sense_qual=%x", s[0x0d]);
			esas2r_trace_exit();
		}
	}

	rq->sense_len = snslen;
}

void esas2r_complete_request(struct esas2r_adapter *a,
			     struct esas2r_request *rq)
{
	if (rq->vrq->scsi.function == VDA_FUNC_FLASH
	    && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
		clear_bit(AF_FLASHING, &a->flags);

	/* See if we set up a callback to do special processing */

	if (rq->interrupt_cb) {
		(*rq->interrupt_cb)(a, rq);

		if (rq->req_stat == RS_PENDING) {
			esas2r_start_request(a, rq);
			return;
		}
	}

	if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)
	    && unlikely(rq->req_stat != RS_SUCCESS)) {
		esas2r_check_req_rsp_sense(a, rq);
		esas2r_log_request_failure(a, rq);
	}

	(*rq->comp_cb)(a, rq);
}
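
/*
 * A sketch of the completion contract implemented above (the request
 * setup itself lives elsewhere in the driver): a caller fills out an
 * esas2r_request, points rq->comp_cb at its completion routine and
 * optionally rq->interrupt_cb at a special-processing hook, then starts
 * the request.  Once the response is reaped by
 * esas2r_get_outbound_responses() and drained, esas2r_complete_request()
 * runs the interrupt callback first (which may restart the request by
 * leaving req_stat at RS_PENDING) and only then the final comp_cb.
 */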