/*
 * Adaptec AAC series RAID controller driver
 * (c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  src.c
 *
 * Abstract: Hardware Device Interface for PMC SRC based controllers
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"

static int aac_src_get_sync_status(struct aac_dev *dev);

static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
{
	struct aac_msix_ctx *ctx;
	struct aac_dev *dev;
	unsigned long bellbits, bellbits_shifted;
	int vector_no;
	int isFastResponse, mode;
	u32 index, handle;

	ctx = (struct aac_msix_ctx *)dev_id;
	dev = ctx->dev;
	vector_no = ctx->vector_no;

	if (dev->msi_enabled) {
		mode = AAC_INT_MODE_MSI;
		if (vector_no == 0) {
			bellbits = src_readl(dev, MUnit.ODR_MSI);
			if (bellbits & 0x40000)
				mode |= AAC_INT_MODE_AIF;
			if (bellbits & 0x1000)
				mode |= AAC_INT_MODE_SYNC;
		}
	} else {
		mode = AAC_INT_MODE_INTX;
		bellbits = src_readl(dev, MUnit.ODR_R);
		if (bellbits & PmDoorBellResponseSent) {
			bellbits = PmDoorBellResponseSent;
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);
		} else {
			bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);

			if (bellbits_shifted & DoorBellAifPending)
				mode |= AAC_INT_MODE_AIF;
			else if (bellbits_shifted & OUTBOUNDDOORBELL_0)
				mode |= AAC_INT_MODE_SYNC;
		}
	}

	if (mode & AAC_INT_MODE_SYNC) {
		unsigned long sflags;
		struct list_head *entry;
		int send_it = 0;
		extern int aac_sync_mode;

		if (!aac_sync_mode && !dev->msi_enabled) {
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);
		}
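		/*
		 * Complete the currently outstanding sync FIB and, if more
		 * sync FIBs are queued, dequeue and send the next one.
		 */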
		if (dev->sync_fib) {
			if (dev->sync_fib->callback)
				dev->sync_fib->callback(dev->sync_fib->callback_data,
					dev->sync_fib);
			spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
			if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
				dev->management_fib_count--;
				complete(&dev->sync_fib->event_wait);
			}
			spin_unlock_irqrestore(&dev->sync_fib->event_lock,
						sflags);
			spin_lock_irqsave(&dev->sync_lock, sflags);
			if (!list_empty(&dev->sync_fib_list)) {
				entry = dev->sync_fib_list.next;
				dev->sync_fib = list_entry(entry,
							   struct fib,
							   fiblink);
				list_del(entry);
				send_it = 1;
			} else {
				dev->sync_fib = NULL;
			}
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
			if (send_it) {
				aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
					(u32)dev->sync_fib->hw_fib_pa,
					0, 0, 0, 0, 0,
					NULL, NULL, NULL, NULL, NULL);
			}
		}
		if (!dev->msi_enabled)
			mode = 0;

	}

	if (mode & AAC_INT_MODE_AIF) {
		/* handle AIF */
		if (dev->sa_firmware) {
			u32 events = src_readl(dev, MUnit.SCR0);

			aac_intr_normal(dev, events, 1, 0, NULL);
			writel(events, &dev->IndexRegs->Mailbox[0]);
			src_writel(dev, MUnit.IDR, 1 << 23);
		} else {
			if (dev->aif_thread && dev->fsa_dev)
				aac_intr_normal(dev, 0, 2, 0, NULL);
		}
		if (dev->msi_enabled)
			aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
		mode = 0;
	}

	if (mode) {
		index = dev->host_rrq_idx[vector_no];

		for (;;) {
			isFastResponse = 0;
			/* remove toggle bit (31) */
			handle = le32_to_cpu((dev->host_rrq[index])
				& 0x7fffffff);
			/* check fast response bits (30, 1) */
			if (handle & 0x40000000)
				isFastResponse = 1;
			handle &= 0x0000ffff;
			if (handle == 0)
				break;
			handle >>= 2;
			if (dev->msi_enabled && dev->max_msix > 1)
				atomic_dec(&dev->rrq_outstanding[vector_no]);
			aac_intr_normal(dev, handle, 0, isFastResponse, NULL);
			dev->host_rrq[index++] = 0;
			if (index == (vector_no + 1) * dev->vector_cap)
				index = vector_no * dev->vector_cap;
			dev->host_rrq_idx[vector_no] = index;
		}
		mode = 0;
	}

	return IRQ_HANDLED;
}

/**
 * aac_src_disable_interrupt - Disable interrupts
 * @dev: Adapter
 */

static void aac_src_disable_interrupt(struct aac_dev *dev)
{
	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
}

/**
 * aac_src_enable_interrupt_message - Enable interrupts
 * @dev: Adapter
 */

static void aac_src_enable_interrupt_message(struct aac_dev *dev)
{
	aac_src_access_devreg(dev, AAC_ENABLE_INTERRUPT);
}

/**
 * src_sync_cmd - send a command and wait
 * @dev: Adapter
 * @command: Command to execute
 * @p1: first parameter
 * @status: adapter status
 *
 * This routine will send a synchronous command to the adapter and wait
 * for its completion.
 */

static int src_sync_cmd(struct aac_dev *dev, u32 command,
	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
	u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4)
{
	unsigned long start;
	unsigned long delay;
	int ok;

	/*
	 * Write the command into Mailbox 0
	 */
	writel(command, &dev->IndexRegs->Mailbox[0]);
	/*
	 * Write the parameters into Mailboxes 1 - 4
	 */
	writel(p1, &dev->IndexRegs->Mailbox[1]);
	writel(p2, &dev->IndexRegs->Mailbox[2]);
	writel(p3, &dev->IndexRegs->Mailbox[3]);
	writel(p4, &dev->IndexRegs->Mailbox[4]);

	/*
	 * Clear the synch command doorbell to start on a clean slate.
	 */
	if (!dev->msi_enabled)
		src_writel(dev,
			   MUnit.ODR_C,
			   OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);

	/*
	 * Disable doorbell interrupts
	 */
	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);

	/*
	 * Force the completion of the mask register write before issuing
	 * the interrupt.
	 */
	src_readl(dev, MUnit.OIMR);

	/*
	 * Signal that there is a new synch command
	 */
	src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);

	if ((!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) &&
	    !dev->in_soft_reset) {
		ok = 0;
		start = jiffies;

		if (command == IOP_RESET_ALWAYS) {
			/* Wait up to 10 sec */
			delay = 10*HZ;
		} else {
			/* Wait up to 5 minutes */
			delay = 300*HZ;
		}
		while (time_before(jiffies, start+delay)) {
			udelay(5); /* Delay 5 microseconds to let Mon960 get info. */
			/*
			 * Mon960 will set doorbell0 bit when it has completed the command.
			 */
			if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
				/*
				 * Clear the doorbell.
				 */
				if (dev->msi_enabled)
					aac_src_access_devreg(dev,
						AAC_CLEAR_SYNC_BIT);
				else
					src_writel(dev,
						MUnit.ODR_C,
						OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
				ok = 1;
				break;
			}
			/*
			 * Yield the processor in case we are slow
			 */
			msleep(1);
		}
		if (unlikely(ok != 1)) {
			/*
			 * Restore interrupt mask even though we timed out
			 */
			aac_adapter_enable_int(dev);
			return -ETIMEDOUT;
		}
		/*
		 * Pull the synch status from Mailbox 0.
		 */
		if (status)
			*status = readl(&dev->IndexRegs->Mailbox[0]);
		if (r1)
			*r1 = readl(&dev->IndexRegs->Mailbox[1]);
		if (r2)
			*r2 = readl(&dev->IndexRegs->Mailbox[2]);
		if (r3)
			*r3 = readl(&dev->IndexRegs->Mailbox[3]);
		if (r4)
			*r4 = readl(&dev->IndexRegs->Mailbox[4]);
		if (command == GET_COMM_PREFERRED_SETTINGS)
			dev->max_msix =
				readl(&dev->IndexRegs->Mailbox[5]) & 0xFFFF;
		/*
		 * Clear the synch command doorbell.
		 */
		if (!dev->msi_enabled)
			src_writel(dev,
				MUnit.ODR_C,
				OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
	}

	/*
	 * Restore interrupt mask
	 */
	aac_adapter_enable_int(dev);
	return 0;
}

/**
 * aac_src_interrupt_adapter - interrupt adapter
 * @dev: Adapter
 *
 * Send an interrupt to the i960 and breakpoint it.
 */

static void aac_src_interrupt_adapter(struct aac_dev *dev)
{
	src_sync_cmd(dev, BREAKPOINT_REQUEST,
		0, 0, 0, 0, 0, 0,
		NULL, NULL, NULL, NULL, NULL);
}

/**
 * aac_src_notify_adapter - send an event to the adapter
 * @dev: Adapter
 * @event: Event to send
 *
 * Notify the i960 that something it probably cares about has
 * happened.
 */

static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
{
	switch (event) {

	case AdapNormCmdQue:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_1 << SRC_ODR_SHIFT);
		break;
	case HostNormRespNotFull:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_4 << SRC_ODR_SHIFT);
		break;
	case AdapNormRespQue:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_2 << SRC_ODR_SHIFT);
		break;
	case HostNormCmdNotFull:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_3 << SRC_ODR_SHIFT);
		break;
	case FastIo:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_6 << SRC_ODR_SHIFT);
		break;
	case AdapPrintfDone:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_5 << SRC_ODR_SHIFT);
		break;
	default:
		BUG();
		break;
	}
}

/**
 * aac_src_start_adapter - activate adapter
 * @dev: Adapter
 *
 * Start up processing on an i960 based AAC adapter
 */

static void aac_src_start_adapter(struct aac_dev *dev)
{
	union aac_init *init;
	int i;

	/* reset host_rrq_idx first */
	for (i = 0; i < dev->max_msix; i++) {
		dev->host_rrq_idx[i] = i * dev->vector_cap;
		atomic_set(&dev->rrq_outstanding[i], 0);
	}
	atomic_set(&dev->msix_counter, 0);
	dev->fibs_pushed_no = 0;

	init = dev->init;
	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
		init->r8.host_elapsed_seconds =
			cpu_to_le32(ktime_get_real_seconds());
		src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
			lower_32_bits(dev->init_pa),
			upper_32_bits(dev->init_pa),
			sizeof(struct _r8) +
			(AAC_MAX_HRRQ - 1) * sizeof(struct _rrq),
			0, 0, 0, NULL, NULL, NULL, NULL, NULL);
	} else {
		init->r7.host_elapsed_seconds =
			cpu_to_le32(ktime_get_real_seconds());
		// We can only use a 32 bit address here
		src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
			(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
			NULL, NULL, NULL, NULL, NULL);
	}

}

/**
 * aac_src_check_health
 * @dev: device to check if healthy
 *
 * Will attempt to determine if the specified adapter is alive and
 * capable of handling requests, returning 0 if alive.
 */
static int aac_src_check_health(struct aac_dev *dev)
{
	u32 status = src_readl(dev, MUnit.OMR);

	/*
	 * Check to see if the board panic'd.
	 */
	if (unlikely(status & KERNEL_PANIC))
		goto err_blink;

	/*
	 * Check to see if the board failed any self tests.
	 */
	if (unlikely(status & SELF_TEST_FAILED))
		goto err_out;

	/*
	 * Check to see if the monitor panic'd.
	 */
	if (unlikely(status & MONITOR_PANIC))
		goto err_out;

	/*
	 * Wait for the adapter to be up and running.
	 */
	if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
		return -3;
	/*
	 * Everything is OK
	 */
	return 0;

err_out:
	return -1;

err_blink:
	return (status >> 16) & 0xFF;
}
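
/*
 * aac_get_vector - pick the next MSI-X vector in round-robin order so
 * that response queue work is spread across all enabled vectors.
 */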
static inline u32 aac_get_vector(struct aac_dev *dev)
{
	return atomic_inc_return(&dev->msix_counter)%dev->max_msix;
}

/**
 * aac_src_deliver_message
 * @fib: fib to issue
 *
 * Will send a fib, returning 0 if successful.
 */
static int aac_src_deliver_message(struct fib *fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	u32 fibsize;
	dma_addr_t address;
	struct aac_fib_xporthdr *pFibX;
	int native_hba;
#if !defined(writeq)
	unsigned long flags;
#endif

	u16 vector_no;

	atomic_inc(&q->numpending);

	native_hba = (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) ? 1 : 0;


	if (dev->msi_enabled && dev->max_msix > 1 &&
		(native_hba || fib->hw_fib_va->header.Command != AifRequest)) {

		if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
			&& dev->sa_firmware)
			vector_no = aac_get_vector(dev);
		else
			vector_no = fib->vector_no;

		if (native_hba) {
			if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
				struct aac_hba_tm_req *tm_req;

				tm_req = (struct aac_hba_tm_req *)
						fib->hw_fib_va;
				if (tm_req->iu_type ==
						HBA_IU_TYPE_SCSI_TM_REQ) {
					((struct aac_hba_tm_req *)
						fib->hw_fib_va)->reply_qid
							= vector_no;
					((struct aac_hba_tm_req *)
						fib->hw_fib_va)->request_id
							+= (vector_no << 16);
				} else {
					((struct aac_hba_reset_req *)
						fib->hw_fib_va)->reply_qid
							= vector_no;
					((struct aac_hba_reset_req *)
						fib->hw_fib_va)->request_id
							+= (vector_no << 16);
				}
			} else {
				((struct aac_hba_cmd_req *)
					fib->hw_fib_va)->reply_qid
						= vector_no;
				((struct aac_hba_cmd_req *)
					fib->hw_fib_va)->request_id
						+= (vector_no << 16);
			}
		} else {
			fib->hw_fib_va->header.Handle += (vector_no << 16);
		}
	} else {
		vector_no = 0;
	}

	atomic_inc(&dev->rrq_outstanding[vector_no]);

	if (native_hba) {
		address = fib->hw_fib_pa;
		fibsize = (fib->hbacmd_size + 127) / 128 - 1;
		if (fibsize > 31)
			fibsize = 31;
		address |= fibsize;
#if defined(writeq)
		src_writeq(dev, MUnit.IQN_L, (u64)address);
#else
		spin_lock_irqsave(&fib->dev->iq_lock, flags);
		src_writel(dev, MUnit.IQN_H,
			upper_32_bits(address) & 0xffffffff);
		src_writel(dev, MUnit.IQN_L, address & 0xffffffff);
		spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
#endif
	} else {
		if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
			dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
			/* Calculate the amount to the fibsize bits */
			fibsize = (le16_to_cpu(fib->hw_fib_va->header.Size)
				+ 127) / 128 - 1;
			/* New FIB header, 32-bit */
			address = fib->hw_fib_pa;
			fib->hw_fib_va->header.StructType = FIB_MAGIC2;
			fib->hw_fib_va->header.SenderFibAddress =
				cpu_to_le32((u32)address);
			fib->hw_fib_va->header.u.TimeStamp = 0;
			WARN_ON(upper_32_bits(address) != 0L);
		} else {
			/* Calculate the amount to the fibsize bits */
			fibsize = (sizeof(struct aac_fib_xporthdr) +
				le16_to_cpu(fib->hw_fib_va->header.Size)
				+ 127) / 128 - 1;
			/* Fill XPORT header */
			pFibX = (struct aac_fib_xporthdr *)
				((unsigned char *)fib->hw_fib_va -
				sizeof(struct aac_fib_xporthdr));
			pFibX->Handle = fib->hw_fib_va->header.Handle;
			pFibX->HostAddress =
				cpu_to_le64((u64)fib->hw_fib_pa);
			pFibX->Size = cpu_to_le32(
				le16_to_cpu(fib->hw_fib_va->header.Size));
			address = fib->hw_fib_pa -
				(u64)sizeof(struct aac_fib_xporthdr);
		}
		if (fibsize > 31)
			fibsize = 31;
		address |= fibsize;

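		/*
		 * Post the combined address/size word to the inbound queue:
		 * a single 64-bit write where writeq() is available, else
		 * two 32-bit writes serialized by iq_lock.
		 */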
#if defined(writeq)
		src_writeq(dev, MUnit.IQ_L, (u64)address);
#else
		spin_lock_irqsave(&fib->dev->iq_lock, flags);
		src_writel(dev, MUnit.IQ_H,
			upper_32_bits(address) & 0xffffffff);
		src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
		spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
#endif
	}
	return 0;
}

/**
 * aac_src_ioremap
 * @dev: adapter
 * @size: mapping resize request
 *
 */
static int aac_src_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		iounmap(dev->regs.src.bar0);
		dev->base = dev->regs.src.bar0 = NULL;
		return 0;
	}
	dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
		AAC_MIN_SRC_BAR1_SIZE);
	dev->base = NULL;
	if (dev->regs.src.bar1 == NULL)
		return -1;
	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
	if (dev->base == NULL) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		return -1;
	}
	dev->IndexRegs = &((struct src_registers __iomem *)
		dev->base)->u.tupelo.IndexRegs;
	return 0;
}

/**
 * aac_srcv_ioremap
 * @dev: adapter
 * @size: mapping resize request
 *
 */
static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.src.bar0);
		dev->base = dev->regs.src.bar0 = NULL;
		return 0;
	}

	dev->regs.src.bar1 =
		ioremap(pci_resource_start(dev->pdev, 2), AAC_MIN_SRCV_BAR1_SIZE);
	dev->base = NULL;
	if (dev->regs.src.bar1 == NULL)
		return -1;
	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
	if (dev->base == NULL) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		return -1;
	}
	dev->IndexRegs = &((struct src_registers __iomem *)
		dev->base)->u.denali.IndexRegs;
	return 0;
}

void aac_set_intx_mode(struct aac_dev *dev)
{
	if (dev->msi_enabled) {
		aac_src_access_devreg(dev, AAC_ENABLE_INTX);
		dev->msi_enabled = 0;
		msleep(5000); /* Delay 5 seconds */
	}
}

static void aac_clear_omr(struct aac_dev *dev)
{
	u32 omr_value = 0;

	omr_value = src_readl(dev, MUnit.OMR);

	/*
	 * Check for PCI Errors or Kernel Panic
	 */
	if ((omr_value == INVALID_OMR) || (omr_value & KERNEL_PANIC))
		omr_value = 0;

	/*
	 * Preserve MSIX Value if any
	 */
	src_writel(dev, MUnit.OMR, omr_value & AAC_INT_MODE_MSIX);
	src_readl(dev, MUnit.OMR);
}

static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev)
{
	__le32 supported_options3;

	if (!aac_fib_dump)
		return;

	supported_options3 = dev->supplement_adapter_info.supported_options3;
	if (!(supported_options3 & AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP))
		return;

	aac_adapter_sync_cmd(dev, IOP_RESET_FW_FIB_DUMP,
			0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}

static bool aac_is_ctrl_up_and_running(struct aac_dev *dev)
{
	bool ctrl_up = true;
	unsigned long status, start;
	bool is_up = false;

	start = jiffies;
	do {
		schedule();
		status = src_readl(dev, MUnit.OMR);

		if (status == 0xffffffff)
			status = 0;

		if (status & KERNEL_BOOTING) {
			start = jiffies;
			continue;
		}

		if (time_after(jiffies, start+HZ*SOFT_RESET_TIME)) {
			ctrl_up = false;
			break;
		}

		is_up = status & KERNEL_UP_AND_RUNNING;

	} while (!is_up);

	return ctrl_up;
}

static void aac_notify_fw_of_iop_reset(struct aac_dev *dev)
{
	aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL,
				NULL, NULL, NULL, NULL);
}

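/*
 * aac_send_iop_reset - ask the firmware to perform an IOP reset: dump the
 * firmware FIBs if supported, notify the firmware, drop back to INTx mode,
 * clear the OMR and then hit the IOP reset doorbell.
 */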
static void aac_send_iop_reset(struct aac_dev *dev)
{
	aac_dump_fw_fib_iop_reset(dev);

	aac_notify_fw_of_iop_reset(dev);

	aac_set_intx_mode(dev);

	aac_clear_omr(dev);

	src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK);

	msleep(5000);
}

static void aac_send_hardware_soft_reset(struct aac_dev *dev)
{
	u_int32_t val;

	aac_clear_omr(dev);
	val = readl(((char *)(dev->base) + IBW_SWR_OFFSET));
	val |= 0x01;
	writel(val, ((char *)(dev->base) + IBW_SWR_OFFSET));
	msleep_interruptible(20000);
}

static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
{
	bool is_ctrl_up;
	int ret = 0;

	if (bled < 0)
		goto invalid_out;

	if (bled)
		dev_err(&dev->pdev->dev, "adapter kernel panic'd %x.\n", bled);

	/*
	 * When there is a BlinkLED, IOP_RESET has no effect
	 */
	if (bled >= 2 && dev->sa_firmware && reset_type & HW_IOP_RESET)
		reset_type &= ~HW_IOP_RESET;

	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;

	dev_err(&dev->pdev->dev, "Controller reset type is %d\n", reset_type);

	if (reset_type & HW_IOP_RESET) {
		dev_info(&dev->pdev->dev, "Issuing IOP reset\n");
		aac_send_iop_reset(dev);

		/*
		 * Wait (with a timeout) until up-and-running comes through
		 */
		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
		if (!is_ctrl_up)
			dev_err(&dev->pdev->dev, "IOP reset failed\n");
		else {
			dev_info(&dev->pdev->dev, "IOP reset succeeded\n");
			goto set_startup;
		}
	}

	if (!dev->sa_firmware) {
		dev_err(&dev->pdev->dev, "ARC Reset attempt failed\n");
		ret = -ENODEV;
		goto out;
	}

	if (reset_type & HW_SOFT_RESET) {
		dev_info(&dev->pdev->dev, "Issuing SOFT reset\n");
		aac_send_hardware_soft_reset(dev);
		dev->msi_enabled = 0;

		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
		if (!is_ctrl_up) {
			dev_err(&dev->pdev->dev, "SOFT reset failed\n");
			ret = -ENODEV;
			goto out;
		} else
			dev_info(&dev->pdev->dev, "SOFT reset succeeded\n");
	}

set_startup:
	if (startup_timeout < 300)
		startup_timeout = 300;

out:
	return ret;

invalid_out:
	if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
		ret = -ENODEV;
	goto out;
}

/**
 * aac_src_select_comm - Select communications method
 * @dev: Adapter
 * @comm: communications method
 */
static int aac_src_select_comm(struct aac_dev *dev, int comm)
{
	switch (comm) {
	case AAC_COMM_MESSAGE:
		dev->a_ops.adapter_intr = aac_src_intr_message;
		dev->a_ops.adapter_deliver = aac_src_deliver_message;
		break;
	default:
		return 1;
	}
	return 0;
}

/**
 * aac_src_init - initialize a Cardinal Frey Bar card
 * @dev: device to configure
 *
 */

int aac_src_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	dev->a_ops.adapter_ioremap = aac_src_ioremap;
	dev->a_ops.adapter_comm = aac_src_select_comm;

	dev->base_size = AAC_MIN_SRC_BAR0_SIZE;
	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;

	if (dev->init_reset) {
		dev->init_reset = false;
		if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
			++restart;
	}

	/*
	 * Check to see if the board panic'd while booting.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & KERNEL_PANIC) {
		if (aac_src_restart_adapter(dev,
			aac_src_check_health(dev), IOP_HWSOFT_RESET))
			goto error_iounmap;
		++restart;
	}
	/*
	 * Check to see if the board failed any self tests.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n",
			dev->name, instance);
		goto error_iounmap;
	}
	/*
	 * Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n",
			dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 * Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	while (!((status = src_readl(dev, MUnit.OMR)) &
		KERNEL_UP_AND_RUNNING)) {
		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_src_restart_adapter(dev,
				aac_src_check_health(dev), IOP_HWSOFT_RESET)))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 * Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_notify = aac_src_notify_adapter;
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_check_health = aac_src_check_health;
	dev->a_ops.adapter_restart = aac_src_restart_adapter;
	dev->a_ops.adapter_start = aac_src_start_adapter;

	/*
	 * First clear out all interrupts.  Then enable the ones that we
	 * can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
	aac_adapter_disable_int(dev);
	src_writel(dev, MUnit.ODR_C, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
		goto error_iounmap;

	dev->msi = !pci_enable_msi(dev->pdev);

	dev->aac_msix[0].vector_no = 0;
	dev->aac_msix[0].dev = dev;

	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED, "aacraid", &(dev->aac_msix[0])) < 0) {

		if (dev->msi)
			pci_disable_msi(dev->pdev);

		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			name, instance);
		goto error_iounmap;
	}
	dev->dbg_base = pci_resource_start(dev->pdev, 2);
	dev->dbg_base_mapped = dev->regs.src.bar1;
	dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;

	aac_adapter_enable_int(dev);

	if (!dev->sync_mode) {
		/*
		 * Tell the adapter that all is configured, and it can
		 * start accepting requests
		 */
		aac_src_start_adapter(dev);
	}
	return 0;

error_iounmap:

	return -1;
}

static int aac_src_wait_sync(struct aac_dev *dev, int *status)
{
	unsigned long start = jiffies;
	unsigned long usecs = 0;
	int delay = 5 * HZ;
	int rc = 1;

	while (time_before(jiffies, start+delay)) {
		/*
		 * Delay 5 microseconds to let Mon960 get info.
		 */
		udelay(5);

		/*
		 * Mon960 will set doorbell0 bit when it has completed the
		 * command.
		 */
		if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
			/*
			 * Clear the doorbell.
			 */
			if (dev->msi_enabled)
				aac_src_access_devreg(dev, AAC_CLEAR_SYNC_BIT);
			else
				src_writel(dev, MUnit.ODR_C,
					OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
			rc = 0;

			break;
		}

		/*
		 * Yield the processor in case we are slow
		 */
		usecs = 1 * USEC_PER_MSEC;
		usleep_range(usecs, usecs + 50);
	}
	/*
	 * Pull the synch status from Mailbox 0.
	 */
	if (status && !rc) {
		status[0] = readl(&dev->IndexRegs->Mailbox[0]);
		status[1] = readl(&dev->IndexRegs->Mailbox[1]);
		status[2] = readl(&dev->IndexRegs->Mailbox[2]);
		status[3] = readl(&dev->IndexRegs->Mailbox[3]);
		status[4] = readl(&dev->IndexRegs->Mailbox[4]);
	}

	return rc;
}

/**
 * aac_src_soft_reset - perform soft reset to speed up
 * access
 *
 * Assumptions: That the controller is in a state where we can
 * bring it back to life with an init struct. We can only use
 * fast sync commands, as the timeout is 5 seconds.
 *
 * @dev: device to configure
 *
 */

static int aac_src_soft_reset(struct aac_dev *dev)
{
	u32 status_omr = src_readl(dev, MUnit.OMR);
	u32 status[5];
	int rc = 1;
	int state = 0;
	char *state_str[7] = {
		"GET_ADAPTER_PROPERTIES Failed",
		"GET_ADAPTER_PROPERTIES timeout",
		"SOFT_RESET not supported",
		"DROP_IO Failed",
		"DROP_IO timeout",
		"Check Health failed"
	};

	if (status_omr == INVALID_OMR)
		return 1; // pcie hosed

	if (!(status_omr & KERNEL_UP_AND_RUNNING))
		return 1; // not up and running

	/*
	 * We go into soft reset mode to allow us to handle responses
	 */
	dev->in_soft_reset = 1;
	dev->msi_enabled = status_omr & AAC_INT_MODE_MSIX;

	/* Get adapter properties */
	rc = aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 0, 0, 0,
		0, 0, 0, status+0, status+1, status+2, status+3, status+4);
	if (rc)
		goto out;

	state++;
	if (aac_src_wait_sync(dev, status)) {
		rc = 1;
		goto out;
	}

	state++;
	if (!(status[1] & le32_to_cpu(AAC_OPT_EXTENDED) &&
		(status[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET)))) {
		rc = 2;
		goto out;
	}

	if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) &&
		(status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE)))
		dev->sa_firmware = 1;

	state++;
	rc = aac_adapter_sync_cmd(dev, DROP_IO, 0, 0, 0, 0, 0, 0,
		status+0, status+1, status+2, status+3, status+4);

	if (rc)
		goto out;

	state++;
	if (aac_src_wait_sync(dev, status)) {
		rc = 3;
		goto out;
	}

	if (status[1])
		dev_err(&dev->pdev->dev, "%s: %d outstanding I/O pending\n",
			__func__, status[1]);

	state++;
	rc = aac_src_check_health(dev);

out:
	dev->in_soft_reset = 0;
	dev->msi_enabled = 0;
	if (rc)
		dev_err(&dev->pdev->dev, "%s: %s status = %d", __func__,
			state_str[state], rc);

	return rc;
}
/**
 * aac_srcv_init - initialize an SRCv card
 * @dev: device to configure
 *
 */

int aac_srcv_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
	dev->a_ops.adapter_comm = aac_src_select_comm;

	dev->base_size = AAC_MIN_SRCV_BAR0_SIZE;
	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;

	if (dev->init_reset) {
		dev->init_reset = false;
		if (aac_src_soft_reset(dev)) {
			aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET);
			++restart;
		}
	}

	/*
	 * Check to see if flash update is running.
	 * Wait for the adapter to be up and running. Wait up to 5 minutes
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & FLASH_UPD_PENDING) {
		start = jiffies;
		do {
			status = src_readl(dev, MUnit.OMR);
			if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) {
				printk(KERN_ERR "%s%d: adapter flash update failed.\n",
					dev->name, instance);
				goto error_iounmap;
			}
		} while (!(status & FLASH_UPD_SUCCESS) &&
			!(status & FLASH_UPD_FAILED));
		/* Delay 10 seconds.
		 * Because right now FW is doing a soft reset,
		 * do not read scratch pad register at this time
		 */
		ssleep(10);
	}
	/*
	 * Check to see if the board panic'd while booting.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & KERNEL_PANIC) {
		if (aac_src_restart_adapter(dev,
			aac_src_check_health(dev), IOP_HWSOFT_RESET))
			goto error_iounmap;
		++restart;
	}
	/*
	 * Check to see if the board failed any self tests.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
		goto error_iounmap;
	}
	/*
	 * Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
		goto error_iounmap;
	}

	start = jiffies;
	/*
	 * Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	do {
		status = src_readl(dev, MUnit.OMR);
		if (status == INVALID_OMR)
			status = 0;

		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_src_restart_adapter(dev,
				aac_src_check_health(dev), IOP_HWSOFT_RESET)))
				start = jiffies;
			++restart;
		}
		msleep(1);
	} while (!(status & KERNEL_UP_AND_RUNNING));

	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 * Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_notify = aac_src_notify_adapter;
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_check_health = aac_src_check_health;
	dev->a_ops.adapter_restart = aac_src_restart_adapter;
	dev->a_ops.adapter_start = aac_src_start_adapter;

	/*
	 * First clear out all interrupts.  Then enable the ones that we
	 * can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
	aac_adapter_disable_int(dev);
	src_writel(dev, MUnit.ODR_C, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	if ((dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) &&
		(dev->comm_interface != AAC_COMM_MESSAGE_TYPE3))
		goto error_iounmap;
	if (dev->msi_enabled)
		aac_src_access_devreg(dev, AAC_ENABLE_MSIX);

	if (aac_acquire_irq(dev))
		goto error_iounmap;

	dev->dbg_base = pci_resource_start(dev->pdev, 2);
	dev->dbg_base_mapped = dev->regs.src.bar1;
	dev->dbg_size = AAC_MIN_SRCV_BAR1_SIZE;
	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;

	aac_adapter_enable_int(dev);

	if (!dev->sync_mode) {
		/*
		 * Tell the adapter that all is configured, and it can
		 * start accepting requests
		 */
		aac_src_start_adapter(dev);
	}
	return 0;

error_iounmap:

	return -1;
}

void aac_src_access_devreg(struct aac_dev *dev, int mode)
{
	u_int32_t val;

	switch (mode) {
	case AAC_ENABLE_INTERRUPT:
		src_writel(dev,
			   MUnit.OIMR,
			   dev->OIMR = (dev->msi_enabled ?
					AAC_INT_ENABLE_TYPE1_MSIX :
					AAC_INT_ENABLE_TYPE1_INTX));
		break;

	case AAC_DISABLE_INTERRUPT:
		src_writel(dev,
			   MUnit.OIMR,
			   dev->OIMR = AAC_INT_DISABLE_ALL);
		break;

	case AAC_ENABLE_MSIX:
		/* set bit 6 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x40;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		/* unmask int. */
		val = PMC_ALL_INTERRUPT_BITS;
		src_writel(dev, MUnit.IOAR, val);
		val = src_readl(dev, MUnit.OIMR);
		src_writel(dev,
			   MUnit.OIMR,
			   val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
		break;

	case AAC_DISABLE_MSIX:
		/* reset bit 6 */
		val = src_readl(dev, MUnit.IDR);
		val &= ~0x40;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_CLEAR_AIF_BIT:
		/* set bit 5 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x20;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_CLEAR_SYNC_BIT:
		/* set bit 4 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x10;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_ENABLE_INTX:
		/* set bit 7 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x80;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		/* unmask int. */
		val = PMC_ALL_INTERRUPT_BITS;
		src_writel(dev, MUnit.IOAR, val);
		src_readl(dev, MUnit.IOAR);
		val = src_readl(dev, MUnit.OIMR);
		src_writel(dev, MUnit.OIMR,
			   val & (~(PMC_GLOBAL_INT_BIT2)));
		break;

	default:
		break;
	}
}
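
/*
 * aac_src_get_sync_status - read the sync command doorbell status, using
 * the legacy (INTx) doorbell while MSI is disabled and the MSI-X status
 * register once it is enabled.
 */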
static int aac_src_get_sync_status(struct aac_dev *dev)
{
	int msix_val = 0;
	int legacy_val = 0;

	msix_val = src_readl(dev, MUnit.ODR_MSI) & SRC_MSI_READ_MASK ? 1 : 0;

	if (!dev->msi_enabled) {
		/*
		 * If the legacy interrupt status indicates the command is not
		 * complete, sample the MSI-X register to see if it indicates
		 * completion; if so, switch the controller to MSI-X mode and
		 * consider the command completed.
		 */
		legacy_val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT;
		if (!(legacy_val & 1) && msix_val)
			dev->msi_enabled = 1;
		return legacy_val;
	}

	return msix_val;
}