/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  rx.c
 *
 * Abstract: Hardware miniport for Drawbridge specific hardware functions.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/time.h>
#include <linux/interrupt.h>

#include <scsi/scsi_host.h>

#include "aacraid.h"

/*
 * aac_rx_intr_producer - interrupt handler for producer/consumer comm mode
 * @irq: interrupt line (unused; the device is identified via @dev_id)
 * @dev_id: adapter (struct aac_dev *) registered with request_irq()
 *
 * Decodes the outbound doorbell register and dispatches printf messages,
 * AIF commands and response completions to the appropriate queue handlers.
 * Returns IRQ_HANDLED if the interrupt belonged to this adapter,
 * IRQ_NONE otherwise (the line may be shared).
 */
static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id)
{
	struct aac_dev *dev = dev_id;
	unsigned long bellbits;
	u8 intstat = rx_readb(dev, MUnit.OISR);

	/*
	 *	Read mask and invert because drawbridge is reversed.
	 *	This allows us to only service interrupts that have
	 *	been enabled.
	 *	Check to see if this is our interrupt.  If it isn't just return
	 */
	if (likely(intstat & ~(dev->OIMR))) {
		bellbits = rx_readl(dev, OutboundDoorbellReg);
		if (unlikely(bellbits & DoorBellPrintfReady)) {
			/* Adapter has a printf message waiting in Mailbox[5] */
			aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5]));
			rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
			rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
		}
		else if (unlikely(bellbits & DoorBellAdapterNormCmdReady)) {
			/* Adapter-initiated command (AIF) pending */
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
			aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
		}
		else if (likely(bellbits & DoorBellAdapterNormRespReady)) {
			/* Normal response completion - the common fast path */
			rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
			aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
		}
		else if (unlikely(bellbits & DoorBellAdapterNormCmdNotFull)) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
		}
		else if (unlikely(bellbits & DoorBellAdapterNormRespNotFull)) {
			/* Acknowledge both NotFull doorbells here */
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
		}
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

/*
 * aac_rx_intr_message - interrupt handler for message (new comm) mode
 * @irq: interrupt line (unused; the device is identified via @dev_id)
 * @dev_id: adapter (struct aac_dev *) registered with request_irq()
 *
 * Drains the outbound queue register.  Each entry encodes a fib index
 * plus flag bits: bit 1 marks an AIF (with the special all-ones-but-bit-0
 * value 0xFFFFFFFE meaning "no fib attached"), bit 0 of a non-AIF entry
 * marks a fast response.  0xFFFFFFFF means the queue is empty (the first
 * read is retried once since the register read itself can be stale).
 * Entries that aac_intr_normal() rejects are pushed back to the adapter.
 */
static irqreturn_t aac_rx_intr_message(int irq, void *dev_id)
{
	int isAif, isFastResponse, isSpecial;
	struct aac_dev *dev = dev_id;
	u32 Index = rx_readl(dev, MUnit.OutboundQueue);
	if (unlikely(Index == 0xFFFFFFFFL))
		Index = rx_readl(dev, MUnit.OutboundQueue);
	if (likely(Index != 0xFFFFFFFFL)) {
		do {
			isAif = isFastResponse = isSpecial = 0;
			if (Index & 0x00000002L) {
				isAif = 1;
				if (Index == 0xFFFFFFFEL)
					isSpecial = 1;
				Index &= ~0x00000002L;
			} else {
				if (Index & 0x00000001L)
					isFastResponse = 1;
				Index >>= 2;
			}
			if (!isSpecial) {
				if (unlikely(aac_intr_normal(dev,
						Index, isAif,
						isFastResponse, NULL))) {
					/*
					 * Handler refused the entry; return it
					 * to the adapter and ring its doorbell.
					 */
					rx_writel(dev,
						MUnit.OutboundQueue,
						Index);
					rx_writel(dev,
						MUnit.ODR,
						DoorBellAdapterNormRespReady);
				}
			}
			Index = rx_readl(dev, MUnit.OutboundQueue);
		} while (Index != 0xFFFFFFFFL);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

/**
 *	aac_rx_disable_interrupt	-	Disable interrupts
 *	@dev: Adapter
 *
 *	Masks all bits in the outbound interrupt mask register (OIMR);
 *	the cached copy in dev->OIMR is updated in the same statement.
 */

static void aac_rx_disable_interrupt(struct aac_dev *dev)
{
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
}

/**
 *	aac_rx_enable_interrupt_producer	-	Enable interrupts
 *	@dev: Adapter
 *
 *	Unmasks the doorbell interrupt (0xfb = all masked except bit 2)
 *	used by the producer/consumer communication mode.
 */

static void aac_rx_enable_interrupt_producer(struct aac_dev *dev)
{
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
}

/**
 *	aac_rx_enable_interrupt_message	-	Enable interrupts
 *	@dev: Adapter
 *
 *	Unmasks the outbound-queue interrupt (0xf7 = all masked except
 *	bit 3) used by the message (new comm) communication mode.
 */

static void aac_rx_enable_interrupt_message(struct aac_dev *dev)
{
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
}

/**
 *	rx_sync_cmd	-	send a command and wait
 *	@dev: Adapter
 *	@command: Command to execute
 *	@p1: first parameter
 *	@p2: second parameter
 *	@p3: third parameter
 *	@p4: fourth parameter
 *	@p5: fifth parameter (accepted for interface compatibility; not
 *	     written to a mailbox on this hardware)
 *	@p6: sixth parameter (likewise unused here)
 *	@status: adapter status from Mailbox[0], if non-NULL
 *	@r1: return value from Mailbox[1], if non-NULL
 *	@r2: return value from Mailbox[2], if non-NULL
 *	@r3: return value from Mailbox[3], if non-NULL
 *	@r4: return value from Mailbox[4], if non-NULL
 *
 *	This routine will send a synchronous command to the adapter and wait
 *	for its completion.  Interrupts are masked for the duration of the
 *	handshake and restored before returning.  Returns 0 on success or
 *	-ETIMEDOUT if the adapter does not answer within 30 seconds.
 */

static int rx_sync_cmd(struct aac_dev *dev, u32 command,
	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
	u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4)
{
	unsigned long start;
	int ok;
	/*
	 *	Write the command into Mailbox 0
	 */
	writel(command, &dev->IndexRegs->Mailbox[0]);
	/*
	 *	Write the parameters into Mailboxes 1 - 6
	 */
	writel(p1, &dev->IndexRegs->Mailbox[1]);
	writel(p2, &dev->IndexRegs->Mailbox[2]);
	writel(p3, &dev->IndexRegs->Mailbox[3]);
	writel(p4, &dev->IndexRegs->Mailbox[4]);
	/*
	 *	Clear the synch command doorbell to start on a clean slate.
	 */
	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
	/*
	 *	Disable doorbell interrupts
	 */
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
	/*
	 *	Force the completion of the mask register write before issuing
	 *	the interrupt.
	 */
	rx_readb (dev, MUnit.OIMR);
	/*
	 *	Signal that there is a new synch command
	 */
	rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);

	ok = 0;
	start = jiffies;

	/*
	 *	Wait up to 30 seconds
	 */
	while (time_before(jiffies, start+30*HZ))
	{
		udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
		/*
		 *	Mon960 will set doorbell0 bit when it has completed the command.
		 */
		if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
			/*
			 *	Clear the doorbell.
			 */
			rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
			ok = 1;
			break;
		}
		/*
		 *	Yield the processor in case we are slow
		 */
		msleep(1);
	}
	if (unlikely(ok != 1)) {
		/*
		 *	Restore interrupt mask even though we timed out
		 */
		aac_adapter_enable_int(dev);
		return -ETIMEDOUT;
	}
	/*
	 *	Pull the synch status from Mailbox 0.
	 */
	if (status)
		*status = readl(&dev->IndexRegs->Mailbox[0]);
	if (r1)
		*r1 = readl(&dev->IndexRegs->Mailbox[1]);
	if (r2)
		*r2 = readl(&dev->IndexRegs->Mailbox[2]);
	if (r3)
		*r3 = readl(&dev->IndexRegs->Mailbox[3]);
	if (r4)
		*r4 = readl(&dev->IndexRegs->Mailbox[4]);
	/*
	 *	Clear the synch command doorbell.
	 */
	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
	/*
	 *	Restore interrupt mask
	 */
	aac_adapter_enable_int(dev);
	return 0;

}

/**
 *	aac_rx_interrupt_adapter	-	interrupt adapter
 *	@dev: Adapter
 *
 *	Send an interrupt to the i960 and breakpoint it.
 */

static void aac_rx_interrupt_adapter(struct aac_dev *dev)
{
	rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}

/**
 *	aac_rx_notify_adapter		-	send an event to the adapter
 *	@dev: Adapter
 *	@event: Event to send
 *
 *	Notify the i960 that something it probably cares about has
 *	happened.  Each event maps to a fixed inbound doorbell bit;
 *	HostShutdown needs no doorbell and an unknown event is a bug
 *	in the caller.
 */

static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
{
	switch (event) {

	case AdapNormCmdQue:
		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1);
		break;
	case HostNormRespNotFull:
		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4);
		break;
	case AdapNormRespQue:
		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2);
		break;
	case HostNormCmdNotFull:
		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
		break;
	case HostShutdown:
		break;
	case FastIo:
		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
		break;
	case AdapPrintfDone:
		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5);
		break;
	default:
		BUG();
		break;
	}
}

/**
 *	aac_rx_start_adapter		-	activate adapter
 *	@dev:	Adapter
 *
 *	Start up processing on an i960 based AAC adapter by handing it the
 *	physical address of the init structure.
 */

static void aac_rx_start_adapter(struct aac_dev *dev)
{
	union aac_init *init;

	init = dev->init;
	init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds());
	// We can only use a 32 bit address here
	rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
	  0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}

/**
 *	aac_rx_check_health
 *	@dev: device to check if healthy
 *
 *	Will attempt to determine if the specified adapter is alive and
 *	capable of handling requests, returning 0 if alive.  On a kernel
 *	panic the BlinkLED code is returned (either decoded directly from
 *	the status register or fetched from the adapter via
 *	COMMAND_POST_RESULTS); negative values indicate self-test failure
 *	(-1), allocation failure (-2) or kernel not up (-3).
 */
static int aac_rx_check_health(struct aac_dev *dev)
{
	u32 status = rx_readl(dev, MUnit.OMRx[0]);

	/*
	 *	Check to see if the board failed any self tests.
	 */
	if (unlikely(status & SELF_TEST_FAILED))
		return -1;
	/*
	 *	Check to see if the board panic'd.
	 */
	if (unlikely(status & KERNEL_PANIC)) {
		char * buffer;
		struct POSTSTATUS {
			__le32 Post_Command;
			__le32 Post_Address;
		} * post;
		dma_addr_t paddr, baddr;
		int ret;

		/* 0xBCxxxx00 encodes the BlinkLED code in bits 16-23 */
		if (likely((status & 0xFF000000L) == 0xBC000000L))
			return (status >> 16) & 0xFF;
		buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
		ret = -2;
		if (unlikely(buffer == NULL))
			return ret;
		post = pci_alloc_consistent(dev->pdev,
		  sizeof(struct POSTSTATUS), &paddr);
		if (unlikely(post == NULL)) {
			pci_free_consistent(dev->pdev, 512, buffer, baddr);
			return ret;
		}
		memset(buffer, 0, 512);
		post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS);
		post->Post_Address = cpu_to_le32(baddr);
		rx_writel(dev, MUnit.IMRx[0], paddr);
		rx_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, 0, 0, 0, 0, 0,
		  NULL, NULL, NULL, NULL, NULL);
		pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
		  post, paddr);
		/* The adapter writes its status as an ASCII "0x.." string */
		if (likely((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X')))) {
			ret = (hex_to_bin(buffer[2]) << 4) +
				hex_to_bin(buffer[3]);
		}
		pci_free_consistent(dev->pdev, 512, buffer, baddr);
		return ret;
	}
	/*
	 *	Wait for the adapter to be up and running.
	 */
	if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
		return -3;
	/*
	 *	Everything is OK
	 */
	return 0;
}

/**
 *	aac_rx_deliver_producer
 *	@fib: fib to issue
 *
 *	Will send a fib, returning 0 if successful.  Queues the fib on the
 *	AdapNormCmdQueue, advances the producer index and notifies the
 *	adapter unless interrupt moderation suppresses the doorbell.
 */
int aac_rx_deliver_producer(struct fib * fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	u32 Index;
	unsigned long nointr = 0;

	aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib_va, 1, fib, &nointr);

	atomic_inc(&q->numpending);
	*(q->headers.producer) = cpu_to_le32(Index + 1);
	if (!(nointr & aac_config.irq_mod))
		aac_adapter_notify(dev, AdapNormCmdQueue);

	return 0;
}

/**
 *	aac_rx_deliver_message
 *	@fib: fib to issue
 *
 *	Will send a fib, returning 0 if successful.  Polls the inbound
 *	queue register for a free slot (up to ~50 seconds), then writes
 *	the fib's 64-bit DMA address and size to the slot and posts it.
 *	Returns -ETIMEDOUT if no slot becomes available.
 */
static int aac_rx_deliver_message(struct fib * fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	u32 Index;
	u64 addr;
	volatile void __iomem *device;

	unsigned long count = 10000000L; /* 50 seconds */
	atomic_inc(&q->numpending);
	for(;;) {
		Index = rx_readl(dev, MUnit.InboundQueue);
		if (unlikely(Index == 0xFFFFFFFFL))
			Index = rx_readl(dev, MUnit.InboundQueue);
		if (likely(Index != 0xFFFFFFFFL))
			break;
		if (--count == 0) {
			atomic_dec(&q->numpending);
			return -ETIMEDOUT;
		}
		udelay(5);
	}
	/* Index is a byte offset into the mapped BAR for this slot */
	device = dev->base + Index;
	addr = fib->hw_fib_pa;
	writel((u32)(addr & 0xffffffff), device);
	device += sizeof(u32);
	writel((u32)(addr >> 32), device);
	device += sizeof(u32);
	writel(le16_to_cpu(fib->hw_fib_va->header.Size), device);
	rx_writel(dev, MUnit.InboundQueue, Index);
	return 0;
}

/**
 *	aac_rx_ioremap
 *	@dev: adapter whose register window to (re)map
 *	@size: mapping resize request; 0 means unmap
 *
 *	Returns 0 on success, -1 if the ioremap failed.
 */
static int aac_rx_ioremap(struct aac_dev * dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.rx);
		return 0;
	}
	dev->base = dev->regs.rx = ioremap(dev->base_start, size);
	if (dev->base == NULL)
		return -1;
	dev->IndexRegs = &dev->regs.rx->IndexRegs;
	return 0;
}

/*
 * aac_rx_restart_adapter - reset and restart the adapter
 * @dev: Adapter
 * @bled: BlinkLED/health code (0 = no panic observed; negative values are
 *	  error codes; -2 forces the sync-command reset path)
 * @reset_type: requested reset method (unused on this hardware)
 *
 * Attempts IOP_RESET_ALWAYS/IOP_RESET via sync commands, falling back to
 * the "other method" (write 3 to MUnit.reserved2) when the adapter asks
 * for it with status 0x3803000F.  Returns 0 when the adapter comes back,
 * -EINVAL if the reset was refused, or -ENODEV if it panics again.
 */
static int
aac_rx_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
{
	u32 var = 0;

	if (!(dev->supplement_adapter_info.supported_options2 &
	  AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) {
		if (bled)
			printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
				dev->name, dev->id, bled);
		else {
			bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
			  0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
			/* 0x00000001 = reset done, 0x3803000F = use other method */
			if (!bled && (var != 0x00000001) && (var != 0x3803000F))
				bled = -EINVAL;
		}
		if (bled && (bled != -ETIMEDOUT))
			bled = aac_adapter_sync_cmd(dev, IOP_RESET,
			  0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);

		if (bled && (bled != -ETIMEDOUT))
			return -EINVAL;
	}
	if (bled && (var == 0x3803000F)) { /* USE_OTHER_METHOD */
		rx_writel(dev, MUnit.reserved2, 3);
		msleep(5000); /* Delay 5 seconds */
		var = 0x00000001;
	}
	if (bled && (var != 0x00000001))
		return -EINVAL;
	ssleep(5);
	if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)
		return -ENODEV;
	if (startup_timeout < 300)
		startup_timeout = 300;
	return 0;
}

/**
 *	aac_rx_select_comm	-	Select communications method
 *	@dev: Adapter
 *	@comm: communications method
 *
 *	Installs the interrupt/deliver/enable-int callbacks for the chosen
 *	communication mode.  Returns 0 on success, 1 for an unknown method.
 */

int aac_rx_select_comm(struct aac_dev *dev, int comm)
{
	switch (comm) {
	case AAC_COMM_PRODUCER:
		dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_producer;
		dev->a_ops.adapter_intr = aac_rx_intr_producer;
		dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
		break;
	case AAC_COMM_MESSAGE:
		dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_message;
		dev->a_ops.adapter_intr = aac_rx_intr_message;
		dev->a_ops.adapter_deliver = aac_rx_deliver_message;
		break;
	default:
		return 1;
	}
	return 0;
}

/**
 *	aac_rx_init	-	initialize an i960 based AAC card
 *	@dev: device to configure
 *
 *	Allocate and set up resources for the i960 based AAC variants. The
 *	device_interface in the commregion will be allocated and linked
 *	to the comm region.
 */

int _aac_rx_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char * name = dev->name;

	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
	dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
	if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) &&
	  !aac_rx_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
		/* Make sure the Hardware FIFO is empty */
		while ((++restart < 512) &&
		  (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
	/*
	 *	Check to see if the board panic'd while booting.
	 */
	status = rx_readl(dev, MUnit.OMRx[0]);
	if (status & KERNEL_PANIC) {
		if (aac_rx_restart_adapter(dev,
			aac_rx_check_health(dev), IOP_HWSOFT_RESET))
			goto error_iounmap;
		++restart;
	}
	/*
	 *	Check to see if the board failed any self tests.
	 */
	status = rx_readl(dev, MUnit.OMRx[0]);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
		goto error_iounmap;
	}
	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING))
	{
		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			/* One mid-boot restart attempt before giving up */
			if (likely(!aac_rx_restart_adapter(dev,
				aac_rx_check_health(dev), IOP_HWSOFT_RESET)))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 *	Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
	dev->a_ops.adapter_notify = aac_rx_notify_adapter;
	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
	dev->a_ops.adapter_check_health = aac_rx_check_health;
	dev->a_ops.adapter_restart = aac_rx_restart_adapter;
	dev->a_ops.adapter_start = aac_rx_start_adapter;

	/*
	 *	First clear out all interrupts.  Then enable the one's that we
	 *	can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_PRODUCER);
	aac_adapter_disable_int(dev);
	rx_writel(dev, MUnit.ODR, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	aac_adapter_comm(dev, dev->comm_interface);
	dev->sync_mode = 0;	/* sync. mode not supported */
	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED, "aacraid", dev) < 0) {
		if (dev->msi)
			pci_disable_msi(dev->pdev);
		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			name, instance);
		goto error_iounmap;
	}
	dev->dbg_base = dev->base_start;
	dev->dbg_base_mapped = dev->base;
	dev->dbg_size = dev->base_size;

	aac_adapter_enable_int(dev);
	/*
	 *	Tell the adapter that all is configured, and it can
	 * start accepting requests
	 */
	aac_rx_start_adapter(dev);

	return 0;

error_iounmap:

	return -1;
}

int aac_rx_init(struct aac_dev *dev)
{
	/*
	 *	Fill in the function dispatch table.
	 */
	dev->a_ops.adapter_ioremap = aac_rx_ioremap;
	dev->a_ops.adapter_comm = aac_rx_select_comm;

	return _aac_rx_init(dev);
}