1 /* 2 * Adaptec AAC series RAID controller driver 3 * (c) Copyright 2001 Red Hat Inc. 4 * 5 * based on the old aacraid driver that is.. 6 * Adaptec aacraid device driver for Linux. 7 * 8 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com) 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License as published by 12 * the Free Software Foundation; either version 2, or (at your option) 13 * any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public License 21 * along with this program; see the file COPYING. If not, write to 22 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 23 * 24 * Module Name: 25 * rx.c 26 * 27 * Abstract: Hardware miniport for Drawbridge specific hardware functions. 28 * 29 */ 30 31 #include <linux/kernel.h> 32 #include <linux/init.h> 33 #include <linux/types.h> 34 #include <linux/pci.h> 35 #include <linux/spinlock.h> 36 #include <linux/slab.h> 37 #include <linux/blkdev.h> 38 #include <linux/delay.h> 39 #include <linux/completion.h> 40 #include <linux/time.h> 41 #include <linux/interrupt.h> 42 43 #include <scsi/scsi_host.h> 44 45 #include "aacraid.h" 46 47 static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id) 48 { 49 struct aac_dev *dev = dev_id; 50 unsigned long bellbits; 51 u8 intstat = rx_readb(dev, MUnit.OISR); 52 53 /* 54 * Read mask and invert because drawbridge is reversed. 55 * This allows us to only service interrupts that have 56 * been enabled. 57 * Check to see if this is our interrupt. If it isn't just return 58 */ 59 if (likely(intstat & ~(dev->OIMR))) { 60 bellbits = rx_readl(dev, OutboundDoorbellReg); 61 if (unlikely(bellbits & DoorBellPrintfReady)) { 62 aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5])); 63 rx_writel(dev, MUnit.ODR,DoorBellPrintfReady); 64 rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone); 65 } 66 else if (unlikely(bellbits & DoorBellAdapterNormCmdReady)) { 67 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady); 68 aac_command_normal(&dev->queues->queue[HostNormCmdQueue]); 69 } 70 else if (likely(bellbits & DoorBellAdapterNormRespReady)) { 71 rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady); 72 aac_response_normal(&dev->queues->queue[HostNormRespQueue]); 73 } 74 else if (unlikely(bellbits & DoorBellAdapterNormCmdNotFull)) { 75 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull); 76 } 77 else if (unlikely(bellbits & DoorBellAdapterNormRespNotFull)) { 78 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull); 79 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull); 80 } 81 return IRQ_HANDLED; 82 } 83 return IRQ_NONE; 84 } 85 86 static irqreturn_t aac_rx_intr_message(int irq, void *dev_id) 87 { 88 struct aac_dev *dev = dev_id; 89 u32 Index = rx_readl(dev, MUnit.OutboundQueue); 90 if (unlikely(Index == 0xFFFFFFFFL)) 91 Index = rx_readl(dev, MUnit.OutboundQueue); 92 if (likely(Index != 0xFFFFFFFFL)) { 93 do { 94 if (unlikely(aac_intr_normal(dev, Index))) { 95 rx_writel(dev, MUnit.OutboundQueue, Index); 96 rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady); 97 } 98 Index = rx_readl(dev, MUnit.OutboundQueue); 99 } while (Index != 0xFFFFFFFFL); 100 return IRQ_HANDLED; 101 } 102 return IRQ_NONE; 103 } 104 105 /** 106 * aac_rx_disable_interrupt - Disable interrupts 107 * @dev: Adapter 108 */ 109 110 static void aac_rx_disable_interrupt(struct aac_dev *dev) 111 { 112 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff); 113 } 114 115 /** 116 * aac_rx_enable_interrupt_producer - Enable interrupts 117 * @dev: Adapter 118 */ 119 120 static void aac_rx_enable_interrupt_producer(struct aac_dev *dev) 121 { 122 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb); 123 } 124 125 /** 126 * aac_rx_enable_interrupt_message - Enable interrupts 127 * @dev: Adapter 128 */ 129 130 static void aac_rx_enable_interrupt_message(struct aac_dev *dev) 131 { 132 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7); 133 } 134 135 /** 136 * rx_sync_cmd - send a command and wait 137 * @dev: Adapter 138 * @command: Command to execute 139 * @p1: first parameter 140 * @ret: adapter status 141 * 142 * This routine will send a synchronous command to the adapter and wait 143 * for its completion. 144 */ 145 146 static int rx_sync_cmd(struct aac_dev *dev, u32 command, 147 u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, 148 u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4) 149 { 150 unsigned long start; 151 int ok; 152 /* 153 * Write the command into Mailbox 0 154 */ 155 writel(command, &dev->IndexRegs->Mailbox[0]); 156 /* 157 * Write the parameters into Mailboxes 1 - 6 158 */ 159 writel(p1, &dev->IndexRegs->Mailbox[1]); 160 writel(p2, &dev->IndexRegs->Mailbox[2]); 161 writel(p3, &dev->IndexRegs->Mailbox[3]); 162 writel(p4, &dev->IndexRegs->Mailbox[4]); 163 /* 164 * Clear the synch command doorbell to start on a clean slate. 165 */ 166 rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0); 167 /* 168 * Disable doorbell interrupts 169 */ 170 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff); 171 /* 172 * Force the completion of the mask register write before issuing 173 * the interrupt. 174 */ 175 rx_readb (dev, MUnit.OIMR); 176 /* 177 * Signal that there is a new synch command 178 */ 179 rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0); 180 181 ok = 0; 182 start = jiffies; 183 184 /* 185 * Wait up to 30 seconds 186 */ 187 while (time_before(jiffies, start+30*HZ)) 188 { 189 udelay(5); /* Delay 5 microseconds to let Mon960 get info. */ 190 /* 191 * Mon960 will set doorbell0 bit when it has completed the command. 192 */ 193 if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) { 194 /* 195 * Clear the doorbell. 196 */ 197 rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0); 198 ok = 1; 199 break; 200 } 201 /* 202 * Yield the processor in case we are slow 203 */ 204 msleep(1); 205 } 206 if (unlikely(ok != 1)) { 207 /* 208 * Restore interrupt mask even though we timed out 209 */ 210 aac_adapter_enable_int(dev); 211 return -ETIMEDOUT; 212 } 213 /* 214 * Pull the synch status from Mailbox 0. 215 */ 216 if (status) 217 *status = readl(&dev->IndexRegs->Mailbox[0]); 218 if (r1) 219 *r1 = readl(&dev->IndexRegs->Mailbox[1]); 220 if (r2) 221 *r2 = readl(&dev->IndexRegs->Mailbox[2]); 222 if (r3) 223 *r3 = readl(&dev->IndexRegs->Mailbox[3]); 224 if (r4) 225 *r4 = readl(&dev->IndexRegs->Mailbox[4]); 226 /* 227 * Clear the synch command doorbell. 228 */ 229 rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0); 230 /* 231 * Restore interrupt mask 232 */ 233 aac_adapter_enable_int(dev); 234 return 0; 235 236 } 237 238 /** 239 * aac_rx_interrupt_adapter - interrupt adapter 240 * @dev: Adapter 241 * 242 * Send an interrupt to the i960 and breakpoint it. 243 */ 244 245 static void aac_rx_interrupt_adapter(struct aac_dev *dev) 246 { 247 rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); 248 } 249 250 /** 251 * aac_rx_notify_adapter - send an event to the adapter 252 * @dev: Adapter 253 * @event: Event to send 254 * 255 * Notify the i960 that something it probably cares about has 256 * happened. 257 */ 258 259 static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event) 260 { 261 switch (event) { 262 263 case AdapNormCmdQue: 264 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1); 265 break; 266 case HostNormRespNotFull: 267 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4); 268 break; 269 case AdapNormRespQue: 270 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2); 271 break; 272 case HostNormCmdNotFull: 273 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3); 274 break; 275 case HostShutdown: 276 break; 277 case FastIo: 278 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6); 279 break; 280 case AdapPrintfDone: 281 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5); 282 break; 283 default: 284 BUG(); 285 break; 286 } 287 } 288 289 /** 290 * aac_rx_start_adapter - activate adapter 291 * @dev: Adapter 292 * 293 * Start up processing on an i960 based AAC adapter 294 */ 295 296 static void aac_rx_start_adapter(struct aac_dev *dev) 297 { 298 struct aac_init *init; 299 300 init = dev->init; 301 init->HostElapsedSeconds = cpu_to_le32(get_seconds()); 302 // We can only use a 32 bit address here 303 rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, 304 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); 305 } 306 307 /** 308 * aac_rx_check_health 309 * @dev: device to check if healthy 310 * 311 * Will attempt to determine if the specified adapter is alive and 312 * capable of handling requests, returning 0 if alive. 313 */ 314 static int aac_rx_check_health(struct aac_dev *dev) 315 { 316 u32 status = rx_readl(dev, MUnit.OMRx[0]); 317 318 /* 319 * Check to see if the board failed any self tests. 320 */ 321 if (unlikely(status & SELF_TEST_FAILED)) 322 return -1; 323 /* 324 * Check to see if the board panic'd. 325 */ 326 if (unlikely(status & KERNEL_PANIC)) { 327 char * buffer; 328 struct POSTSTATUS { 329 __le32 Post_Command; 330 __le32 Post_Address; 331 } * post; 332 dma_addr_t paddr, baddr; 333 int ret; 334 335 if (likely((status & 0xFF000000L) == 0xBC000000L)) 336 return (status >> 16) & 0xFF; 337 buffer = pci_alloc_consistent(dev->pdev, 512, &baddr); 338 ret = -2; 339 if (unlikely(buffer == NULL)) 340 return ret; 341 post = pci_alloc_consistent(dev->pdev, 342 sizeof(struct POSTSTATUS), &paddr); 343 if (unlikely(post == NULL)) { 344 pci_free_consistent(dev->pdev, 512, buffer, baddr); 345 return ret; 346 } 347 memset(buffer, 0, 512); 348 post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS); 349 post->Post_Address = cpu_to_le32(baddr); 350 rx_writel(dev, MUnit.IMRx[0], paddr); 351 rx_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, 0, 0, 0, 0, 0, 352 NULL, NULL, NULL, NULL, NULL); 353 pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS), 354 post, paddr); 355 if (likely((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X')))) { 356 ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10); 357 ret <<= 4; 358 ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10); 359 } 360 pci_free_consistent(dev->pdev, 512, buffer, baddr); 361 return ret; 362 } 363 /* 364 * Wait for the adapter to be up and running. 365 */ 366 if (unlikely(!(status & KERNEL_UP_AND_RUNNING))) 367 return -3; 368 /* 369 * Everything is OK 370 */ 371 return 0; 372 } 373 374 /** 375 * aac_rx_deliver_producer 376 * @fib: fib to issue 377 * 378 * Will send a fib, returning 0 if successful. 379 */ 380 int aac_rx_deliver_producer(struct fib * fib) 381 { 382 struct aac_dev *dev = fib->dev; 383 struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; 384 unsigned long qflags; 385 u32 Index; 386 unsigned long nointr = 0; 387 388 spin_lock_irqsave(q->lock, qflags); 389 aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib_va, 1, fib, &nointr); 390 391 q->numpending++; 392 *(q->headers.producer) = cpu_to_le32(Index + 1); 393 spin_unlock_irqrestore(q->lock, qflags); 394 if (!(nointr & aac_config.irq_mod)) 395 aac_adapter_notify(dev, AdapNormCmdQueue); 396 397 return 0; 398 } 399 400 /** 401 * aac_rx_deliver_message 402 * @fib: fib to issue 403 * 404 * Will send a fib, returning 0 if successful. 405 */ 406 static int aac_rx_deliver_message(struct fib * fib) 407 { 408 struct aac_dev *dev = fib->dev; 409 struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; 410 unsigned long qflags; 411 u32 Index; 412 u64 addr; 413 volatile void __iomem *device; 414 415 unsigned long count = 10000000L; /* 50 seconds */ 416 spin_lock_irqsave(q->lock, qflags); 417 q->numpending++; 418 spin_unlock_irqrestore(q->lock, qflags); 419 for(;;) { 420 Index = rx_readl(dev, MUnit.InboundQueue); 421 if (unlikely(Index == 0xFFFFFFFFL)) 422 Index = rx_readl(dev, MUnit.InboundQueue); 423 if (likely(Index != 0xFFFFFFFFL)) 424 break; 425 if (--count == 0) { 426 spin_lock_irqsave(q->lock, qflags); 427 q->numpending--; 428 spin_unlock_irqrestore(q->lock, qflags); 429 return -ETIMEDOUT; 430 } 431 udelay(5); 432 } 433 device = dev->base + Index; 434 addr = fib->hw_fib_pa; 435 writel((u32)(addr & 0xffffffff), device); 436 device += sizeof(u32); 437 writel((u32)(addr >> 32), device); 438 device += sizeof(u32); 439 writel(le16_to_cpu(fib->hw_fib_va->header.Size), device); 440 rx_writel(dev, MUnit.InboundQueue, Index); 441 return 0; 442 } 443 444 /** 445 * aac_rx_ioremap 446 * @size: mapping resize request 447 * 448 */ 449 static int aac_rx_ioremap(struct aac_dev * dev, u32 size) 450 { 451 if (!size) { 452 iounmap(dev->regs.rx); 453 return 0; 454 } 455 dev->base = dev->regs.rx = ioremap(dev->scsi_host_ptr->base, size); 456 if (dev->base == NULL) 457 return -1; 458 dev->IndexRegs = &dev->regs.rx->IndexRegs; 459 return 0; 460 } 461 462 static int aac_rx_restart_adapter(struct aac_dev *dev, int bled) 463 { 464 u32 var; 465 466 if (!(dev->supplement_adapter_info.SupportedOptions2 & 467 AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) { 468 if (bled) 469 printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n", 470 dev->name, dev->id, bled); 471 else { 472 bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 473 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL); 474 if (!bled && (var != 0x00000001) && (var != 0x3803000F)) 475 bled = -EINVAL; 476 } 477 if (bled && (bled != -ETIMEDOUT)) 478 bled = aac_adapter_sync_cmd(dev, IOP_RESET, 479 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL); 480 481 if (bled && (bled != -ETIMEDOUT)) 482 return -EINVAL; 483 } 484 if (bled || (var == 0x3803000F)) { /* USE_OTHER_METHOD */ 485 rx_writel(dev, MUnit.reserved2, 3); 486 msleep(5000); /* Delay 5 seconds */ 487 var = 0x00000001; 488 } 489 if (var != 0x00000001) 490 return -EINVAL; 491 if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) 492 return -ENODEV; 493 if (startup_timeout < 300) 494 startup_timeout = 300; 495 return 0; 496 } 497 498 /** 499 * aac_rx_select_comm - Select communications method 500 * @dev: Adapter 501 * @comm: communications method 502 */ 503 504 int aac_rx_select_comm(struct aac_dev *dev, int comm) 505 { 506 switch (comm) { 507 case AAC_COMM_PRODUCER: 508 dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_producer; 509 dev->a_ops.adapter_intr = aac_rx_intr_producer; 510 dev->a_ops.adapter_deliver = aac_rx_deliver_producer; 511 break; 512 case AAC_COMM_MESSAGE: 513 dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_message; 514 dev->a_ops.adapter_intr = aac_rx_intr_message; 515 dev->a_ops.adapter_deliver = aac_rx_deliver_message; 516 break; 517 default: 518 return 1; 519 } 520 return 0; 521 } 522 523 /** 524 * aac_rx_init - initialize an i960 based AAC card 525 * @dev: device to configure 526 * 527 * Allocate and set up resources for the i960 based AAC variants. The 528 * device_interface in the commregion will be allocated and linked 529 * to the comm region. 530 */ 531 532 int _aac_rx_init(struct aac_dev *dev) 533 { 534 unsigned long start; 535 unsigned long status; 536 int restart = 0; 537 int instance = dev->id; 538 const char * name = dev->name; 539 540 if (aac_adapter_ioremap(dev, dev->base_size)) { 541 printk(KERN_WARNING "%s: unable to map adapter.\n", name); 542 goto error_iounmap; 543 } 544 545 /* Failure to reset here is an option ... */ 546 dev->a_ops.adapter_sync_cmd = rx_sync_cmd; 547 dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt; 548 dev->OIMR = status = rx_readb (dev, MUnit.OIMR); 549 if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) && 550 !aac_rx_restart_adapter(dev, 0)) 551 /* Make sure the Hardware FIFO is empty */ 552 while ((++restart < 512) && 553 (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL)); 554 /* 555 * Check to see if the board panic'd while booting. 556 */ 557 status = rx_readl(dev, MUnit.OMRx[0]); 558 if (status & KERNEL_PANIC) { 559 if (aac_rx_restart_adapter(dev, aac_rx_check_health(dev))) 560 goto error_iounmap; 561 ++restart; 562 } 563 /* 564 * Check to see if the board failed any self tests. 565 */ 566 status = rx_readl(dev, MUnit.OMRx[0]); 567 if (status & SELF_TEST_FAILED) { 568 printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance); 569 goto error_iounmap; 570 } 571 /* 572 * Check to see if the monitor panic'd while booting. 573 */ 574 if (status & MONITOR_PANIC) { 575 printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance); 576 goto error_iounmap; 577 } 578 start = jiffies; 579 /* 580 * Wait for the adapter to be up and running. Wait up to 3 minutes 581 */ 582 while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING)) 583 { 584 if ((restart && 585 (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) || 586 time_after(jiffies, start+HZ*startup_timeout)) { 587 printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n", 588 dev->name, instance, status); 589 goto error_iounmap; 590 } 591 if (!restart && 592 ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) || 593 time_after(jiffies, start + HZ * 594 ((startup_timeout > 60) 595 ? (startup_timeout - 60) 596 : (startup_timeout / 2))))) { 597 if (likely(!aac_rx_restart_adapter(dev, aac_rx_check_health(dev)))) 598 start = jiffies; 599 ++restart; 600 } 601 msleep(1); 602 } 603 if (restart && aac_commit) 604 aac_commit = 1; 605 /* 606 * Fill in the common function dispatch table. 607 */ 608 dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter; 609 dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt; 610 dev->a_ops.adapter_notify = aac_rx_notify_adapter; 611 dev->a_ops.adapter_sync_cmd = rx_sync_cmd; 612 dev->a_ops.adapter_check_health = aac_rx_check_health; 613 dev->a_ops.adapter_restart = aac_rx_restart_adapter; 614 615 /* 616 * First clear out all interrupts. Then enable the one's that we 617 * can handle. 618 */ 619 aac_adapter_comm(dev, AAC_COMM_PRODUCER); 620 aac_adapter_disable_int(dev); 621 rx_writel(dev, MUnit.ODR, 0xffffffff); 622 aac_adapter_enable_int(dev); 623 624 if (aac_init_adapter(dev) == NULL) 625 goto error_iounmap; 626 aac_adapter_comm(dev, dev->comm_interface); 627 dev->msi = aac_msi && !pci_enable_msi(dev->pdev); 628 if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, 629 IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) { 630 if (dev->msi) 631 pci_disable_msi(dev->pdev); 632 printk(KERN_ERR "%s%d: Interrupt unavailable.\n", 633 name, instance); 634 goto error_iounmap; 635 } 636 aac_adapter_enable_int(dev); 637 /* 638 * Tell the adapter that all is configured, and it can 639 * start accepting requests 640 */ 641 aac_rx_start_adapter(dev); 642 643 return 0; 644 645 error_iounmap: 646 647 return -1; 648 } 649 650 int aac_rx_init(struct aac_dev *dev) 651 { 652 /* 653 * Fill in the function dispatch table. 654 */ 655 dev->a_ops.adapter_ioremap = aac_rx_ioremap; 656 dev->a_ops.adapter_comm = aac_rx_select_comm; 657 658 return _aac_rx_init(dev); 659 } 660