/*
 *  sata_sil.c - Silicon Image SATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2005 Red Hat, Inc.
 *  Copyright 2003 Benjamin Herrenschmidt
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Documentation for SiI 3112:
 *  http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
 *
 *  Other errata and documentation available under NDA.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_sil"
#define DRV_VERSION	"2.3"

enum {
	SIL_MMIO_BAR		= 5,

	/*
	 * host flags
	 */
	SIL_FLAG_NO_SATA_IRQ	= (1 << 28),
	SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
	SIL_FLAG_MOD15WRITE	= (1 << 30),

	SIL_DFL_PORT_FLAGS	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO,

	/*
	 * Controller IDs
	 */
	sil_3112		= 0,
	sil_3112_no_sata_irq	= 1,
	sil_3512		= 2,
	sil_3114		= 3,

	/*
	 * Register offsets
	 */
	SIL_SYSCFG		= 0x48,

	/*
	 * Register bits
	 */
	/* SYSCFG */
	SIL_MASK_IDE0_INT	= (1 << 22),
	SIL_MASK_IDE1_INT	= (1 << 23),
	SIL_MASK_IDE2_INT	= (1 << 24),
	SIL_MASK_IDE3_INT	= (1 << 25),
	SIL_MASK_2PORT		= SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT,
	SIL_MASK_4PORT		= SIL_MASK_2PORT |
				  SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,

	/* BMDMA/BMDMA2 */
	SIL_INTR_STEERING	= (1 << 1),

	SIL_DMA_ENABLE		= (1 << 0),  /* DMA run switch */
	SIL_DMA_RDWR		= (1 << 3),  /* DMA Rd-Wr */
	SIL_DMA_SATA_IRQ	= (1 << 4),  /* OR of all SATA IRQs */
	SIL_DMA_ACTIVE		= (1 << 16), /* DMA running */
	SIL_DMA_ERROR		= (1 << 17), /* PCI bus error */
	SIL_DMA_COMPLETE	= (1 << 18), /* cmd complete / IRQ pending */
	SIL_DMA_N_SATA_IRQ	= (1 << 6),  /* SATA_IRQ for the next channel */
	SIL_DMA_N_ACTIVE	= (1 << 24), /* ACTIVE for the next channel */
	SIL_DMA_N_ERROR		= (1 << 25), /* ERROR for the next channel */
	SIL_DMA_N_COMPLETE	= (1 << 26), /* COMPLETE for the next channel */

	/* SIEN */
	SIL_SIEN_N		= (1 << 16), /* triggered by SError.N */

	/*
	 * Others
	 */
	SIL_QUIRK_MOD15WRITE	= (1 << 0),
	SIL_QUIRK_UDMA5MAX	= (1 << 1),
};

static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int sil_pci_device_resume(struct pci_dev *pdev);
#endif
static void sil_dev_config(struct ata_device *dev);
static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
static void sil_freeze(struct ata_port *ap);
static void sil_thaw(struct ata_port *ap);


static const struct pci_device_id sil_pci_tbl[] = {
	{ PCI_VDEVICE(CMD, 0x3112), sil_3112 },
	{ PCI_VDEVICE(CMD, 0x0240), sil_3112 },
	{ PCI_VDEVICE(CMD, 0x3512), sil_3512 },
	{ PCI_VDEVICE(CMD, 0x3114), sil_3114 },
	{ PCI_VDEVICE(ATI, 0x436e), sil_3112 },
	{ PCI_VDEVICE(ATI, 0x4379), sil_3112_no_sata_irq },
	{ PCI_VDEVICE(ATI, 0x437a), sil_3112_no_sata_irq },

	{ }	/* terminate list */
};


/* TODO firmware versions should be added - eric */
static const struct sil_drivelist {
	const char *product;
	unsigned int quirk;
} sil_blacklist [] = {
	{ "ST320012AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST330013AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST340017AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST360015AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST380023AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST3120023AS",	SIL_QUIRK_MOD15WRITE },
	{ "ST340014ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST360014ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST380011ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST3120022ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST3160021ASL",	SIL_QUIRK_MOD15WRITE },
	{ "Maxtor 4D060H3",	SIL_QUIRK_UDMA5MAX },
	{ }
};

static struct pci_driver sil_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= sil_pci_tbl,
	.probe			= sil_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= sil_pci_device_resume,
#endif
};

static struct scsi_host_template sil_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations sil_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.dev_config		= sil_dev_config,
	.set_mode		= sil_set_mode,
	.freeze			= sil_freeze,
	.thaw			= sil_thaw,
	.scr_read		= sil_scr_read,
	.scr_write		= sil_scr_write,
};

static const struct ata_port_info sil_port_info[] = {
	/* sil_3112 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3112_no_sata_irq */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
				  SIL_FLAG_NO_SATA_IRQ,
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3512 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3114 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
};

/* per-port register offsets */
/* TODO: we can probably calculate rather than use a table */
static const struct {
	unsigned long tf;	/* ATA taskfile register block */
	unsigned long ctl;	/* ATA control/altstatus register block */
	unsigned long bmdma;	/* DMA register block */
	unsigned long bmdma2;	/* DMA register block #2 */
	unsigned long fifo_cfg;	/* FIFO Valid Byte Count and Control */
	unsigned long scr;	/* SATA control register block */
	unsigned long sien;	/* SATA Interrupt Enable register */
	unsigned long xfer_mode;/* data transfer mode register */
	unsigned long sfis_cfg;	/* SATA FIS reception config register */
} sil_port[] = {
	/* port 0 ... */
	/*   tf    ctl  bmdma  bmdma2  fifo    scr   sien   mode   sfis */
	{  0x80,  0x8A,   0x0,  0x10,  0x40, 0x100, 0x148,  0xb4, 0x14c },
	{  0xC0,  0xCA,   0x8,  0x18,  0x44, 0x180, 0x1c8,  0xf4, 0x1cc },
	{ 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
	{ 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
	/* ... port 3 */
};

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int slow_down;
module_param(slow_down, int, 0444);
MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");


static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
{
	u8 cache_line = 0;
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line);
	return cache_line;
}

/**
 *	sil_set_mode		-	wrap set_mode functions
 *	@link: link to set up
 *	@r_failed: returned device when we fail
 *
 *	Wrap the libata method for device setup as after the setup we need
 *	to inspect the results and do some configuration work
 */

static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed)
{
	struct ata_port *ap = link->ap;
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode;
	struct ata_device *dev;
	u32 tmp, dev_mode[2] = { };
	int rc;

	rc = ata_do_set_mode(link, r_failed);
	if (rc)
		return rc;

	ata_link_for_each_dev(dev, link) {
		if (!ata_dev_enabled(dev))
			dev_mode[dev->devno] = 0;	/* PIO0/1/2 */
		else if (dev->flags & ATA_DFLAG_PIO)
			dev_mode[dev->devno] = 1;	/* PIO3/4 */
		else
			dev_mode[dev->devno] = 3;	/* UDMA */
		/* value 2 indicates MDMA */
	}

	tmp = readl(addr);
	tmp &= ~((1<<5) | (1<<4) | (1<<1) | (1<<0));
	tmp |= dev_mode[0];
	tmp |= (dev_mode[1] << 4);
	writel(tmp, addr);
	readl(addr);	/* flush */
	return 0;
}

static inline void __iomem *sil_scr_addr(struct ata_port *ap,
					 unsigned int sc_reg)
{
	void __iomem *offset = ap->ioaddr.scr_addr;

	switch (sc_reg) {
	case SCR_STATUS:
		return offset + 4;
	case SCR_ERROR:
		return offset + 8;
	case SCR_CONTROL:
		return offset;
	default:
		/* do nothing */
		break;
	}

	return NULL;
}

static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);

	if (mmio) {
		*val = readl(mmio);
		return 0;
	}
	return -EINVAL;
}

static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);

	if (mmio) {
		writel(val, mmio);
		return 0;
	}
	return -EINVAL;
}

static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
	u8 status;

	if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
		u32 serror;

		/* SIEN doesn't mask SATA IRQs on some 3112s.  Those
		 * controllers continue to assert IRQ as long as
		 * SError bits are pending.  Clear SError immediately.
		 */
		sil_scr_read(&ap->link, SCR_ERROR, &serror);
		sil_scr_write(&ap->link, SCR_ERROR, serror);

		/* Sometimes spurious interrupts occur, double check
		 * it's PHYRDY CHG.
		 */
		if (serror & SERR_PHYRDY_CHG) {
			ap->link.eh_info.serror |= serror;
			goto freeze;
		}

		if (!(bmdma2 & SIL_DMA_COMPLETE))
			return;
	}

	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		/* this sometimes happens, just clear IRQ */
		ap->ops->sff_check_status(ap);
		return;
	}

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.  No
		 * need to check ata_is_atapi(qc->tf.protocol) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto err_hsm;
		break;
	case HSM_ST_LAST:
		if (ata_is_dma(qc->tf.protocol)) {
			/* clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (bmdma2 & SIL_DMA_ERROR) {
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto err_hsm;
	}

	/* check main status, clearing INTRQ */
	status = ap->ops->sff_check_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto err_hsm;

	/* ack bmdma irq events */
	ata_sff_irq_clear(ap);

	/* kick HSM in the ass */
	ata_sff_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
		ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);

	return;

 err_hsm:
	qc->err_mask |= AC_ERR_HSM;
 freeze:
	ata_port_freeze(ap);
}

static irqreturn_t sil_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
	int handled = 0;
	int i;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);

		if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
			continue;

		/* turn off SATA_IRQ if not supported */
		if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
			bmdma2 &= ~SIL_DMA_SATA_IRQ;

		if (bmdma2 == 0xffffffff ||
		    !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
			continue;

		sil_host_intr(ap, bmdma2);
		handled = 1;
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void sil_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	u32 tmp;

	/* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
	writel(0, mmio_base + sil_port[ap->port_no].sien);

	/* plug IRQ */
	tmp = readl(mmio_base + SIL_SYSCFG);
	tmp |= SIL_MASK_IDE0_INT << ap->port_no;
	writel(tmp, mmio_base + SIL_SYSCFG);
	readl(mmio_base + SIL_SYSCFG);	/* flush */
}

static void sil_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	u32 tmp;

	/* clear IRQ */
	ap->ops->sff_check_status(ap);
	ata_sff_irq_clear(ap);

	/* turn on SATA IRQ if supported */
	if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
		writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);

	/* turn on IRQ */
	tmp = readl(mmio_base + SIL_SYSCFG);
	tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
	writel(tmp, mmio_base + SIL_SYSCFG);
}

/**
 *	sil_dev_config - Apply device/host-specific errata fixups
 *	@dev: Device to be examined
 *
 *	After the IDENTIFY [PACKET] DEVICE step is complete, and a
 *	device is known to be present, this function is called.
 *	We apply two errata fixups which are specific to Silicon Image,
 *	a Seagate and a Maxtor fixup.
 *
 *	For certain Seagate devices, we must limit the maximum sectors
 *	to under 8K.
 *
 *	For certain Maxtor devices, we must not program the drive
 *	beyond udma5.
 *
 *	Both fixups are unfairly pessimistic.  As soon as I get more
 *	information on these errata, I will create a more exhaustive
 *	list, and apply the fixups to only the specific
 *	devices/hosts/firmwares that need it.
 *
 *	20040111 - Seagate drives affected by the Mod15Write bug are blacklisted
 *	The Maxtor quirk is in the blacklist, but I'm keeping the original
 *	pessimistic fix for the following reasons...
 *	- There seems to be less info on it, only one device gleaned off the
 *	Windows driver, maybe only one is affected.  More info would be greatly
 *	appreciated.
 *	- But then again UDMA5 is hardly anything to complain about
 */
static void sil_dev_config(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	int print_info = ap->link.eh_context.i.flags & ATA_EHI_PRINTINFO;
	unsigned int n, quirks = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	for (n = 0; sil_blacklist[n].product; n++)
		if (!strcmp(sil_blacklist[n].product, model_num)) {
			quirks = sil_blacklist[n].quirk;
			break;
		}

	/* limit requests to 15 sectors */
	if (slow_down ||
	    ((ap->flags & SIL_FLAG_MOD15WRITE) &&
	     (quirks & SIL_QUIRK_MOD15WRITE))) {
		if (print_info)
			ata_dev_printk(dev, KERN_INFO, "applying Seagate "
				       "errata fix (mod15write workaround)\n");
		dev->max_sectors = 15;
		return;
	}

	/* limit to udma5 */
	if (quirks & SIL_QUIRK_UDMA5MAX) {
		if (print_info)
			ata_dev_printk(dev, KERN_INFO, "applying Maxtor "
				       "errata fix %s\n", model_num);
		dev->udma_mask &= ATA_UDMA5;
		return;
	}
}

static void sil_init_controller(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
	u8 cls;
	u32 tmp;
	int i;

	/* Initialize FIFO PCI bus arbitration */
	cls = sil_get_device_cache_line(pdev);
	if (cls) {
		cls >>= 3;
		cls++;  /* cls = (line_size/8)+1 */
		for (i = 0; i < host->n_ports; i++)
			writew(cls << 8 | cls,
			       mmio_base + sil_port[i].fifo_cfg);
	} else
		dev_printk(KERN_WARNING, &pdev->dev,
			   "cache line size not set.  Driver may not function\n");

	/* Apply R_ERR on DMA activate FIS errata workaround */
	if (host->ports[0]->flags & SIL_FLAG_RERR_ON_DMA_ACT) {
		int cnt;

		for (i = 0, cnt = 0; i < host->n_ports; i++) {
			tmp = readl(mmio_base + sil_port[i].sfis_cfg);
			if ((tmp & 0x3) != 0x01)
				continue;
			if (!cnt)
				dev_printk(KERN_INFO, &pdev->dev,
					   "Applying R_ERR on DMA activate "
					   "FIS errata fix\n");
			writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
			cnt++;
		}
	}

	if (host->n_ports == 4) {
		/* flip the magic "make 4 ports work" bit */
		tmp = readl(mmio_base + sil_port[2].bmdma);
		if ((tmp & SIL_INTR_STEERING) == 0)
			writel(tmp | SIL_INTR_STEERING,
			       mmio_base + sil_port[2].bmdma);
	}
}

static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	int board_id = ent->driver_data;
	const struct ata_port_info *ppi[] = { &sil_port_info[board_id], NULL };
	struct ata_host *host;
	void __iomem *mmio_base;
	int n_ports, rc;
	unsigned int i;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = 2;
	if (board_id == sil_3114)
		n_ports = 4;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	if (!host)
		return -ENOMEM;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << SIL_MMIO_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	mmio_base = host->iomap[SIL_MMIO_BAR];

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_ioports *ioaddr = &ap->ioaddr;

		ioaddr->cmd_addr = mmio_base + sil_port[i].tf;
		ioaddr->altstatus_addr =
		ioaddr->ctl_addr = mmio_base + sil_port[i].ctl;
		ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma;
		ioaddr->scr_addr = mmio_base + sil_port[i].scr;
		ata_sff_std_ports(ioaddr);

		ata_port_pbar_desc(ap, SIL_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, SIL_MMIO_BAR, sil_port[i].tf, "tf");
	}

	/* initialize and activate */
	sil_init_controller(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, sil_interrupt, IRQF_SHARED,
				 &sil_sht);
}

#ifdef CONFIG_PM
static int sil_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	sil_init_controller(host);
	ata_host_resume(host);

	return 0;
}
#endif

static int __init sil_init(void)
{
	return pci_register_driver(&sil_pci_driver);
}

static void __exit sil_exit(void)
{
	pci_unregister_driver(&sil_pci_driver);
}


module_init(sil_init);
module_exit(sil_exit);