/*
 *  sata_sil.c - Silicon Image SATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2005 Red Hat, Inc.
 *  Copyright 2003 Benjamin Herrenschmidt
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Documentation for SiI 3112:
 *  http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
 *
 *  Other errata and documentation available under NDA.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_sil"
#define DRV_VERSION	"2.3"

enum {
	SIL_MMIO_BAR		= 5,

	/*
	 * host flags
	 */
	SIL_FLAG_NO_SATA_IRQ	= (1 << 28),
	SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
	SIL_FLAG_MOD15WRITE	= (1 << 30),

	SIL_DFL_PORT_FLAGS	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO,
	SIL_DFL_LINK_FLAGS	= ATA_LFLAG_HRST_TO_RESUME,

	/*
	 * Controller IDs
	 */
	sil_3112		= 0,
	sil_3112_no_sata_irq	= 1,
	sil_3512		= 2,
	sil_3114		= 3,

	/*
	 * Register offsets
	 */
	SIL_SYSCFG		= 0x48,

	/*
	 * Register bits
	 */
	/* SYSCFG */
	SIL_MASK_IDE0_INT	= (1 << 22),
	SIL_MASK_IDE1_INT	= (1 << 23),
	SIL_MASK_IDE2_INT	= (1 << 24),
	SIL_MASK_IDE3_INT	= (1 << 25),
	SIL_MASK_2PORT		= SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT,
	SIL_MASK_4PORT		= SIL_MASK_2PORT |
				  SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,

	/* BMDMA/BMDMA2 */
	SIL_INTR_STEERING	= (1 << 1),

	SIL_DMA_ENABLE		= (1 << 0),  /* DMA run switch */
	SIL_DMA_RDWR		= (1 << 3),  /* DMA Rd-Wr */
	SIL_DMA_SATA_IRQ	= (1 << 4),  /* OR of all SATA IRQs */
	SIL_DMA_ACTIVE		= (1 << 16), /* DMA running */
	SIL_DMA_ERROR		= (1 << 17), /* PCI bus error */
	SIL_DMA_COMPLETE	= (1 << 18), /* cmd complete / IRQ pending */
	SIL_DMA_N_SATA_IRQ	= (1 << 6),  /* SATA_IRQ for the next channel */
	SIL_DMA_N_ACTIVE	= (1 << 24), /* ACTIVE for the next channel */
	SIL_DMA_N_ERROR		= (1 << 25), /* ERROR for the next channel */
	SIL_DMA_N_COMPLETE	= (1 << 26), /* COMPLETE for the next channel */

	/* SIEN */
	SIL_SIEN_N		= (1 << 16), /* triggered by SError.N */

	/*
	 * Others
	 */
	SIL_QUIRK_MOD15WRITE	= (1 << 0),
	SIL_QUIRK_UDMA5MAX	= (1 << 1),
};

static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int sil_pci_device_resume(struct pci_dev *pdev);
#endif
static void sil_dev_config(struct ata_device *dev);
static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
static void sil_freeze(struct ata_port *ap);
static void sil_thaw(struct ata_port *ap);


static const struct pci_device_id sil_pci_tbl[] = {
	{ PCI_VDEVICE(CMD, 0x3112), sil_3112 },
	{ PCI_VDEVICE(CMD, 0x0240), sil_3112 },
	{ PCI_VDEVICE(CMD, 0x3512), sil_3512 },
	{ PCI_VDEVICE(CMD, 0x3114), sil_3114 },
	{ PCI_VDEVICE(ATI, 0x436e), sil_3112 },
	{ PCI_VDEVICE(ATI, 0x4379), sil_3112_no_sata_irq },
	{ PCI_VDEVICE(ATI, 0x437a), sil_3112_no_sata_irq },

	{ }	/* terminate list */
};


/* TODO firmware versions should be added - eric */
static const struct sil_drivelist {
	const char *product;
	unsigned int quirk;
} sil_blacklist[] = {
	{ "ST320012AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST330013AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST340017AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST360015AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST380023AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST3120023AS",	SIL_QUIRK_MOD15WRITE },
	{ "ST340014ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST360014ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST380011ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST3120022ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST3160021ASL",	SIL_QUIRK_MOD15WRITE },
	{ "Maxtor 4D060H3",	SIL_QUIRK_UDMA5MAX },
	{ }
};

static struct pci_driver sil_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= sil_pci_tbl,
	.probe			= sil_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= sil_pci_device_resume,
#endif
};

static struct scsi_host_template sil_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations sil_ops = {
	.dev_config		= sil_dev_config,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,
	.set_mode		= sil_set_mode,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.data_xfer		= ata_data_xfer,
	.freeze			= sil_freeze,
	.thaw			= sil_thaw,
	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= sil_scr_read,
	.scr_write		= sil_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_info sil_port_info[] = {
	/* sil_3112 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
		.link_flags	= SIL_DFL_LINK_FLAGS,
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3112_no_sata_irq */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
				  SIL_FLAG_NO_SATA_IRQ,
		.link_flags	= SIL_DFL_LINK_FLAGS,
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3512 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.link_flags	= SIL_DFL_LINK_FLAGS,
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3114 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.link_flags	= SIL_DFL_LINK_FLAGS,
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
};

/* per-port register offsets */
/* TODO: we can probably calculate rather than use a table */
static const struct {
	unsigned long tf;	/* ATA taskfile register block */
	unsigned long ctl;	/* ATA control/altstatus register block */
	unsigned long bmdma;	/* DMA register block */
	unsigned long bmdma2;	/* DMA register block #2 */
	unsigned long fifo_cfg;	/* FIFO Valid Byte Count and Control */
	unsigned long scr;	/* SATA control register block */
	unsigned long sien;	/* SATA Interrupt Enable register */
	unsigned long xfer_mode;/* data transfer mode register */
	unsigned long sfis_cfg;	/* SATA FIS reception config register */
} sil_port[] = {
	/* port 0 ... */
	/*    tf    ctl  bmdma  bmdma2  fifo    scr   sien   mode   sfis */
	{  0x80,  0x8A,   0x0,  0x10,  0x40, 0x100, 0x148,  0xb4, 0x14c },
	{  0xC0,  0xCA,   0x8,  0x18,  0x44, 0x180, 0x1c8,  0xf4, 0x1cc },
	{ 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
	{ 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
	/* ... port 3 */
};
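
/*
 * A minimal sketch of how the TODO above could be addressed: the offsets
 * in sil_port[] follow a regular pattern (the odd port of each pair sits a
 * fixed distance from the even one, and the second port pair is 0x200
 * higher), so they could be computed instead of tabulated.  The helper
 * below is only an illustration derived from the table values; it is not
 * used anywhere in this driver.
 */
static inline unsigned long sil_calc_tf_offset(unsigned int port_no)
{
	/* 0x80, 0xC0, 0x280, 0x2C0 for ports 0-3 */
	return 0x80 + (port_no & 1) * 0x40 + (port_no & 2) * 0x100;
}
/*
 * The remaining blocks hang off the same pattern, e.g.:
 *	ctl       = tf + 0xa
 *	bmdma     = (port_no & 1) * 0x8 + (port_no & 2) * 0x100
 *	bmdma2    = bmdma + 0x10
 *	fifo_cfg  = 0x40 + (port_no & 1) * 0x4 + (port_no & 2) * 0x100
 *	scr       = 0x100 + (port_no & 1) * 0x80 + (port_no & 2) * 0x100
 *	sien      = scr + 0x48
 *	xfer_mode = tf + 0x34
 *	sfis_cfg  = scr + 0x4c
 */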

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int slow_down = 0;
module_param(slow_down, int, 0444);
MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");
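
/*
 * Example of flipping the switch above (standard Linux module-parameter
 * handling, nothing specific to this driver): load with
 * "modprobe sata_sil slow_down=1", or boot with "sata_sil.slow_down=1"
 * on the kernel command line when the driver is built in.
 */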


static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
{
	u8 cache_line = 0;
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line);
	return cache_line;
}

/**
 *	sil_set_mode - wrap set_mode functions
 *	@link: link to set up
 *	@r_failed: returned device when we fail
 *
 *	Wrap the libata method for device setup as after the setup we need
 *	to inspect the results and do some configuration work.
 */

static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed)
{
	struct ata_port *ap = link->ap;
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode;
	struct ata_device *dev;
	u32 tmp, dev_mode[2] = { };
	int rc;

	rc = ata_do_set_mode(link, r_failed);
	if (rc)
		return rc;

	ata_link_for_each_dev(dev, link) {
		if (!ata_dev_enabled(dev))
			dev_mode[dev->devno] = 0;	/* PIO0/1/2 */
		else if (dev->flags & ATA_DFLAG_PIO)
			dev_mode[dev->devno] = 1;	/* PIO3/4 */
		else
			dev_mode[dev->devno] = 3;	/* UDMA */
		/* value 2 indicates MDMA */
	}

	tmp = readl(addr);
	tmp &= ~((1 << 5) | (1 << 4) | (1 << 1) | (1 << 0));
	tmp |= dev_mode[0];
	tmp |= (dev_mode[1] << 4);
	writel(tmp, addr);
	readl(addr);	/* flush */
	return 0;
}

static inline void __iomem *sil_scr_addr(struct ata_port *ap, unsigned int sc_reg)
{
	void __iomem *offset = ap->ioaddr.scr_addr;

	switch (sc_reg) {
	case SCR_STATUS:
		return offset + 4;
	case SCR_ERROR:
		return offset + 8;
	case SCR_CONTROL:
		return offset;
	default:
		/* do nothing */
		break;
	}

	return NULL;
}

static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
	void __iomem *mmio = sil_scr_addr(ap, sc_reg);

	if (mmio) {
		*val = readl(mmio);
		return 0;
	}
	return -EINVAL;
}

static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	void __iomem *mmio = sil_scr_addr(ap, sc_reg);

	if (mmio) {
		writel(val, mmio);
		return 0;
	}
	return -EINVAL;
}

static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
	u8 status;

	if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
		u32 serror;

		/* SIEN doesn't mask SATA IRQs on some 3112s.  Those
		 * controllers continue to assert IRQ as long as
		 * SError bits are pending.  Clear SError immediately.
		 */
		sil_scr_read(ap, SCR_ERROR, &serror);
		sil_scr_write(ap, SCR_ERROR, serror);

		/* Trigger hotplug and accumulate SError only if the
		 * port isn't already frozen.  Otherwise, PHY events
		 * during hardreset make controllers with broken SIEN
		 * repeat probing needlessly.
		 */
		if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
			ata_ehi_hotplugged(&ap->link.eh_info);
			ap->link.eh_info.serror |= serror;
		}

		goto freeze;
	}

	if (unlikely(!qc))
		goto freeze;

	if (unlikely(qc->tf.flags & ATA_TFLAG_POLLING)) {
		/* this sometimes happens, just clear IRQ */
		ata_chk_status(ap);
		return;
	}

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag is set only for ATAPI devices, so there is
		 * no need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto err_hsm;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (bmdma2 & SIL_DMA_ERROR) {
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto err_hsm;
	}

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto err_hsm;

	/* ack bmdma irq events */
	ata_bmdma_irq_clear(ap);

	/* kick HSM in the ass */
	ata_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);

	return;

 err_hsm:
	qc->err_mask |= AC_ERR_HSM;
 freeze:
	ata_port_freeze(ap);
}

static irqreturn_t sil_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
	int handled = 0;
	int i;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);

		if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
			continue;

		/* turn off SATA_IRQ if not supported */
		if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
			bmdma2 &= ~SIL_DMA_SATA_IRQ;

		if (bmdma2 == 0xffffffff ||
		    !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
			continue;

		sil_host_intr(ap, bmdma2);
		handled = 1;
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void sil_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	u32 tmp;

	/* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
	writel(0, mmio_base + sil_port[ap->port_no].sien);

	/* plug IRQ */
	tmp = readl(mmio_base + SIL_SYSCFG);
	tmp |= SIL_MASK_IDE0_INT << ap->port_no;
	writel(tmp, mmio_base + SIL_SYSCFG);
	readl(mmio_base + SIL_SYSCFG);	/* flush */
}

static void sil_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	u32 tmp;

	/* clear IRQ */
	ata_chk_status(ap);
	ata_bmdma_irq_clear(ap);

	/* turn on SATA IRQ if supported */
	if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
		writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);

	/* turn on IRQ */
	tmp = readl(mmio_base + SIL_SYSCFG);
	tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
	writel(tmp, mmio_base + SIL_SYSCFG);
}

/**
 *	sil_dev_config - Apply device/host-specific errata fixups
 *	@dev: Device to be examined
 *
 *	After the IDENTIFY [PACKET] DEVICE step is complete, and a
 *	device is known to be present, this function is called.
 *	We apply two errata fixups which are specific to Silicon Image,
 *	a Seagate and a Maxtor fixup.
 *
 *	For certain Seagate devices, we must limit the maximum sectors
 *	to under 8K.
 *
 *	For certain Maxtor devices, we must not program the drive
 *	beyond udma5.
 *
 *	Both fixups are unfairly pessimistic.  As soon as I get more
 *	information on these errata, I will create a more exhaustive
 *	list, and apply the fixups to only the specific
 *	devices/hosts/firmwares that need it.
 *
 *	20040111 - Seagate drives affected by the Mod15Write bug are blacklisted
 *	The Maxtor quirk is in the blacklist, but I'm keeping the original
 *	pessimistic fix for the following reasons...
 *	- There seems to be less info on it, only one device gleaned off the
 *	  Windows driver, maybe only one is affected.  More info would be
 *	  greatly appreciated.
 *	- But then again UDMA5 is hardly anything to complain about
 */
static void sil_dev_config(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	int print_info = ap->link.eh_context.i.flags & ATA_EHI_PRINTINFO;
	unsigned int n, quirks = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	for (n = 0; sil_blacklist[n].product; n++)
		if (!strcmp(sil_blacklist[n].product, model_num)) {
			quirks = sil_blacklist[n].quirk;
			break;
		}

	/* limit requests to 15 sectors */
	if (slow_down ||
	    ((ap->flags & SIL_FLAG_MOD15WRITE) &&
	     (quirks & SIL_QUIRK_MOD15WRITE))) {
		if (print_info)
			ata_dev_printk(dev, KERN_INFO, "applying Seagate "
				       "errata fix (mod15write workaround)\n");
		dev->max_sectors = 15;
		return;
	}

	/* limit to udma5 */
	if (quirks & SIL_QUIRK_UDMA5MAX) {
		if (print_info)
			ata_dev_printk(dev, KERN_INFO, "applying Maxtor "
				       "errata fix %s\n", model_num);
		dev->udma_mask &= ATA_UDMA5;
		return;
	}
}

static void sil_init_controller(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
	u8 cls;
	u32 tmp;
	int i;

	/* Initialize FIFO PCI bus arbitration */
	cls = sil_get_device_cache_line(pdev);
	if (cls) {
		cls >>= 3;
		cls++;	/* cls = (line_size/8)+1 */
		for (i = 0; i < host->n_ports; i++)
			writew(cls << 8 | cls,
			       mmio_base + sil_port[i].fifo_cfg);
	} else
		dev_printk(KERN_WARNING, &pdev->dev,
			   "cache line size not set.  Driver may not function\n");
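	/*
	 * Worked example of the arithmetic above (assuming a 64-byte CPU
	 * cache line): PCI_CACHE_LINE_SIZE holds the size in 32-bit words,
	 * so it reads 0x10; cls becomes (0x10 >> 3) + 1 = 3, and 0x0303 is
	 * written to each port's FIFO config register.
	 */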

	/* Apply R_ERR on DMA activate FIS errata workaround */
	if (host->ports[0]->flags & SIL_FLAG_RERR_ON_DMA_ACT) {
		int cnt;

		for (i = 0, cnt = 0; i < host->n_ports; i++) {
			tmp = readl(mmio_base + sil_port[i].sfis_cfg);
			if ((tmp & 0x3) != 0x01)
				continue;
			if (!cnt)
				dev_printk(KERN_INFO, &pdev->dev,
					   "Applying R_ERR on DMA activate "
					   "FIS errata fix\n");
			writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
			cnt++;
		}
	}

	if (host->n_ports == 4) {
		/* flip the magic "make 4 ports work" bit */
		tmp = readl(mmio_base + sil_port[2].bmdma);
		if ((tmp & SIL_INTR_STEERING) == 0)
			writel(tmp | SIL_INTR_STEERING,
			       mmio_base + sil_port[2].bmdma);
	}
}

static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	int board_id = ent->driver_data;
	const struct ata_port_info *ppi[] = { &sil_port_info[board_id], NULL };
	struct ata_host *host;
	void __iomem *mmio_base;
	int n_ports, rc;
	unsigned int i;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = 2;
	if (board_id == sil_3114)
		n_ports = 4;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	if (!host)
		return -ENOMEM;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << SIL_MMIO_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	mmio_base = host->iomap[SIL_MMIO_BAR];

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_ioports *ioaddr = &ap->ioaddr;

		ioaddr->cmd_addr = mmio_base + sil_port[i].tf;
		ioaddr->altstatus_addr =
		ioaddr->ctl_addr = mmio_base + sil_port[i].ctl;
		ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma;
		ioaddr->scr_addr = mmio_base + sil_port[i].scr;
		ata_std_ports(ioaddr);

		ata_port_pbar_desc(ap, SIL_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, SIL_MMIO_BAR, sil_port[i].tf, "tf");
	}

	/* initialize and activate */
	sil_init_controller(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, sil_interrupt, IRQF_SHARED,
				 &sil_sht);
}

#ifdef CONFIG_PM
static int sil_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	sil_init_controller(host);
	ata_host_resume(host);

	return 0;
}
#endif

static int __init sil_init(void)
{
	return pci_register_driver(&sil_pci_driver);
}

static void __exit sil_exit(void)
{
	pci_unregister_driver(&sil_pci_driver);
}


module_init(sil_init);
module_exit(sil_exit);