/*
 *  sata_sil.c - Silicon Image SATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2005 Red Hat, Inc.
 *  Copyright 2003 Benjamin Herrenschmidt
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Documentation for SiI 3112:
 *  http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
 *
 *  Other errata and documentation available under NDA.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_sil"
#define DRV_VERSION	"2.2"

enum {
	SIL_MMIO_BAR		= 5,

	/*
	 * host flags
	 */
	SIL_FLAG_NO_SATA_IRQ	= (1 << 28),
	SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
	SIL_FLAG_MOD15WRITE	= (1 << 30),

	SIL_DFL_PORT_FLAGS	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_HRST_TO_RESUME,

	/*
	 * Controller IDs
	 */
	sil_3112		= 0,
	sil_3112_no_sata_irq	= 1,
	sil_3512		= 2,
	sil_3114		= 3,

	/*
	 * Register offsets
	 */
	SIL_SYSCFG		= 0x48,

	/*
	 * Register bits
	 */
	/* SYSCFG */
	SIL_MASK_IDE0_INT	= (1 << 22),
	SIL_MASK_IDE1_INT	= (1 << 23),
	SIL_MASK_IDE2_INT	= (1 << 24),
	SIL_MASK_IDE3_INT	= (1 << 25),
	SIL_MASK_2PORT		= SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT,
	SIL_MASK_4PORT		= SIL_MASK_2PORT |
				  SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,

	/* BMDMA/BMDMA2 */
	SIL_INTR_STEERING	= (1 << 1),

	SIL_DMA_ENABLE		= (1 << 0),	/* DMA run switch */
	SIL_DMA_RDWR		= (1 << 3),	/* DMA Rd-Wr */
	SIL_DMA_SATA_IRQ	= (1 << 4),	/* OR of all SATA IRQs */
	SIL_DMA_ACTIVE		= (1 << 16),	/* DMA running */
	SIL_DMA_ERROR		= (1 << 17),	/* PCI bus error */
	SIL_DMA_COMPLETE	= (1 << 18),	/* cmd complete / IRQ pending */
	SIL_DMA_N_SATA_IRQ	= (1 << 6),	/* SATA_IRQ for the next channel */
	SIL_DMA_N_ACTIVE	= (1 << 24),	/* ACTIVE for the next channel */
	SIL_DMA_N_ERROR		= (1 << 25),	/* ERROR for the next channel */
	SIL_DMA_N_COMPLETE	= (1 << 26),	/* COMPLETE for the next channel */

	/* SIEN */
	SIL_SIEN_N		= (1 << 16),	/* triggered by SError.N */

	/*
	 * Others
	 */
	SIL_QUIRK_MOD15WRITE	= (1 << 0),
	SIL_QUIRK_UDMA5MAX	= (1 << 1),
};

static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int sil_pci_device_resume(struct pci_dev *pdev);
#endif
static void sil_dev_config(struct ata_device *dev);
static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
static int sil_set_mode(struct ata_port *ap, struct ata_device **r_failed);
static void sil_freeze(struct ata_port *ap);
static void sil_thaw(struct ata_port *ap);


static const struct pci_device_id sil_pci_tbl[] = {
	{ PCI_VDEVICE(CMD, 0x3112), sil_3112 },
	{ PCI_VDEVICE(CMD, 0x0240), sil_3112 },
	{ PCI_VDEVICE(CMD, 0x3512), sil_3512 },
	{ PCI_VDEVICE(CMD, 0x3114), sil_3114 },
	{ PCI_VDEVICE(ATI, 0x436e), sil_3112 },
	{ PCI_VDEVICE(ATI, 0x4379), sil_3112_no_sata_irq },
	{ PCI_VDEVICE(ATI, 0x437a), sil_3112_no_sata_irq },

	{ }	/* terminate list */
};


/* TODO firmware versions should be added - eric */
static const struct sil_drivelist {
	const char	*product;
	unsigned int	quirk;
} sil_blacklist[] = {
	{ "ST320012AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST330013AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST340017AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST360015AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST380023AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST3120023AS",	SIL_QUIRK_MOD15WRITE },
	{ "ST340014ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST360014ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST380011ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST3120022ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST3160021ASL",	SIL_QUIRK_MOD15WRITE },
	{ "Maxtor 4D060H3",	SIL_QUIRK_UDMA5MAX },
	{ }
};

static struct pci_driver sil_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= sil_pci_tbl,
	.probe			= sil_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= sil_pci_device_resume,
#endif
};

static struct scsi_host_template sil_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations sil_ops = {
	.port_disable		= ata_port_disable,
	.dev_config		= sil_dev_config,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,
	.set_mode		= sil_set_mode,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.data_xfer		= ata_data_xfer,
	.freeze			= sil_freeze,
	.thaw			= sil_thaw,
	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= sil_scr_read,
	.scr_write		= sil_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_info sil_port_info[] = {
	/* sil_3112 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
		.pio_mask	= 0x1f,		/* pio0-4 */
		.mwdma_mask	= 0x07,		/* mwdma0-2 */
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3112_no_sata_irq */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
				  SIL_FLAG_NO_SATA_IRQ,
		.pio_mask	= 0x1f,		/* pio0-4 */
		.mwdma_mask	= 0x07,		/* mwdma0-2 */
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3512 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.pio_mask	= 0x1f,		/* pio0-4 */
		.mwdma_mask	= 0x07,		/* mwdma0-2 */
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3114 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.pio_mask	= 0x1f,		/* pio0-4 */
		.mwdma_mask	= 0x07,		/* mwdma0-2 */
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
};

/* per-port register offsets */
/* TODO: we can probably calculate rather than use a table */
static const struct {
	unsigned long tf;	/* ATA taskfile register block */
	unsigned long ctl;	/* ATA control/altstatus register block */
	unsigned long bmdma;	/* DMA register block */
	unsigned long bmdma2;	/* DMA register block #2 */
	unsigned long fifo_cfg;	/* FIFO Valid Byte Count and Control */
	unsigned long scr;	/* SATA control register block */
	unsigned long sien;	/* SATA Interrupt Enable register */
	unsigned long xfer_mode;/* data transfer mode register */
	unsigned long sfis_cfg;	/* SATA FIS reception config register */
} sil_port[] = {
	/* port 0 ... */
	/*   tf    ctl   bmdma  bmdma2  fifo   scr    sien   mode   sfis */
	{  0x80,  0x8A,   0x0,  0x10,  0x40, 0x100, 0x148,  0xb4, 0x14c },
	{  0xC0,  0xCA,   0x8,  0x18,  0x44, 0x180, 0x1c8,  0xf4, 0x1cc },
	{ 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
	{ 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
	/* ... port 3 */
};
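/*
 * The offsets above follow a regular pattern: ports 2/3 mirror ports 0/1
 * shifted up by 0x200, and the second port of each pair adds a per-block
 * stride (for example, tf = 0x80 + (port & 1) * 0x40 + (port >> 1) * 0x200).
 * This is only an observation about the table; the table itself remains the
 * authoritative source of the offsets.
 */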
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int slow_down = 0;
module_param(slow_down, int, 0444);
MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");


static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
{
	u8 cache_line = 0;

	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line);
	return cache_line;
}

/**
 *	sil_set_mode		-	wrap set_mode functions
 *	@ap: port to set up
 *	@r_failed: returned device when we fail
 *
 *	Wrap the libata method for device setup, as after the setup we need
 *	to inspect the results and do some configuration work.
 */

static int sil_set_mode(struct ata_port *ap, struct ata_device **r_failed)
{
	struct ata_host *host = ap->host;
	struct ata_device *dev;
	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
	void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode;
	u32 tmp, dev_mode[2];
	unsigned int i;
	int rc;

	rc = ata_do_set_mode(ap, r_failed);
	if (rc)
		return rc;

	for (i = 0; i < 2; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			dev_mode[i] = 0;	/* PIO0/1/2 */
		else if (dev->flags & ATA_DFLAG_PIO)
			dev_mode[i] = 1;	/* PIO3/4 */
		else
			dev_mode[i] = 3;	/* UDMA */
		/* value 2 indicates MDMA */
	}

	tmp = readl(addr);
	tmp &= ~((1 << 5) | (1 << 4) | (1 << 1) | (1 << 0));
	tmp |= dev_mode[0];
	tmp |= (dev_mode[1] << 4);
	writel(tmp, addr);
	readl(addr);	/* flush */
	return 0;
}
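/*
 * For illustration of the xfer_mode write in sil_set_mode(): bits 1:0 hold
 * device 0's mode class and bits 5:4 hold device 1's (0 = PIO0-2, 1 = PIO3/4,
 * 2 = MDMA, 3 = UDMA), so a UDMA-capable device 0 with no device 1 attached
 * leaves the low six bits programmed to 0x03.
 */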
static inline void __iomem *sil_scr_addr(struct ata_port *ap, unsigned int sc_reg)
{
	void __iomem *offset = ap->ioaddr.scr_addr;

	switch (sc_reg) {
	case SCR_STATUS:
		return offset + 4;
	case SCR_ERROR:
		return offset + 8;
	case SCR_CONTROL:
		return offset;
	default:
		/* do nothing */
		break;
	}

	return NULL;
}

static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
	void __iomem *mmio = sil_scr_addr(ap, sc_reg);

	if (mmio) {
		*val = readl(mmio);
		return 0;
	}
	return -EINVAL;
}

static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	void __iomem *mmio = sil_scr_addr(ap, sc_reg);

	if (mmio) {
		writel(val, mmio);
		return 0;
	}
	return -EINVAL;
}
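/*
 * Per-port interrupt service.  @bmdma2 is the port's BMDMA2 status word:
 * SIL_DMA_SATA_IRQ flags a SATA/PHY event (SError bits pending),
 * SIL_DMA_ERROR a PCI bus error and SIL_DMA_COMPLETE a completed command
 * (see the register bit definitions near the top of this file).
 */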
static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
{
	struct ata_eh_info *ehi = &ap->eh_info;
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
	u8 status;

	if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
		u32 serror;

		/* SIEN doesn't mask SATA IRQs on some 3112s.  Those
		 * controllers continue to assert IRQ as long as
		 * SError bits are pending.  Clear SError immediately.
		 */
		sil_scr_read(ap, SCR_ERROR, &serror);
		sil_scr_write(ap, SCR_ERROR, serror);

		/* Trigger hotplug and accumulate SError only if the
		 * port isn't already frozen.  Otherwise, PHY events
		 * during hardreset make controllers with broken SIEN
		 * repeat probing needlessly.
		 */
		if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
			ata_ehi_hotplugged(&ap->eh_info);
			ap->eh_info.serror |= serror;
		}

		goto freeze;
	}

	if (unlikely(!qc))
		goto freeze;

	if (unlikely(qc->tf.flags & ATA_TFLAG_POLLING)) {
		/* this sometimes happens, just clear IRQ */
		ata_chk_status(ap);
		return;
	}

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for ATAPI devices.  No need
		 * to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto err_hsm;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (bmdma2 & SIL_DMA_ERROR) {
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto err_hsm;
	}

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto err_hsm;

	/* ack bmdma irq events */
	ata_bmdma_irq_clear(ap);

	/* kick HSM in the ass */
	ata_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);

	return;

err_hsm:
	qc->err_mask |= AC_ERR_HSM;
freeze:
	ata_port_freeze(ap);
}

static irqreturn_t sil_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
	int handled = 0;
	int i;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		u32 bmdma2;

		/* check the port before touching its registers */
		if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
			continue;

		bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);

		/* turn off SATA_IRQ if not supported */
		if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
			bmdma2 &= ~SIL_DMA_SATA_IRQ;

		if (bmdma2 == 0xffffffff ||
		    !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
			continue;

		sil_host_intr(ap, bmdma2);
		handled = 1;
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void sil_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	u32 tmp;

	/* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
	writel(0, mmio_base + sil_port[ap->port_no].sien);

	/* plug IRQ */
	tmp = readl(mmio_base + SIL_SYSCFG);
	tmp |= SIL_MASK_IDE0_INT << ap->port_no;
	writel(tmp, mmio_base + SIL_SYSCFG);
	readl(mmio_base + SIL_SYSCFG);	/* flush */
}

static void sil_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	u32 tmp;

	/* clear IRQ */
	ata_chk_status(ap);
	ata_bmdma_irq_clear(ap);

	/* turn on SATA IRQ if supported */
	if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
		writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);

	/* turn on IRQ */
	tmp = readl(mmio_base + SIL_SYSCFG);
	tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
	writel(tmp, mmio_base + SIL_SYSCFG);
}
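/*
 * Note on the SYSCFG manipulation above: sil_freeze()/sil_thaw() shift
 * SIL_MASK_IDE0_INT by the port number, so freezing port 2 on a 3114, for
 * example, sets bit 24 (SIL_MASK_IDE2_INT).
 */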
/**
 *	sil_dev_config - Apply device/host-specific errata fixups
 *	@dev: Device to be examined
 *
 *	After the IDENTIFY [PACKET] DEVICE step is complete, and a
 *	device is known to be present, this function is called.
 *	We apply two errata fixups which are specific to Silicon Image,
 *	a Seagate and a Maxtor fixup.
 *
 *	For certain Seagate devices, we must limit the maximum sectors
 *	to under 8K.
 *
 *	For certain Maxtor devices, we must not program the drive
 *	beyond udma5.
 *
 *	Both fixups are unfairly pessimistic.  As soon as I get more
 *	information on these errata, I will create a more exhaustive
 *	list, and apply the fixups to only the specific
 *	devices/hosts/firmwares that need it.
 *
 *	20040111 - Seagate drives affected by the Mod15Write bug are
 *	blacklisted.  The Maxtor quirk is in the blacklist, but I'm keeping
 *	the original pessimistic fix for the following reasons...
 *	- There seems to be less info on it, only one device gleaned off the
 *	Windows driver, maybe only one is affected.  More info would be
 *	greatly appreciated.
 *	- But then again UDMA5 is hardly anything to complain about.
 */
static void sil_dev_config(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
	unsigned int n, quirks = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	for (n = 0; sil_blacklist[n].product; n++)
		if (!strcmp(sil_blacklist[n].product, model_num)) {
			quirks = sil_blacklist[n].quirk;
			break;
		}

	/* limit requests to 15 sectors */
	if (slow_down ||
	    ((ap->flags & SIL_FLAG_MOD15WRITE) &&
	     (quirks & SIL_QUIRK_MOD15WRITE))) {
		if (print_info)
			ata_dev_printk(dev, KERN_INFO, "applying Seagate "
				       "errata fix (mod15write workaround)\n");
		dev->max_sectors = 15;
		return;
	}

	/* limit to udma5 */
	if (quirks & SIL_QUIRK_UDMA5MAX) {
		if (print_info)
			ata_dev_printk(dev, KERN_INFO, "applying Maxtor "
				       "errata fix %s\n", model_num);
		dev->udma_mask &= ATA_UDMA5;
		return;
	}
}

static void sil_init_controller(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
	u8 cls;
	u32 tmp;
	int i;

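	/*
	 * The FIFO config write below packs the derived value into both
	 * bytes of a 16-bit register.  For example, a PCI cache line size
	 * register value of 0x10 (16 dwords, i.e. 64 bytes) gives
	 * cls = (0x10 >> 3) + 1 = 3, so 0x0303 is written for each port.
	 */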
	/* Initialize FIFO PCI bus arbitration */
	cls = sil_get_device_cache_line(pdev);
	if (cls) {
		cls >>= 3;
		cls++;			/* cls = (line_size/8)+1 */
		for (i = 0; i < host->n_ports; i++)
			writew(cls << 8 | cls,
			       mmio_base + sil_port[i].fifo_cfg);
	} else
		dev_printk(KERN_WARNING, &pdev->dev,
			   "cache line size not set.  Driver may not function\n");

	/* Apply R_ERR on DMA activate FIS errata workaround */
	if (host->ports[0]->flags & SIL_FLAG_RERR_ON_DMA_ACT) {
		int cnt;

		for (i = 0, cnt = 0; i < host->n_ports; i++) {
			tmp = readl(mmio_base + sil_port[i].sfis_cfg);
			if ((tmp & 0x3) != 0x01)
				continue;
			if (!cnt)
				dev_printk(KERN_INFO, &pdev->dev,
					   "Applying R_ERR on DMA activate "
					   "FIS errata fix\n");
			writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
			cnt++;
		}
	}

	if (host->n_ports == 4) {
		/* flip the magic "make 4 ports work" bit */
		tmp = readl(mmio_base + sil_port[2].bmdma);
		if ((tmp & SIL_INTR_STEERING) == 0)
			writel(tmp | SIL_INTR_STEERING,
			       mmio_base + sil_port[2].bmdma);
	}
}

static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	int board_id = ent->driver_data;
	const struct ata_port_info *ppi[] = { &sil_port_info[board_id], NULL };
	struct ata_host *host;
	void __iomem *mmio_base;
	int n_ports, rc;
	unsigned int i;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = 2;
	if (board_id == sil_3114)
		n_ports = 4;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	if (!host)
		return -ENOMEM;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << SIL_MMIO_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	mmio_base = host->iomap[SIL_MMIO_BAR];

	for (i = 0; i < host->n_ports; i++) {
		struct ata_ioports *ioaddr = &host->ports[i]->ioaddr;

		ioaddr->cmd_addr = mmio_base + sil_port[i].tf;
		ioaddr->altstatus_addr =
		ioaddr->ctl_addr = mmio_base + sil_port[i].ctl;
		ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma;
		ioaddr->scr_addr = mmio_base + sil_port[i].scr;
		ata_std_ports(ioaddr);
	}

	/* initialize and activate */
	sil_init_controller(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, sil_interrupt, IRQF_SHARED,
				 &sil_sht);
}

#ifdef CONFIG_PM
static int sil_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	sil_init_controller(host);
	ata_host_resume(host);

	return 0;
}
#endif

static int __init sil_init(void)
{
	return pci_register_driver(&sil_pci_driver);
}

static void __exit sil_exit(void)
{
	pci_unregister_driver(&sil_pci_driver);
}


module_init(sil_init);
module_exit(sil_exit);