/*
 * sata_via.c - VIA Serial ATA controllers
 *
 * Maintained by: Tejun Heo <tj@kernel.org>
 * Please ALWAYS copy linux-ide@vger.kernel.org
 * on emails.
 *
 * Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available under NDA.
 *
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_via"
#define DRV_VERSION	"2.6"

/*
 * vt8251 is different from the other VIA SATA controllers: it has two
 * channels, and each channel has both a Master and a Slave slot.
 */
enum board_ids_enum {
	vt6420,
	vt6421,
	vt8251,
};

enum {
	SATA_CHAN_ENAB		= 0x40, /* SATA channel enable */
	SATA_INT_GATE		= 0x41, /* SATA interrupt gating */
	SATA_NATIVE_MODE	= 0x42, /* Native mode enable */
	SVIA_MISC_3		= 0x46, /* Miscellaneous Control III */
	PATA_UDMA_TIMING	= 0xB3, /* PATA timing for DMA/cable detect */
	PATA_PIO_TIMING		= 0xAB, /* PATA timing register */

	PORT0			= (1 << 1),
	PORT1			= (1 << 0),
	ALL_PORTS		= PORT0 | PORT1,

	NATIVE_MODE_ALL		= (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),

	SATA_EXT_PHY		= (1 << 6), /* 0==use PATA, 1==ext phy */

	SATA_HOTPLUG		= (1 << 5), /* enable IRQ on hotplug */
};

struct svia_priv {
	bool			wd_workaround;
};

static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int svia_pci_device_resume(struct pci_dev *pdev);
#endif
static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val);
static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val);
static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
static void svia_noop_freeze(struct ata_port *ap);
static int vt6420_prereset(struct ata_link *link, unsigned long deadline);
static void vt6420_bmdma_start(struct ata_queued_cmd *qc);
static int vt6421_pata_cable_detect(struct ata_port *ap);
static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_error_handler(struct ata_port *ap);

static const struct pci_device_id svia_pci_tbl[] = {
	{ PCI_VDEVICE(VIA, 0x5337), vt6420 },
	{ PCI_VDEVICE(VIA, 0x0591), vt6420 },	/* 2 sata chnls (Master) */
	{ PCI_VDEVICE(VIA, 0x3149), vt6420 },	/* 2 sata chnls (Master) */
	{ PCI_VDEVICE(VIA, 0x3249), vt6421 },	/* 2 sata chnls, 1 pata chnl */
	{ PCI_VDEVICE(VIA, 0x5372), vt6420 },
	{ PCI_VDEVICE(VIA, 0x7372), vt6420 },
	{ PCI_VDEVICE(VIA, 0x5287), vt8251 },	/* 2 sata chnls (Master/Slave) */
	{ PCI_VDEVICE(VIA, 0x9000), vt8251 },

	{ }	/* terminate list */
};

static struct pci_driver svia_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= svia_pci_tbl,
	.probe			= svia_init_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= svia_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};

static struct scsi_host_template svia_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations svia_base_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.sff_tf_load		= svia_tf_load,
};

static struct ata_port_operations vt6420_sata_ops = {
	.inherits		= &svia_base_ops,
	.freeze			= svia_noop_freeze,
	.prereset		= vt6420_prereset,
	.bmdma_start		= vt6420_bmdma_start,
};

static struct ata_port_operations vt6421_pata_ops = {
	.inherits		= &svia_base_ops,
	.cable_detect		= vt6421_pata_cable_detect,
	.set_piomode		= vt6421_set_pio_mode,
	.set_dmamode		= vt6421_set_dma_mode,
};

static struct ata_port_operations vt6421_sata_ops = {
	.inherits		= &svia_base_ops,
	.scr_read		= svia_scr_read,
	.scr_write		= svia_scr_write,
	.error_handler		= vt6421_error_handler,
};

static struct ata_port_operations vt8251_ops = {
	.inherits		= &svia_base_ops,
	.hardreset		= sata_std_hardreset,
	.scr_read		= vt8251_scr_read,
	.scr_write		= vt8251_scr_write,
};

static const struct ata_port_info vt6420_port_info = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6420_sata_ops,
};

static struct ata_port_info vt6421_sport_info = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6421_sata_ops,
};

static struct ata_port_info vt6421_pport_info = {
	.flags		= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	/* No MWDMA */
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6421_pata_ops,
};

static struct ata_port_info vt8251_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt8251_ops,
};

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	*val = ioread32(link->ap->ioaddr.scr_addr + (4 * sc_reg));
	return 0;
}

static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	iowrite32(val, link->ap->ioaddr.scr_addr + (4 * sc_reg));
	return 0;
}
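
/*
 * The vt8251 SCR accessors below go through PCI configuration space
 * rather than BAR 5.  Judging from the offsets used here (not from
 * public documentation), the per-"slot" layout appears to be, with
 * slot = 2 * port_no + pmp:
 *
 *	SStatus		config byte  0xA0 + slot
 *	SControl	config byte  0xA4 + slot
 *	SError		config dword 0xB0 + slot * 4	(device 0x5287 only)
 *
 * For example, the Slave device on the second channel (port_no == 1,
 * pmp == 1) maps to slot 3, so its SStatus is read from config byte 0xA3.
 */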

static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
{
	static const u8 ipm_tbl[] = { 1, 2, 6, 0 };
	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
	int slot = 2 * link->ap->port_no + link->pmp;
	u32 v = 0;
	u8 raw;

	switch (scr) {
	case SCR_STATUS:
		pci_read_config_byte(pdev, 0xA0 + slot, &raw);

		/* read the DET field, bits 0 and 1 of the config byte */
		v |= raw & 0x03;

		/* read the SPD field, bit 4 of the config byte */
		if (raw & (1 << 4))
			v |= 0x02 << 4;
		else
			v |= 0x01 << 4;

		/* read the IPM field, bits 2 and 3 of the config byte */
		v |= ipm_tbl[(raw >> 2) & 0x3];
		break;

	case SCR_ERROR:
		/* devices other than 5287 use 0xA8 as base */
		WARN_ON(pdev->device != 0x5287);
		pci_read_config_dword(pdev, 0xB0 + slot * 4, &v);
		break;

	case SCR_CONTROL:
		pci_read_config_byte(pdev, 0xA4 + slot, &raw);

		/* read the DET field, bits 0 and 1 */
		v |= ((raw & 0x02) << 1) | (raw & 0x01);

		/* read the IPM field, bits 2 and 3 */
		v |= ((raw >> 2) & 0x03) << 8;
		break;

	default:
		return -EINVAL;
	}

	*val = v;
	return 0;
}

static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val)
{
	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
	int slot = 2 * link->ap->port_no + link->pmp;
	u32 v = 0;

	switch (scr) {
	case SCR_ERROR:
		/* devices other than 5287 use 0xA8 as base */
		WARN_ON(pdev->device != 0x5287);
		pci_write_config_dword(pdev, 0xB0 + slot * 4, val);
		return 0;

	case SCR_CONTROL:
		/* set the DET field */
		v |= ((val & 0x4) >> 1) | (val & 0x1);

		/* set the IPM field */
		v |= ((val >> 8) & 0x3) << 2;

		pci_write_config_byte(pdev, 0xA4 + slot, v);
		return 0;

	default:
		return -EINVAL;
	}
}

/**
 *	svia_tf_load - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller.
 *
 *	This works around an internal bug of VIA chipsets, which reset
 *	the device register after the IEN bit in the ctl register is
 *	changed.
 */
static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_taskfile ttf;

	if (tf->ctl != ap->last_ctl) {
		ttf = *tf;
		ttf.flags |= ATA_TFLAG_DEVICE;
		tf = &ttf;
	}
	ata_sff_tf_load(ap, tf);
}

static void svia_noop_freeze(struct ata_port *ap)
{
	/* Some VIA controllers choke if ATA_NIEN is manipulated in a
	 * certain way.  Leave it alone and just clear pending IRQ.
	 */
	ap->ops->sff_check_status(ap);
	ata_bmdma_irq_clear(ap);
}

/**
 *	vt6420_prereset - prereset for vt6420
 *	@link: target ATA link
 *	@deadline: deadline jiffies for the operation
 *
 *	SCR registers on vt6420 are pieces of shit and may hang the
 *	whole machine completely if accessed with the wrong timing.
 *	To avoid such catastrophe, vt6420 doesn't provide generic SCR
 *	access operations, but uses SStatus and SControl only during
 *	boot probing in a controlled way.
 *
 *	As the old (pre EH update) probing code is proven to work, we
 *	strictly follow the access pattern.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &ap->link.eh_context;
	unsigned long timeout = jiffies + (HZ * 5);
	u32 sstatus, scontrol;
	int online;

	/* don't do any SCR stuff if we're not loading */
	if (!(ap->pflags & ATA_PFLAG_LOADING))
		goto skip_scr;

	/* Resume phy.  This is the old SATA resume sequence */
	svia_scr_write(link, SCR_CONTROL, 0x300);
	svia_scr_read(link, SCR_CONTROL, &scontrol); /* flush */

	/* wait for phy to become ready, if necessary */
	do {
		ata_msleep(link->ap, 200);
		svia_scr_read(link, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* open code sata_print_link_status() */
	svia_scr_read(link, SCR_STATUS, &sstatus);
	svia_scr_read(link, SCR_CONTROL, &scontrol);

	online = (sstatus & 0xf) == 0x3;

	ata_port_info(ap,
		      "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
		      online ? "up" : "down", sstatus, scontrol);

	/* SStatus is read one more time */
	svia_scr_read(link, SCR_STATUS, &sstatus);

	if (!online) {
		/* tell EH to bail */
		ehc->i.action &= ~ATA_EH_RESET;
		return 0;
	}

 skip_scr:
	/* wait for !BSY */
	ata_sff_wait_ready(link, deadline);

	return 0;
}

static void vt6420_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	if ((qc->tf.command == ATA_CMD_PACKET) &&
	    (qc->scsicmd->sc_data_direction == DMA_TO_DEVICE)) {
		/* Prevents corruption on some ATAPI burners */
		ata_sff_pause(ap);
	}
	ata_bmdma_start(qc);
}

static int vt6421_pata_cable_detect(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 tmp;

	pci_read_config_byte(pdev, PATA_UDMA_TIMING, &tmp);
	if (tmp & 0x10)
		return ATA_CBL_PATA40;
	return ATA_CBL_PATA80;
}

static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static const u8 pio_bits[] = { 0xA8, 0x65, 0x65, 0x31, 0x20 };

	pci_write_config_byte(pdev, PATA_PIO_TIMING - adev->devno,
			      pio_bits[adev->pio_mode - XFER_PIO_0]);
}

static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static const u8 udma_bits[] = { 0xEE, 0xE8, 0xE6, 0xE4, 0xE2, 0xE1, 0xE0, 0xE0 };

	pci_write_config_byte(pdev, PATA_UDMA_TIMING - adev->devno,
			      udma_bits[adev->dma_mode - XFER_UDMA_0]);
}

static const unsigned int svia_bar_sizes[] = {
	8, 4, 8, 4, 16, 256
};

static const unsigned int vt6421_bar_sizes[] = {
	16, 16, 16, 16, 32, 128
};

static void __iomem *svia_scr_addr(void __iomem *addr, unsigned int port)
{
	return addr + (port * 128);
}

static void __iomem *vt6421_scr_addr(void __iomem *addr, unsigned int port)
{
	return addr + (port * 64);
}
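
/*
 * SCR register placement differs per controller.  Judging from the two
 * helpers above and the BAR size tables: vt6420 spaces its two ports'
 * SCR blocks 128 bytes apart in BAR 5, while vt6421 uses one 64-byte
 * block per port and additionally keeps each port's taskfile registers
 * in its own BAR (0-2) and the BMDMA registers in BAR 4, 8 bytes per
 * port, as wired up by vt6421_init_addrs() below.  vt8251 does not use
 * these helpers; its SCRs are accessed through PCI config space (see
 * vt8251_scr_read()/vt8251_scr_write() above).
 */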

static void vt6421_init_addrs(struct ata_port *ap)
{
	void __iomem * const * iomap = ap->host->iomap;
	void __iomem *reg_addr = iomap[ap->port_no];
	void __iomem *bmdma_addr = iomap[4] + (ap->port_no * 8);
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ioaddr->cmd_addr = reg_addr;
	ioaddr->altstatus_addr =
	ioaddr->ctl_addr = (void __iomem *)
		((unsigned long)(reg_addr + 8) | ATA_PCI_CTL_OFS);
	ioaddr->bmdma_addr = bmdma_addr;
	ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no);

	ata_sff_std_ports(ioaddr);

	ata_port_pbar_desc(ap, ap->port_no, -1, "port");
	ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma");
}

static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] = { &vt6420_port_info, NULL };
	struct ata_host *host;
	int rc;

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	*r_host = host;

	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
		return rc;
	}

	host->ports[0]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 0);
	host->ports[1]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 1);

	return 0;
}

static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] =
		{ &vt6421_sport_info, &vt6421_sport_info, &vt6421_pport_info };
	struct ata_host *host;
	int i, rc;

	*r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi));
	if (!host) {
		dev_err(&pdev->dev, "failed to allocate host\n");
		return -ENOMEM;
	}

	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to request/iomap PCI BARs (errno=%d)\n",
			rc);
		return rc;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < host->n_ports; i++)
		vt6421_init_addrs(host->ports[i]);

	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;

	return 0;
}

static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] = { &vt8251_port_info, NULL };
	struct ata_host *host;
	int i, rc;

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	*r_host = host;

	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
		return rc;
	}

	/* 8251 hosts four sata ports as M/S of the two channels */
	for (i = 0; i < host->n_ports; i++)
		ata_slave_link_init(host->ports[i]);

	return 0;
}

static void svia_wd_fix(struct pci_dev *pdev)
{
	u8 tmp8;

	pci_read_config_byte(pdev, 0x52, &tmp8);
	pci_write_config_byte(pdev, 0x52, tmp8 | BIT(2));
}
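
/*
 * vt6421 interrupt handler.  Beyond the normal BMDMA handling, hotplug
 * events (the SATA_HOTPLUG interrupt enabled in svia_configure()) are
 * not recognized by ata_bmdma_interrupt(), so if the IRQ was not
 * handled we check SError on both SATA ports for a PHY-ready change
 * and kick off EH ourselves.
 */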

static irqreturn_t vt6421_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	irqreturn_t rc = ata_bmdma_interrupt(irq, dev_instance);

	/* if the IRQ was not handled, it might be a hotplug IRQ */
	if (rc != IRQ_HANDLED) {
		u32 serror;
		unsigned long flags;

		spin_lock_irqsave(&host->lock, flags);
		/* check for hotplug on port 0 */
		svia_scr_read(&host->ports[0]->link, SCR_ERROR, &serror);
		if (serror & SERR_PHYRDY_CHG) {
			ata_ehi_hotplugged(&host->ports[0]->link.eh_info);
			ata_port_freeze(host->ports[0]);
			rc = IRQ_HANDLED;
		}
		/* check for hotplug on port 1 */
		svia_scr_read(&host->ports[1]->link, SCR_ERROR, &serror);
		if (serror & SERR_PHYRDY_CHG) {
			ata_ehi_hotplugged(&host->ports[1]->link.eh_info);
			ata_port_freeze(host->ports[1]);
			rc = IRQ_HANDLED;
		}
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return rc;
}

static void vt6421_error_handler(struct ata_port *ap)
{
	struct svia_priv *hpriv = ap->host->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u32 serror;

	/* see svia_configure() for description */
	if (!hpriv->wd_workaround) {
		svia_scr_read(&ap->link, SCR_ERROR, &serror);
		if (serror == 0x1000500) {
			ata_port_warn(ap, "Incompatible drive: enabling workaround. This slows down transfer rate to ~60 MB/s");
			svia_wd_fix(pdev);
			hpriv->wd_workaround = true;
			ap->link.eh_context.i.flags |= ATA_EHI_QUIET;
		}
	}

	ata_sff_error_handler(ap);
}
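
/*
 * One-time controller setup at probe time: report the IRQ routing,
 * enable both SATA channels, gate their interrupts to us, switch the
 * channels to native mode, enable the hotplug interrupt on vt6421 and,
 * on vt6420, unconditionally apply the WD FIFO-watermark fix described
 * in the comment below.
 */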

static void svia_configure(struct pci_dev *pdev, int board_id,
			   struct svia_priv *hpriv)
{
	u8 tmp8;

	pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
	dev_info(&pdev->dev, "routed to hard irq line %d\n",
		 (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);

	/* make sure SATA channels are enabled */
	pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_dbg(&pdev->dev, "enabling SATA channels (0x%x)\n",
			(int)tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
	}

	/* make sure interrupts for each channel are sent to us */
	pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_dbg(&pdev->dev, "enabling SATA channel interrupts (0x%x)\n",
			(int) tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
	}

	/* make sure native mode is enabled */
	pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
	if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
		dev_dbg(&pdev->dev,
			"enabling SATA channel native mode (0x%x)\n",
			(int) tmp8);
		tmp8 |= NATIVE_MODE_ALL;
		pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
	}

	if (board_id == vt6421) {
		/* enable IRQ on hotplug */
		pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
		if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
			dev_dbg(&pdev->dev,
				"enabling SATA hotplug (0x%x)\n",
				(int) tmp8);
			tmp8 |= SATA_HOTPLUG;
			pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
		}
	}

	/*
	 * vt6420/1 has problems talking to some drives.  The following
	 * is the fix from Joseph Chan <JosephChan@via.com.tw>.
	 *
	 * When the host issues HOLD, the device may send up to 20DW of
	 * data before acknowledging it with HOLDA, and the host should
	 * be able to buffer them in its FIFO.  Unfortunately, some WD
	 * drives send up to 40DW before acknowledging HOLD and, in the
	 * default configuration, this ends up overflowing vt6421's
	 * FIFO, making the controller abort the transaction with
	 * R_ERR.
	 *
	 * Rx52[2] is the enable bit for the internal 128DW FIFO flow
	 * control watermark adjusting mechanism.  Its default value 0
	 * means the host issues HOLD to the device when the remaining
	 * FIFO space drops below 32DW; setting it to 1 makes the
	 * watermark 64DW.
	 *
	 * https://bugzilla.kernel.org/show_bug.cgi?id=15173
	 * http://article.gmane.org/gmane.linux.ide/46352
	 * http://thread.gmane.org/gmane.linux.kernel/1062139
	 *
	 * As the fix slows down data transfer, apply it only if the error
	 * actually appears - see vt6421_error_handler().
	 * Apply the fix always on vt6420 as we don't know if SCR_ERROR can be
	 * read safely.
	 */
	if (board_id == vt6420) {
		svia_wd_fix(pdev);
		hpriv->wd_workaround = true;
	}
}

static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned int i;
	int rc;
	struct ata_host *host = NULL;
	int board_id = (int) ent->driver_data;
	const unsigned *bar_sizes;
	struct svia_priv *hpriv;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if (board_id == vt6421)
		bar_sizes = &vt6421_bar_sizes[0];
	else
		bar_sizes = &svia_bar_sizes[0];

	for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
		if ((pci_resource_start(pdev, i) == 0) ||
		    (pci_resource_len(pdev, i) < bar_sizes[i])) {
			dev_err(&pdev->dev,
				"invalid PCI BAR %u (start 0x%llx, len 0x%llx)\n",
				i,
				(unsigned long long)pci_resource_start(pdev, i),
				(unsigned long long)pci_resource_len(pdev, i));
			return -ENODEV;
		}

	switch (board_id) {
	case vt6420:
		rc = vt6420_prepare_host(pdev, &host);
		break;
	case vt6421:
		rc = vt6421_prepare_host(pdev, &host);
		break;
	case vt8251:
		rc = vt8251_prepare_host(pdev, &host);
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	svia_configure(pdev, board_id, hpriv);

	pci_set_master(pdev);
	if (board_id == vt6421)
		return ata_host_activate(host, pdev->irq, vt6421_interrupt,
					 IRQF_SHARED, &svia_sht);
	else
		return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
					 IRQF_SHARED, &svia_sht);
}

#ifdef CONFIG_PM_SLEEP
static int svia_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	struct svia_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (hpriv->wd_workaround)
		svia_wd_fix(pdev);
	ata_host_resume(host);

	return 0;
}
#endif

module_pci_driver(svia_pci_driver);