/*
 *  sata_via.c - VIA Serial ATA controllers
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available under NDA.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_via"
#define DRV_VERSION	"2.3"

enum board_ids_enum {
	vt6420,
	vt6421,
};

enum {
	SATA_CHAN_ENAB		= 0x40, /* SATA channel enable */
	SATA_INT_GATE		= 0x41, /* SATA interrupt gating */
	SATA_NATIVE_MODE	= 0x42, /* Native mode enable */
	PATA_UDMA_TIMING	= 0xB3, /* PATA timing for DMA/cable detect */
	PATA_PIO_TIMING		= 0xAB, /* PATA timing register */

	PORT0			= (1 << 1),
	PORT1			= (1 << 0),
	ALL_PORTS		= PORT0 | PORT1,

	NATIVE_MODE_ALL		= (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),

	SATA_EXT_PHY		= (1 << 6), /* 0==use PATA, 1==ext phy */
};

static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
static void svia_noop_freeze(struct ata_port *ap);
static int vt6420_prereset(struct ata_link *link, unsigned long deadline);
static int vt6421_pata_cable_detect(struct ata_port *ap);
static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev);

static const struct pci_device_id svia_pci_tbl[] = {
	{ PCI_VDEVICE(VIA, 0x5337), vt6420 },
	{ PCI_VDEVICE(VIA, 0x0591), vt6420 },
	{ PCI_VDEVICE(VIA, 0x3149), vt6420 },
	{ PCI_VDEVICE(VIA, 0x3249), vt6421 },
	{ PCI_VDEVICE(VIA, 0x5287), vt6420 },
	{ PCI_VDEVICE(VIA, 0x5372), vt6420 },
	{ PCI_VDEVICE(VIA, 0x7372), vt6420 },

	{ }	/* terminate list */
};

static struct pci_driver svia_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= svia_pci_tbl,
	.probe			= svia_init_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= ata_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};

static struct scsi_host_template svia_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

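/*
 * vt6420 and vt6421 need different port operations.  vt6420's SCR
 * registers cannot be accessed safely at arbitrary times (see
 * vt6420_prereset() below), so it gets a no-op freeze and a custom
 * prereset instead of generic SCR access.  vt6421 exposes per-port SCR
 * registers through BAR 5 and also carries a PATA channel with its own
 * cable-detect and timing callbacks.
 */
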
static struct ata_port_operations vt6420_sata_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.freeze			= svia_noop_freeze,
	.prereset		= vt6420_prereset,
};

static struct ata_port_operations vt6421_pata_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.cable_detect		= vt6421_pata_cable_detect,
	.set_piomode		= vt6421_set_pio_mode,
	.set_dmamode		= vt6421_set_dma_mode,
};

static struct ata_port_operations vt6421_sata_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.scr_read		= svia_scr_read,
	.scr_write		= svia_scr_write,
};

static const struct ata_port_info vt6420_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
	.pio_mask	= 0x1f,
	.mwdma_mask	= 0x07,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6420_sata_ops,
};

static struct ata_port_info vt6421_sport_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
	.pio_mask	= 0x1f,
	.mwdma_mask	= 0x07,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6421_sata_ops,
};

static struct ata_port_info vt6421_pport_info = {
	.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_LEGACY,
	.pio_mask	= 0x1f,
	.mwdma_mask	= 0,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6421_pata_ops,
};

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	*val = ioread32(ap->ioaddr.scr_addr + (4 * sc_reg));
	return 0;
}

static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	iowrite32(val, ap->ioaddr.scr_addr + (4 * sc_reg));
	return 0;
}

static void svia_noop_freeze(struct ata_port *ap)
{
	/* Some VIA controllers choke if ATA_NIEN is manipulated in
	 * certain ways.  Leave it alone and just clear pending IRQ.
	 */
	ap->ops->sff_check_status(ap);
	ata_sff_irq_clear(ap);
}

/**
 *	vt6420_prereset - prereset for vt6420
 *	@link: target ATA link
 *	@deadline: deadline jiffies for the operation
 *
 *	SCR registers on vt6420 are unreliable and may hang the whole
 *	machine if accessed with the wrong timing.  To avoid such a
 *	catastrophe, vt6420 doesn't provide generic SCR access
 *	operations, but uses SStatus and SControl only during boot
 *	probing in a controlled way.
 *
 *	As the old (pre-EH update) probing code is proven to work, we
 *	strictly follow the access pattern.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &ap->link.eh_context;
	unsigned long timeout = jiffies + (HZ * 5);
	u32 sstatus, scontrol;
	int online;

	/* don't do any SCR stuff if we're not loading */
	if (!(ap->pflags & ATA_PFLAG_LOADING))
		goto skip_scr;

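	/*
	 * Per the SATA spec, SControl 0x300 leaves DET at "no action",
	 * places no limit on link speed, and sets IPM to 0x3 (transitions
	 * to the Partial and Slumber power states disabled).  The poll
	 * below waits while SStatus DET == 1, i.e. a device was detected
	 * but Phy communication is not yet established.
	 */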
	/* Resume phy.  This is the old SATA resume sequence */
	svia_scr_write(ap, SCR_CONTROL, 0x300);
	svia_scr_read(ap, SCR_CONTROL, &scontrol); /* flush */

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		svia_scr_read(ap, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* open code sata_print_link_status() */
	svia_scr_read(ap, SCR_STATUS, &sstatus);
	svia_scr_read(ap, SCR_CONTROL, &scontrol);

	online = (sstatus & 0xf) == 0x3;

	ata_port_printk(ap, KERN_INFO,
			"SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
			online ? "up" : "down", sstatus, scontrol);

	/* SStatus is read one more time */
	svia_scr_read(ap, SCR_STATUS, &sstatus);

	if (!online) {
		/* tell EH to bail */
		ehc->i.action &= ~ATA_EH_RESET;
		return 0;
	}

 skip_scr:
	/* wait for !BSY */
	ata_sff_wait_ready(link, deadline);

	return 0;
}

static int vt6421_pata_cable_detect(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 tmp;

	pci_read_config_byte(pdev, PATA_UDMA_TIMING, &tmp);
	if (tmp & 0x10)
		return ATA_CBL_PATA40;
	return ATA_CBL_PATA80;
}

static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static const u8 pio_bits[] = { 0xA8, 0x65, 0x65, 0x31, 0x20 };

	pci_write_config_byte(pdev, PATA_PIO_TIMING,
			      pio_bits[adev->pio_mode - XFER_PIO_0]);
}

static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static const u8 udma_bits[] = { 0xEE, 0xE8, 0xE6, 0xE4,
					0xE2, 0xE1, 0xE0, 0xE0 };

	pci_write_config_byte(pdev, PATA_UDMA_TIMING,
			      udma_bits[adev->dma_mode - XFER_UDMA_0]);
}

static const unsigned int svia_bar_sizes[] = {
	8, 4, 8, 4, 16, 256
};

static const unsigned int vt6421_bar_sizes[] = {
	16, 16, 16, 16, 32, 128
};

static void __iomem *svia_scr_addr(void __iomem *addr, unsigned int port)
{
	return addr + (port * 128);
}

static void __iomem *vt6421_scr_addr(void __iomem *addr, unsigned int port)
{
	return addr + (port * 64);
}

static void vt6421_init_addrs(struct ata_port *ap)
{
	void __iomem * const *iomap = ap->host->iomap;
	void __iomem *reg_addr = iomap[ap->port_no];
	void __iomem *bmdma_addr = iomap[4] + (ap->port_no * 8);
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ioaddr->cmd_addr = reg_addr;
	ioaddr->altstatus_addr =
	ioaddr->ctl_addr = (void __iomem *)
		((unsigned long)(reg_addr + 8) | ATA_PCI_CTL_OFS);
	ioaddr->bmdma_addr = bmdma_addr;
	ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no);

	ata_sff_std_ports(ioaddr);

	ata_port_pbar_desc(ap, ap->port_no, -1, "port");
	ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma");
}

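/*
 * Host preparation differs by chip.  vt6420 looks like a plain SFF BMDMA
 * controller, so ata_pci_sff_prepare_host() sets up its two ports and only
 * BAR 5 (the SCR block, 128 bytes per port) needs an extra iomap.  vt6421
 * is laid out differently: BARs 0-2 each hold one port's command block
 * (two SATA ports plus the PATA port), BAR 4 holds the bmdma registers
 * (8 bytes per port) and BAR 5 the SCR registers (64 bytes per port), so
 * all six BARs are mapped and the port addresses are filled in by hand in
 * vt6421_init_addrs().
 */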
static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] = { &vt6420_port_info, NULL };
	struct ata_host *host;
	int rc;

	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	*r_host = host;

	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to iomap PCI BAR 5\n");
		return rc;
	}

	host->ports[0]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 0);
	host->ports[1]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 1);

	return 0;
}

static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] =
		{ &vt6421_sport_info, &vt6421_sport_info, &vt6421_pport_info };
	struct ata_host *host;
	int i, rc;

	*r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi));
	if (!host) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to allocate host\n");
		return -ENOMEM;
	}

	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to request/iomap "
			   "PCI BARs (errno=%d)\n", rc);
		return rc;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < host->n_ports; i++)
		vt6421_init_addrs(host->ports[i]);

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	return 0;
}

static void svia_configure(struct pci_dev *pdev)
{
	u8 tmp8;

	pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
	dev_printk(KERN_INFO, &pdev->dev, "routed to hard irq line %d\n",
		   (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);

	/* make sure SATA channels are enabled */
	pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "enabling SATA channels (0x%x)\n",
			   (int) tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
	}

	/* make sure interrupts for each channel are sent to us */
	pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "enabling SATA channel interrupts (0x%x)\n",
			   (int) tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
	}

	/* make sure native mode is enabled */
	pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
	if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "enabling SATA channel native mode (0x%x)\n",
			   (int) tmp8);
		tmp8 |= NATIVE_MODE_ALL;
		pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
	}
}

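/*
 * Probe entry point: validate that every BAR is present and at least as
 * large as the chip expects (svia_bar_sizes[] for vt6420, vt6421_bar_sizes[]
 * for vt6421), prepare the host for the detected board, apply the common
 * register setup in svia_configure(), and activate the host with the shared
 * SFF interrupt handler.
 */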
static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int i;
	int rc;
	struct ata_host *host;
	int board_id = (int) ent->driver_data;
	const unsigned *bar_sizes;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if (board_id == vt6420)
		bar_sizes = &svia_bar_sizes[0];
	else
		bar_sizes = &vt6421_bar_sizes[0];

	for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
		if ((pci_resource_start(pdev, i) == 0) ||
		    (pci_resource_len(pdev, i) < bar_sizes[i])) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "invalid PCI BAR %u (start 0x%llx, len 0x%llx)\n",
				   i,
				   (unsigned long long)pci_resource_start(pdev, i),
				   (unsigned long long)pci_resource_len(pdev, i));
			return -ENODEV;
		}

	if (board_id == vt6420)
		rc = vt6420_prepare_host(pdev, &host);
	else
		rc = vt6421_prepare_host(pdev, &host);
	if (rc)
		return rc;

	svia_configure(pdev);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
				 IRQF_SHARED, &svia_sht);
}

static int __init svia_init(void)
{
	return pci_register_driver(&svia_pci_driver);
}

static void __exit svia_exit(void)
{
	pci_unregister_driver(&svia_pci_driver);
}

module_init(svia_init);
module_exit(svia_exit);