/*
 *  sata_via.c - VIA Serial ATA controllers
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available under NDA.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_via"
#define DRV_VERSION	"2.3"

enum board_ids_enum {
	vt6420,
	vt6421,
};

enum {
	SATA_CHAN_ENAB		= 0x40, /* SATA channel enable */
	SATA_INT_GATE		= 0x41, /* SATA interrupt gating */
	SATA_NATIVE_MODE	= 0x42, /* Native mode enable */
	PATA_UDMA_TIMING	= 0xB3, /* PATA timing for DMA/cable detect */
	PATA_PIO_TIMING		= 0xAB, /* PATA timing register */

	PORT0			= (1 << 1),
	PORT1			= (1 << 0),
	ALL_PORTS		= PORT0 | PORT1,

	NATIVE_MODE_ALL		= (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),

	SATA_EXT_PHY		= (1 << 6), /* 0==use PATA, 1==ext phy */
};

static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
static void svia_noop_freeze(struct ata_port *ap);
static void vt6420_error_handler(struct ata_port *ap);
static int vt6421_pata_cable_detect(struct ata_port *ap);
static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev);

static const struct pci_device_id svia_pci_tbl[] = {
	{ PCI_VDEVICE(VIA, 0x5337), vt6420 },
	{ PCI_VDEVICE(VIA, 0x0591), vt6420 },
	{ PCI_VDEVICE(VIA, 0x3149), vt6420 },
	{ PCI_VDEVICE(VIA, 0x3249), vt6421 },
	{ PCI_VDEVICE(VIA, 0x5287), vt6420 },
	{ PCI_VDEVICE(VIA, 0x5372), vt6420 },
	{ PCI_VDEVICE(VIA, 0x7372), vt6420 },

	{ }	/* terminate list */
};

static struct pci_driver svia_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= svia_pci_tbl,
	.probe			= svia_init_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= ata_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};

static struct scsi_host_template svia_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations vt6420_sata_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,

	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.data_xfer		= ata_data_xfer,

	.freeze			= svia_noop_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= vt6420_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,

	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,

	.port_start		= ata_port_start,
};

static const struct ata_port_operations vt6421_pata_ops = {
	.set_piomode		= vt6421_set_pio_mode,
	.set_dmamode		= vt6421_set_dma_mode,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,

	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.data_xfer		= ata_data_xfer,

	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.cable_detect		= vt6421_pata_cable_detect,

	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,

	.port_start		= ata_port_start,
};

static const struct ata_port_operations vt6421_sata_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,

	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.data_xfer		= ata_data_xfer,

	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.cable_detect		= ata_cable_sata,

	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,

	.scr_read		= svia_scr_read,
	.scr_write		= svia_scr_write,

	.port_start		= ata_port_start,
};

static const struct ata_port_info vt6420_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
	.pio_mask	= 0x1f,
	.mwdma_mask	= 0x07,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6420_sata_ops,
};

static struct ata_port_info vt6421_sport_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
	.pio_mask	= 0x1f,
	.mwdma_mask	= 0x07,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6421_sata_ops,
};

static struct ata_port_info vt6421_pport_info = {
	.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_LEGACY,
	.pio_mask	= 0x1f,
	.mwdma_mask	= 0,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6421_pata_ops,
};
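/*
 * Port layout note: the vt6420 is brought up as a plain two-port SATA
 * host (see vt6420_prepare_host() below), while the vt6421 is probed
 * with two SATA ports plus one PATA port, which is why it has the
 * separate vt6421_sport_info/vt6421_pport_info entries above (see
 * vt6421_prepare_host()).
 */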
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	*val = ioread32(ap->ioaddr.scr_addr + (4 * sc_reg));
	return 0;
}

static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	iowrite32(val, ap->ioaddr.scr_addr + (4 * sc_reg));
	return 0;
}

static void svia_noop_freeze(struct ata_port *ap)
{
	/* Some VIA controllers choke if ATA_NIEN is manipulated in a
	 * certain way.  Leave it alone and just clear pending IRQ.
	 */
	ata_chk_status(ap);
	ata_bmdma_irq_clear(ap);
}

/**
 *	vt6420_prereset - prereset for vt6420
 *	@link: target ATA link
 *	@deadline: deadline jiffies for the operation
 *
 *	SCR registers on vt6420 are unreliable and may hang the whole
 *	machine completely if accessed with the wrong timing.  To avoid
 *	such catastrophe, vt6420 doesn't provide generic SCR access
 *	operations, but uses SStatus and SControl only during boot
 *	probing in a controlled way.
 *
 *	As the old (pre EH update) probing code is proven to work, we
 *	strictly follow the access pattern.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &ap->link.eh_context;
	unsigned long timeout = jiffies + (HZ * 5);
	u32 sstatus, scontrol;
	int online;

	/* don't do any SCR stuff if we're not loading */
	if (!(ap->pflags & ATA_PFLAG_LOADING))
		goto skip_scr;

	/* Resume phy.  This is the old SATA resume sequence */
	svia_scr_write(ap, SCR_CONTROL, 0x300);
	svia_scr_read(ap, SCR_CONTROL, &scontrol); /* flush */

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		svia_scr_read(ap, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* open code sata_print_link_status() */
	svia_scr_read(ap, SCR_STATUS, &sstatus);
	svia_scr_read(ap, SCR_CONTROL, &scontrol);

	online = (sstatus & 0xf) == 0x3;

	ata_port_printk(ap, KERN_INFO,
			"SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
			online ? "up" : "down", sstatus, scontrol);

	/* SStatus is read one more time */
	svia_scr_read(ap, SCR_STATUS, &sstatus);

	if (!online) {
		/* tell EH to bail */
		ehc->i.action &= ~ATA_EH_RESET_MASK;
		return 0;
	}

 skip_scr:
	/* wait for !BSY */
	ata_wait_ready(ap, deadline);

	return 0;
}

static void vt6420_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, vt6420_prereset, ata_std_softreset, NULL,
			   ata_std_postreset);
}

static int vt6421_pata_cable_detect(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 tmp;

	pci_read_config_byte(pdev, PATA_UDMA_TIMING, &tmp);
	if (tmp & 0x10)
		return ATA_CBL_PATA40;
	return ATA_CBL_PATA80;
}

static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static const u8 pio_bits[] = { 0xA8, 0x65, 0x65, 0x31, 0x20 };

	pci_write_config_byte(pdev, PATA_PIO_TIMING,
			      pio_bits[adev->pio_mode - XFER_PIO_0]);
}

static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static const u8 udma_bits[] = { 0xEE, 0xE8, 0xE6, 0xE4, 0xE2,
					0xE1, 0xE0, 0xE0 };

	pci_write_config_byte(pdev, PATA_UDMA_TIMING,
			      udma_bits[adev->dma_mode - XFER_UDMA_0]);
}

static const unsigned int svia_bar_sizes[] = {
	8, 4, 8, 4, 16, 256
};

static const unsigned int vt6421_bar_sizes[] = {
	16, 16, 16, 16, 32, 128
};

static void __iomem *svia_scr_addr(void __iomem *addr, unsigned int port)
{
	return addr + (port * 128);
}

static void __iomem *vt6421_scr_addr(void __iomem *addr, unsigned int port)
{
	return addr + (port * 64);
}

static void vt6421_init_addrs(struct ata_port *ap)
{
	void __iomem * const * iomap = ap->host->iomap;
	void __iomem *reg_addr = iomap[ap->port_no];
	void __iomem *bmdma_addr = iomap[4] + (ap->port_no * 8);
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ioaddr->cmd_addr = reg_addr;
	ioaddr->altstatus_addr =
	ioaddr->ctl_addr = (void __iomem *)
		((unsigned long)(reg_addr + 8) | ATA_PCI_CTL_OFS);
	ioaddr->bmdma_addr = bmdma_addr;
	ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no);

	ata_std_ports(ioaddr);

	ata_port_pbar_desc(ap, ap->port_no, -1, "port");
	ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma");
}

static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] = { &vt6420_port_info, NULL };
	struct ata_host *host;
	int rc;

	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
	if (rc)
		return rc;
	*r_host = host;

	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to iomap PCI BAR 5\n");
		return rc;
	}

	host->ports[0]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 0);
	host->ports[1]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 1);

	return 0;
}

static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] =
		{ &vt6421_sport_info, &vt6421_sport_info, &vt6421_pport_info };
	struct ata_host *host;
	int i, rc;

	*r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi));
	if (!host) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to allocate host\n");
		return -ENOMEM;
	}

	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to request/iomap "
			   "PCI BARs (errno=%d)\n", rc);
		return rc;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < host->n_ports; i++)
		vt6421_init_addrs(host->ports[i]);

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	return 0;
}

static void svia_configure(struct pci_dev *pdev)
{
	u8 tmp8;

	pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
	dev_printk(KERN_INFO, &pdev->dev, "routed to hard irq line %d\n",
		   (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);

	/* make sure SATA channels are enabled */
	pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "enabling SATA channels (0x%x)\n",
			   (int) tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
	}

	/* make sure interrupts for each channel are sent to us */
	pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "enabling SATA channel interrupts (0x%x)\n",
			   (int) tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
	}

	/* make sure native mode is enabled */
	pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
	if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "enabling SATA channel native mode (0x%x)\n",
			   (int) tmp8);
		tmp8 |= NATIVE_MODE_ALL;
		pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
	}
}

static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int i;
	int rc;
	struct ata_host *host;
	int board_id = (int) ent->driver_data;
	const unsigned *bar_sizes;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if (board_id == vt6420)
		bar_sizes = &svia_bar_sizes[0];
	else
		bar_sizes = &vt6421_bar_sizes[0];

	for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
		if ((pci_resource_start(pdev, i) == 0) ||
		    (pci_resource_len(pdev, i) < bar_sizes[i])) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n",
				   i,
				   (unsigned long long)pci_resource_start(pdev, i),
				   (unsigned long long)pci_resource_len(pdev, i));
			return -ENODEV;
		}

	if (board_id == vt6420)
		rc = vt6420_prepare_host(pdev, &host);
	else
		rc = vt6421_prepare_host(pdev, &host);
	if (rc)
		return rc;

	svia_configure(pdev);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
				 &svia_sht);
}

static int __init svia_init(void)
{
	return pci_register_driver(&svia_pci_driver);
}

static void __exit svia_exit(void)
{
	pci_unregister_driver(&svia_pci_driver);
}

module_init(svia_init);
module_exit(svia_exit);