/*
 *  sata_via.c - VIA Serial ATA controllers
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available under NDA.
 *
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_via"
#define DRV_VERSION	"2.6"

/*
 * The vt8251 differs from VIA's other SATA controllers: it has two
 * channels, and each channel has both a Master and a Slave slot.
 */
enum board_ids_enum {
	vt6420,
	vt6421,
	vt8251,
};

enum {
	SATA_CHAN_ENAB		= 0x40, /* SATA channel enable */
	SATA_INT_GATE		= 0x41, /* SATA interrupt gating */
	SATA_NATIVE_MODE	= 0x42, /* Native mode enable */
	PATA_UDMA_TIMING	= 0xB3, /* PATA timing for DMA/cable detect */
	PATA_PIO_TIMING		= 0xAB, /* PATA timing register */

	PORT0			= (1 << 1),
	PORT1			= (1 << 0),
	ALL_PORTS		= PORT0 | PORT1,

	NATIVE_MODE_ALL		= (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),

	SATA_EXT_PHY		= (1 << 6), /* 0==use PATA, 1==ext phy */
};

static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val);
static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val);
static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
static void svia_noop_freeze(struct ata_port *ap);
static int vt6420_prereset(struct ata_link *link, unsigned long deadline);
static void vt6420_bmdma_start(struct ata_queued_cmd *qc);
static int vt6421_pata_cable_detect(struct ata_port *ap);
static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev);

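/*
 * PCI IDs handled by this driver.  driver_data carries the
 * board_ids_enum value, which svia_init_one() uses to pick the
 * per-chip host preparation routine and BAR size table.
 */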
static const struct pci_device_id svia_pci_tbl[] = {
	{ PCI_VDEVICE(VIA, 0x5337), vt6420 },
	{ PCI_VDEVICE(VIA, 0x0591), vt6420 },	/* 2 sata chnls (Master) */
	{ PCI_VDEVICE(VIA, 0x3149), vt6420 },	/* 2 sata chnls (Master) */
	{ PCI_VDEVICE(VIA, 0x3249), vt6421 },	/* 2 sata chnls, 1 pata chnl */
	{ PCI_VDEVICE(VIA, 0x5372), vt6420 },
	{ PCI_VDEVICE(VIA, 0x7372), vt6420 },
	{ PCI_VDEVICE(VIA, 0x5287), vt8251 },	/* 2 sata chnls (Master/Slave) */
	{ PCI_VDEVICE(VIA, 0x9000), vt8251 },

	{ }	/* terminate list */
};

static struct pci_driver svia_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= svia_pci_tbl,
	.probe			= svia_init_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= ata_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};

static struct scsi_host_template svia_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations svia_base_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.sff_tf_load		= svia_tf_load,
};

static struct ata_port_operations vt6420_sata_ops = {
	.inherits		= &svia_base_ops,
	.freeze			= svia_noop_freeze,
	.prereset		= vt6420_prereset,
	.bmdma_start		= vt6420_bmdma_start,
};

static struct ata_port_operations vt6421_pata_ops = {
	.inherits		= &svia_base_ops,
	.cable_detect		= vt6421_pata_cable_detect,
	.set_piomode		= vt6421_set_pio_mode,
	.set_dmamode		= vt6421_set_dma_mode,
};

static struct ata_port_operations vt6421_sata_ops = {
	.inherits		= &svia_base_ops,
	.scr_read		= svia_scr_read,
	.scr_write		= svia_scr_write,
};

static struct ata_port_operations vt8251_ops = {
	.inherits		= &svia_base_ops,
	.hardreset		= sata_std_hardreset,
	.scr_read		= vt8251_scr_read,
	.scr_write		= vt8251_scr_write,
};

static const struct ata_port_info vt6420_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6420_sata_ops,
};

static struct ata_port_info vt6421_sport_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6421_sata_ops,
};

static struct ata_port_info vt6421_pport_info = {
	.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_LEGACY,
	.pio_mask	= ATA_PIO4,
	/* No MWDMA */
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6421_pata_ops,
};

static struct ata_port_info vt8251_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS |
			  ATA_FLAG_NO_LEGACY,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt8251_ops,
};

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	*val = ioread32(link->ap->ioaddr.scr_addr + (4 * sc_reg));
	return 0;
}

static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	iowrite32(val, link->ap->ioaddr.scr_addr + (4 * sc_reg));
	return 0;
}

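/**
 *	vt8251_scr_read - read an SCR register via PCI config space
 *	@link: target ATA link
 *	@scr: SCR to read (SCR_STATUS, SCR_ERROR or SCR_CONTROL)
 *	@val: place to store the result
 *
 *	On the vt8251 this driver reaches the SCR block through PCI
 *	configuration space: SStatus and SControl live as one config
 *	byte per M/S slot (bases 0xA0 and 0xA4), SError as one dword
 *	per slot (base 0xB0 on the 5287).  The fields are picked apart
 *	and reassembled here into the values libata expects.
 *
 *	RETURNS:
 *	0 on success, -EINVAL if @scr is not supported.
 */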
static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
{
	static const u8 ipm_tbl[] = { 1, 2, 6, 0 };
	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
	int slot = 2 * link->ap->port_no + link->pmp;
	u32 v = 0;
	u8 raw;

	switch (scr) {
	case SCR_STATUS:
		pci_read_config_byte(pdev, 0xA0 + slot, &raw);

		/* read the DET field, bits 0 and 1 of the config byte */
		v |= raw & 0x03;

		/* read the SPD field, bit 4 of the config byte */
		if (raw & (1 << 4))
			v |= 0x02 << 4;
		else
			v |= 0x01 << 4;

		/* read the IPM field, bits 2 and 3 of the config byte */
		v |= ipm_tbl[(raw >> 2) & 0x3];
		break;

	case SCR_ERROR:
		/* devices other than 5287 use 0xA8 as base */
		WARN_ON(pdev->device != 0x5287);
		pci_read_config_dword(pdev, 0xB0 + slot * 4, &v);
		break;

	case SCR_CONTROL:
		pci_read_config_byte(pdev, 0xA4 + slot, &raw);

		/* read the DET field, bit 0 and bit 1 */
		v |= ((raw & 0x02) << 1) | (raw & 0x01);

		/* read the IPM field, bit 2 and bit 3 */
		v |= ((raw >> 2) & 0x03) << 8;
		break;

	default:
		return -EINVAL;
	}

	*val = v;
	return 0;
}

static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val)
{
	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
	int slot = 2 * link->ap->port_no + link->pmp;
	u32 v = 0;

	switch (scr) {
	case SCR_ERROR:
		/* devices other than 5287 use 0xA8 as base */
		WARN_ON(pdev->device != 0x5287);
		pci_write_config_dword(pdev, 0xB0 + slot * 4, val);
		return 0;

	case SCR_CONTROL:
		/* set the DET field */
		v |= ((val & 0x4) >> 1) | (val & 0x1);

		/* set the IPM field */
		v |= ((val >> 8) & 0x3) << 2;

		pci_write_config_byte(pdev, 0xA4 + slot, v);
		return 0;

	default:
		return -EINVAL;
	}
}

/**
 *	svia_tf_load - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller.
 *
 *	This works around an internal bug of VIA chipsets which reset
 *	the device register after the IEN bit in the ctl register is
 *	changed.
 */
static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_taskfile ttf;

	if (tf->ctl != ap->last_ctl) {
		ttf = *tf;
		ttf.flags |= ATA_TFLAG_DEVICE;
		tf = &ttf;
	}
	ata_sff_tf_load(ap, tf);
}

static void svia_noop_freeze(struct ata_port *ap)
{
	/* Some VIA controllers choke if ATA_NIEN is manipulated in
	 * certain ways.  Leave it alone and just clear pending IRQ.
	 */
	ap->ops->sff_check_status(ap);
	ata_bmdma_irq_clear(ap);
}

/**
 *	vt6420_prereset - prereset for vt6420
 *	@link: target ATA link
 *	@deadline: deadline jiffies for the operation
 *
 *	SCR registers on vt6420 are pieces of shit and may hang the
 *	whole machine completely if accessed with the wrong timing.
 *	To avoid such catastrophe, vt6420 doesn't provide generic SCR
 *	access operations, but uses SStatus and SControl only during
 *	boot probing in a controlled way.
 *
 *	As the old (pre EH update) probing code is proven to work, we
 *	strictly follow the access pattern.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &ap->link.eh_context;
	unsigned long timeout = jiffies + (HZ * 5);
	u32 sstatus, scontrol;
	int online;

	/* don't do any SCR stuff if we're not loading */
	if (!(ap->pflags & ATA_PFLAG_LOADING))
		goto skip_scr;

	/* Resume phy.  This is the old SATA resume sequence */
	svia_scr_write(link, SCR_CONTROL, 0x300);
	svia_scr_read(link, SCR_CONTROL, &scontrol); /* flush */

	/* wait for phy to become ready, if necessary */
	do {
		ata_msleep(link->ap, 200);
		svia_scr_read(link, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* open code sata_print_link_status() */
	svia_scr_read(link, SCR_STATUS, &sstatus);
	svia_scr_read(link, SCR_CONTROL, &scontrol);

	online = (sstatus & 0xf) == 0x3;

	ata_port_printk(ap, KERN_INFO,
			"SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
			online ? "up" : "down", sstatus, scontrol);

	/* SStatus is read one more time */
	svia_scr_read(link, SCR_STATUS, &sstatus);

	if (!online) {
		/* tell EH to bail */
		ehc->i.action &= ~ATA_EH_RESET;
		return 0;
	}

 skip_scr:
	/* wait for !BSY */
	ata_sff_wait_ready(link, deadline);

	return 0;
}

static void vt6420_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	if ((qc->tf.command == ATA_CMD_PACKET) &&
	    (qc->scsicmd->sc_data_direction == DMA_TO_DEVICE)) {
		/* Prevents corruption on some ATAPI burners */
		ata_sff_pause(ap);
	}
	ata_bmdma_start(qc);
}

static int vt6421_pata_cable_detect(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 tmp;

	pci_read_config_byte(pdev, PATA_UDMA_TIMING, &tmp);
	if (tmp & 0x10)
		return ATA_CBL_PATA40;
	return ATA_CBL_PATA80;
}

static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static const u8 pio_bits[] = { 0xA8, 0x65, 0x65, 0x31, 0x20 };
	pci_write_config_byte(pdev, PATA_PIO_TIMING - adev->devno,
			      pio_bits[adev->pio_mode - XFER_PIO_0]);
}

static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static const u8 udma_bits[] = { 0xEE, 0xE8, 0xE6, 0xE4, 0xE2, 0xE1, 0xE0, 0xE0 };
	pci_write_config_byte(pdev, PATA_UDMA_TIMING - adev->devno,
			      udma_bits[adev->dma_mode - XFER_UDMA_0]);
}

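/*
 * Minimum BAR sizes expected for each chip; svia_init_one() rejects
 * the device if any BAR is missing or smaller.  The vt6421 table
 * differs because that chip maps each port's taskfile registers
 * through its own BAR, with BMDMA in BAR 4 and the SCR block in
 * BAR 5 (see vt6421_init_addrs() below).
 */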
static const unsigned int svia_bar_sizes[] = {
	8, 4, 8, 4, 16, 256
};

static const unsigned int vt6421_bar_sizes[] = {
	16, 16, 16, 16, 32, 128
};

static void __iomem *svia_scr_addr(void __iomem *addr, unsigned int port)
{
	return addr + (port * 128);
}

static void __iomem *vt6421_scr_addr(void __iomem *addr, unsigned int port)
{
	return addr + (port * 64);
}

static void vt6421_init_addrs(struct ata_port *ap)
{
	void __iomem * const * iomap = ap->host->iomap;
	void __iomem *reg_addr = iomap[ap->port_no];
	void __iomem *bmdma_addr = iomap[4] + (ap->port_no * 8);
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ioaddr->cmd_addr = reg_addr;
	ioaddr->altstatus_addr =
	ioaddr->ctl_addr = (void __iomem *)
		((unsigned long)(reg_addr + 8) | ATA_PCI_CTL_OFS);
	ioaddr->bmdma_addr = bmdma_addr;
	ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no);

	ata_sff_std_ports(ioaddr);

	ata_port_pbar_desc(ap, ap->port_no, -1, "port");
	ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma");
}

static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] = { &vt6420_port_info, NULL };
	struct ata_host *host;
	int rc;

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	*r_host = host;

	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to iomap PCI BAR 5\n");
		return rc;
	}

	host->ports[0]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 0);
	host->ports[1]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 1);

	return 0;
}

static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] =
		{ &vt6421_sport_info, &vt6421_sport_info, &vt6421_pport_info };
	struct ata_host *host;
	int i, rc;

	*r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi));
	if (!host) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to allocate host\n");
		return -ENOMEM;
	}

	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to request/iomap "
			   "PCI BARs (errno=%d)\n", rc);
		return rc;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < host->n_ports; i++)
		vt6421_init_addrs(host->ports[i]);

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	return 0;
}

static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] = { &vt8251_port_info, NULL };
	struct ata_host *host;
	int i, rc;

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	*r_host = host;

	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to iomap PCI BAR 5\n");
		return rc;
	}

	/* 8251 hosts four sata ports as M/S of the two channels */
	for (i = 0; i < host->n_ports; i++)
		ata_slave_link_init(host->ports[i]);

	return 0;
}

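/*
 * svia_configure - chip setup common to all supported boards.
 * Makes sure the SATA channels are enabled, their interrupts are
 * gated through to us and native mode is on, and applies the
 * vt6420/vt6421 FIFO watermark fix described below.
 */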
static void svia_configure(struct pci_dev *pdev, int board_id)
{
	u8 tmp8;

	pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
	dev_printk(KERN_INFO, &pdev->dev, "routed to hard irq line %d\n",
		   (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);

	/* make sure SATA channels are enabled */
	pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "enabling SATA channels (0x%x)\n",
			   (int) tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
	}

	/* make sure interrupts for each channel are sent to us */
	pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "enabling SATA channel interrupts (0x%x)\n",
			   (int) tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
	}

	/* make sure native mode is enabled */
	pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
	if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "enabling SATA channel native mode (0x%x)\n",
			   (int) tmp8);
		tmp8 |= NATIVE_MODE_ALL;
		pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
	}

	/*
	 * vt6420/1 has problems talking to some drives.  The following
	 * is the fix from Joseph Chan <JosephChan@via.com.tw>.
	 *
	 * When host issues HOLD, device may send up to 20DW of data
	 * before acknowledging it with HOLDA and the host should be
	 * able to buffer them in FIFO.  Unfortunately, some WD drives
	 * send up to 40DW before acknowledging HOLD and, in the
	 * default configuration, this ends up overflowing vt6421's
	 * FIFO, making the controller abort the transaction with
	 * R_ERR.
	 *
	 * Rx52[2] is the internal 128DW FIFO Flow control watermark
	 * adjusting mechanism enable bit and the default value 0
	 * means host will issue HOLD to device when the left FIFO
	 * size goes below 32DW.  Setting it to 1 makes the watermark
	 * 64DW.
	 *
	 * https://bugzilla.kernel.org/show_bug.cgi?id=15173
	 * http://article.gmane.org/gmane.linux.ide/46352
	 * http://thread.gmane.org/gmane.linux.kernel/1062139
	 */
	if (board_id == vt6420 || board_id == vt6421) {
		pci_read_config_byte(pdev, 0x52, &tmp8);
		tmp8 |= 1 << 2;
		pci_write_config_byte(pdev, 0x52, tmp8);
	}
}

static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int i;
	int rc;
	struct ata_host *host = NULL;
	int board_id = (int) ent->driver_data;
	const unsigned *bar_sizes;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if (board_id == vt6421)
		bar_sizes = &vt6421_bar_sizes[0];
	else
		bar_sizes = &svia_bar_sizes[0];

	for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
		if ((pci_resource_start(pdev, i) == 0) ||
		    (pci_resource_len(pdev, i) < bar_sizes[i])) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n",
				   i,
				   (unsigned long long)pci_resource_start(pdev, i),
				   (unsigned long long)pci_resource_len(pdev, i));
			return -ENODEV;
		}

	switch (board_id) {
	case vt6420:
		rc = vt6420_prepare_host(pdev, &host);
		break;
	case vt6421:
		rc = vt6421_prepare_host(pdev, &host);
		break;
	case vt8251:
		rc = vt8251_prepare_host(pdev, &host);
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	svia_configure(pdev, board_id);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
				 IRQF_SHARED, &svia_sht);
}

static int __init svia_init(void)
{
	return pci_register_driver(&svia_pci_driver);
}

static void __exit svia_exit(void)
{
	pci_unregister_driver(&svia_pci_driver);
}

module_init(svia_init);
module_exit(svia_exit);