/*
 * sata_inic162x.c - Driver for Initio 162x SATA controllers
 *
 * Copyright 2006  SUSE Linux Products GmbH
 * Copyright 2006  Tejun Heo <teheo@novell.com>
 *
 * This file is released under GPL v2.
 *
 * This controller is eccentric and easily locks up if something isn't
 * right.  Documentation is available at initio's website but it only
 * documents registers (not the programming model).
 *
 * - ATA disks work.
 * - Hotplug works.
 * - ATAPI read works but burning doesn't.  This thing is really
 *   peculiar about ATAPI and I couldn't figure out how ATAPI PIO and
 *   ATAPI DMA WRITE should be programmed.  If you've got a clue, be
 *   my guest.
 * - Both STR and STD work.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

#define DRV_NAME        "sata_inic162x"
#define DRV_VERSION     "0.3"

enum {
        MMIO_BAR                = 5,

        NR_PORTS                = 2,

        HOST_CTL                = 0x7c,
        HOST_STAT               = 0x7e,
        HOST_IRQ_STAT           = 0xbc,
        HOST_IRQ_MASK           = 0xbe,

        PORT_SIZE               = 0x40,

        /* registers for ATA TF operation */
        PORT_TF                 = 0x00,
        PORT_ALT_STAT           = 0x08,
        PORT_IRQ_STAT           = 0x09,
        PORT_IRQ_MASK           = 0x0a,
        PORT_PRD_CTL            = 0x0b,
        PORT_PRD_ADDR           = 0x0c,
        PORT_PRD_XFERLEN        = 0x10,

        /* IDMA register */
        PORT_IDMA_CTL           = 0x14,

        PORT_SCR                = 0x20,

        /* HOST_CTL bits */
        HCTL_IRQOFF             = (1 << 8),  /* global IRQ off */
        HCTL_PWRDWN             = (1 << 12), /* power down PHYs */
        HCTL_SOFTRST            = (1 << 13), /* global reset (no phy reset) */
        HCTL_RPGSEL             = (1 << 15), /* register page select */

        HCTL_KNOWN_BITS         = HCTL_IRQOFF | HCTL_PWRDWN | HCTL_SOFTRST |
                                  HCTL_RPGSEL,

        /* HOST_IRQ_(STAT|MASK) bits */
        HIRQ_PORT0              = (1 << 0),
        HIRQ_PORT1              = (1 << 1),
        HIRQ_SOFT               = (1 << 14),
        HIRQ_GLOBAL             = (1 << 15), /* STAT only */

        /* PORT_IRQ_(STAT|MASK) bits */
        PIRQ_OFFLINE            = (1 << 0),  /* device unplugged */
        PIRQ_ONLINE             = (1 << 1),  /* device plugged */
        PIRQ_COMPLETE           = (1 << 2),  /* completion interrupt */
        PIRQ_FATAL              = (1 << 3),  /* fatal error */
        PIRQ_ATA                = (1 << 4),  /* ATA interrupt */
        PIRQ_REPLY              = (1 << 5),  /* reply FIFO not empty */
        PIRQ_PENDING            = (1 << 7),  /* port IRQ pending (STAT only) */

        PIRQ_ERR                = PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,

        PIRQ_MASK_DMA_READ      = PIRQ_REPLY | PIRQ_ATA,
        PIRQ_MASK_OTHER         = PIRQ_REPLY | PIRQ_COMPLETE,
        PIRQ_MASK_FREEZE        = 0xff,

        /* PORT_PRD_CTL bits */
        PRD_CTL_START           = (1 << 0),
        PRD_CTL_WR              = (1 << 3),
        PRD_CTL_DMAEN           = (1 << 7),  /* DMA enable */

        /* PORT_IDMA_CTL bits */
        IDMA_CTL_RST_ATA        = (1 << 2),  /* hardreset ATA bus */
        IDMA_CTL_RST_IDMA       = (1 << 5),  /* reset IDMA machinery */
        IDMA_CTL_GO             = (1 << 7),  /* IDMA mode go */
        IDMA_CTL_ATA_NIEN       = (1 << 8),  /* ATA IRQ disable */
};

struct inic_host_priv {
        u16     cached_hctl;
};

struct inic_port_priv {
        u8      dfl_prdctl;
        u8      cached_prdctl;
        u8      cached_pirq_mask;
};

static struct scsi_host_template inic_sht = {
        ATA_BMDMA_SHT(DRV_NAME),
};

static const int scr_map[] = {
        [SCR_STATUS]    = 0,
        [SCR_ERROR]     = 1,
        [SCR_CONTROL]   = 2,
};

static void __iomem *inic_port_base(struct ata_port *ap)
{
        return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE;
}
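/*
 * Port IRQ mask handling.  The mask register is shadowed in
 * inic_port_priv so that inic_set_pirq_mask() can skip the MMIO write
 * when the requested mask is already in effect; only the
 * __inic_set_pirq_mask() variant touches the hardware unconditionally.
 */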
static void __inic_set_pirq_mask(struct ata_port *ap, u8 mask)
{
        void __iomem *port_base = inic_port_base(ap);
        struct inic_port_priv *pp = ap->private_data;

        writeb(mask, port_base + PORT_IRQ_MASK);
        pp->cached_pirq_mask = mask;
}

static void inic_set_pirq_mask(struct ata_port *ap, u8 mask)
{
        struct inic_port_priv *pp = ap->private_data;

        if (pp->cached_pirq_mask != mask)
                __inic_set_pirq_mask(ap, mask);
}

static void inic_reset_port(void __iomem *port_base)
{
        void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
        u16 ctl;

        ctl = readw(idma_ctl);
        ctl &= ~(IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN | IDMA_CTL_GO);

        /* mask IRQ and assert reset */
        writew(ctl | IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN, idma_ctl);
        readw(idma_ctl); /* flush */

        /* give it some time */
        msleep(1);

        /* release reset */
        writew(ctl | IDMA_CTL_ATA_NIEN, idma_ctl);

        /* clear irq */
        writeb(0xff, port_base + PORT_IRQ_STAT);

        /* reenable ATA IRQ, turn off IDMA mode */
        writew(ctl, idma_ctl);
}

static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
{
        void __iomem *scr_addr = ap->ioaddr.scr_addr;
        void __iomem *addr;

        if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
                return -EINVAL;

        addr = scr_addr + scr_map[sc_reg] * 4;
        *val = readl(addr);

        /* this controller has stuck DIAG.N, ignore it */
        if (sc_reg == SCR_ERROR)
                *val &= ~SERR_PHYRDY_CHG;
        return 0;
}

static int inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
{
        void __iomem *scr_addr = ap->ioaddr.scr_addr;
        void __iomem *addr;

        if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
                return -EINVAL;

        addr = scr_addr + scr_map[sc_reg] * 4;
        writel(val, addr);
        return 0;
}
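/*
 * Note on the SCR helpers above: with PORT_SCR at 0x20 and the
 * scr_map[] indices scaled by 4, SStatus/SError/SControl decode to
 * port offsets 0x20/0x24/0x28 respectively.  (Derived from the
 * register constants; the datasheet offsets aren't restated here.)
 */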
/*
 * In TF mode, inic162x is very similar to an SFF device.  TF
 * registers function the same.  The DMA engine behaves similarly,
 * using the same PRD format as BMDMA, but a different command
 * register and different interrupt and event notification methods
 * are used.  The following inic_bmdma_*() functions do the impedance
 * matching.
 */
static void inic_bmdma_setup(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct inic_port_priv *pp = ap->private_data;
        void __iomem *port_base = inic_port_base(ap);
        int rw = qc->tf.flags & ATA_TFLAG_WRITE;

        /* make sure device sees PRD table writes */
        wmb();

        /* load transfer length */
        writel(qc->nbytes, port_base + PORT_PRD_XFERLEN);

        /* turn on DMA and specify data direction */
        pp->cached_prdctl = pp->dfl_prdctl | PRD_CTL_DMAEN;
        if (!rw)
                pp->cached_prdctl |= PRD_CTL_WR;
        writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);

        /* issue r/w command */
        ap->ops->sff_exec_command(ap, &qc->tf);
}

static void inic_bmdma_start(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct inic_port_priv *pp = ap->private_data;
        void __iomem *port_base = inic_port_base(ap);

        /* start host DMA transaction */
        pp->cached_prdctl |= PRD_CTL_START;
        writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);
}

static void inic_bmdma_stop(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct inic_port_priv *pp = ap->private_data;
        void __iomem *port_base = inic_port_base(ap);

        /* stop DMA engine */
        writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
}

static u8 inic_bmdma_status(struct ata_port *ap)
{
        /* event is already verified by the interrupt handler */
        return ATA_DMA_INTR;
}

static void inic_host_intr(struct ata_port *ap)
{
        void __iomem *port_base = inic_port_base(ap);
        struct ata_eh_info *ehi = &ap->link.eh_info;
        u8 irq_stat;

        /* fetch and clear irq */
        irq_stat = readb(port_base + PORT_IRQ_STAT);
        writeb(irq_stat, port_base + PORT_IRQ_STAT);

        if (likely(!(irq_stat & PIRQ_ERR))) {
                struct ata_queued_cmd *qc =
                        ata_qc_from_tag(ap, ap->link.active_tag);

                if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
                        ap->ops->sff_check_status(ap); /* clear ATA interrupt */
                        return;
                }

                if (likely(ata_sff_host_intr(ap, qc)))
                        return;

                ap->ops->sff_check_status(ap); /* clear ATA interrupt */
                ata_port_printk(ap, KERN_WARNING,
                                "unhandled interrupt, irq_stat=%x\n", irq_stat);
                return;
        }

        /* error */
        ata_ehi_push_desc(ehi, "irq_stat=0x%x", irq_stat);

        if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
                ata_ehi_hotplugged(ehi);
                ata_port_freeze(ap);
        } else
                ata_port_abort(ap);
}

static irqreturn_t inic_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        void __iomem *mmio_base = host->iomap[MMIO_BAR];
        u16 host_irq_stat;
        int i, handled = 0;

        host_irq_stat = readw(mmio_base + HOST_IRQ_STAT);

        if (unlikely(!(host_irq_stat & HIRQ_GLOBAL)))
                goto out;

        spin_lock(&host->lock);

        for (i = 0; i < NR_PORTS; i++) {
                struct ata_port *ap = host->ports[i];

                if (!(host_irq_stat & (HIRQ_PORT0 << i)))
                        continue;

                if (likely(ap && !(ap->flags & ATA_FLAG_DISABLED))) {
                        inic_host_intr(ap);
                        handled++;
                } else {
                        if (ata_ratelimit())
                                dev_printk(KERN_ERR, host->dev, "interrupt "
                                           "from disabled port %d (0x%x)\n",
                                           i, host_irq_stat);
                }
        }

        spin_unlock(&host->lock);

 out:
        return IRQ_RETVAL(handled);
}
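/*
 * Summary of the interrupt path above: HOST_IRQ_STAT is sampled
 * before taking the host lock and the IRQ is disowned unless
 * HIRQ_GLOBAL is set; the per-port HIRQ_PORT0 << i bits then route to
 * inic_host_intr(), which acks a port's events by writing the read
 * value back to PORT_IRQ_STAT.
 */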
static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;

        /* ATA IRQ doesn't wait for DMA transfer completion and vice
         * versa.  Mask IRQ selectively to detect command completion.
         * Without it, ATA DMA read command can cause data corruption.
         *
         * Something similar might be needed for ATAPI writes.  I
         * tried a lot of combinations but couldn't find the solution.
         */
        if (qc->tf.protocol == ATA_PROT_DMA &&
            !(qc->tf.flags & ATA_TFLAG_WRITE))
                inic_set_pirq_mask(ap, PIRQ_MASK_DMA_READ);
        else
                inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);

        /* Issuing a command to a yet uninitialized port locks up the
         * controller.  Most of the time, this happens for the first
         * commands after reset, which are the ATA and ATAPI IDENTIFYs.
         * Fast fail if stat is 0x7f or 0xff for those commands.
         */
        if (unlikely(qc->tf.command == ATA_CMD_ID_ATA ||
                     qc->tf.command == ATA_CMD_ID_ATAPI)) {
                u8 stat = ap->ops->sff_check_status(ap);
                if (stat == 0x7f || stat == 0xff)
                        return AC_ERR_HSM;
        }

        return ata_sff_qc_issue(qc);
}

static void inic_freeze(struct ata_port *ap)
{
        void __iomem *port_base = inic_port_base(ap);

        __inic_set_pirq_mask(ap, PIRQ_MASK_FREEZE);

        ap->ops->sff_check_status(ap);
        writeb(0xff, port_base + PORT_IRQ_STAT);

        readb(port_base + PORT_IRQ_STAT); /* flush */
}

static void inic_thaw(struct ata_port *ap)
{
        void __iomem *port_base = inic_port_base(ap);

        ap->ops->sff_check_status(ap);
        writeb(0xff, port_base + PORT_IRQ_STAT);

        __inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);

        readb(port_base + PORT_IRQ_STAT); /* flush */
}

/*
 * SRST and SControl hardreset don't give a valid signature on this
 * controller.  Only the controller specific hardreset mechanism works.
 */
static int inic_hardreset(struct ata_link *link, unsigned int *class,
                          unsigned long deadline)
{
        struct ata_port *ap = link->ap;
        void __iomem *port_base = inic_port_base(ap);
        void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
        const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
        u16 val;
        int rc;

        /* hammer it into sane state */
        inic_reset_port(port_base);

        val = readw(idma_ctl);
        writew(val | IDMA_CTL_RST_ATA, idma_ctl);
        readw(idma_ctl); /* flush */
        msleep(1);
        writew(val & ~IDMA_CTL_RST_ATA, idma_ctl);

        rc = sata_link_resume(link, timing, deadline);
        if (rc) {
                ata_link_printk(link, KERN_WARNING, "failed to resume "
                                "link after reset (errno=%d)\n", rc);
                return rc;
        }

        *class = ATA_DEV_NONE;
        if (ata_link_online(link)) {
                struct ata_taskfile tf;

                /* wait for link to become ready */
                rc = ata_sff_wait_after_reset(link, 1, deadline);
                /* link occupied, -ENODEV too is an error */
                if (rc) {
                        ata_link_printk(link, KERN_WARNING, "device not ready "
                                        "after hardreset (errno=%d)\n", rc);
                        return rc;
                }

                ata_sff_tf_read(ap, &tf);
                *class = ata_dev_classify(&tf);
        }

        return 0;
}

static void inic_error_handler(struct ata_port *ap)
{
        void __iomem *port_base = inic_port_base(ap);
        struct inic_port_priv *pp = ap->private_data;
        unsigned long flags;

        /* reset PIO HSM and stop DMA engine */
        inic_reset_port(port_base);

        spin_lock_irqsave(ap->lock, flags);
        ap->hsm_task_state = HSM_ST_IDLE;
        writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
        spin_unlock_irqrestore(ap->lock, flags);

        /* PIO and DMA engines have been stopped, perform recovery */
        ata_std_error_handler(ap);
}
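/*
 * Like inic_error_handler() above, inic_post_internal_cmd() below
 * falls back on inic_reset_port() as the recovery big hammer - a
 * failed command may leave the IDMA engine in an ill-defined state,
 * so it is reset before anything else is attempted.
 */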
static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
{
        /* make DMA engine forget about the failed command */
        if (qc->flags & ATA_QCFLAG_FAILED)
                inic_reset_port(inic_port_base(qc->ap));
}

static void inic_dev_config(struct ata_device *dev)
{
        /* inic can only handle up to LBA28 max sectors */
        if (dev->max_sectors > ATA_MAX_SECTORS)
                dev->max_sectors = ATA_MAX_SECTORS;

        if (dev->n_sectors >= 1 << 28) {
                ata_dev_printk(dev, KERN_ERR,
        "ERROR: This driver doesn't support LBA48 yet and may cause\n"
        "       data corruption on such devices.  Disabling.\n");
                ata_dev_disable(dev);
        }
}

static void init_port(struct ata_port *ap)
{
        void __iomem *port_base = inic_port_base(ap);

        /* Setup PRD address */
        writel(ap->prd_dma, port_base + PORT_PRD_ADDR);
}

static int inic_port_resume(struct ata_port *ap)
{
        init_port(ap);
        return 0;
}

static int inic_port_start(struct ata_port *ap)
{
        void __iomem *port_base = inic_port_base(ap);
        struct inic_port_priv *pp;
        u8 tmp;
        int rc;

        /* alloc and initialize private data */
        pp = devm_kzalloc(ap->host->dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;
        ap->private_data = pp;

        /* default PRD_CTL value, DMAEN, WR and START off */
        tmp = readb(port_base + PORT_PRD_CTL);
        tmp &= ~(PRD_CTL_DMAEN | PRD_CTL_WR | PRD_CTL_START);
        pp->dfl_prdctl = tmp;

        /* Alloc resources */
        rc = ata_port_start(ap);
        if (rc)
                return rc;      /* pp is devm-managed, don't kfree() it */

        init_port(ap);

        return 0;
}

static struct ata_port_operations inic_port_ops = {
        .inherits               = &ata_sff_port_ops,

        .bmdma_setup            = inic_bmdma_setup,
        .bmdma_start            = inic_bmdma_start,
        .bmdma_stop             = inic_bmdma_stop,
        .bmdma_status           = inic_bmdma_status,
        .qc_issue               = inic_qc_issue,

        .freeze                 = inic_freeze,
        .thaw                   = inic_thaw,
        .softreset              = ATA_OP_NULL, /* softreset is broken */
        .hardreset              = inic_hardreset,
        .error_handler          = inic_error_handler,
        .post_internal_cmd      = inic_post_internal_cmd,
        .dev_config             = inic_dev_config,

        .scr_read               = inic_scr_read,
        .scr_write              = inic_scr_write,

        .port_resume            = inic_port_resume,
        .port_start             = inic_port_start,
};

static struct ata_port_info inic_port_info = {
        /* For some reason, ATAPI_PROT_PIO is broken on this
         * controller, and no, PIO_POLLING doesn't fix it.  It somehow
         * manages to report the wrong ireason and ignoring ireason
         * results in machine lock up.  Tell libata to always prefer
         * DMA.
         */
        .flags                  = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
        .pio_mask               = 0x1f, /* pio0-4 */
        .mwdma_mask             = 0x07, /* mwdma0-2 */
        .udma_mask              = ATA_UDMA6,
        .port_ops               = &inic_port_ops
};
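/*
 * One-time controller bring-up, shared by probe and PCI resume: soft
 * reset the whole chip, mask and reset each port, then unmask the
 * global and per-port IRQ lines.
 */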
static int init_controller(void __iomem *mmio_base, u16 hctl)
{
        int i;
        u16 val;

        hctl &= ~HCTL_KNOWN_BITS;

        /* Soft reset whole controller.  Spec says reset duration is 3
         * PCI clocks, be generous and give it 10ms.
         */
        writew(hctl | HCTL_SOFTRST, mmio_base + HOST_CTL);
        readw(mmio_base + HOST_CTL); /* flush */

        for (i = 0; i < 10; i++) {
                msleep(1);
                val = readw(mmio_base + HOST_CTL);
                if (!(val & HCTL_SOFTRST))
                        break;
        }

        if (val & HCTL_SOFTRST)
                return -EIO;

        /* mask all interrupts and reset ports */
        for (i = 0; i < NR_PORTS; i++) {
                void __iomem *port_base = mmio_base + i * PORT_SIZE;

                writeb(0xff, port_base + PORT_IRQ_MASK);
                inic_reset_port(port_base);
        }

        /* port IRQ is masked now, unmask global IRQ */
        writew(hctl & ~HCTL_IRQOFF, mmio_base + HOST_CTL);
        val = readw(mmio_base + HOST_IRQ_MASK);
        val &= ~(HIRQ_PORT0 | HIRQ_PORT1);
        writew(val, mmio_base + HOST_IRQ_MASK);

        return 0;
}

#ifdef CONFIG_PM
static int inic_pci_device_resume(struct pci_dev *pdev)
{
        struct ata_host *host = dev_get_drvdata(&pdev->dev);
        struct inic_host_priv *hpriv = host->private_data;
        void __iomem *mmio_base = host->iomap[MMIO_BAR];
        int rc;

        rc = ata_pci_device_do_resume(pdev);
        if (rc)
                return rc;

        if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
                rc = init_controller(mmio_base, hpriv->cached_hctl);
                if (rc)
                        return rc;
        }

        ata_host_resume(host);

        return 0;
}
#endif

static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int printed_version;
        const struct ata_port_info *ppi[] = { &inic_port_info, NULL };
        struct ata_host *host;
        struct inic_host_priv *hpriv;
        void __iomem * const *iomap;
        int i, rc;

        if (!printed_version++)
                dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

        /* alloc host */
        host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
        if (!host || !hpriv)
                return -ENOMEM;

        host->private_data = hpriv;

        /* acquire resources and fill host */
        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
        if (rc)
                return rc;
        host->iomap = iomap = pcim_iomap_table(pdev);

        for (i = 0; i < NR_PORTS; i++) {
                struct ata_port *ap = host->ports[i];
                struct ata_ioports *port = &ap->ioaddr;
                unsigned int offset = i * PORT_SIZE;

                port->cmd_addr = iomap[2 * i];
                port->altstatus_addr =
                port->ctl_addr = (void __iomem *)
                        ((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS);
                port->scr_addr = iomap[MMIO_BAR] + offset + PORT_SCR;

                ata_sff_std_ports(port);

                ata_port_pbar_desc(ap, MMIO_BAR, -1, "mmio");
                ata_port_pbar_desc(ap, MMIO_BAR, offset, "port");
                ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
                  (unsigned long long)pci_resource_start(pdev, 2 * i),
                  (unsigned long long)pci_resource_start(pdev, (2 * i + 1)) |
                                      ATA_PCI_CTL_OFS);
        }

        hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL);

        /* Set dma_mask.  This device doesn't support 64-bit addressing. */
        rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
        if (rc) {
                dev_printk(KERN_ERR, &pdev->dev,
                           "32-bit DMA enable failed\n");
                return rc;
        }

        rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
        if (rc) {
                dev_printk(KERN_ERR, &pdev->dev,
                           "32-bit consistent DMA enable failed\n");
                return rc;
        }

        /*
         * This controller is braindamaged.  dma_boundary is 0xffff
         * like others but it will lock up the whole machine HARD if
         * a 65536 byte PRD entry is fed.  Reduce maximum segment size.
         */
        rc = pci_set_dma_max_seg_size(pdev, 65536 - 512);
        if (rc) {
                dev_printk(KERN_ERR, &pdev->dev,
                           "failed to set the maximum segment size\n");
                return rc;
        }

        rc = init_controller(iomap[MMIO_BAR], hpriv->cached_hctl);
        if (rc) {
                dev_printk(KERN_ERR, &pdev->dev,
                           "failed to initialize controller\n");
                return rc;
        }

        pci_set_master(pdev);
        return ata_host_activate(host, pdev->irq, inic_interrupt, IRQF_SHARED,
                                 &inic_sht);
}
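/* Only the 1622 is claimed below; if other members of the 162x family
 * ship with different device IDs, they would presumably need entries
 * of their own in this table.
 */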
static const struct pci_device_id inic_pci_tbl[] = {
        { PCI_VDEVICE(INIT, 0x1622), },
        { },
};

static struct pci_driver inic_pci_driver = {
        .name           = DRV_NAME,
        .id_table       = inic_pci_tbl,
#ifdef CONFIG_PM
        .suspend        = ata_pci_device_suspend,
        .resume         = inic_pci_device_resume,
#endif
        .probe          = inic_init_one,
        .remove         = ata_pci_remove_one,
};

static int __init inic_init(void)
{
        return pci_register_driver(&inic_pci_driver);
}

static void __exit inic_exit(void)
{
        pci_unregister_driver(&inic_pci_driver);
}

MODULE_AUTHOR("Tejun Heo");
MODULE_DESCRIPTION("low-level driver for Initio 162x SATA");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, inic_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(inic_init);
module_exit(inic_exit);