/*
 *  sata_promise.c - Promise SATA
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 *		    Mikael Pettersson
 *  Please ALWAYS copy linux-ide@vger.kernel.org
 *  on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware information only available under NDA.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include "sata_promise.h"

#define DRV_NAME	"sata_promise"
#define DRV_VERSION	"2.12"

enum {
	PDC_MAX_PORTS		= 4,
	PDC_MMIO_BAR		= 3,
	PDC_MAX_PRD		= LIBATA_MAX_PRD - 1, /* -1 for ASIC PRD bug workaround */

	/* host register offsets (from host->iomap[PDC_MMIO_BAR]) */
	PDC_INT_SEQMASK		= 0x40,	/* Mask of asserted SEQ INTs */
	PDC_FLASH_CTL		= 0x44,	/* Flash control register */
	PDC_PCI_CTL		= 0x48,	/* PCI control/status reg */
	PDC_SATA_PLUG_CSR	= 0x6C,	/* SATA Plug control/status reg */
	PDC2_SATA_PLUG_CSR	= 0x60,	/* SATAII Plug control/status reg */
	PDC_TBG_MODE		= 0x41C, /* TBG mode (not SATAII) */
	PDC_SLEW_CTL		= 0x470, /* slew rate control reg (not SATAII) */

	/* per-port ATA register offsets (from ap->ioaddr.cmd_addr) */
	PDC_FEATURE		= 0x04, /* Feature/Error reg (per port) */
	PDC_SECTOR_COUNT	= 0x08, /* Sector count reg (per port) */
	PDC_SECTOR_NUMBER	= 0x0C, /* Sector number reg (per port) */
	PDC_CYLINDER_LOW	= 0x10, /* Cylinder low reg (per port) */
	PDC_CYLINDER_HIGH	= 0x14, /* Cylinder high reg (per port) */
	PDC_DEVICE		= 0x18, /* Device/Head reg (per port) */
	PDC_COMMAND		= 0x1C, /* Command/status reg (per port) */
	PDC_ALTSTATUS		= 0x38, /* Alternate-status/device-control reg (per port) */
	PDC_PKT_SUBMIT		= 0x40, /* Command packet pointer addr */
	PDC_GLOBAL_CTL		= 0x48, /* Global control/status (per port) */
	PDC_CTLSTAT		= 0x60,	/* IDE control and status (per port) */

	/* per-port SATA register offsets (from ap->ioaddr.scr_addr) */
	PDC_SATA_ERROR		= 0x04,
	PDC_PHYMODE4		= 0x14,
	PDC_LINK_LAYER_ERRORS	= 0x6C,
	PDC_FPDMA_CTLSTAT	= 0xD8,
	PDC_INTERNAL_DEBUG_1	= 0xF8,	/* also used for PATA */
	PDC_INTERNAL_DEBUG_2	= 0xFC,	/* also used for PATA */

	/* PDC_FPDMA_CTLSTAT bit definitions */
	PDC_FPDMA_CTLSTAT_RESET			= 1 << 3,
	PDC_FPDMA_CTLSTAT_DMASETUP_INT_FLAG	= 1 << 10,
	PDC_FPDMA_CTLSTAT_SETDB_INT_FLAG	= 1 << 11,

	/* PDC_GLOBAL_CTL bit definitions */
	PDC_PH_ERR		= (1 << 8),  /* PCI error while loading packet */
	PDC_SH_ERR		= (1 << 9),  /* PCI error while loading S/G table */
	PDC_DH_ERR		= (1 << 10), /* PCI error while loading data */
	PDC2_HTO_ERR		= (1 << 12), /* host bus timeout */
	PDC2_ATA_HBA_ERR	= (1 << 13), /* error during SATA DATA FIS transmission */
	PDC2_ATA_DMA_CNT_ERR	= (1 << 14), /* DMA DATA FIS size differs from S/G count */
	PDC_OVERRUN_ERR		= (1 << 19), /* S/G byte count larger than HD requires */
	PDC_UNDERRUN_ERR	= (1 << 20), /* S/G byte count less than HD requires */
	PDC_DRIVE_ERR		= (1 << 21), /* drive error */
	PDC_PCI_SYS_ERR		= (1 << 22), /* PCI system error */
	PDC1_PCI_PARITY_ERR	= (1 << 23), /* PCI parity error (from SATA150 driver) */
	PDC1_ERR_MASK		= PDC1_PCI_PARITY_ERR,
	PDC2_ERR_MASK		= PDC2_HTO_ERR | PDC2_ATA_HBA_ERR |
				  PDC2_ATA_DMA_CNT_ERR,
	PDC_ERR_MASK		= PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR |
				  PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR |
				  PDC_DRIVE_ERR | PDC_PCI_SYS_ERR |
				  PDC1_ERR_MASK | PDC2_ERR_MASK,

	board_2037x		= 0,	/* FastTrak S150 TX2plus */
	board_2037x_pata	= 1,	/* FastTrak S150 TX2plus PATA port */
	board_20319		= 2,	/* FastTrak S150 TX4 */
	board_20619		= 3,	/* FastTrak TX4000 */
	board_2057x		= 4,	/* SATAII150 Tx2plus */
	board_2057x_pata	= 5,	/* SATAII150 Tx2plus PATA port */
	board_40518		= 6,	/* SATAII150 Tx4 */

	PDC_HAS_PATA		= (1 << 1), /* PDC20375/20575 has PATA */

	/* Sequence counter control registers bit definitions */
	PDC_SEQCNTRL_INT_MASK	= (1 << 5), /* Sequence Interrupt Mask */

	/* Feature register values */
	PDC_FEATURE_ATAPI_PIO	= 0x00, /* ATAPI data xfer by PIO */
	PDC_FEATURE_ATAPI_DMA	= 0x01, /* ATAPI data xfer by DMA */

	/* Device/Head register values */
	PDC_DEVICE_SATA		= 0xE0, /* Device/Head value for SATA devices */

	/* PDC_CTLSTAT bit definitions */
	PDC_DMA_ENABLE		= (1 << 7),
	PDC_IRQ_DISABLE		= (1 << 10),
	PDC_RESET		= (1 << 11), /* HDMA reset */

	PDC_COMMON_FLAGS	= ATA_FLAG_PIO_POLLING,

	/* ap->flags bits */
	PDC_FLAG_GEN_II		= (1 << 24),
	PDC_FLAG_SATA_PATA	= (1 << 25), /* supports SATA + PATA */
	PDC_FLAG_4_PORTS	= (1 << 26), /* 4 ports */
};

struct pdc_port_priv {
	u8			*pkt;
	dma_addr_t		pkt_dma;
};

struct pdc_host_priv {
	spinlock_t hard_reset_lock;
};

static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int pdc_common_port_start(struct ata_port *ap);
static int pdc_sata_port_start(struct ata_port *ap);
static void pdc_qc_prep(struct ata_queued_cmd *qc);
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc);
static void pdc_irq_clear(struct ata_port *ap);
static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc);
static void pdc_freeze(struct ata_port *ap);
static void pdc_sata_freeze(struct ata_port *ap);
static void pdc_thaw(struct ata_port *ap);
static void pdc_sata_thaw(struct ata_port *ap);
static int pdc_pata_softreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline);
static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline);
static void pdc_error_handler(struct ata_port *ap);
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
static int pdc_pata_cable_detect(struct ata_port *ap);
static int pdc_sata_cable_detect(struct ata_port *ap);

static struct scsi_host_template pdc_ata_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= PDC_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
};

static const struct ata_port_operations pdc_common_ops = {
	.inherits		= &ata_sff_port_ops,

	.sff_tf_load		= pdc_tf_load_mmio,
	.sff_exec_command	= pdc_exec_command_mmio,
	.check_atapi_dma	= pdc_check_atapi_dma,
	.qc_prep		= pdc_qc_prep,
	.qc_issue		= pdc_qc_issue,

	.sff_irq_clear		= pdc_irq_clear,
	.lost_interrupt		= ATA_OP_NULL,

	.post_internal_cmd	= pdc_post_internal_cmd,
	.error_handler		= pdc_error_handler,
};

static struct ata_port_operations pdc_sata_ops = {
	.inherits		= &pdc_common_ops,
	.cable_detect		= pdc_sata_cable_detect,
	.freeze			= pdc_sata_freeze,
	.thaw			= pdc_sata_thaw,
	.scr_read		= pdc_sata_scr_read,
	.scr_write		= pdc_sata_scr_write,
	.port_start		= pdc_sata_port_start,
	.hardreset		= pdc_sata_hardreset,
};

/* First-generation chips need a more restrictive ->check_atapi_dma op,
   and ->freeze/thaw that ignore the hotplug controls. */
static struct ata_port_operations pdc_old_sata_ops = {
	.inherits		= &pdc_sata_ops,
	.freeze			= pdc_freeze,
	.thaw			= pdc_thaw,
	.check_atapi_dma	= pdc_old_sata_check_atapi_dma,
};

static struct ata_port_operations pdc_pata_ops = {
	.inherits		= &pdc_common_ops,
	.cable_detect		= pdc_pata_cable_detect,
	.freeze			= pdc_freeze,
	.thaw			= pdc_thaw,
	.port_start		= pdc_common_port_start,
	.softreset		= pdc_pata_softreset,
};

static const struct ata_port_info pdc_port_info[] = {
	[board_2037x] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
				  PDC_FLAG_SATA_PATA,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_old_sata_ops,
	},

	[board_2037x_pata] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_pata_ops,
	},

	[board_20319] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
				  PDC_FLAG_4_PORTS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_old_sata_ops,
	},

	[board_20619] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS |
				  PDC_FLAG_4_PORTS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_pata_ops,
	},

	[board_2057x] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
				  PDC_FLAG_GEN_II | PDC_FLAG_SATA_PATA,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_sata_ops,
	},

	[board_2057x_pata] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS |
				  PDC_FLAG_GEN_II,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_pata_ops,
	},

	[board_40518] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
				  PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_sata_ops,
	},
};

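/*
 * PCI ID table.  driver_data is an index into pdc_port_info[] above.
 * The *_pata entries are deliberately absent here: when a SATA+PATA
 * chip reports a PATA port, pdc_ata_init_one() selects the following
 * pdc_port_info[] entry (pi + 1) for that extra port at probe time.
 */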
static const struct pci_device_id pdc_ata_pci_tbl[] = {
	{ PCI_VDEVICE(PROMISE, 0x3371), board_2037x },
	{ PCI_VDEVICE(PROMISE, 0x3373), board_2037x },
	{ PCI_VDEVICE(PROMISE, 0x3375), board_2037x },
	{ PCI_VDEVICE(PROMISE, 0x3376), board_2037x },
	{ PCI_VDEVICE(PROMISE, 0x3570), board_2057x },
	{ PCI_VDEVICE(PROMISE, 0x3571), board_2057x },
	{ PCI_VDEVICE(PROMISE, 0x3574), board_2057x },
	{ PCI_VDEVICE(PROMISE, 0x3577), board_2057x },
	{ PCI_VDEVICE(PROMISE, 0x3d73), board_2057x },
	{ PCI_VDEVICE(PROMISE, 0x3d75), board_2057x },

	{ PCI_VDEVICE(PROMISE, 0x3318), board_20319 },
	{ PCI_VDEVICE(PROMISE, 0x3319), board_20319 },
	{ PCI_VDEVICE(PROMISE, 0x3515), board_40518 },
	{ PCI_VDEVICE(PROMISE, 0x3519), board_40518 },
	{ PCI_VDEVICE(PROMISE, 0x3d17), board_40518 },
	{ PCI_VDEVICE(PROMISE, 0x3d18), board_40518 },

	{ PCI_VDEVICE(PROMISE, 0x6629), board_20619 },

	{ }	/* terminate list */
};

static struct pci_driver pdc_ata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= pdc_ata_pci_tbl,
	.probe			= pdc_ata_init_one,
	.remove			= ata_pci_remove_one,
};

static int pdc_common_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct pdc_port_priv *pp;
	int rc;

	/* we use the same prd table as bmdma, allocate it */
	rc = ata_bmdma_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
	if (!pp->pkt)
		return -ENOMEM;

	ap->private_data = pp;

	return 0;
}

static int pdc_sata_port_start(struct ata_port *ap)
{
	int rc;

	rc = pdc_common_port_start(ap);
	if (rc)
		return rc;

	/* fix up PHYMODE4 align timing */
	if (ap->flags & PDC_FLAG_GEN_II) {
		void __iomem *sata_mmio = ap->ioaddr.scr_addr;
		unsigned int tmp;

		tmp = readl(sata_mmio + PDC_PHYMODE4);
		tmp = (tmp & ~3) | 1;	/* set bits 1:0 = 0:1 */
		writel(tmp, sata_mmio + PDC_PHYMODE4);
	}

	return 0;
}

static void pdc_fpdma_clear_interrupt_flag(struct ata_port *ap)
{
	void __iomem *sata_mmio = ap->ioaddr.scr_addr;
	u32 tmp;

	tmp = readl(sata_mmio + PDC_FPDMA_CTLSTAT);
	tmp |= PDC_FPDMA_CTLSTAT_DMASETUP_INT_FLAG;
	tmp |= PDC_FPDMA_CTLSTAT_SETDB_INT_FLAG;

	/* It's not allowed to write to the entire FPDMA_CTLSTAT register
	   when NCQ is running. So do a byte-sized write to bits 10 and 11. */
	writeb(tmp >> 8, sata_mmio + PDC_FPDMA_CTLSTAT + 1);
	readb(sata_mmio + PDC_FPDMA_CTLSTAT + 1); /* flush */
}

static void pdc_fpdma_reset(struct ata_port *ap)
{
	void __iomem *sata_mmio = ap->ioaddr.scr_addr;
	u8 tmp;

	tmp = (u8)readl(sata_mmio + PDC_FPDMA_CTLSTAT);
	tmp &= 0x7F;
	tmp |= PDC_FPDMA_CTLSTAT_RESET;
	writeb(tmp, sata_mmio + PDC_FPDMA_CTLSTAT);
	readl(sata_mmio + PDC_FPDMA_CTLSTAT); /* flush */
	udelay(100);
	tmp &= ~PDC_FPDMA_CTLSTAT_RESET;
	writeb(tmp, sata_mmio + PDC_FPDMA_CTLSTAT);
	readl(sata_mmio + PDC_FPDMA_CTLSTAT); /* flush */

	pdc_fpdma_clear_interrupt_flag(ap);
}

static void pdc_not_at_command_packet_phase(struct ata_port *ap)
{
	void __iomem *sata_mmio = ap->ioaddr.scr_addr;
	unsigned int i;
	u32 tmp;

	/* check not at ASIC packet command phase */
	for (i = 0; i < 100; ++i) {
		writel(0, sata_mmio + PDC_INTERNAL_DEBUG_1);
		tmp = readl(sata_mmio + PDC_INTERNAL_DEBUG_2);
		if ((tmp & 0xF) != 1)
			break;
		udelay(100);
	}
}

static void pdc_clear_internal_debug_record_error_register(struct ata_port *ap)
{
	void __iomem *sata_mmio = ap->ioaddr.scr_addr;

	writel(0xffffffff, sata_mmio + PDC_SATA_ERROR);
	writel(0xffff0000, sata_mmio + PDC_LINK_LAYER_ERRORS);
}

static void pdc_reset_port(struct ata_port *ap)
{
	void __iomem *ata_ctlstat_mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
	unsigned int i;
	u32 tmp;

	if (ap->flags & PDC_FLAG_GEN_II)
		pdc_not_at_command_packet_phase(ap);

	tmp = readl(ata_ctlstat_mmio);
	tmp |= PDC_RESET;
	writel(tmp, ata_ctlstat_mmio);

	for (i = 11; i > 0; i--) {
		tmp = readl(ata_ctlstat_mmio);
		if (tmp & PDC_RESET)
			break;

		udelay(100);

		tmp |= PDC_RESET;
		writel(tmp, ata_ctlstat_mmio);
	}

	tmp &= ~PDC_RESET;
	writel(tmp, ata_ctlstat_mmio);
	readl(ata_ctlstat_mmio);	/* flush */

	if (sata_scr_valid(&ap->link) && (ap->flags & PDC_FLAG_GEN_II)) {
		pdc_fpdma_reset(ap);
		pdc_clear_internal_debug_record_error_register(ap);
	}
}

static int pdc_pata_cable_detect(struct ata_port *ap)
{
	u8 tmp;
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;

	tmp = readb(ata_mmio + PDC_CTLSTAT + 3);
	if (tmp & 0x01)
		return ATA_CBL_PATA40;
	return ATA_CBL_PATA80;
}

static int pdc_sata_cable_detect(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}

static int pdc_sata_scr_read(struct ata_link *link,
			     unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	*val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

static int pdc_sata_scr_write(struct ata_link *link,
			      unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

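/*
 * Build the ATAPI command packet consumed by the controller's packet
 * engine.  The hardware documentation is only available under NDA, so
 * the layout is described here as the code below constructs it: three
 * little-endian dwords (control bits, S/G table address, next-packet
 * pointer, unused here), followed by (register-select, value) byte
 * pairs; PDC_LAST_REG marks the final register write, after which the
 * raw SCSI CDB is appended.
 */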
static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	dma_addr_t sg_table = ap->bmdma_prd_dma;
	unsigned int cdb_len = qc->dev->cdb_len;
	u8 *cdb = qc->cdb;
	struct pdc_port_priv *pp = ap->private_data;
	u8 *buf = pp->pkt;
	__le32 *buf32 = (__le32 *) buf;
	unsigned int dev_sel, feature;

	/* set control bits (byte 0), zero delay seq id (byte 3),
	 * and seq id (byte 2)
	 */
	switch (qc->tf.protocol) {
	case ATAPI_PROT_DMA:
		if (!(qc->tf.flags & ATA_TFLAG_WRITE))
			buf32[0] = cpu_to_le32(PDC_PKT_READ);
		else
			buf32[0] = 0;
		break;
	case ATAPI_PROT_NODATA:
		buf32[0] = cpu_to_le32(PDC_PKT_NODATA);
		break;
	default:
		BUG();
		break;
	}
	buf32[1] = cpu_to_le32(sg_table);	/* S/G table addr */
	buf32[2] = 0;				/* no next-packet */

	/* select drive */
	if (sata_scr_valid(&ap->link))
		dev_sel = PDC_DEVICE_SATA;
	else
		dev_sel = qc->tf.device;

	buf[12] = (1 << 5) | ATA_REG_DEVICE;
	buf[13] = dev_sel;
	buf[14] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_CLEAR_BSY;
	buf[15] = dev_sel; /* once more, waiting for BSY to clear */

	buf[16] = (1 << 5) | ATA_REG_NSECT;
	buf[17] = qc->tf.nsect;
	buf[18] = (1 << 5) | ATA_REG_LBAL;
	buf[19] = qc->tf.lbal;

	/* set feature and byte counter registers */
	if (qc->tf.protocol != ATAPI_PROT_DMA)
		feature = PDC_FEATURE_ATAPI_PIO;
	else
		feature = PDC_FEATURE_ATAPI_DMA;

	buf[20] = (1 << 5) | ATA_REG_FEATURE;
	buf[21] = feature;
	buf[22] = (1 << 5) | ATA_REG_BYTEL;
	buf[23] = qc->tf.lbam;
	buf[24] = (1 << 5) | ATA_REG_BYTEH;
	buf[25] = qc->tf.lbah;

	/* send ATAPI packet command 0xA0 */
	buf[26] = (1 << 5) | ATA_REG_CMD;
	buf[27] = qc->tf.command;

	/* select drive and check DRQ */
	buf[28] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_WAIT_DRDY;
	buf[29] = dev_sel;

	/* we can represent cdb lengths 2/4/6/8/10/12/14/16 */
	BUG_ON(cdb_len & ~0x1E);

	/* append the CDB as the final part */
	buf[30] = (((cdb_len >> 1) & 7) << 5) | ATA_REG_DATA | PDC_LAST_REG;
	memcpy(buf+31, cdb, cdb_len);
}

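/*
 * PRD-table note: judging from PDC_MAX_PRD (LIBATA_MAX_PRD - 1) and the
 * SG_COUNT_ASIC_BUG handling below, the ASIC apparently cannot cope
 * with a final PRD entry longer than 164 bytes; pdc_fill_sg() therefore
 * splits the last entry and needs one spare PRD slot for the overflow.
 */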
/**
 *	pdc_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *	Make sure hardware does not choke on it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void pdc_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_bmdma_prd *prd = ap->bmdma_prd;
	struct scatterlist *sg;
	const u32 SG_COUNT_ASIC_BUG = 41*4;
	unsigned int si, idx;
	u32 len;

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	len = le32_to_cpu(prd[idx - 1].flags_len);

	if (len > SG_COUNT_ASIC_BUG) {
		u32 addr;

		VPRINTK("Splitting last PRD.\n");

		addr = le32_to_cpu(prd[idx - 1].addr);
		prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);

		addr = addr + len - SG_COUNT_ASIC_BUG;
		len = SG_COUNT_ASIC_BUG;
		prd[idx].addr = cpu_to_le32(addr);
		prd[idx].flags_len = cpu_to_le32(len);
		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

		idx++;
	}

	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

static void pdc_qc_prep(struct ata_queued_cmd *qc)
{
	struct pdc_port_priv *pp = qc->ap->private_data;
	unsigned int i;

	VPRINTK("ENTER\n");

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		pdc_fill_sg(qc);
		/*FALLTHROUGH*/
	case ATA_PROT_NODATA:
		i = pdc_pkt_header(&qc->tf, qc->ap->bmdma_prd_dma,
				   qc->dev->devno, pp->pkt);
		if (qc->tf.flags & ATA_TFLAG_LBA48)
			i = pdc_prep_lba48(&qc->tf, pp->pkt, i);
		else
			i = pdc_prep_lba28(&qc->tf, pp->pkt, i);
		pdc_pkt_footer(&qc->tf, pp->pkt, i);
		break;
	case ATAPI_PROT_PIO:
		pdc_fill_sg(qc);
		break;
	case ATAPI_PROT_DMA:
		pdc_fill_sg(qc);
		/*FALLTHROUGH*/
	case ATAPI_PROT_NODATA:
		pdc_atapi_pkt(qc);
		break;
	default:
		break;
	}
}

static int pdc_is_sataii_tx4(unsigned long flags)
{
	const unsigned long mask = PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS;
	return (flags & mask) == mask;
}

static unsigned int pdc_port_no_to_ata_no(unsigned int port_no,
					  int is_sataii_tx4)
{
	static const unsigned char sataii_tx4_port_remap[4] = { 3, 1, 0, 2 };
	return is_sataii_tx4 ? sataii_tx4_port_remap[port_no] : port_no;
}

static unsigned int pdc_sata_nr_ports(const struct ata_port *ap)
{
	return (ap->flags & PDC_FLAG_4_PORTS) ? 4 : 2;
}

static unsigned int pdc_sata_ata_port_to_ata_no(const struct ata_port *ap)
{
	const struct ata_host *host = ap->host;
	unsigned int nr_ports = pdc_sata_nr_ports(ap);
	unsigned int i;

	for (i = 0; i < nr_ports && host->ports[i] != ap; ++i)
		;
	BUG_ON(i >= nr_ports);
	return pdc_port_no_to_ata_no(i, pdc_is_sataii_tx4(ap->flags));
}

static void pdc_freeze(struct ata_port *ap)
{
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
	u32 tmp;

	tmp = readl(ata_mmio + PDC_CTLSTAT);
	tmp |= PDC_IRQ_DISABLE;
	tmp &= ~PDC_DMA_ENABLE;
	writel(tmp, ata_mmio + PDC_CTLSTAT);
	readl(ata_mmio + PDC_CTLSTAT); /* flush */
}

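/*
 * Hotplug control/status register usage, as seen below and in
 * pdc_host_init(): the low bits hold per-port plug/unplug event flags
 * (cleared by writing 1), and the corresponding bits 16 positions
 * higher mask those events.  "0x11 << ata_no" appears to cover both
 * the plug and the unplug bit of one hardware port.
 */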
static void pdc_sata_freeze(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
	unsigned int hotplug_offset = PDC2_SATA_PLUG_CSR;
	unsigned int ata_no = pdc_sata_ata_port_to_ata_no(ap);
	u32 hotplug_status;

	/* Disable hotplug events on this port.
	 *
	 * Locking:
	 * 1) hotplug register accesses must be serialised via host->lock
	 * 2) ap->lock == &ap->host->lock
	 * 3) ->freeze() and ->thaw() are called with ap->lock held
	 */
	hotplug_status = readl(host_mmio + hotplug_offset);
	hotplug_status |= 0x11 << (ata_no + 16);
	writel(hotplug_status, host_mmio + hotplug_offset);
	readl(host_mmio + hotplug_offset); /* flush */

	pdc_freeze(ap);
}

static void pdc_thaw(struct ata_port *ap)
{
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
	u32 tmp;

	/* clear IRQ */
	readl(ata_mmio + PDC_COMMAND);

	/* turn IRQ back on */
	tmp = readl(ata_mmio + PDC_CTLSTAT);
	tmp &= ~PDC_IRQ_DISABLE;
	writel(tmp, ata_mmio + PDC_CTLSTAT);
	readl(ata_mmio + PDC_CTLSTAT); /* flush */
}

static void pdc_sata_thaw(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
	unsigned int hotplug_offset = PDC2_SATA_PLUG_CSR;
	unsigned int ata_no = pdc_sata_ata_port_to_ata_no(ap);
	u32 hotplug_status;

	pdc_thaw(ap);

	/* Enable hotplug events on this port.
	 * Locking: see pdc_sata_freeze().
	 */
	hotplug_status = readl(host_mmio + hotplug_offset);
	hotplug_status |= 0x11 << ata_no;
	hotplug_status &= ~(0x11 << (ata_no + 16));
	writel(hotplug_status, host_mmio + hotplug_offset);
	readl(host_mmio + hotplug_offset); /* flush */
}

static int pdc_pata_softreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
{
	pdc_reset_port(link->ap);
	return ata_sff_softreset(link, class, deadline);
}

static unsigned int pdc_ata_port_to_ata_no(const struct ata_port *ap)
{
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
	void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];

	/* ata_mmio == host_mmio + 0x200 + ata_no * 0x80 */
	return (ata_mmio - host_mmio - 0x200) / 0x80;
}

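/*
 * The per-channel hard-reset control lives in byte 1 of the host-wide
 * PDC_PCI_CTL register (one bit per ATA engine, 0x10 << ata_no), so the
 * read-modify-write below is serialised across ports with
 * hpriv->hard_reset_lock.
 */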
static void pdc_hard_reset_port(struct ata_port *ap)
{
	void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *pcictl_b1_mmio = host_mmio + PDC_PCI_CTL + 1;
	unsigned int ata_no = pdc_ata_port_to_ata_no(ap);
	struct pdc_host_priv *hpriv = ap->host->private_data;
	u8 tmp;

	spin_lock(&hpriv->hard_reset_lock);

	tmp = readb(pcictl_b1_mmio);
	tmp &= ~(0x10 << ata_no);
	writeb(tmp, pcictl_b1_mmio);
	readb(pcictl_b1_mmio); /* flush */
	udelay(100);
	tmp |= (0x10 << ata_no);
	writeb(tmp, pcictl_b1_mmio);
	readb(pcictl_b1_mmio); /* flush */

	spin_unlock(&hpriv->hard_reset_lock);
}

static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
{
	if (link->ap->flags & PDC_FLAG_GEN_II)
		pdc_not_at_command_packet_phase(link->ap);
	/* hotplug IRQs should have been masked by pdc_sata_freeze() */
	pdc_hard_reset_port(link->ap);
	pdc_reset_port(link->ap);

	/* sata_promise can't reliably acquire the first D2H Reg FIS
	 * after hardreset.  Do non-waiting hardreset and request
	 * follow-up SRST.
	 */
	return sata_std_hardreset(link, class, deadline);
}

static void pdc_error_handler(struct ata_port *ap)
{
	if (!(ap->pflags & ATA_PFLAG_FROZEN))
		pdc_reset_port(ap);

	ata_sff_error_handler(ap);
}

static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		pdc_reset_port(ap);
}

static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
			   u32 port_status, u32 err_mask)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned int ac_err_mask = 0;

	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "port_status 0x%08x", port_status);
	port_status &= err_mask;

	if (port_status & PDC_DRIVE_ERR)
		ac_err_mask |= AC_ERR_DEV;
	if (port_status & (PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR))
		ac_err_mask |= AC_ERR_OTHER;
	if (port_status & (PDC2_ATA_HBA_ERR | PDC2_ATA_DMA_CNT_ERR))
		ac_err_mask |= AC_ERR_ATA_BUS;
	if (port_status & (PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR | PDC2_HTO_ERR
			   | PDC_PCI_SYS_ERR | PDC1_PCI_PARITY_ERR))
		ac_err_mask |= AC_ERR_HOST_BUS;

	if (sata_scr_valid(&ap->link)) {
		u32 serror;

		pdc_sata_scr_read(&ap->link, SCR_ERROR, &serror);
		ehi->serror |= serror;
	}

	qc->err_mask |= ac_err_mask;

	pdc_reset_port(ap);

	ata_port_abort(ap);
}

static unsigned int pdc_host_intr(struct ata_port *ap,
				  struct ata_queued_cmd *qc)
{
	unsigned int handled = 0;
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
	u32 port_status, err_mask;

	err_mask = PDC_ERR_MASK;
	if (ap->flags & PDC_FLAG_GEN_II)
		err_mask &= ~PDC1_ERR_MASK;
	else
		err_mask &= ~PDC2_ERR_MASK;
	port_status = readl(ata_mmio + PDC_GLOBAL_CTL);
	if (unlikely(port_status & err_mask)) {
		pdc_error_intr(ap, qc, port_status, err_mask);
		return 1;
	}

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
	case ATA_PROT_NODATA:
	case ATAPI_PROT_DMA:
	case ATAPI_PROT_NODATA:
		qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
		ata_qc_complete(qc);
		handled = 1;
		break;
	default:
		ap->stats.idle_irq++;
		break;
	}

	return handled;
}

static void pdc_irq_clear(struct ata_port *ap)
{
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;

	readl(ata_mmio + PDC_COMMAND);
}

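/*
 * Host interrupt handler.  On Gen II chips the hotplug CSR is read and
 * its event flags cleared first; PDC_INT_SEQMASK is then read to find
 * (and, per the comment inside, clear) asserted SEQ interrupts.  Each
 * port's packet completion shows up as SEQ bit (port_no + 1), matching
 * the sequence id written in pdc_packet_start().
 */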
static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ata_port *ap;
	u32 mask = 0;
	unsigned int i, tmp;
	unsigned int handled = 0;
	void __iomem *host_mmio;
	unsigned int hotplug_offset, ata_no;
	u32 hotplug_status;
	int is_sataii_tx4;

	VPRINTK("ENTER\n");

	if (!host || !host->iomap[PDC_MMIO_BAR]) {
		VPRINTK("QUICK EXIT\n");
		return IRQ_NONE;
	}

	host_mmio = host->iomap[PDC_MMIO_BAR];

	spin_lock(&host->lock);

	/* read and clear hotplug flags for all ports */
	if (host->ports[0]->flags & PDC_FLAG_GEN_II) {
		hotplug_offset = PDC2_SATA_PLUG_CSR;
		hotplug_status = readl(host_mmio + hotplug_offset);
		if (hotplug_status & 0xff)
			writel(hotplug_status | 0xff, host_mmio + hotplug_offset);
		hotplug_status &= 0xff;	/* clear uninteresting bits */
	} else
		hotplug_status = 0;

	/* reading should also clear interrupts */
	mask = readl(host_mmio + PDC_INT_SEQMASK);

	if (mask == 0xffffffff && hotplug_status == 0) {
		VPRINTK("QUICK EXIT 2\n");
		goto done_irq;
	}

	mask &= 0xffff;		/* only 16 SEQIDs possible */
	if (mask == 0 && hotplug_status == 0) {
		VPRINTK("QUICK EXIT 3\n");
		goto done_irq;
	}

	writel(mask, host_mmio + PDC_INT_SEQMASK);

	is_sataii_tx4 = pdc_is_sataii_tx4(host->ports[0]->flags);

	for (i = 0; i < host->n_ports; i++) {
		VPRINTK("port %u\n", i);
		ap = host->ports[i];

		/* check for a plug or unplug event */
		ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
		tmp = hotplug_status & (0x11 << ata_no);
		if (tmp) {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			ata_ehi_hotplugged(ehi);
			ata_ehi_push_desc(ehi, "hotplug_status %#x", tmp);
			ata_port_freeze(ap);
			++handled;
			continue;
		}

		/* check for a packet interrupt */
		tmp = mask & (1 << (i + 1));
		if (tmp) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += pdc_host_intr(ap, qc);
		}
	}

	VPRINTK("EXIT\n");

done_irq:
	spin_unlock(&host->lock);
	return IRQ_RETVAL(handled);
}

static void pdc_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
	unsigned int port_no = ap->port_no;
	u8 seq = (u8) (port_no + 1);

	VPRINTK("ENTER, ap %p\n", ap);

	writel(0x00000001, host_mmio + (seq * 4));
	readl(host_mmio + (seq * 4));	/* flush */

	pp->pkt[2] = seq;
	wmb();			/* flush PRD, pkt writes */
	writel(pp->pkt_dma, ata_mmio + PDC_PKT_SUBMIT);
	readl(ata_mmio + PDC_PKT_SUBMIT); /* flush */
}

static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc)
{
	switch (qc->tf.protocol) {
	case ATAPI_PROT_NODATA:
		if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
			break;
		/*FALLTHROUGH*/
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			break;
		/*FALLTHROUGH*/
	case ATAPI_PROT_DMA:
	case ATA_PROT_DMA:
		pdc_packet_start(qc);
		return 0;
	default:
		break;
	}
	return ata_sff_qc_issue(qc);
}

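/*
 * DMA-protocol commands are issued through the packet engine
 * (pdc_packet_start() above), never through the SFF taskfile path, so
 * the two hooks below only pass through non-DMA taskfiles and WARN if
 * that assumption is ever violated.
 */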
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
	ata_sff_tf_load(ap, tf);
}

static void pdc_exec_command_mmio(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
	ata_sff_exec_command(ap, tf);
}

static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
{
	u8 *scsicmd = qc->scsicmd->cmnd;
	int pio = 1; /* atapi dma off by default */

	/* Whitelist commands that may use DMA. */
	switch (scsicmd[0]) {
	case WRITE_12:
	case WRITE_10:
	case WRITE_6:
	case READ_12:
	case READ_10:
	case READ_6:
	case 0xad: /* READ_DVD_STRUCTURE */
	case 0xbe: /* READ_CD */
		pio = 0;
	}
	/* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
	if (scsicmd[0] == WRITE_10) {
		unsigned int lba =
			(scsicmd[2] << 24) |
			(scsicmd[3] << 16) |
			(scsicmd[4] << 8) |
			scsicmd[5];
		if (lba >= 0xFFFF4FA2)
			pio = 1;
	}
	return pio;
}

static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc)
{
	/* First generation chips cannot use ATAPI DMA on SATA ports */
	return 1;
}

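/*
 * Per-port MMIO layout: the shadow taskfile registers start at cmd_addr
 * and are spaced 4 bytes apart (see the PDC_FEATURE .. PDC_ALTSTATUS
 * offsets near the top of this file) rather than the usual 1 byte.
 */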
static void pdc_ata_setup_port(struct ata_port *ap,
			       void __iomem *base, void __iomem *scr_addr)
{
	ap->ioaddr.cmd_addr		= base;
	ap->ioaddr.data_addr		= base;
	ap->ioaddr.feature_addr		=
	ap->ioaddr.error_addr		= base + 0x4;
	ap->ioaddr.nsect_addr		= base + 0x8;
	ap->ioaddr.lbal_addr		= base + 0xc;
	ap->ioaddr.lbam_addr		= base + 0x10;
	ap->ioaddr.lbah_addr		= base + 0x14;
	ap->ioaddr.device_addr		= base + 0x18;
	ap->ioaddr.command_addr		=
	ap->ioaddr.status_addr		= base + 0x1c;
	ap->ioaddr.altstatus_addr	=
	ap->ioaddr.ctl_addr		= base + 0x38;
	ap->ioaddr.scr_addr		= scr_addr;
}

static void pdc_host_init(struct ata_host *host)
{
	void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
	int is_gen2 = host->ports[0]->flags & PDC_FLAG_GEN_II;
	int hotplug_offset;
	u32 tmp;

	if (is_gen2)
		hotplug_offset = PDC2_SATA_PLUG_CSR;
	else
		hotplug_offset = PDC_SATA_PLUG_CSR;

	/*
	 * Except for the hotplug stuff, this is voodoo from the
	 * Promise driver.  Label this entire section
	 * "TODO: figure out why we do this"
	 */

	/* enable BMR_BURST, maybe change FIFO_SHD to 8 dwords */
	tmp = readl(host_mmio + PDC_FLASH_CTL);
	tmp |= 0x02000;	/* bit 13 (enable bmr burst) */
	if (!is_gen2)
		tmp |= 0x10000;	/* bit 16 (fifo threshold at 8 dw) */
	writel(tmp, host_mmio + PDC_FLASH_CTL);

	/* clear plug/unplug flags for all ports */
	tmp = readl(host_mmio + hotplug_offset);
	writel(tmp | 0xff, host_mmio + hotplug_offset);

	tmp = readl(host_mmio + hotplug_offset);
	if (is_gen2)	/* unmask plug/unplug ints */
		writel(tmp & ~0xff0000, host_mmio + hotplug_offset);
	else		/* mask plug/unplug ints */
		writel(tmp | 0xff0000, host_mmio + hotplug_offset);

	/* don't initialise TBG or SLEW on 2nd generation chips */
	if (is_gen2)
		return;

	/* reduce TBG clock to 133 Mhz. */
	tmp = readl(host_mmio + PDC_TBG_MODE);
	tmp &= ~0x30000; /* clear bit 17, 16 */
	tmp |= 0x10000;  /* set bit 17:16 = 0:1 */
	writel(tmp, host_mmio + PDC_TBG_MODE);

	readl(host_mmio + PDC_TBG_MODE);	/* flush */
	msleep(10);

	/* adjust slew rate control register. */
	tmp = readl(host_mmio + PDC_SLEW_CTL);
	tmp &= 0xFFFFF03F; /* clear bit 11 ~ 6 */
	tmp |= 0x00000900; /* set bit 11-9 = 100b , bit 8-6 = 100 */
	writel(tmp, host_mmio + PDC_SLEW_CTL);
}

static int pdc_ata_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
	const struct ata_port_info *ppi[PDC_MAX_PORTS];
	struct ata_host *host;
	struct pdc_host_priv *hpriv;
	void __iomem *host_mmio;
	int n_ports, i, rc;
	int is_sataii_tx4;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* enable and acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << PDC_MMIO_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host_mmio = pcim_iomap_table(pdev)[PDC_MMIO_BAR];

	/* determine port configuration and setup host */
	n_ports = 2;
	if (pi->flags & PDC_FLAG_4_PORTS)
		n_ports = 4;
	for (i = 0; i < n_ports; i++)
		ppi[i] = pi;

	if (pi->flags & PDC_FLAG_SATA_PATA) {
		u8 tmp = readb(host_mmio + PDC_FLASH_CTL + 1);
		if (!(tmp & 0x80))
			ppi[n_ports++] = pi + 1;
	}

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	if (!host) {
		dev_err(&pdev->dev, "failed to allocate host\n");
		return -ENOMEM;
	}
	hpriv = devm_kzalloc(&pdev->dev, sizeof *hpriv, GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	spin_lock_init(&hpriv->hard_reset_lock);
	host->private_data = hpriv;
	host->iomap = pcim_iomap_table(pdev);

	is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags);
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned int ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
		unsigned int ata_offset = 0x200 + ata_no * 0x80;
		unsigned int scr_offset = 0x400 + ata_no * 0x100;

		pdc_ata_setup_port(ap, host_mmio + ata_offset, host_mmio + scr_offset);

		ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, PDC_MMIO_BAR, ata_offset, "ata");
	}

	/* initialize adapter */
	pdc_host_init(host);

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	/* start host, request IRQ and attach */
	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, pdc_interrupt, IRQF_SHARED,
				 &pdc_ata_sht);
}

module_pci_driver(pdc_ata_pci_driver);

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Promise ATA TX2/TX4/TX4000 low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc_ata_pci_tbl);
MODULE_VERSION(DRV_VERSION);