1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * sata_sil24.c - Driver for Silicon Image 3124/3132 SATA-2 controllers 4 * 5 * Copyright 2005 Tejun Heo 6 * 7 * Based on preview driver from Silicon Image. 8 */ 9 10 #include <linux/kernel.h> 11 #include <linux/module.h> 12 #include <linux/gfp.h> 13 #include <linux/pci.h> 14 #include <linux/blkdev.h> 15 #include <linux/delay.h> 16 #include <linux/interrupt.h> 17 #include <linux/dma-mapping.h> 18 #include <linux/device.h> 19 #include <scsi/scsi_host.h> 20 #include <scsi/scsi_cmnd.h> 21 #include <linux/libata.h> 22 23 #define DRV_NAME "sata_sil24" 24 #define DRV_VERSION "1.1" 25 26 /* 27 * Port request block (PRB) 32 bytes 28 */ 29 struct sil24_prb { 30 __le16 ctrl; 31 __le16 prot; 32 __le32 rx_cnt; 33 u8 fis[6 * 4]; 34 }; 35 36 /* 37 * Scatter gather entry (SGE) 16 bytes 38 */ 39 struct sil24_sge { 40 __le64 addr; 41 __le32 cnt; 42 __le32 flags; 43 }; 44 45 46 enum { 47 SIL24_HOST_BAR = 0, 48 SIL24_PORT_BAR = 2, 49 50 /* sil24 fetches in chunks of 64bytes. The first block 51 * contains the PRB and two SGEs. From the second block, it's 52 * consisted of four SGEs and called SGT. Calculate the 53 * number of SGTs that fit into one page. 54 */ 55 SIL24_PRB_SZ = sizeof(struct sil24_prb) 56 + 2 * sizeof(struct sil24_sge), 57 SIL24_MAX_SGT = (PAGE_SIZE - SIL24_PRB_SZ) 58 / (4 * sizeof(struct sil24_sge)), 59 60 /* This will give us one unused SGEs for ATA. This extra SGE 61 * will be used to store CDB for ATAPI devices. 62 */ 63 SIL24_MAX_SGE = 4 * SIL24_MAX_SGT + 1, 64 65 /* 66 * Global controller registers (128 bytes @ BAR0) 67 */ 68 /* 32 bit regs */ 69 HOST_SLOT_STAT = 0x00, /* 32 bit slot stat * 4 */ 70 HOST_CTRL = 0x40, 71 HOST_IRQ_STAT = 0x44, 72 HOST_PHY_CFG = 0x48, 73 HOST_BIST_CTRL = 0x50, 74 HOST_BIST_PTRN = 0x54, 75 HOST_BIST_STAT = 0x58, 76 HOST_MEM_BIST_STAT = 0x5c, 77 HOST_FLASH_CMD = 0x70, 78 /* 8 bit regs */ 79 HOST_FLASH_DATA = 0x74, 80 HOST_TRANSITION_DETECT = 0x75, 81 HOST_GPIO_CTRL = 0x76, 82 HOST_I2C_ADDR = 0x78, /* 32 bit */ 83 HOST_I2C_DATA = 0x7c, 84 HOST_I2C_XFER_CNT = 0x7e, 85 HOST_I2C_CTRL = 0x7f, 86 87 /* HOST_SLOT_STAT bits */ 88 HOST_SSTAT_ATTN = (1 << 31), 89 90 /* HOST_CTRL bits */ 91 HOST_CTRL_M66EN = (1 << 16), /* M66EN PCI bus signal */ 92 HOST_CTRL_TRDY = (1 << 17), /* latched PCI TRDY */ 93 HOST_CTRL_STOP = (1 << 18), /* latched PCI STOP */ 94 HOST_CTRL_DEVSEL = (1 << 19), /* latched PCI DEVSEL */ 95 HOST_CTRL_REQ64 = (1 << 20), /* latched PCI REQ64 */ 96 HOST_CTRL_GLOBAL_RST = (1 << 31), /* global reset */ 97 98 /* 99 * Port registers 100 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2) 101 */ 102 PORT_REGS_SIZE = 0x2000, 103 104 PORT_LRAM = 0x0000, /* 31 LRAM slots and PMP regs */ 105 PORT_LRAM_SLOT_SZ = 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */ 106 107 PORT_PMP = 0x0f80, /* 8 bytes PMP * 16 (128 bytes) */ 108 PORT_PMP_STATUS = 0x0000, /* port device status offset */ 109 PORT_PMP_QACTIVE = 0x0004, /* port device QActive offset */ 110 PORT_PMP_SIZE = 0x0008, /* 8 bytes per PMP */ 111 112 /* 32 bit regs */ 113 PORT_CTRL_STAT = 0x1000, /* write: ctrl-set, read: stat */ 114 PORT_CTRL_CLR = 0x1004, /* write: ctrl-clear */ 115 PORT_IRQ_STAT = 0x1008, /* high: status, low: interrupt */ 116 PORT_IRQ_ENABLE_SET = 0x1010, /* write: enable-set */ 117 PORT_IRQ_ENABLE_CLR = 0x1014, /* write: enable-clear */ 118 PORT_ACTIVATE_UPPER_ADDR= 0x101c, 119 PORT_EXEC_FIFO = 0x1020, /* command execution fifo */ 120 PORT_CMD_ERR = 0x1024, /* command error number */ 121 PORT_FIS_CFG = 0x1028, 122 PORT_FIFO_THRES = 0x102c, 123 /* 16 bit regs */ 124 PORT_DECODE_ERR_CNT = 0x1040, 125 PORT_DECODE_ERR_THRESH = 0x1042, 126 PORT_CRC_ERR_CNT = 0x1044, 127 PORT_CRC_ERR_THRESH = 0x1046, 128 PORT_HSHK_ERR_CNT = 0x1048, 129 PORT_HSHK_ERR_THRESH = 0x104a, 130 /* 32 bit regs */ 131 PORT_PHY_CFG = 0x1050, 132 PORT_SLOT_STAT = 0x1800, 133 PORT_CMD_ACTIVATE = 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */ 134 PORT_CONTEXT = 0x1e04, 135 PORT_EXEC_DIAG = 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */ 136 PORT_PSD_DIAG = 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */ 137 PORT_SCONTROL = 0x1f00, 138 PORT_SSTATUS = 0x1f04, 139 PORT_SERROR = 0x1f08, 140 PORT_SACTIVE = 0x1f0c, 141 142 /* PORT_CTRL_STAT bits */ 143 PORT_CS_PORT_RST = (1 << 0), /* port reset */ 144 PORT_CS_DEV_RST = (1 << 1), /* device reset */ 145 PORT_CS_INIT = (1 << 2), /* port initialize */ 146 PORT_CS_IRQ_WOC = (1 << 3), /* interrupt write one to clear */ 147 PORT_CS_CDB16 = (1 << 5), /* 0=12b cdb, 1=16b cdb */ 148 PORT_CS_PMP_RESUME = (1 << 6), /* PMP resume */ 149 PORT_CS_32BIT_ACTV = (1 << 10), /* 32-bit activation */ 150 PORT_CS_PMP_EN = (1 << 13), /* port multiplier enable */ 151 PORT_CS_RDY = (1 << 31), /* port ready to accept commands */ 152 153 /* PORT_IRQ_STAT/ENABLE_SET/CLR */ 154 /* bits[11:0] are masked */ 155 PORT_IRQ_COMPLETE = (1 << 0), /* command(s) completed */ 156 PORT_IRQ_ERROR = (1 << 1), /* command execution error */ 157 PORT_IRQ_PORTRDY_CHG = (1 << 2), /* port ready change */ 158 PORT_IRQ_PWR_CHG = (1 << 3), /* power management change */ 159 PORT_IRQ_PHYRDY_CHG = (1 << 4), /* PHY ready change */ 160 PORT_IRQ_COMWAKE = (1 << 5), /* COMWAKE received */ 161 PORT_IRQ_UNK_FIS = (1 << 6), /* unknown FIS received */ 162 PORT_IRQ_DEV_XCHG = (1 << 7), /* device exchanged */ 163 PORT_IRQ_8B10B = (1 << 8), /* 8b/10b decode error threshold */ 164 PORT_IRQ_CRC = (1 << 9), /* CRC error threshold */ 165 PORT_IRQ_HANDSHAKE = (1 << 10), /* handshake error threshold */ 166 PORT_IRQ_SDB_NOTIFY = (1 << 11), /* SDB notify received */ 167 168 DEF_PORT_IRQ = PORT_IRQ_COMPLETE | PORT_IRQ_ERROR | 169 PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG | 170 PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_NOTIFY, 171 172 /* bits[27:16] are unmasked (raw) */ 173 PORT_IRQ_RAW_SHIFT = 16, 174 PORT_IRQ_MASKED_MASK = 0x7ff, 175 PORT_IRQ_RAW_MASK = (0x7ff << PORT_IRQ_RAW_SHIFT), 176 177 /* ENABLE_SET/CLR specific, intr steering - 2 bit field */ 178 PORT_IRQ_STEER_SHIFT = 30, 179 PORT_IRQ_STEER_MASK = (3 << PORT_IRQ_STEER_SHIFT), 180 181 /* PORT_CMD_ERR constants */ 182 PORT_CERR_DEV = 1, /* Error bit in D2H Register FIS */ 183 PORT_CERR_SDB = 2, /* Error bit in SDB FIS */ 184 PORT_CERR_DATA = 3, /* Error in data FIS not detected by dev */ 185 PORT_CERR_SEND = 4, /* Initial cmd FIS transmission failure */ 186 PORT_CERR_INCONSISTENT = 5, /* Protocol mismatch */ 187 PORT_CERR_DIRECTION = 6, /* Data direction mismatch */ 188 PORT_CERR_UNDERRUN = 7, /* Ran out of SGEs while writing */ 189 PORT_CERR_OVERRUN = 8, /* Ran out of SGEs while reading */ 190 PORT_CERR_PKT_PROT = 11, /* DIR invalid in 1st PIO setup of ATAPI */ 191 PORT_CERR_SGT_BOUNDARY = 16, /* PLD ecode 00 - SGT not on qword boundary */ 192 PORT_CERR_SGT_TGTABRT = 17, /* PLD ecode 01 - target abort */ 193 PORT_CERR_SGT_MSTABRT = 18, /* PLD ecode 10 - master abort */ 194 PORT_CERR_SGT_PCIPERR = 19, /* PLD ecode 11 - PCI parity err while fetching SGT */ 195 PORT_CERR_CMD_BOUNDARY = 24, /* ctrl[15:13] 001 - PRB not on qword boundary */ 196 PORT_CERR_CMD_TGTABRT = 25, /* ctrl[15:13] 010 - target abort */ 197 PORT_CERR_CMD_MSTABRT = 26, /* ctrl[15:13] 100 - master abort */ 198 PORT_CERR_CMD_PCIPERR = 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */ 199 PORT_CERR_XFR_UNDEF = 32, /* PSD ecode 00 - undefined */ 200 PORT_CERR_XFR_TGTABRT = 33, /* PSD ecode 01 - target abort */ 201 PORT_CERR_XFR_MSTABRT = 34, /* PSD ecode 10 - master abort */ 202 PORT_CERR_XFR_PCIPERR = 35, /* PSD ecode 11 - PCI prity err during transfer */ 203 PORT_CERR_SENDSERVICE = 36, /* FIS received while sending service */ 204 205 /* bits of PRB control field */ 206 PRB_CTRL_PROTOCOL = (1 << 0), /* override def. ATA protocol */ 207 PRB_CTRL_PACKET_READ = (1 << 4), /* PACKET cmd read */ 208 PRB_CTRL_PACKET_WRITE = (1 << 5), /* PACKET cmd write */ 209 PRB_CTRL_NIEN = (1 << 6), /* Mask completion irq */ 210 PRB_CTRL_SRST = (1 << 7), /* Soft reset request (ign BSY?) */ 211 212 /* PRB protocol field */ 213 PRB_PROT_PACKET = (1 << 0), 214 PRB_PROT_TCQ = (1 << 1), 215 PRB_PROT_NCQ = (1 << 2), 216 PRB_PROT_READ = (1 << 3), 217 PRB_PROT_WRITE = (1 << 4), 218 PRB_PROT_TRANSPARENT = (1 << 5), 219 220 /* 221 * Other constants 222 */ 223 SGE_TRM = (1 << 31), /* Last SGE in chain */ 224 SGE_LNK = (1 << 30), /* linked list 225 Points to SGT, not SGE */ 226 SGE_DRD = (1 << 29), /* discard data read (/dev/null) 227 data address ignored */ 228 229 SIL24_MAX_CMDS = 31, 230 231 /* board id */ 232 BID_SIL3124 = 0, 233 BID_SIL3132 = 1, 234 BID_SIL3131 = 2, 235 236 /* host flags */ 237 SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | 238 ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA | 239 ATA_FLAG_AN | ATA_FLAG_PMP, 240 SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */ 241 242 IRQ_STAT_4PORTS = 0xf, 243 }; 244 245 struct sil24_ata_block { 246 struct sil24_prb prb; 247 struct sil24_sge sge[SIL24_MAX_SGE]; 248 }; 249 250 struct sil24_atapi_block { 251 struct sil24_prb prb; 252 u8 cdb[16]; 253 struct sil24_sge sge[SIL24_MAX_SGE]; 254 }; 255 256 union sil24_cmd_block { 257 struct sil24_ata_block ata; 258 struct sil24_atapi_block atapi; 259 }; 260 261 static const struct sil24_cerr_info { 262 unsigned int err_mask, action; 263 const char *desc; 264 } sil24_cerr_db[] = { 265 [0] = { AC_ERR_DEV, 0, 266 "device error" }, 267 [PORT_CERR_DEV] = { AC_ERR_DEV, 0, 268 "device error via D2H FIS" }, 269 [PORT_CERR_SDB] = { AC_ERR_DEV, 0, 270 "device error via SDB FIS" }, 271 [PORT_CERR_DATA] = { AC_ERR_ATA_BUS, ATA_EH_RESET, 272 "error in data FIS" }, 273 [PORT_CERR_SEND] = { AC_ERR_ATA_BUS, ATA_EH_RESET, 274 "failed to transmit command FIS" }, 275 [PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_RESET, 276 "protocol mismatch" }, 277 [PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_RESET, 278 "data direction mismatch" }, 279 [PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_RESET, 280 "ran out of SGEs while writing" }, 281 [PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_RESET, 282 "ran out of SGEs while reading" }, 283 [PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_RESET, 284 "invalid data direction for ATAPI CDB" }, 285 [PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET, 286 "SGT not on qword boundary" }, 287 [PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET, 288 "PCI target abort while fetching SGT" }, 289 [PORT_CERR_SGT_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET, 290 "PCI master abort while fetching SGT" }, 291 [PORT_CERR_SGT_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_RESET, 292 "PCI parity error while fetching SGT" }, 293 [PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET, 294 "PRB not on qword boundary" }, 295 [PORT_CERR_CMD_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET, 296 "PCI target abort while fetching PRB" }, 297 [PORT_CERR_CMD_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET, 298 "PCI master abort while fetching PRB" }, 299 [PORT_CERR_CMD_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_RESET, 300 "PCI parity error while fetching PRB" }, 301 [PORT_CERR_XFR_UNDEF] = { AC_ERR_HOST_BUS, ATA_EH_RESET, 302 "undefined error while transferring data" }, 303 [PORT_CERR_XFR_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET, 304 "PCI target abort while transferring data" }, 305 [PORT_CERR_XFR_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET, 306 "PCI master abort while transferring data" }, 307 [PORT_CERR_XFR_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_RESET, 308 "PCI parity error while transferring data" }, 309 [PORT_CERR_SENDSERVICE] = { AC_ERR_HSM, ATA_EH_RESET, 310 "FIS received while sending service FIS" }, 311 }; 312 313 /* 314 * ap->private_data 315 * 316 * The preview driver always returned 0 for status. We emulate it 317 * here from the previous interrupt. 318 */ 319 struct sil24_port_priv { 320 union sil24_cmd_block *cmd_block; /* 32 cmd blocks */ 321 dma_addr_t cmd_block_dma; /* DMA base addr for them */ 322 int do_port_rst; 323 }; 324 325 static void sil24_dev_config(struct ata_device *dev); 326 static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val); 327 static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val); 328 static int sil24_qc_defer(struct ata_queued_cmd *qc); 329 static void sil24_qc_prep(struct ata_queued_cmd *qc); 330 static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); 331 static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc); 332 static void sil24_pmp_attach(struct ata_port *ap); 333 static void sil24_pmp_detach(struct ata_port *ap); 334 static void sil24_freeze(struct ata_port *ap); 335 static void sil24_thaw(struct ata_port *ap); 336 static int sil24_softreset(struct ata_link *link, unsigned int *class, 337 unsigned long deadline); 338 static int sil24_hardreset(struct ata_link *link, unsigned int *class, 339 unsigned long deadline); 340 static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class, 341 unsigned long deadline); 342 static void sil24_error_handler(struct ata_port *ap); 343 static void sil24_post_internal_cmd(struct ata_queued_cmd *qc); 344 static int sil24_port_start(struct ata_port *ap); 345 static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 346 #ifdef CONFIG_PM_SLEEP 347 static int sil24_pci_device_resume(struct pci_dev *pdev); 348 #endif 349 #ifdef CONFIG_PM 350 static int sil24_port_resume(struct ata_port *ap); 351 #endif 352 353 static const struct pci_device_id sil24_pci_tbl[] = { 354 { PCI_VDEVICE(CMD, 0x3124), BID_SIL3124 }, 355 { PCI_VDEVICE(INTEL, 0x3124), BID_SIL3124 }, 356 { PCI_VDEVICE(CMD, 0x3132), BID_SIL3132 }, 357 { PCI_VDEVICE(CMD, 0x0242), BID_SIL3132 }, 358 { PCI_VDEVICE(CMD, 0x0244), BID_SIL3132 }, 359 { PCI_VDEVICE(CMD, 0x3131), BID_SIL3131 }, 360 { PCI_VDEVICE(CMD, 0x3531), BID_SIL3131 }, 361 362 { } /* terminate list */ 363 }; 364 365 static struct pci_driver sil24_pci_driver = { 366 .name = DRV_NAME, 367 .id_table = sil24_pci_tbl, 368 .probe = sil24_init_one, 369 .remove = ata_pci_remove_one, 370 #ifdef CONFIG_PM_SLEEP 371 .suspend = ata_pci_device_suspend, 372 .resume = sil24_pci_device_resume, 373 #endif 374 }; 375 376 static struct scsi_host_template sil24_sht = { 377 ATA_NCQ_SHT(DRV_NAME), 378 .can_queue = SIL24_MAX_CMDS, 379 .sg_tablesize = SIL24_MAX_SGE, 380 .dma_boundary = ATA_DMA_BOUNDARY, 381 .tag_alloc_policy = BLK_TAG_ALLOC_FIFO, 382 }; 383 384 static struct ata_port_operations sil24_ops = { 385 .inherits = &sata_pmp_port_ops, 386 387 .qc_defer = sil24_qc_defer, 388 .qc_prep = sil24_qc_prep, 389 .qc_issue = sil24_qc_issue, 390 .qc_fill_rtf = sil24_qc_fill_rtf, 391 392 .freeze = sil24_freeze, 393 .thaw = sil24_thaw, 394 .softreset = sil24_softreset, 395 .hardreset = sil24_hardreset, 396 .pmp_softreset = sil24_softreset, 397 .pmp_hardreset = sil24_pmp_hardreset, 398 .error_handler = sil24_error_handler, 399 .post_internal_cmd = sil24_post_internal_cmd, 400 .dev_config = sil24_dev_config, 401 402 .scr_read = sil24_scr_read, 403 .scr_write = sil24_scr_write, 404 .pmp_attach = sil24_pmp_attach, 405 .pmp_detach = sil24_pmp_detach, 406 407 .port_start = sil24_port_start, 408 #ifdef CONFIG_PM 409 .port_resume = sil24_port_resume, 410 #endif 411 }; 412 413 static bool sata_sil24_msi; /* Disable MSI */ 414 module_param_named(msi, sata_sil24_msi, bool, S_IRUGO); 415 MODULE_PARM_DESC(msi, "Enable MSI (Default: false)"); 416 417 /* 418 * Use bits 30-31 of port_flags to encode available port numbers. 419 * Current maxium is 4. 420 */ 421 #define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30) 422 #define SIL24_FLAG2NPORTS(flag) ((((flag) >> 30) & 0x3) + 1) 423 424 static const struct ata_port_info sil24_port_info[] = { 425 /* sil_3124 */ 426 { 427 .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) | 428 SIL24_FLAG_PCIX_IRQ_WOC, 429 .pio_mask = ATA_PIO4, 430 .mwdma_mask = ATA_MWDMA2, 431 .udma_mask = ATA_UDMA5, 432 .port_ops = &sil24_ops, 433 }, 434 /* sil_3132 */ 435 { 436 .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2), 437 .pio_mask = ATA_PIO4, 438 .mwdma_mask = ATA_MWDMA2, 439 .udma_mask = ATA_UDMA5, 440 .port_ops = &sil24_ops, 441 }, 442 /* sil_3131/sil_3531 */ 443 { 444 .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1), 445 .pio_mask = ATA_PIO4, 446 .mwdma_mask = ATA_MWDMA2, 447 .udma_mask = ATA_UDMA5, 448 .port_ops = &sil24_ops, 449 }, 450 }; 451 452 static int sil24_tag(int tag) 453 { 454 if (unlikely(ata_tag_internal(tag))) 455 return 0; 456 return tag; 457 } 458 459 static unsigned long sil24_port_offset(struct ata_port *ap) 460 { 461 return ap->port_no * PORT_REGS_SIZE; 462 } 463 464 static void __iomem *sil24_port_base(struct ata_port *ap) 465 { 466 return ap->host->iomap[SIL24_PORT_BAR] + sil24_port_offset(ap); 467 } 468 469 static void sil24_dev_config(struct ata_device *dev) 470 { 471 void __iomem *port = sil24_port_base(dev->link->ap); 472 473 if (dev->cdb_len == 16) 474 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT); 475 else 476 writel(PORT_CS_CDB16, port + PORT_CTRL_CLR); 477 } 478 479 static void sil24_read_tf(struct ata_port *ap, int tag, struct ata_taskfile *tf) 480 { 481 void __iomem *port = sil24_port_base(ap); 482 struct sil24_prb __iomem *prb; 483 u8 fis[6 * 4]; 484 485 prb = port + PORT_LRAM + sil24_tag(tag) * PORT_LRAM_SLOT_SZ; 486 memcpy_fromio(fis, prb->fis, sizeof(fis)); 487 ata_tf_from_fis(fis, tf); 488 } 489 490 static int sil24_scr_map[] = { 491 [SCR_CONTROL] = 0, 492 [SCR_STATUS] = 1, 493 [SCR_ERROR] = 2, 494 [SCR_ACTIVE] = 3, 495 }; 496 497 static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val) 498 { 499 void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL; 500 501 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { 502 *val = readl(scr_addr + sil24_scr_map[sc_reg] * 4); 503 return 0; 504 } 505 return -EINVAL; 506 } 507 508 static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val) 509 { 510 void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL; 511 512 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { 513 writel(val, scr_addr + sil24_scr_map[sc_reg] * 4); 514 return 0; 515 } 516 return -EINVAL; 517 } 518 519 static void sil24_config_port(struct ata_port *ap) 520 { 521 void __iomem *port = sil24_port_base(ap); 522 523 /* configure IRQ WoC */ 524 if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) 525 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT); 526 else 527 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR); 528 529 /* zero error counters. */ 530 writew(0x8000, port + PORT_DECODE_ERR_THRESH); 531 writew(0x8000, port + PORT_CRC_ERR_THRESH); 532 writew(0x8000, port + PORT_HSHK_ERR_THRESH); 533 writew(0x0000, port + PORT_DECODE_ERR_CNT); 534 writew(0x0000, port + PORT_CRC_ERR_CNT); 535 writew(0x0000, port + PORT_HSHK_ERR_CNT); 536 537 /* always use 64bit activation */ 538 writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR); 539 540 /* clear port multiplier enable and resume bits */ 541 writel(PORT_CS_PMP_EN | PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR); 542 } 543 544 static void sil24_config_pmp(struct ata_port *ap, int attached) 545 { 546 void __iomem *port = sil24_port_base(ap); 547 548 if (attached) 549 writel(PORT_CS_PMP_EN, port + PORT_CTRL_STAT); 550 else 551 writel(PORT_CS_PMP_EN, port + PORT_CTRL_CLR); 552 } 553 554 static void sil24_clear_pmp(struct ata_port *ap) 555 { 556 void __iomem *port = sil24_port_base(ap); 557 int i; 558 559 writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR); 560 561 for (i = 0; i < SATA_PMP_MAX_PORTS; i++) { 562 void __iomem *pmp_base = port + PORT_PMP + i * PORT_PMP_SIZE; 563 564 writel(0, pmp_base + PORT_PMP_STATUS); 565 writel(0, pmp_base + PORT_PMP_QACTIVE); 566 } 567 } 568 569 static int sil24_init_port(struct ata_port *ap) 570 { 571 void __iomem *port = sil24_port_base(ap); 572 struct sil24_port_priv *pp = ap->private_data; 573 u32 tmp; 574 575 /* clear PMP error status */ 576 if (sata_pmp_attached(ap)) 577 sil24_clear_pmp(ap); 578 579 writel(PORT_CS_INIT, port + PORT_CTRL_STAT); 580 ata_wait_register(ap, port + PORT_CTRL_STAT, 581 PORT_CS_INIT, PORT_CS_INIT, 10, 100); 582 tmp = ata_wait_register(ap, port + PORT_CTRL_STAT, 583 PORT_CS_RDY, 0, 10, 100); 584 585 if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY) { 586 pp->do_port_rst = 1; 587 ap->link.eh_context.i.action |= ATA_EH_RESET; 588 return -EIO; 589 } 590 591 return 0; 592 } 593 594 static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp, 595 const struct ata_taskfile *tf, 596 int is_cmd, u32 ctrl, 597 unsigned long timeout_msec) 598 { 599 void __iomem *port = sil24_port_base(ap); 600 struct sil24_port_priv *pp = ap->private_data; 601 struct sil24_prb *prb = &pp->cmd_block[0].ata.prb; 602 dma_addr_t paddr = pp->cmd_block_dma; 603 u32 irq_enabled, irq_mask, irq_stat; 604 int rc; 605 606 prb->ctrl = cpu_to_le16(ctrl); 607 ata_tf_to_fis(tf, pmp, is_cmd, prb->fis); 608 609 /* temporarily plug completion and error interrupts */ 610 irq_enabled = readl(port + PORT_IRQ_ENABLE_SET); 611 writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR, port + PORT_IRQ_ENABLE_CLR); 612 613 /* 614 * The barrier is required to ensure that writes to cmd_block reach 615 * the memory before the write to PORT_CMD_ACTIVATE. 616 */ 617 wmb(); 618 writel((u32)paddr, port + PORT_CMD_ACTIVATE); 619 writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4); 620 621 irq_mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT; 622 irq_stat = ata_wait_register(ap, port + PORT_IRQ_STAT, irq_mask, 0x0, 623 10, timeout_msec); 624 625 writel(irq_mask, port + PORT_IRQ_STAT); /* clear IRQs */ 626 irq_stat >>= PORT_IRQ_RAW_SHIFT; 627 628 if (irq_stat & PORT_IRQ_COMPLETE) 629 rc = 0; 630 else { 631 /* force port into known state */ 632 sil24_init_port(ap); 633 634 if (irq_stat & PORT_IRQ_ERROR) 635 rc = -EIO; 636 else 637 rc = -EBUSY; 638 } 639 640 /* restore IRQ enabled */ 641 writel(irq_enabled, port + PORT_IRQ_ENABLE_SET); 642 643 return rc; 644 } 645 646 static int sil24_softreset(struct ata_link *link, unsigned int *class, 647 unsigned long deadline) 648 { 649 struct ata_port *ap = link->ap; 650 int pmp = sata_srst_pmp(link); 651 unsigned long timeout_msec = 0; 652 struct ata_taskfile tf; 653 const char *reason; 654 int rc; 655 656 DPRINTK("ENTER\n"); 657 658 /* put the port into known state */ 659 if (sil24_init_port(ap)) { 660 reason = "port not ready"; 661 goto err; 662 } 663 664 /* do SRST */ 665 if (time_after(deadline, jiffies)) 666 timeout_msec = jiffies_to_msecs(deadline - jiffies); 667 668 ata_tf_init(link->device, &tf); /* doesn't really matter */ 669 rc = sil24_exec_polled_cmd(ap, pmp, &tf, 0, PRB_CTRL_SRST, 670 timeout_msec); 671 if (rc == -EBUSY) { 672 reason = "timeout"; 673 goto err; 674 } else if (rc) { 675 reason = "SRST command error"; 676 goto err; 677 } 678 679 sil24_read_tf(ap, 0, &tf); 680 *class = ata_dev_classify(&tf); 681 682 DPRINTK("EXIT, class=%u\n", *class); 683 return 0; 684 685 err: 686 ata_link_err(link, "softreset failed (%s)\n", reason); 687 return -EIO; 688 } 689 690 static int sil24_hardreset(struct ata_link *link, unsigned int *class, 691 unsigned long deadline) 692 { 693 struct ata_port *ap = link->ap; 694 void __iomem *port = sil24_port_base(ap); 695 struct sil24_port_priv *pp = ap->private_data; 696 int did_port_rst = 0; 697 const char *reason; 698 int tout_msec, rc; 699 u32 tmp; 700 701 retry: 702 /* Sometimes, DEV_RST is not enough to recover the controller. 703 * This happens often after PM DMA CS errata. 704 */ 705 if (pp->do_port_rst) { 706 ata_port_warn(ap, 707 "controller in dubious state, performing PORT_RST\n"); 708 709 writel(PORT_CS_PORT_RST, port + PORT_CTRL_STAT); 710 ata_msleep(ap, 10); 711 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR); 712 ata_wait_register(ap, port + PORT_CTRL_STAT, PORT_CS_RDY, 0, 713 10, 5000); 714 715 /* restore port configuration */ 716 sil24_config_port(ap); 717 sil24_config_pmp(ap, ap->nr_pmp_links); 718 719 pp->do_port_rst = 0; 720 did_port_rst = 1; 721 } 722 723 /* sil24 does the right thing(tm) without any protection */ 724 sata_set_spd(link); 725 726 tout_msec = 100; 727 if (ata_link_online(link)) 728 tout_msec = 5000; 729 730 writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT); 731 tmp = ata_wait_register(ap, port + PORT_CTRL_STAT, 732 PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10, 733 tout_msec); 734 735 /* SStatus oscillates between zero and valid status after 736 * DEV_RST, debounce it. 737 */ 738 rc = sata_link_debounce(link, sata_deb_timing_long, deadline); 739 if (rc) { 740 reason = "PHY debouncing failed"; 741 goto err; 742 } 743 744 if (tmp & PORT_CS_DEV_RST) { 745 if (ata_link_offline(link)) 746 return 0; 747 reason = "link not ready"; 748 goto err; 749 } 750 751 /* Sil24 doesn't store signature FIS after hardreset, so we 752 * can't wait for BSY to clear. Some devices take a long time 753 * to get ready and those devices will choke if we don't wait 754 * for BSY clearance here. Tell libata to perform follow-up 755 * softreset. 756 */ 757 return -EAGAIN; 758 759 err: 760 if (!did_port_rst) { 761 pp->do_port_rst = 1; 762 goto retry; 763 } 764 765 ata_link_err(link, "hardreset failed (%s)\n", reason); 766 return -EIO; 767 } 768 769 static inline void sil24_fill_sg(struct ata_queued_cmd *qc, 770 struct sil24_sge *sge) 771 { 772 struct scatterlist *sg; 773 struct sil24_sge *last_sge = NULL; 774 unsigned int si; 775 776 for_each_sg(qc->sg, sg, qc->n_elem, si) { 777 sge->addr = cpu_to_le64(sg_dma_address(sg)); 778 sge->cnt = cpu_to_le32(sg_dma_len(sg)); 779 sge->flags = 0; 780 781 last_sge = sge; 782 sge++; 783 } 784 785 last_sge->flags = cpu_to_le32(SGE_TRM); 786 } 787 788 static int sil24_qc_defer(struct ata_queued_cmd *qc) 789 { 790 struct ata_link *link = qc->dev->link; 791 struct ata_port *ap = link->ap; 792 u8 prot = qc->tf.protocol; 793 794 /* 795 * There is a bug in the chip: 796 * Port LRAM Causes the PRB/SGT Data to be Corrupted 797 * If the host issues a read request for LRAM and SActive registers 798 * while active commands are available in the port, PRB/SGT data in 799 * the LRAM can become corrupted. This issue applies only when 800 * reading from, but not writing to, the LRAM. 801 * 802 * Therefore, reading LRAM when there is no particular error [and 803 * other commands may be outstanding] is prohibited. 804 * 805 * To avoid this bug there are two situations where a command must run 806 * exclusive of any other commands on the port: 807 * 808 * - ATAPI commands which check the sense data 809 * - Passthrough ATA commands which always have ATA_QCFLAG_RESULT_TF 810 * set. 811 * 812 */ 813 int is_excl = (ata_is_atapi(prot) || 814 (qc->flags & ATA_QCFLAG_RESULT_TF)); 815 816 if (unlikely(ap->excl_link)) { 817 if (link == ap->excl_link) { 818 if (ap->nr_active_links) 819 return ATA_DEFER_PORT; 820 qc->flags |= ATA_QCFLAG_CLEAR_EXCL; 821 } else 822 return ATA_DEFER_PORT; 823 } else if (unlikely(is_excl)) { 824 ap->excl_link = link; 825 if (ap->nr_active_links) 826 return ATA_DEFER_PORT; 827 qc->flags |= ATA_QCFLAG_CLEAR_EXCL; 828 } 829 830 return ata_std_qc_defer(qc); 831 } 832 833 static void sil24_qc_prep(struct ata_queued_cmd *qc) 834 { 835 struct ata_port *ap = qc->ap; 836 struct sil24_port_priv *pp = ap->private_data; 837 union sil24_cmd_block *cb; 838 struct sil24_prb *prb; 839 struct sil24_sge *sge; 840 u16 ctrl = 0; 841 842 cb = &pp->cmd_block[sil24_tag(qc->hw_tag)]; 843 844 if (!ata_is_atapi(qc->tf.protocol)) { 845 prb = &cb->ata.prb; 846 sge = cb->ata.sge; 847 if (ata_is_data(qc->tf.protocol)) { 848 u16 prot = 0; 849 ctrl = PRB_CTRL_PROTOCOL; 850 if (ata_is_ncq(qc->tf.protocol)) 851 prot |= PRB_PROT_NCQ; 852 if (qc->tf.flags & ATA_TFLAG_WRITE) 853 prot |= PRB_PROT_WRITE; 854 else 855 prot |= PRB_PROT_READ; 856 prb->prot = cpu_to_le16(prot); 857 } 858 } else { 859 prb = &cb->atapi.prb; 860 sge = cb->atapi.sge; 861 memset(cb->atapi.cdb, 0, sizeof(cb->atapi.cdb)); 862 memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len); 863 864 if (ata_is_data(qc->tf.protocol)) { 865 if (qc->tf.flags & ATA_TFLAG_WRITE) 866 ctrl = PRB_CTRL_PACKET_WRITE; 867 else 868 ctrl = PRB_CTRL_PACKET_READ; 869 } 870 } 871 872 prb->ctrl = cpu_to_le16(ctrl); 873 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, prb->fis); 874 875 if (qc->flags & ATA_QCFLAG_DMAMAP) 876 sil24_fill_sg(qc, sge); 877 } 878 879 static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc) 880 { 881 struct ata_port *ap = qc->ap; 882 struct sil24_port_priv *pp = ap->private_data; 883 void __iomem *port = sil24_port_base(ap); 884 unsigned int tag = sil24_tag(qc->hw_tag); 885 dma_addr_t paddr; 886 void __iomem *activate; 887 888 paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block); 889 activate = port + PORT_CMD_ACTIVATE + tag * 8; 890 891 /* 892 * The barrier is required to ensure that writes to cmd_block reach 893 * the memory before the write to PORT_CMD_ACTIVATE. 894 */ 895 wmb(); 896 writel((u32)paddr, activate); 897 writel((u64)paddr >> 32, activate + 4); 898 899 return 0; 900 } 901 902 static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc) 903 { 904 sil24_read_tf(qc->ap, qc->hw_tag, &qc->result_tf); 905 return true; 906 } 907 908 static void sil24_pmp_attach(struct ata_port *ap) 909 { 910 u32 *gscr = ap->link.device->gscr; 911 912 sil24_config_pmp(ap, 1); 913 sil24_init_port(ap); 914 915 if (sata_pmp_gscr_vendor(gscr) == 0x11ab && 916 sata_pmp_gscr_devid(gscr) == 0x4140) { 917 ata_port_info(ap, 918 "disabling NCQ support due to sil24-mv4140 quirk\n"); 919 ap->flags &= ~ATA_FLAG_NCQ; 920 } 921 } 922 923 static void sil24_pmp_detach(struct ata_port *ap) 924 { 925 sil24_init_port(ap); 926 sil24_config_pmp(ap, 0); 927 928 ap->flags |= ATA_FLAG_NCQ; 929 } 930 931 static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class, 932 unsigned long deadline) 933 { 934 int rc; 935 936 rc = sil24_init_port(link->ap); 937 if (rc) { 938 ata_link_err(link, "hardreset failed (port not ready)\n"); 939 return rc; 940 } 941 942 return sata_std_hardreset(link, class, deadline); 943 } 944 945 static void sil24_freeze(struct ata_port *ap) 946 { 947 void __iomem *port = sil24_port_base(ap); 948 949 /* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear 950 * PORT_IRQ_ENABLE instead. 951 */ 952 writel(0xffff, port + PORT_IRQ_ENABLE_CLR); 953 } 954 955 static void sil24_thaw(struct ata_port *ap) 956 { 957 void __iomem *port = sil24_port_base(ap); 958 u32 tmp; 959 960 /* clear IRQ */ 961 tmp = readl(port + PORT_IRQ_STAT); 962 writel(tmp, port + PORT_IRQ_STAT); 963 964 /* turn IRQ back on */ 965 writel(DEF_PORT_IRQ, port + PORT_IRQ_ENABLE_SET); 966 } 967 968 static void sil24_error_intr(struct ata_port *ap) 969 { 970 void __iomem *port = sil24_port_base(ap); 971 struct sil24_port_priv *pp = ap->private_data; 972 struct ata_queued_cmd *qc = NULL; 973 struct ata_link *link; 974 struct ata_eh_info *ehi; 975 int abort = 0, freeze = 0; 976 u32 irq_stat; 977 978 /* on error, we need to clear IRQ explicitly */ 979 irq_stat = readl(port + PORT_IRQ_STAT); 980 writel(irq_stat, port + PORT_IRQ_STAT); 981 982 /* first, analyze and record host port events */ 983 link = &ap->link; 984 ehi = &link->eh_info; 985 ata_ehi_clear_desc(ehi); 986 987 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat); 988 989 if (irq_stat & PORT_IRQ_SDB_NOTIFY) { 990 ata_ehi_push_desc(ehi, "SDB notify"); 991 sata_async_notification(ap); 992 } 993 994 if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) { 995 ata_ehi_hotplugged(ehi); 996 ata_ehi_push_desc(ehi, "%s", 997 irq_stat & PORT_IRQ_PHYRDY_CHG ? 998 "PHY RDY changed" : "device exchanged"); 999 freeze = 1; 1000 } 1001 1002 if (irq_stat & PORT_IRQ_UNK_FIS) { 1003 ehi->err_mask |= AC_ERR_HSM; 1004 ehi->action |= ATA_EH_RESET; 1005 ata_ehi_push_desc(ehi, "unknown FIS"); 1006 freeze = 1; 1007 } 1008 1009 /* deal with command error */ 1010 if (irq_stat & PORT_IRQ_ERROR) { 1011 const struct sil24_cerr_info *ci = NULL; 1012 unsigned int err_mask = 0, action = 0; 1013 u32 context, cerr; 1014 int pmp; 1015 1016 abort = 1; 1017 1018 /* DMA Context Switch Failure in Port Multiplier Mode 1019 * errata. If we have active commands to 3 or more 1020 * devices, any error condition on active devices can 1021 * corrupt DMA context switching. 1022 */ 1023 if (ap->nr_active_links >= 3) { 1024 ehi->err_mask |= AC_ERR_OTHER; 1025 ehi->action |= ATA_EH_RESET; 1026 ata_ehi_push_desc(ehi, "PMP DMA CS errata"); 1027 pp->do_port_rst = 1; 1028 freeze = 1; 1029 } 1030 1031 /* find out the offending link and qc */ 1032 if (sata_pmp_attached(ap)) { 1033 context = readl(port + PORT_CONTEXT); 1034 pmp = (context >> 5) & 0xf; 1035 1036 if (pmp < ap->nr_pmp_links) { 1037 link = &ap->pmp_link[pmp]; 1038 ehi = &link->eh_info; 1039 qc = ata_qc_from_tag(ap, link->active_tag); 1040 1041 ata_ehi_clear_desc(ehi); 1042 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", 1043 irq_stat); 1044 } else { 1045 err_mask |= AC_ERR_HSM; 1046 action |= ATA_EH_RESET; 1047 freeze = 1; 1048 } 1049 } else 1050 qc = ata_qc_from_tag(ap, link->active_tag); 1051 1052 /* analyze CMD_ERR */ 1053 cerr = readl(port + PORT_CMD_ERR); 1054 if (cerr < ARRAY_SIZE(sil24_cerr_db)) 1055 ci = &sil24_cerr_db[cerr]; 1056 1057 if (ci && ci->desc) { 1058 err_mask |= ci->err_mask; 1059 action |= ci->action; 1060 if (action & ATA_EH_RESET) 1061 freeze = 1; 1062 ata_ehi_push_desc(ehi, "%s", ci->desc); 1063 } else { 1064 err_mask |= AC_ERR_OTHER; 1065 action |= ATA_EH_RESET; 1066 freeze = 1; 1067 ata_ehi_push_desc(ehi, "unknown command error %d", 1068 cerr); 1069 } 1070 1071 /* record error info */ 1072 if (qc) 1073 qc->err_mask |= err_mask; 1074 else 1075 ehi->err_mask |= err_mask; 1076 1077 ehi->action |= action; 1078 1079 /* if PMP, resume */ 1080 if (sata_pmp_attached(ap)) 1081 writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_STAT); 1082 } 1083 1084 /* freeze or abort */ 1085 if (freeze) 1086 ata_port_freeze(ap); 1087 else if (abort) { 1088 if (qc) 1089 ata_link_abort(qc->dev->link); 1090 else 1091 ata_port_abort(ap); 1092 } 1093 } 1094 1095 static inline void sil24_host_intr(struct ata_port *ap) 1096 { 1097 void __iomem *port = sil24_port_base(ap); 1098 u32 slot_stat, qc_active; 1099 int rc; 1100 1101 /* If PCIX_IRQ_WOC, there's an inherent race window between 1102 * clearing IRQ pending status and reading PORT_SLOT_STAT 1103 * which may cause spurious interrupts afterwards. This is 1104 * unavoidable and much better than losing interrupts which 1105 * happens if IRQ pending is cleared after reading 1106 * PORT_SLOT_STAT. 1107 */ 1108 if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) 1109 writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT); 1110 1111 slot_stat = readl(port + PORT_SLOT_STAT); 1112 1113 if (unlikely(slot_stat & HOST_SSTAT_ATTN)) { 1114 sil24_error_intr(ap); 1115 return; 1116 } 1117 1118 qc_active = slot_stat & ~HOST_SSTAT_ATTN; 1119 rc = ata_qc_complete_multiple(ap, qc_active); 1120 if (rc > 0) 1121 return; 1122 if (rc < 0) { 1123 struct ata_eh_info *ehi = &ap->link.eh_info; 1124 ehi->err_mask |= AC_ERR_HSM; 1125 ehi->action |= ATA_EH_RESET; 1126 ata_port_freeze(ap); 1127 return; 1128 } 1129 1130 /* spurious interrupts are expected if PCIX_IRQ_WOC */ 1131 if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit()) 1132 ata_port_info(ap, 1133 "spurious interrupt (slot_stat 0x%x active_tag %d sactive 0x%x)\n", 1134 slot_stat, ap->link.active_tag, ap->link.sactive); 1135 } 1136 1137 static irqreturn_t sil24_interrupt(int irq, void *dev_instance) 1138 { 1139 struct ata_host *host = dev_instance; 1140 void __iomem *host_base = host->iomap[SIL24_HOST_BAR]; 1141 unsigned handled = 0; 1142 u32 status; 1143 int i; 1144 1145 status = readl(host_base + HOST_IRQ_STAT); 1146 1147 if (status == 0xffffffff) { 1148 dev_err(host->dev, "IRQ status == 0xffffffff, " 1149 "PCI fault or device removal?\n"); 1150 goto out; 1151 } 1152 1153 if (!(status & IRQ_STAT_4PORTS)) 1154 goto out; 1155 1156 spin_lock(&host->lock); 1157 1158 for (i = 0; i < host->n_ports; i++) 1159 if (status & (1 << i)) { 1160 sil24_host_intr(host->ports[i]); 1161 handled++; 1162 } 1163 1164 spin_unlock(&host->lock); 1165 out: 1166 return IRQ_RETVAL(handled); 1167 } 1168 1169 static void sil24_error_handler(struct ata_port *ap) 1170 { 1171 struct sil24_port_priv *pp = ap->private_data; 1172 1173 if (sil24_init_port(ap)) 1174 ata_eh_freeze_port(ap); 1175 1176 sata_pmp_error_handler(ap); 1177 1178 pp->do_port_rst = 0; 1179 } 1180 1181 static void sil24_post_internal_cmd(struct ata_queued_cmd *qc) 1182 { 1183 struct ata_port *ap = qc->ap; 1184 1185 /* make DMA engine forget about the failed command */ 1186 if ((qc->flags & ATA_QCFLAG_FAILED) && sil24_init_port(ap)) 1187 ata_eh_freeze_port(ap); 1188 } 1189 1190 static int sil24_port_start(struct ata_port *ap) 1191 { 1192 struct device *dev = ap->host->dev; 1193 struct sil24_port_priv *pp; 1194 union sil24_cmd_block *cb; 1195 size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS; 1196 dma_addr_t cb_dma; 1197 1198 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 1199 if (!pp) 1200 return -ENOMEM; 1201 1202 cb = dmam_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL); 1203 if (!cb) 1204 return -ENOMEM; 1205 memset(cb, 0, cb_size); 1206 1207 pp->cmd_block = cb; 1208 pp->cmd_block_dma = cb_dma; 1209 1210 ap->private_data = pp; 1211 1212 ata_port_pbar_desc(ap, SIL24_HOST_BAR, -1, "host"); 1213 ata_port_pbar_desc(ap, SIL24_PORT_BAR, sil24_port_offset(ap), "port"); 1214 1215 return 0; 1216 } 1217 1218 static void sil24_init_controller(struct ata_host *host) 1219 { 1220 void __iomem *host_base = host->iomap[SIL24_HOST_BAR]; 1221 u32 tmp; 1222 int i; 1223 1224 /* GPIO off */ 1225 writel(0, host_base + HOST_FLASH_CMD); 1226 1227 /* clear global reset & mask interrupts during initialization */ 1228 writel(0, host_base + HOST_CTRL); 1229 1230 /* init ports */ 1231 for (i = 0; i < host->n_ports; i++) { 1232 struct ata_port *ap = host->ports[i]; 1233 void __iomem *port = sil24_port_base(ap); 1234 1235 1236 /* Initial PHY setting */ 1237 writel(0x20c, port + PORT_PHY_CFG); 1238 1239 /* Clear port RST */ 1240 tmp = readl(port + PORT_CTRL_STAT); 1241 if (tmp & PORT_CS_PORT_RST) { 1242 writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR); 1243 tmp = ata_wait_register(NULL, port + PORT_CTRL_STAT, 1244 PORT_CS_PORT_RST, 1245 PORT_CS_PORT_RST, 10, 100); 1246 if (tmp & PORT_CS_PORT_RST) 1247 dev_err(host->dev, 1248 "failed to clear port RST\n"); 1249 } 1250 1251 /* configure port */ 1252 sil24_config_port(ap); 1253 } 1254 1255 /* Turn on interrupts */ 1256 writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL); 1257 } 1258 1259 static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1260 { 1261 extern int __MARKER__sil24_cmd_block_is_sized_wrongly; 1262 struct ata_port_info pi = sil24_port_info[ent->driver_data]; 1263 const struct ata_port_info *ppi[] = { &pi, NULL }; 1264 void __iomem * const *iomap; 1265 struct ata_host *host; 1266 int rc; 1267 u32 tmp; 1268 1269 /* cause link error if sil24_cmd_block is sized wrongly */ 1270 if (sizeof(union sil24_cmd_block) != PAGE_SIZE) 1271 __MARKER__sil24_cmd_block_is_sized_wrongly = 1; 1272 1273 ata_print_version_once(&pdev->dev, DRV_VERSION); 1274 1275 /* acquire resources */ 1276 rc = pcim_enable_device(pdev); 1277 if (rc) 1278 return rc; 1279 1280 rc = pcim_iomap_regions(pdev, 1281 (1 << SIL24_HOST_BAR) | (1 << SIL24_PORT_BAR), 1282 DRV_NAME); 1283 if (rc) 1284 return rc; 1285 iomap = pcim_iomap_table(pdev); 1286 1287 /* apply workaround for completion IRQ loss on PCI-X errata */ 1288 if (pi.flags & SIL24_FLAG_PCIX_IRQ_WOC) { 1289 tmp = readl(iomap[SIL24_HOST_BAR] + HOST_CTRL); 1290 if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL)) 1291 dev_info(&pdev->dev, 1292 "Applying completion IRQ loss on PCI-X errata fix\n"); 1293 else 1294 pi.flags &= ~SIL24_FLAG_PCIX_IRQ_WOC; 1295 } 1296 1297 /* allocate and fill host */ 1298 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1299 SIL24_FLAG2NPORTS(ppi[0]->flags)); 1300 if (!host) 1301 return -ENOMEM; 1302 host->iomap = iomap; 1303 1304 /* configure and activate the device */ 1305 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { 1306 rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 1307 if (rc) { 1308 rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 1309 if (rc) { 1310 dev_err(&pdev->dev, 1311 "64-bit DMA enable failed\n"); 1312 return rc; 1313 } 1314 } 1315 } else { 1316 rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 1317 if (rc) { 1318 dev_err(&pdev->dev, "32-bit DMA enable failed\n"); 1319 return rc; 1320 } 1321 rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 1322 if (rc) { 1323 dev_err(&pdev->dev, 1324 "32-bit consistent DMA enable failed\n"); 1325 return rc; 1326 } 1327 } 1328 1329 /* Set max read request size to 4096. This slightly increases 1330 * write throughput for pci-e variants. 1331 */ 1332 pcie_set_readrq(pdev, 4096); 1333 1334 sil24_init_controller(host); 1335 1336 if (sata_sil24_msi && !pci_enable_msi(pdev)) { 1337 dev_info(&pdev->dev, "Using MSI\n"); 1338 pci_intx(pdev, 0); 1339 } 1340 1341 pci_set_master(pdev); 1342 return ata_host_activate(host, pdev->irq, sil24_interrupt, IRQF_SHARED, 1343 &sil24_sht); 1344 } 1345 1346 #ifdef CONFIG_PM_SLEEP 1347 static int sil24_pci_device_resume(struct pci_dev *pdev) 1348 { 1349 struct ata_host *host = pci_get_drvdata(pdev); 1350 void __iomem *host_base = host->iomap[SIL24_HOST_BAR]; 1351 int rc; 1352 1353 rc = ata_pci_device_do_resume(pdev); 1354 if (rc) 1355 return rc; 1356 1357 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) 1358 writel(HOST_CTRL_GLOBAL_RST, host_base + HOST_CTRL); 1359 1360 sil24_init_controller(host); 1361 1362 ata_host_resume(host); 1363 1364 return 0; 1365 } 1366 #endif 1367 1368 #ifdef CONFIG_PM 1369 static int sil24_port_resume(struct ata_port *ap) 1370 { 1371 sil24_config_pmp(ap, ap->nr_pmp_links); 1372 return 0; 1373 } 1374 #endif 1375 1376 module_pci_driver(sil24_pci_driver); 1377 1378 MODULE_AUTHOR("Tejun Heo"); 1379 MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver"); 1380 MODULE_LICENSE("GPL"); 1381 MODULE_DEVICE_TABLE(pci, sil24_pci_tbl); 1382