/*
 * sata_sil24.c - Driver for Silicon Image 3124/3132 SATA-2 controllers
 *
 * Copyright 2005 Tejun Heo
 *
 * Based on preview driver from Silicon Image.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_sil24"
#define DRV_VERSION	"1.1"

/*
 * Port request block (PRB) 32 bytes
 */
struct sil24_prb {
	__le16	ctrl;
	__le16	prot;
	__le32	rx_cnt;
	u8	fis[6 * 4];
};

/*
 * Scatter gather entry (SGE) 16 bytes
 */
struct sil24_sge {
	__le64	addr;
	__le32	cnt;
	__le32	flags;
};


enum {
	SIL24_HOST_BAR		= 0,
	SIL24_PORT_BAR		= 2,

	/* sil24 fetches in chunks of 64 bytes.  The first block
	 * contains the PRB and two SGEs.  Each subsequent block
	 * consists of four SGEs and is called an SGT.  Calculate the
	 * number of SGTs that fit into one page.
	 */
	SIL24_PRB_SZ		= sizeof(struct sil24_prb)
				  + 2 * sizeof(struct sil24_sge),
	SIL24_MAX_SGT		= (PAGE_SIZE - SIL24_PRB_SZ)
				  / (4 * sizeof(struct sil24_sge)),

	/* This gives us one unused SGE for ATA.  This extra SGE
	 * will be used to store CDB for ATAPI devices.
	 */
	SIL24_MAX_SGE		= 4 * SIL24_MAX_SGT + 1,

	/*
	 * Global controller registers (128 bytes @ BAR0)
	 */
	/* 32 bit regs */
	HOST_SLOT_STAT		= 0x00, /* 32 bit slot stat * 4 */
	HOST_CTRL		= 0x40,
	HOST_IRQ_STAT		= 0x44,
	HOST_PHY_CFG		= 0x48,
	HOST_BIST_CTRL		= 0x50,
	HOST_BIST_PTRN		= 0x54,
	HOST_BIST_STAT		= 0x58,
	HOST_MEM_BIST_STAT	= 0x5c,
	HOST_FLASH_CMD		= 0x70,
	/* 8 bit regs */
	HOST_FLASH_DATA		= 0x74,
	HOST_TRANSITION_DETECT	= 0x75,
	HOST_GPIO_CTRL		= 0x76,
	HOST_I2C_ADDR		= 0x78, /* 32 bit */
	HOST_I2C_DATA		= 0x7c,
	HOST_I2C_XFER_CNT	= 0x7e,
	HOST_I2C_CTRL		= 0x7f,

	/* HOST_SLOT_STAT bits */
	HOST_SSTAT_ATTN		= (1 << 31),

	/* HOST_CTRL bits */
	HOST_CTRL_M66EN		= (1 << 16), /* M66EN PCI bus signal */
	HOST_CTRL_TRDY		= (1 << 17), /* latched PCI TRDY */
	HOST_CTRL_STOP		= (1 << 18), /* latched PCI STOP */
	HOST_CTRL_DEVSEL	= (1 << 19), /* latched PCI DEVSEL */
	HOST_CTRL_REQ64		= (1 << 20), /* latched PCI REQ64 */
	HOST_CTRL_GLOBAL_RST	= (1 << 31), /* global reset */

	/*
	 * Port registers
	 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
	 */
	PORT_REGS_SIZE		= 0x2000,

	PORT_LRAM		= 0x0000, /* 31 LRAM slots and PMP regs */
	PORT_LRAM_SLOT_SZ	= 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */

	PORT_PMP		= 0x0f80, /* 8 bytes PMP * 16 (128 bytes) */
	PORT_PMP_STATUS		= 0x0000, /* port device status offset */
	PORT_PMP_QACTIVE	= 0x0004, /* port device QActive offset */
	PORT_PMP_SIZE		= 0x0008, /* 8 bytes per PMP */

	/* 32 bit regs */
	PORT_CTRL_STAT		= 0x1000, /* write: ctrl-set, read: stat */
	PORT_CTRL_CLR		= 0x1004, /* write: ctrl-clear */
	PORT_IRQ_STAT		= 0x1008, /* high: status, low: interrupt */
	PORT_IRQ_ENABLE_SET	= 0x1010, /* write: enable-set */
	PORT_IRQ_ENABLE_CLR	= 0x1014, /* write: enable-clear */
	PORT_ACTIVATE_UPPER_ADDR= 0x101c,
	PORT_EXEC_FIFO		= 0x1020, /* command execution fifo */
	PORT_CMD_ERR		= 0x1024, /* command error number */
	PORT_FIS_CFG		= 0x1028,
	PORT_FIFO_THRES		= 0x102c,
	/* 16 bit regs */
	PORT_DECODE_ERR_CNT	= 0x1040,
	PORT_DECODE_ERR_THRESH	= 0x1042,
	PORT_CRC_ERR_CNT	= 0x1044,
	PORT_CRC_ERR_THRESH	= 0x1046,
	PORT_HSHK_ERR_CNT	= 0x1048,
	PORT_HSHK_ERR_THRESH	= 0x104a,
	/* 32 bit regs */
	PORT_PHY_CFG		= 0x1050,
	PORT_SLOT_STAT		= 0x1800,
	PORT_CMD_ACTIVATE	= 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
	PORT_CONTEXT		= 0x1e04,
	PORT_EXEC_DIAG		= 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
	PORT_PSD_DIAG		= 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
	PORT_SCONTROL		= 0x1f00,
	PORT_SSTATUS		= 0x1f04,
	PORT_SERROR		= 0x1f08,
	PORT_SACTIVE		= 0x1f0c,

	/* PORT_CTRL_STAT bits */
	PORT_CS_PORT_RST	= (1 << 0), /* port reset */
	PORT_CS_DEV_RST		= (1 << 1), /* device reset */
	PORT_CS_INIT		= (1 << 2), /* port initialize */
	PORT_CS_IRQ_WOC		= (1 << 3), /* interrupt write one to clear */
	PORT_CS_CDB16		= (1 << 5), /* 0=12b cdb, 1=16b cdb */
	PORT_CS_PMP_RESUME	= (1 << 6), /* PMP resume */
	PORT_CS_32BIT_ACTV	= (1 << 10), /* 32-bit activation */
	PORT_CS_PMP_EN		= (1 << 13), /* port multiplier enable */
	PORT_CS_RDY		= (1 << 31), /* port ready to accept commands */

	/* PORT_IRQ_STAT/ENABLE_SET/CLR */
	/* bits[11:0] are masked */
	PORT_IRQ_COMPLETE	= (1 << 0), /* command(s) completed */
	PORT_IRQ_ERROR		= (1 << 1), /* command execution error */
	PORT_IRQ_PORTRDY_CHG	= (1 << 2), /* port ready change */
	PORT_IRQ_PWR_CHG	= (1 << 3), /* power management change */
	PORT_IRQ_PHYRDY_CHG	= (1 << 4), /* PHY ready change */
	PORT_IRQ_COMWAKE	= (1 << 5), /* COMWAKE received */
	PORT_IRQ_UNK_FIS	= (1 << 6), /* unknown FIS received */
	PORT_IRQ_DEV_XCHG	= (1 << 7), /* device exchanged */
	PORT_IRQ_8B10B		= (1 << 8), /* 8b/10b decode error threshold */
	PORT_IRQ_CRC		= (1 << 9), /* CRC error threshold */
	PORT_IRQ_HANDSHAKE	= (1 << 10), /* handshake error threshold */
	PORT_IRQ_SDB_NOTIFY	= (1 << 11), /* SDB notify received */

	DEF_PORT_IRQ		= PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
				  PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG |
				  PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_NOTIFY,

	/* bits[27:16] are unmasked (raw) */
	PORT_IRQ_RAW_SHIFT	= 16,
	PORT_IRQ_MASKED_MASK	= 0x7ff,
	PORT_IRQ_RAW_MASK	= (0x7ff << PORT_IRQ_RAW_SHIFT),

	/* ENABLE_SET/CLR specific, intr steering - 2 bit field */
	PORT_IRQ_STEER_SHIFT	= 30,
	PORT_IRQ_STEER_MASK	= (3 << PORT_IRQ_STEER_SHIFT),

	/* PORT_CMD_ERR constants */
	PORT_CERR_DEV		= 1, /* Error bit in D2H Register FIS */
	PORT_CERR_SDB		= 2, /* Error bit in SDB FIS */
	PORT_CERR_DATA		= 3, /* Error in data FIS not detected by dev */
	PORT_CERR_SEND		= 4, /* Initial cmd FIS transmission failure */
	PORT_CERR_INCONSISTENT	= 5, /* Protocol mismatch */
	PORT_CERR_DIRECTION	= 6, /* Data direction mismatch */
	PORT_CERR_UNDERRUN	= 7, /* Ran out of SGEs while writing */
	PORT_CERR_OVERRUN	= 8, /* Ran out of SGEs while reading */
	PORT_CERR_PKT_PROT	= 11, /* DIR invalid in 1st PIO setup of ATAPI */
	PORT_CERR_SGT_BOUNDARY	= 16, /* PLD ecode 00 - SGT not on qword boundary */
	PORT_CERR_SGT_TGTABRT	= 17, /* PLD ecode 01 - target abort */
	PORT_CERR_SGT_MSTABRT	= 18, /* PLD ecode 10 - master abort */
	PORT_CERR_SGT_PCIPERR	= 19, /* PLD ecode 11 - PCI parity err while fetching SGT */
	PORT_CERR_CMD_BOUNDARY	= 24, /* ctrl[15:13] 001 - PRB not on qword boundary */
	PORT_CERR_CMD_TGTABRT	= 25, /* ctrl[15:13] 010 - target abort */
	PORT_CERR_CMD_MSTABRT	= 26, /* ctrl[15:13] 100 - master abort */
	PORT_CERR_CMD_PCIPERR	= 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
	PORT_CERR_XFR_UNDEF	= 32, /* PSD ecode 00 - undefined */
	PORT_CERR_XFR_TGTABRT	= 33, /* PSD ecode 01 - target abort */
	PORT_CERR_XFR_MSTABRT	= 34, /* PSD ecode 10 - master abort */
	PORT_CERR_XFR_PCIPERR	= 35, /* PSD ecode 11 - PCI parity err during transfer */
	PORT_CERR_SENDSERVICE	= 36, /* FIS received while sending service */

	/* bits of PRB control field */
	PRB_CTRL_PROTOCOL	= (1 << 0), /* override def. ATA protocol */
	PRB_CTRL_PACKET_READ	= (1 << 4), /* PACKET cmd read */
	PRB_CTRL_PACKET_WRITE	= (1 << 5), /* PACKET cmd write */
	PRB_CTRL_NIEN		= (1 << 6), /* Mask completion irq */
	PRB_CTRL_SRST		= (1 << 7), /* Soft reset request (ign BSY?) */

	/* PRB protocol field */
	PRB_PROT_PACKET		= (1 << 0),
	PRB_PROT_TCQ		= (1 << 1),
	PRB_PROT_NCQ		= (1 << 2),
	PRB_PROT_READ		= (1 << 3),
	PRB_PROT_WRITE		= (1 << 4),
	PRB_PROT_TRANSPARENT	= (1 << 5),

	/*
	 * Other constants
	 */
	SGE_TRM			= (1 << 31), /* Last SGE in chain */
	SGE_LNK			= (1 << 30), /* linked list
						Points to SGT, not SGE */
	SGE_DRD			= (1 << 29), /* discard data read (/dev/null)
						data address ignored */

	SIL24_MAX_CMDS		= 31,

	/* board id */
	BID_SIL3124		= 0,
	BID_SIL3132		= 1,
	BID_SIL3131		= 2,

	/* host flags */
	SIL24_COMMON_FLAGS	= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
				  ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_AN | ATA_FLAG_PMP,
	SIL24_FLAG_PCIX_IRQ_WOC	= (1 << 24), /* IRQ loss errata on PCI-X */

	IRQ_STAT_4PORTS		= 0xf,
};

struct sil24_ata_block {
	struct sil24_prb prb;
	struct sil24_sge sge[SIL24_MAX_SGE];
};

struct sil24_atapi_block {
	struct sil24_prb prb;
	u8 cdb[16];
	struct sil24_sge sge[SIL24_MAX_SGE];
};

union sil24_cmd_block {
	struct sil24_ata_block ata;
	struct sil24_atapi_block atapi;
};

static const struct sil24_cerr_info {
	unsigned int err_mask, action;
	const char *desc;
} sil24_cerr_db[] = {
	[0]			= { AC_ERR_DEV, 0,
				    "device error" },
	[PORT_CERR_DEV]		= { AC_ERR_DEV, 0,
				    "device error via D2H FIS" },
	[PORT_CERR_SDB]		= { AC_ERR_DEV, 0,
				    "device error via SDB FIS" },
	[PORT_CERR_DATA]	= { AC_ERR_ATA_BUS, ATA_EH_RESET,
				    "error in data FIS" },
	[PORT_CERR_SEND]	= { AC_ERR_ATA_BUS, ATA_EH_RESET,
				    "failed to transmit command FIS" },
	[PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_RESET,
				    "protocol mismatch" },
	[PORT_CERR_DIRECTION]	= { AC_ERR_HSM, ATA_EH_RESET,
				    "data direction mismatch" },
	[PORT_CERR_UNDERRUN]	= { AC_ERR_HSM, ATA_EH_RESET,
				    "ran out of SGEs while writing" },
	[PORT_CERR_OVERRUN]	= { AC_ERR_HSM, ATA_EH_RESET,
				    "ran out of SGEs while reading" },
	[PORT_CERR_PKT_PROT]	= { AC_ERR_HSM, ATA_EH_RESET,
				    "invalid data direction for ATAPI CDB" },
	[PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET,
				    "SGT not on qword boundary" },
	[PORT_CERR_SGT_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI target abort while fetching SGT" },
	[PORT_CERR_SGT_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI master abort while fetching SGT" },
	[PORT_CERR_SGT_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI parity error while fetching SGT" },
	[PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET,
				    "PRB not on qword boundary" },
	[PORT_CERR_CMD_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI target abort while fetching PRB" },
	[PORT_CERR_CMD_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI master abort while fetching PRB" },
	[PORT_CERR_CMD_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI parity error while fetching PRB" },
	[PORT_CERR_XFR_UNDEF]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "undefined error while transferring data" },
	[PORT_CERR_XFR_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI target abort while transferring data" },
	[PORT_CERR_XFR_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI master abort while transferring data" },
	[PORT_CERR_XFR_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI parity error while transferring data" },
	[PORT_CERR_SENDSERVICE]	= { AC_ERR_HSM, ATA_EH_RESET,
				    "FIS received while sending service FIS" },
};

/*
 * ap->private_data
 *
 * The preview driver always returned 0 for status.  We emulate it
 * here from the previous interrupt.
 */
struct sil24_port_priv {
	union sil24_cmd_block *cmd_block;	/* 32 cmd blocks */
	dma_addr_t cmd_block_dma;		/* DMA base addr for them */
	int do_port_rst;
};

static void sil24_dev_config(struct ata_device *dev);
static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val);
static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
static int sil24_qc_defer(struct ata_queued_cmd *qc);
static void sil24_qc_prep(struct ata_queued_cmd *qc);
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
static void sil24_pmp_attach(struct ata_port *ap);
static void sil24_pmp_detach(struct ata_port *ap);
static void sil24_freeze(struct ata_port *ap);
static void sil24_thaw(struct ata_port *ap);
static int sil24_softreset(struct ata_link *link, unsigned int *class,
			   unsigned long deadline);
static int sil24_hardreset(struct ata_link *link, unsigned int *class,
			   unsigned long deadline);
static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
			       unsigned long deadline);
static void sil24_error_handler(struct ata_port *ap);
static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
static int sil24_port_start(struct ata_port *ap);
static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int sil24_pci_device_resume(struct pci_dev *pdev);
#endif
#ifdef CONFIG_PM
static int sil24_port_resume(struct ata_port *ap);
#endif

static const struct pci_device_id sil24_pci_tbl[] = {
	{ PCI_VDEVICE(CMD, 0x3124), BID_SIL3124 },
	{ PCI_VDEVICE(INTEL, 0x3124), BID_SIL3124 },
	{ PCI_VDEVICE(CMD, 0x3132), BID_SIL3132 },
	{ PCI_VDEVICE(CMD, 0x0242), BID_SIL3132 },
	{ PCI_VDEVICE(CMD, 0x0244), BID_SIL3132 },
	{ PCI_VDEVICE(CMD, 0x3131), BID_SIL3131 },
	{ PCI_VDEVICE(CMD, 0x3531), BID_SIL3131 },

	{ } /* terminate list */
};

static struct pci_driver sil24_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= sil24_pci_tbl,
	.probe			= sil24_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= sil24_pci_device_resume,
#endif
};

static struct scsi_host_template sil24_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= SIL24_MAX_CMDS,
	.sg_tablesize		= SIL24_MAX_SGE,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.tag_alloc_policy	= BLK_TAG_ALLOC_FIFO,
};

static struct ata_port_operations sil24_ops = {
	.inherits		= &sata_pmp_port_ops,

	.qc_defer		= sil24_qc_defer,
	.qc_prep		= sil24_qc_prep,
	.qc_issue		= sil24_qc_issue,
	.qc_fill_rtf		= sil24_qc_fill_rtf,

	.freeze			= sil24_freeze,
	.thaw			= sil24_thaw,
	.softreset		= sil24_softreset,
	.hardreset		= sil24_hardreset,
	.pmp_softreset		= sil24_softreset,
	.pmp_hardreset		= sil24_pmp_hardreset,
	.error_handler		= sil24_error_handler,
	.post_internal_cmd	= sil24_post_internal_cmd,
	.dev_config		= sil24_dev_config,

	.scr_read		= sil24_scr_read,
	.scr_write		= sil24_scr_write,
	.pmp_attach		= sil24_pmp_attach,
	.pmp_detach		= sil24_pmp_detach,

	.port_start		= sil24_port_start,
#ifdef CONFIG_PM
	.port_resume		= sil24_port_resume,
#endif
};

static bool sata_sil24_msi;	/* Disable MSI */
module_param_named(msi, sata_sil24_msi, bool, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable MSI (Default: false)");

/*
 * Use bits 30-31 of port_flags to encode available port numbers.
 * Current maximum is 4.
 */
#define SIL24_NPORTS2FLAG(nports)	((((unsigned)(nports) - 1) & 0x3) << 30)
#define SIL24_FLAG2NPORTS(flag)		((((flag) >> 30) & 0x3) + 1)
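/*
 * For example, SIL24_NPORTS2FLAG(4) stores 0x3 in flag bits 30-31 and
 * SIL24_FLAG2NPORTS() decodes that back into 4.
 */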

static const struct ata_port_info sil24_port_info[] = {
	/* sil_3124 */
	{
		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
				  SIL24_FLAG_PCIX_IRQ_WOC,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil24_ops,
	},
	/* sil_3132 */
	{
		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil24_ops,
	},
	/* sil_3131/sil_3531 */
	{
		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil24_ops,
	},
};

static int sil24_tag(int tag)
{
	if (unlikely(ata_tag_internal(tag)))
		return 0;
	return tag;
}

static unsigned long sil24_port_offset(struct ata_port *ap)
{
	return ap->port_no * PORT_REGS_SIZE;
}

static void __iomem *sil24_port_base(struct ata_port *ap)
{
	return ap->host->iomap[SIL24_PORT_BAR] + sil24_port_offset(ap);
}

static void sil24_dev_config(struct ata_device *dev)
{
	void __iomem *port = sil24_port_base(dev->link->ap);

	if (dev->cdb_len == 16)
		writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
	else
		writel(PORT_CS_CDB16, port + PORT_CTRL_CLR);
}

static void sil24_read_tf(struct ata_port *ap, int tag, struct ata_taskfile *tf)
{
	void __iomem *port = sil24_port_base(ap);
	struct sil24_prb __iomem *prb;
	u8 fis[6 * 4];

	prb = port + PORT_LRAM + sil24_tag(tag) * PORT_LRAM_SLOT_SZ;
	memcpy_fromio(fis, prb->fis, sizeof(fis));
	ata_tf_from_fis(fis, tf);
}

static int sil24_scr_map[] = {
	[SCR_CONTROL]	= 0,
	[SCR_STATUS]	= 1,
	[SCR_ERROR]	= 2,
	[SCR_ACTIVE]	= 3,
};

static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
{
	void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;

	if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
		*val = readl(scr_addr + sil24_scr_map[sc_reg] * 4);
		return 0;
	}
	return -EINVAL;
}

static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
{
	void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;

	if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
		writel(val, scr_addr + sil24_scr_map[sc_reg] * 4);
		return 0;
	}
	return -EINVAL;
}

static void sil24_config_port(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);

	/* configure IRQ WoC */
	if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
		writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
	else
		writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);

	/* zero error counters. */
	writew(0x8000, port + PORT_DECODE_ERR_THRESH);
	writew(0x8000, port + PORT_CRC_ERR_THRESH);
	writew(0x8000, port + PORT_HSHK_ERR_THRESH);
	writew(0x0000, port + PORT_DECODE_ERR_CNT);
	writew(0x0000, port + PORT_CRC_ERR_CNT);
	writew(0x0000, port + PORT_HSHK_ERR_CNT);

	/* always use 64bit activation */
	writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);

	/* clear port multiplier enable and resume bits */
	writel(PORT_CS_PMP_EN | PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);
}

static void sil24_config_pmp(struct ata_port *ap, int attached)
{
	void __iomem *port = sil24_port_base(ap);

	if (attached)
		writel(PORT_CS_PMP_EN, port + PORT_CTRL_STAT);
	else
		writel(PORT_CS_PMP_EN, port + PORT_CTRL_CLR);
}

static void sil24_clear_pmp(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	int i;

	writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);

	for (i = 0; i < SATA_PMP_MAX_PORTS; i++) {
		void __iomem *pmp_base = port + PORT_PMP + i * PORT_PMP_SIZE;

		writel(0, pmp_base + PORT_PMP_STATUS);
		writel(0, pmp_base + PORT_PMP_QACTIVE);
	}
}

static int sil24_init_port(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	struct sil24_port_priv *pp = ap->private_data;
	u32 tmp;

	/* clear PMP error status */
	if (sata_pmp_attached(ap))
		sil24_clear_pmp(ap);

	writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
	ata_wait_register(ap, port + PORT_CTRL_STAT,
			  PORT_CS_INIT, PORT_CS_INIT, 10, 100);
	tmp = ata_wait_register(ap, port + PORT_CTRL_STAT,
				PORT_CS_RDY, 0, 10, 100);

	if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY) {
		pp->do_port_rst = 1;
		ap->link.eh_context.i.action |= ATA_EH_RESET;
		return -EIO;
	}

	return 0;
}

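/*
 * Execute a single command from command slot 0 and poll PORT_IRQ_STAT
 * for completion or error, with the port's completion/error interrupts
 * temporarily masked.  Used by softreset, which cannot rely on the
 * normal interrupt driven completion path.
 */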
static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp,
				 const struct ata_taskfile *tf,
				 int is_cmd, u32 ctrl,
				 unsigned long timeout_msec)
{
	void __iomem *port = sil24_port_base(ap);
	struct sil24_port_priv *pp = ap->private_data;
	struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
	dma_addr_t paddr = pp->cmd_block_dma;
	u32 irq_enabled, irq_mask, irq_stat;
	int rc;

	prb->ctrl = cpu_to_le16(ctrl);
	ata_tf_to_fis(tf, pmp, is_cmd, prb->fis);

	/* temporarily plug completion and error interrupts */
	irq_enabled = readl(port + PORT_IRQ_ENABLE_SET);
	writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR, port + PORT_IRQ_ENABLE_CLR);

	/*
	 * The barrier is required to ensure that writes to cmd_block reach
	 * the memory before the write to PORT_CMD_ACTIVATE.
	 */
	wmb();
	writel((u32)paddr, port + PORT_CMD_ACTIVATE);
	writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);

	irq_mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
	irq_stat = ata_wait_register(ap, port + PORT_IRQ_STAT, irq_mask, 0x0,
				     10, timeout_msec);

	writel(irq_mask, port + PORT_IRQ_STAT); /* clear IRQs */
	irq_stat >>= PORT_IRQ_RAW_SHIFT;

	if (irq_stat & PORT_IRQ_COMPLETE)
		rc = 0;
	else {
		/* force port into known state */
		sil24_init_port(ap);

		if (irq_stat & PORT_IRQ_ERROR)
			rc = -EIO;
		else
			rc = -EBUSY;
	}

	/* restore IRQ enabled */
	writel(irq_enabled, port + PORT_IRQ_ENABLE_SET);

	return rc;
}

static int sil24_softreset(struct ata_link *link, unsigned int *class,
			   unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	int pmp = sata_srst_pmp(link);
	unsigned long timeout_msec = 0;
	struct ata_taskfile tf;
	const char *reason;
	int rc;

	DPRINTK("ENTER\n");

	/* put the port into known state */
	if (sil24_init_port(ap)) {
		reason = "port not ready";
		goto err;
	}

	/* do SRST */
	if (time_after(deadline, jiffies))
		timeout_msec = jiffies_to_msecs(deadline - jiffies);

	ata_tf_init(link->device, &tf);	/* doesn't really matter */
	rc = sil24_exec_polled_cmd(ap, pmp, &tf, 0, PRB_CTRL_SRST,
				   timeout_msec);
	if (rc == -EBUSY) {
		reason = "timeout";
		goto err;
	} else if (rc) {
		reason = "SRST command error";
		goto err;
	}

	sil24_read_tf(ap, 0, &tf);
	*class = ata_dev_classify(&tf);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;

 err:
	ata_link_err(link, "softreset failed (%s)\n", reason);
	return -EIO;
}

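/*
 * Hardreset the link with PORT_CS_DEV_RST.  If the controller is
 * suspected to be in a bad state (pp->do_port_rst is set), a full
 * PORT_RST is performed first and the port configuration restored.
 */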
static int sil24_hardreset(struct ata_link *link, unsigned int *class,
			   unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	void __iomem *port = sil24_port_base(ap);
	struct sil24_port_priv *pp = ap->private_data;
	int did_port_rst = 0;
	const char *reason;
	int tout_msec, rc;
	u32 tmp;

 retry:
	/* Sometimes, DEV_RST is not enough to recover the controller.
	 * This happens often after PM DMA CS errata.
	 */
	if (pp->do_port_rst) {
		ata_port_warn(ap,
			      "controller in dubious state, performing PORT_RST\n");

		writel(PORT_CS_PORT_RST, port + PORT_CTRL_STAT);
		ata_msleep(ap, 10);
		writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
		ata_wait_register(ap, port + PORT_CTRL_STAT, PORT_CS_RDY, 0,
				  10, 5000);

		/* restore port configuration */
		sil24_config_port(ap);
		sil24_config_pmp(ap, ap->nr_pmp_links);

		pp->do_port_rst = 0;
		did_port_rst = 1;
	}

	/* sil24 does the right thing(tm) without any protection */
	sata_set_spd(link);

	tout_msec = 100;
	if (ata_link_online(link))
		tout_msec = 5000;

	writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
	tmp = ata_wait_register(ap, port + PORT_CTRL_STAT,
				PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10,
				tout_msec);

	/* SStatus oscillates between zero and valid status after
	 * DEV_RST, debounce it.
	 */
	rc = sata_link_debounce(link, sata_deb_timing_long, deadline);
	if (rc) {
		reason = "PHY debouncing failed";
		goto err;
	}

	if (tmp & PORT_CS_DEV_RST) {
		if (ata_link_offline(link))
			return 0;
		reason = "link not ready";
		goto err;
	}

	/* Sil24 doesn't store signature FIS after hardreset, so we
	 * can't wait for BSY to clear.  Some devices take a long time
	 * to get ready and those devices will choke if we don't wait
	 * for BSY clearance here.  Tell libata to perform follow-up
	 * softreset.
	 */
	return -EAGAIN;

 err:
	if (!did_port_rst) {
		pp->do_port_rst = 1;
		goto retry;
	}

	ata_link_err(link, "hardreset failed (%s)\n", reason);
	return -EIO;
}

static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
				 struct sil24_sge *sge)
{
	struct scatterlist *sg;
	struct sil24_sge *last_sge = NULL;
	unsigned int si;

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		sge->addr = cpu_to_le64(sg_dma_address(sg));
		sge->cnt = cpu_to_le32(sg_dma_len(sg));
		sge->flags = 0;

		last_sge = sge;
		sge++;
	}

	last_sge->flags = cpu_to_le32(SGE_TRM);
}

static int sil24_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	u8 prot = qc->tf.protocol;

	/*
	 * There is a bug in the chip:
	 * Port LRAM Causes the PRB/SGT Data to be Corrupted
	 * If the host issues a read request for LRAM and SActive registers
	 * while active commands are available in the port, PRB/SGT data in
	 * the LRAM can become corrupted.  This issue applies only when
	 * reading from, but not writing to, the LRAM.
	 *
	 * Therefore, reading LRAM when there is no particular error [and
	 * other commands may be outstanding] is prohibited.
	 *
	 * To avoid this bug there are two situations where a command must run
	 * exclusive of any other commands on the port:
	 *
	 * - ATAPI commands which check the sense data
	 * - Passthrough ATA commands which always have ATA_QCFLAG_RESULT_TF
	 *   set.
	 *
	 */
	int is_excl = (ata_is_atapi(prot) ||
		       (qc->flags & ATA_QCFLAG_RESULT_TF));

	if (unlikely(ap->excl_link)) {
		if (link == ap->excl_link) {
			if (ap->nr_active_links)
				return ATA_DEFER_PORT;
			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
		} else
			return ATA_DEFER_PORT;
	} else if (unlikely(is_excl)) {
		ap->excl_link = link;
		if (ap->nr_active_links)
			return ATA_DEFER_PORT;
		qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
	}

	return ata_std_qc_defer(qc);
}

static void sil24_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct sil24_port_priv *pp = ap->private_data;
	union sil24_cmd_block *cb;
	struct sil24_prb *prb;
	struct sil24_sge *sge;
	u16 ctrl = 0;

	cb = &pp->cmd_block[sil24_tag(qc->hw_tag)];

	if (!ata_is_atapi(qc->tf.protocol)) {
		prb = &cb->ata.prb;
		sge = cb->ata.sge;
		if (ata_is_data(qc->tf.protocol)) {
			u16 prot = 0;
			ctrl = PRB_CTRL_PROTOCOL;
			if (ata_is_ncq(qc->tf.protocol))
				prot |= PRB_PROT_NCQ;
			if (qc->tf.flags & ATA_TFLAG_WRITE)
				prot |= PRB_PROT_WRITE;
			else
				prot |= PRB_PROT_READ;
			prb->prot = cpu_to_le16(prot);
		}
	} else {
		prb = &cb->atapi.prb;
		sge = cb->atapi.sge;
		memset(cb->atapi.cdb, 0, sizeof(cb->atapi.cdb));
		memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);

		if (ata_is_data(qc->tf.protocol)) {
			if (qc->tf.flags & ATA_TFLAG_WRITE)
				ctrl = PRB_CTRL_PACKET_WRITE;
			else
				ctrl = PRB_CTRL_PACKET_READ;
		}
	}

	prb->ctrl = cpu_to_le16(ctrl);
	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, prb->fis);

	if (qc->flags & ATA_QCFLAG_DMAMAP)
		sil24_fill_sg(qc, sge);
}

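/*
 * Issue the prepared command: write the 64-bit bus address of the
 * command block to the slot's activation register, which starts
 * execution.
 */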
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct sil24_port_priv *pp = ap->private_data;
	void __iomem *port = sil24_port_base(ap);
	unsigned int tag = sil24_tag(qc->hw_tag);
	dma_addr_t paddr;
	void __iomem *activate;

	paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
	activate = port + PORT_CMD_ACTIVATE + tag * 8;

	/*
	 * The barrier is required to ensure that writes to cmd_block reach
	 * the memory before the write to PORT_CMD_ACTIVATE.
	 */
	wmb();
	writel((u32)paddr, activate);
	writel((u64)paddr >> 32, activate + 4);

	return 0;
}

static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	sil24_read_tf(qc->ap, qc->hw_tag, &qc->result_tf);
	return true;
}

static void sil24_pmp_attach(struct ata_port *ap)
{
	u32 *gscr = ap->link.device->gscr;

	sil24_config_pmp(ap, 1);
	sil24_init_port(ap);

	if (sata_pmp_gscr_vendor(gscr) == 0x11ab &&
	    sata_pmp_gscr_devid(gscr) == 0x4140) {
		ata_port_info(ap,
			"disabling NCQ support due to sil24-mv4140 quirk\n");
		ap->flags &= ~ATA_FLAG_NCQ;
	}
}

static void sil24_pmp_detach(struct ata_port *ap)
{
	sil24_init_port(ap);
	sil24_config_pmp(ap, 0);

	ap->flags |= ATA_FLAG_NCQ;
}

static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
			       unsigned long deadline)
{
	int rc;

	rc = sil24_init_port(link->ap);
	if (rc) {
		ata_link_err(link, "hardreset failed (port not ready)\n");
		return rc;
	}

	return sata_std_hardreset(link, class, deadline);
}

static void sil24_freeze(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);

	/* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
	 * PORT_IRQ_ENABLE instead.
	 */
	writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
}

static void sil24_thaw(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	u32 tmp;

	/* clear IRQ */
	tmp = readl(port + PORT_IRQ_STAT);
	writel(tmp, port + PORT_IRQ_STAT);

	/* turn IRQ back on */
	writel(DEF_PORT_IRQ, port + PORT_IRQ_ENABLE_SET);
}

static void sil24_error_intr(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	struct sil24_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc = NULL;
	struct ata_link *link;
	struct ata_eh_info *ehi;
	int abort = 0, freeze = 0;
	u32 irq_stat;

	/* on error, we need to clear IRQ explicitly */
	irq_stat = readl(port + PORT_IRQ_STAT);
	writel(irq_stat, port + PORT_IRQ_STAT);

	/* first, analyze and record host port events */
	link = &ap->link;
	ehi = &link->eh_info;
	ata_ehi_clear_desc(ehi);

	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);

	if (irq_stat & PORT_IRQ_SDB_NOTIFY) {
		ata_ehi_push_desc(ehi, "SDB notify");
		sata_async_notification(ap);
	}

	if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, "%s",
				  irq_stat & PORT_IRQ_PHYRDY_CHG ?
				  "PHY RDY changed" : "device exchanged");
		freeze = 1;
	}

	if (irq_stat & PORT_IRQ_UNK_FIS) {
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "unknown FIS");
		freeze = 1;
	}

	/* deal with command error */
	if (irq_stat & PORT_IRQ_ERROR) {
		const struct sil24_cerr_info *ci = NULL;
		unsigned int err_mask = 0, action = 0;
		u32 context, cerr;
		int pmp;

		abort = 1;

		/* DMA Context Switch Failure in Port Multiplier Mode
		 * errata.  If we have active commands to 3 or more
		 * devices, any error condition on active devices can
		 * corrupt DMA context switching.
		 */
		if (ap->nr_active_links >= 3) {
			ehi->err_mask |= AC_ERR_OTHER;
			ehi->action |= ATA_EH_RESET;
			ata_ehi_push_desc(ehi, "PMP DMA CS errata");
			pp->do_port_rst = 1;
			freeze = 1;
		}

		/* find out the offending link and qc */
		if (sata_pmp_attached(ap)) {
			context = readl(port + PORT_CONTEXT);
			pmp = (context >> 5) & 0xf;

			if (pmp < ap->nr_pmp_links) {
				link = &ap->pmp_link[pmp];
				ehi = &link->eh_info;
				qc = ata_qc_from_tag(ap, link->active_tag);

				ata_ehi_clear_desc(ehi);
				ata_ehi_push_desc(ehi, "irq_stat 0x%08x",
						  irq_stat);
			} else {
				err_mask |= AC_ERR_HSM;
				action |= ATA_EH_RESET;
				freeze = 1;
			}
		} else
			qc = ata_qc_from_tag(ap, link->active_tag);

		/* analyze CMD_ERR */
		cerr = readl(port + PORT_CMD_ERR);
		if (cerr < ARRAY_SIZE(sil24_cerr_db))
			ci = &sil24_cerr_db[cerr];

		if (ci && ci->desc) {
			err_mask |= ci->err_mask;
			action |= ci->action;
			if (action & ATA_EH_RESET)
				freeze = 1;
			ata_ehi_push_desc(ehi, "%s", ci->desc);
		} else {
			err_mask |= AC_ERR_OTHER;
			action |= ATA_EH_RESET;
			freeze = 1;
			ata_ehi_push_desc(ehi, "unknown command error %d",
					  cerr);
		}

		/* record error info */
		if (qc)
			qc->err_mask |= err_mask;
		else
			ehi->err_mask |= err_mask;

		ehi->action |= action;

		/* if PMP, resume */
		if (sata_pmp_attached(ap))
			writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_STAT);
	}

	/* freeze or abort */
	if (freeze)
		ata_port_freeze(ap);
	else if (abort) {
		if (qc)
			ata_link_abort(qc->dev->link);
		else
			ata_port_abort(ap);
	}
}

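/*
 * Per-port interrupt handler.  Completions are reported through
 * PORT_SLOT_STAT; the ATTN bit indicates an error condition which is
 * handed off to sil24_error_intr().
 */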
static inline void sil24_host_intr(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	u32 slot_stat, qc_active;
	int rc;

	/* If PCIX_IRQ_WOC, there's an inherent race window between
	 * clearing IRQ pending status and reading PORT_SLOT_STAT
	 * which may cause spurious interrupts afterwards.  This is
	 * unavoidable and much better than losing interrupts which
	 * happens if IRQ pending is cleared after reading
	 * PORT_SLOT_STAT.
	 */
	if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
		writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);

	slot_stat = readl(port + PORT_SLOT_STAT);

	if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
		sil24_error_intr(ap);
		return;
	}

	qc_active = slot_stat & ~HOST_SSTAT_ATTN;
	rc = ata_qc_complete_multiple(ap, qc_active);
	if (rc > 0)
		return;
	if (rc < 0) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	/* spurious interrupts are expected if PCIX_IRQ_WOC */
	if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit())
		ata_port_info(ap,
			"spurious interrupt (slot_stat 0x%x active_tag %d sactive 0x%x)\n",
			slot_stat, ap->link.active_tag, ap->link.sactive);
}

static irqreturn_t sil24_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
	unsigned handled = 0;
	u32 status;
	int i;

	status = readl(host_base + HOST_IRQ_STAT);

	if (status == 0xffffffff) {
		dev_err(host->dev, "IRQ status == 0xffffffff, "
			"PCI fault or device removal?\n");
		goto out;
	}

	if (!(status & IRQ_STAT_4PORTS))
		goto out;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++)
		if (status & (1 << i)) {
			sil24_host_intr(host->ports[i]);
			handled++;
		}

	spin_unlock(&host->lock);
 out:
	return IRQ_RETVAL(handled);
}

static void sil24_error_handler(struct ata_port *ap)
{
	struct sil24_port_priv *pp = ap->private_data;

	if (sil24_init_port(ap))
		ata_eh_freeze_port(ap);

	sata_pmp_error_handler(ap);

	pp->do_port_rst = 0;
}

static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if ((qc->flags & ATA_QCFLAG_FAILED) && sil24_init_port(ap))
		ata_eh_freeze_port(ap);
}

static int sil24_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct sil24_port_priv *pp;
	union sil24_cmd_block *cb;
	size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
	dma_addr_t cb_dma;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	cb = dmam_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
	if (!cb)
		return -ENOMEM;
	memset(cb, 0, cb_size);

	pp->cmd_block = cb;
	pp->cmd_block_dma = cb_dma;

	ap->private_data = pp;

	ata_port_pbar_desc(ap, SIL24_HOST_BAR, -1, "host");
	ata_port_pbar_desc(ap, SIL24_PORT_BAR, sil24_port_offset(ap), "port");

	return 0;
}

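/*
 * One-time controller initialization: turn GPIO off, clear global
 * reset and mask port interrupts, bring each port out of PORT_RST
 * with the default PHY and port configuration, then re-enable
 * per-port interrupt reporting through HOST_CTRL.
 */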
static void sil24_init_controller(struct ata_host *host)
{
	void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
	u32 tmp;
	int i;

	/* GPIO off */
	writel(0, host_base + HOST_FLASH_CMD);

	/* clear global reset & mask interrupts during initialization */
	writel(0, host_base + HOST_CTRL);

	/* init ports */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		void __iomem *port = sil24_port_base(ap);

		/* Initial PHY setting */
		writel(0x20c, port + PORT_PHY_CFG);

		/* Clear port RST */
		tmp = readl(port + PORT_CTRL_STAT);
		if (tmp & PORT_CS_PORT_RST) {
			writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
			tmp = ata_wait_register(NULL, port + PORT_CTRL_STAT,
						PORT_CS_PORT_RST,
						PORT_CS_PORT_RST, 10, 100);
			if (tmp & PORT_CS_PORT_RST)
				dev_err(host->dev,
					"failed to clear port RST\n");
		}

		/* configure port */
		sil24_config_port(ap);
	}

	/* Turn on interrupts */
	writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
}

static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	extern int __MARKER__sil24_cmd_block_is_sized_wrongly;
	struct ata_port_info pi = sil24_port_info[ent->driver_data];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	void __iomem * const *iomap;
	struct ata_host *host;
	int rc;
	u32 tmp;

	/* cause link error if sil24_cmd_block is sized wrongly */
	if (sizeof(union sil24_cmd_block) != PAGE_SIZE)
		__MARKER__sil24_cmd_block_is_sized_wrongly = 1;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev,
				(1 << SIL24_HOST_BAR) | (1 << SIL24_PORT_BAR),
				DRV_NAME);
	if (rc)
		return rc;
	iomap = pcim_iomap_table(pdev);

	/* apply workaround for completion IRQ loss on PCI-X errata */
	if (pi.flags & SIL24_FLAG_PCIX_IRQ_WOC) {
		tmp = readl(iomap[SIL24_HOST_BAR] + HOST_CTRL);
		if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
			dev_info(&pdev->dev,
				 "Applying completion IRQ loss on PCI-X errata fix\n");
		else
			pi.flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
	}

	/* allocate and fill host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi,
				    SIL24_FLAG2NPORTS(ppi[0]->flags));
	if (!host)
		return -ENOMEM;
	host->iomap = iomap;

	/* configure and activate the device */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (rc) {
			rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
			if (rc) {
				dev_err(&pdev->dev,
					"64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev, "32-bit DMA enable failed\n");
			return rc;
		}
		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev,
				"32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	/* Set max read request size to 4096.  This slightly increases
	 * write throughput for pci-e variants.
	 */
	pcie_set_readrq(pdev, 4096);

	sil24_init_controller(host);

	if (sata_sil24_msi && !pci_enable_msi(pdev)) {
		dev_info(&pdev->dev, "Using MSI\n");
		pci_intx(pdev, 0);
	}

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, sil24_interrupt, IRQF_SHARED,
				 &sil24_sht);
}

#ifdef CONFIG_PM_SLEEP
static int sil24_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
		writel(HOST_CTRL_GLOBAL_RST, host_base + HOST_CTRL);

	sil24_init_controller(host);

	ata_host_resume(host);

	return 0;
}
#endif

#ifdef CONFIG_PM
static int sil24_port_resume(struct ata_port *ap)
{
	sil24_config_pmp(ap, ap->nr_pmp_links);
	return 0;
}
#endif

module_pci_driver(sil24_pci_driver);

MODULE_AUTHOR("Tejun Heo");
MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil24_pci_tbl);