/*
 * ahci.c - AHCI SATA support
 *
 * Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                 Please ALWAYS copy linux-ide@vger.kernel.org
 *                 on emails.
 *
 * Copyright 2004-2005 Red Hat, Inc.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * AHCI hardware documentation:
 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME        "ahci"
#define DRV_VERSION     "3.0"


enum {
        AHCI_PCI_BAR            = 5,
        AHCI_MAX_PORTS          = 32,
        AHCI_MAX_SG             = 168, /* hardware max is 64K */
        AHCI_DMA_BOUNDARY       = 0xffffffff,
        AHCI_USE_CLUSTERING     = 1,
        AHCI_MAX_CMDS           = 32,
        AHCI_CMD_SZ             = 32,
        AHCI_CMD_SLOT_SZ        = AHCI_MAX_CMDS * AHCI_CMD_SZ,
        AHCI_RX_FIS_SZ          = 256,
        AHCI_CMD_TBL_CDB        = 0x40,
        AHCI_CMD_TBL_HDR_SZ     = 0x80,
        AHCI_CMD_TBL_SZ         = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
        AHCI_CMD_TBL_AR_SZ      = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
        AHCI_PORT_PRIV_DMA_SZ   = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
                                  AHCI_RX_FIS_SZ,
        AHCI_IRQ_ON_SG          = (1 << 31),
        AHCI_CMD_ATAPI          = (1 << 5),
        AHCI_CMD_WRITE          = (1 << 6),
        AHCI_CMD_PREFETCH       = (1 << 7),
        AHCI_CMD_RESET          = (1 << 8),
        AHCI_CMD_CLR_BUSY       = (1 << 10),

        RX_FIS_D2H_REG          = 0x40, /* offset of D2H Register FIS data */
        RX_FIS_SDB              = 0x58, /* offset of SDB FIS data */
        RX_FIS_UNK              = 0x60, /* offset of Unknown FIS data */

        board_ahci              = 0,
        board_ahci_vt8251       = 1,
        board_ahci_ign_iferr    = 2,
        board_ahci_sb600        = 3,
        board_ahci_mv           = 4,

        /* global controller registers */
        HOST_CAP                = 0x00, /* host capabilities */
        HOST_CTL                = 0x04, /* global host control */
        HOST_IRQ_STAT           = 0x08, /* interrupt status */
        HOST_PORTS_IMPL         = 0x0c, /* bitmap of implemented ports */
        HOST_VERSION            = 0x10, /* AHCI spec. version compliancy */

        /* HOST_CTL bits */
        HOST_RESET              = (1 << 0),  /* reset controller; self-clear */
        HOST_IRQ_EN             = (1 << 1),  /* global IRQ enable */
        HOST_AHCI_EN            = (1 << 31), /* AHCI enabled */

        /* HOST_CAP bits */
        HOST_CAP_SSC            = (1 << 14), /* Slumber capable */
        HOST_CAP_PMP            = (1 << 17), /* Port Multiplier support */
        HOST_CAP_CLO            = (1 << 24), /* Command List Override support */
        HOST_CAP_SSS            = (1 << 27), /* Staggered Spin-up */
        HOST_CAP_SNTF           = (1 << 29), /* SNotification register */
        HOST_CAP_NCQ            = (1 << 30), /* Native Command Queueing */
        HOST_CAP_64             = (1 << 31), /* PCI DAC (64-bit DMA) support */

        /* registers for each SATA port */
        PORT_LST_ADDR           = 0x00, /* command list DMA addr */
        PORT_LST_ADDR_HI        = 0x04, /* command list DMA addr hi */
        PORT_FIS_ADDR           = 0x08, /* FIS rx buf addr */
        PORT_FIS_ADDR_HI        = 0x0c, /* FIS rx buf addr hi */
        PORT_IRQ_STAT           = 0x10, /* interrupt status */
        PORT_IRQ_MASK           = 0x14, /* interrupt enable/disable mask */
        PORT_CMD                = 0x18, /* port command */
        PORT_TFDATA             = 0x20, /* taskfile data */
        PORT_SIG                = 0x24, /* device TF signature */
        PORT_CMD_ISSUE          = 0x38, /* command issue */
        PORT_SCR_STAT           = 0x28, /* SATA phy register: SStatus */
        PORT_SCR_CTL            = 0x2c, /* SATA phy register: SControl */
        PORT_SCR_ERR            = 0x30, /* SATA phy register: SError */
        PORT_SCR_ACT            = 0x34, /* SATA phy register: SActive */
        PORT_SCR_NTF            = 0x3c, /* SATA phy register: SNotification */

        /* PORT_IRQ_{STAT,MASK} bits */
        PORT_IRQ_COLD_PRES      = (1 << 31), /* cold presence detect */
        PORT_IRQ_TF_ERR         = (1 << 30), /* task file error */
        PORT_IRQ_HBUS_ERR       = (1 << 29), /* host bus fatal error */
        PORT_IRQ_HBUS_DATA_ERR  = (1 << 28), /* host bus data error */
        PORT_IRQ_IF_ERR         = (1 << 27), /* interface fatal error */
        PORT_IRQ_IF_NONFATAL    = (1 << 26), /* interface non-fatal error */
        PORT_IRQ_OVERFLOW       = (1 << 24), /* xfer exhausted available S/G */
        PORT_IRQ_BAD_PMP        = (1 << 23), /* incorrect port multiplier */

        PORT_IRQ_PHYRDY         = (1 << 22), /* PhyRdy changed */
        PORT_IRQ_DEV_ILCK       = (1 << 7),  /* device interlock */
        PORT_IRQ_CONNECT        = (1 << 6),  /* port connect change status */
        PORT_IRQ_SG_DONE        = (1 << 5),  /* descriptor processed */
        PORT_IRQ_UNK_FIS        = (1 << 4),  /* unknown FIS rx'd */
        PORT_IRQ_SDB_FIS        = (1 << 3),  /* Set Device Bits FIS rx'd */
        PORT_IRQ_DMAS_FIS       = (1 << 2),  /* DMA Setup FIS rx'd */
        PORT_IRQ_PIOS_FIS       = (1 << 1),  /* PIO Setup FIS rx'd */
        PORT_IRQ_D2H_REG_FIS    = (1 << 0),  /* D2H Register FIS rx'd */

        PORT_IRQ_FREEZE         = PORT_IRQ_HBUS_ERR |
                                  PORT_IRQ_IF_ERR |
                                  PORT_IRQ_CONNECT |
                                  PORT_IRQ_PHYRDY |
                                  PORT_IRQ_UNK_FIS |
                                  PORT_IRQ_BAD_PMP,
        PORT_IRQ_ERROR          = PORT_IRQ_FREEZE |
                                  PORT_IRQ_TF_ERR |
                                  PORT_IRQ_HBUS_DATA_ERR,
        DEF_PORT_IRQ            = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
                                  PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
                                  PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,

        /* PORT_CMD bits */
        PORT_CMD_ATAPI          = (1 << 24), /* Device is ATAPI */
        PORT_CMD_PMP            = (1 << 17), /* PMP attached */
        PORT_CMD_LIST_ON        = (1 << 15), /* cmd list DMA engine running */
        PORT_CMD_FIS_ON         = (1 << 14), /* FIS DMA engine running */
        PORT_CMD_FIS_RX         = (1 << 4),  /* Enable FIS receive DMA engine */
        PORT_CMD_CLO            = (1 << 3),  /* Command list override */
        PORT_CMD_POWER_ON       = (1 << 2),  /* Power up device */
        PORT_CMD_SPIN_UP        = (1 << 1),  /* Spin up device */
        PORT_CMD_START          = (1 << 0),  /* Enable port DMA engine */

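        /*
         * For reference: with the sizes defined near the top of this enum,
         * the per-port DMA area (AHCI_PORT_PRIV_DMA_SZ) works out to
         * 32 * 32 = 1024 bytes of command list, 256 bytes of received-FIS
         * area and 32 * (0x80 + 168 * 16) = 90112 bytes of command tables,
         * roughly 89 KiB per port.  ahci_port_start() below carves a single
         * coherent DMA buffer up in exactly that order.
         */
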
PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */ 168 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */ 169 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */ 170 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */ 171 172 /* hpriv->flags bits */ 173 AHCI_HFLAG_NO_NCQ = (1 << 0), 174 AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */ 175 AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */ 176 AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */ 177 AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */ 178 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */ 179 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ 180 181 /* ap->flags bits */ 182 AHCI_FLAG_NO_HOTPLUG = (1 << 24), /* ignore PxSERR.DIAG.N */ 183 184 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 185 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | 186 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN, 187 AHCI_LFLAG_COMMON = ATA_LFLAG_SKIP_D2H_BSY, 188 }; 189 190 struct ahci_cmd_hdr { 191 u32 opts; 192 u32 status; 193 u32 tbl_addr; 194 u32 tbl_addr_hi; 195 u32 reserved[4]; 196 }; 197 198 struct ahci_sg { 199 u32 addr; 200 u32 addr_hi; 201 u32 reserved; 202 u32 flags_size; 203 }; 204 205 struct ahci_host_priv { 206 unsigned int flags; /* AHCI_HFLAG_* */ 207 u32 cap; /* cap to use */ 208 u32 port_map; /* port map to use */ 209 u32 saved_cap; /* saved initial cap */ 210 u32 saved_port_map; /* saved initial port_map */ 211 }; 212 213 struct ahci_port_priv { 214 struct ata_link *active_link; 215 struct ahci_cmd_hdr *cmd_slot; 216 dma_addr_t cmd_slot_dma; 217 void *cmd_tbl; 218 dma_addr_t cmd_tbl_dma; 219 void *rx_fis; 220 dma_addr_t rx_fis_dma; 221 /* for NCQ spurious interrupt analysis */ 222 unsigned int ncq_saw_d2h:1; 223 unsigned int ncq_saw_dmas:1; 224 unsigned int ncq_saw_sdb:1; 225 u32 intr_mask; /* interrupts to enable */ 226 }; 227 228 static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 229 static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 230 static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 231 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc); 232 static void ahci_irq_clear(struct ata_port *ap); 233 static int ahci_port_start(struct ata_port *ap); 234 static void ahci_port_stop(struct ata_port *ap); 235 static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf); 236 static void ahci_qc_prep(struct ata_queued_cmd *qc); 237 static u8 ahci_check_status(struct ata_port *ap); 238 static void ahci_freeze(struct ata_port *ap); 239 static void ahci_thaw(struct ata_port *ap); 240 static void ahci_pmp_attach(struct ata_port *ap); 241 static void ahci_pmp_detach(struct ata_port *ap); 242 static void ahci_error_handler(struct ata_port *ap); 243 static void ahci_vt8251_error_handler(struct ata_port *ap); 244 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); 245 static int ahci_port_resume(struct ata_port *ap); 246 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl); 247 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, 248 u32 opts); 249 #ifdef CONFIG_PM 250 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg); 251 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); 252 static int ahci_pci_device_resume(struct pci_dev *pdev); 253 #endif 254 255 static struct scsi_host_template ahci_sht = { 256 .module = THIS_MODULE, 257 .name = DRV_NAME, 258 .ioctl = ata_scsi_ioctl, 259 .queuecommand = 
ata_scsi_queuecmd, 260 .change_queue_depth = ata_scsi_change_queue_depth, 261 .can_queue = AHCI_MAX_CMDS - 1, 262 .this_id = ATA_SHT_THIS_ID, 263 .sg_tablesize = AHCI_MAX_SG, 264 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 265 .emulated = ATA_SHT_EMULATED, 266 .use_clustering = AHCI_USE_CLUSTERING, 267 .proc_name = DRV_NAME, 268 .dma_boundary = AHCI_DMA_BOUNDARY, 269 .slave_configure = ata_scsi_slave_config, 270 .slave_destroy = ata_scsi_slave_destroy, 271 .bios_param = ata_std_bios_param, 272 }; 273 274 static const struct ata_port_operations ahci_ops = { 275 .check_status = ahci_check_status, 276 .check_altstatus = ahci_check_status, 277 .dev_select = ata_noop_dev_select, 278 279 .tf_read = ahci_tf_read, 280 281 .qc_defer = sata_pmp_qc_defer_cmd_switch, 282 .qc_prep = ahci_qc_prep, 283 .qc_issue = ahci_qc_issue, 284 285 .irq_clear = ahci_irq_clear, 286 287 .scr_read = ahci_scr_read, 288 .scr_write = ahci_scr_write, 289 290 .freeze = ahci_freeze, 291 .thaw = ahci_thaw, 292 293 .error_handler = ahci_error_handler, 294 .post_internal_cmd = ahci_post_internal_cmd, 295 296 .pmp_attach = ahci_pmp_attach, 297 .pmp_detach = ahci_pmp_detach, 298 299 #ifdef CONFIG_PM 300 .port_suspend = ahci_port_suspend, 301 .port_resume = ahci_port_resume, 302 #endif 303 304 .port_start = ahci_port_start, 305 .port_stop = ahci_port_stop, 306 }; 307 308 static const struct ata_port_operations ahci_vt8251_ops = { 309 .check_status = ahci_check_status, 310 .check_altstatus = ahci_check_status, 311 .dev_select = ata_noop_dev_select, 312 313 .tf_read = ahci_tf_read, 314 315 .qc_defer = sata_pmp_qc_defer_cmd_switch, 316 .qc_prep = ahci_qc_prep, 317 .qc_issue = ahci_qc_issue, 318 319 .irq_clear = ahci_irq_clear, 320 321 .scr_read = ahci_scr_read, 322 .scr_write = ahci_scr_write, 323 324 .freeze = ahci_freeze, 325 .thaw = ahci_thaw, 326 327 .error_handler = ahci_vt8251_error_handler, 328 .post_internal_cmd = ahci_post_internal_cmd, 329 330 .pmp_attach = ahci_pmp_attach, 331 .pmp_detach = ahci_pmp_detach, 332 333 #ifdef CONFIG_PM 334 .port_suspend = ahci_port_suspend, 335 .port_resume = ahci_port_resume, 336 #endif 337 338 .port_start = ahci_port_start, 339 .port_stop = ahci_port_stop, 340 }; 341 342 #define AHCI_HFLAGS(flags) .private_data = (void *)(flags) 343 344 static const struct ata_port_info ahci_port_info[] = { 345 /* board_ahci */ 346 { 347 .flags = AHCI_FLAG_COMMON, 348 .link_flags = AHCI_LFLAG_COMMON, 349 .pio_mask = 0x1f, /* pio0-4 */ 350 .udma_mask = ATA_UDMA6, 351 .port_ops = &ahci_ops, 352 }, 353 /* board_ahci_vt8251 */ 354 { 355 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP), 356 .flags = AHCI_FLAG_COMMON, 357 .link_flags = AHCI_LFLAG_COMMON | ATA_LFLAG_HRST_TO_RESUME, 358 .pio_mask = 0x1f, /* pio0-4 */ 359 .udma_mask = ATA_UDMA6, 360 .port_ops = &ahci_vt8251_ops, 361 }, 362 /* board_ahci_ign_iferr */ 363 { 364 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR), 365 .flags = AHCI_FLAG_COMMON, 366 .link_flags = AHCI_LFLAG_COMMON, 367 .pio_mask = 0x1f, /* pio0-4 */ 368 .udma_mask = ATA_UDMA6, 369 .port_ops = &ahci_ops, 370 }, 371 /* board_ahci_sb600 */ 372 { 373 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | 374 AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_PMP), 375 .flags = AHCI_FLAG_COMMON, 376 .link_flags = AHCI_LFLAG_COMMON, 377 .pio_mask = 0x1f, /* pio0-4 */ 378 .udma_mask = ATA_UDMA6, 379 .port_ops = &ahci_ops, 380 }, 381 /* board_ahci_mv */ 382 { 383 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI | 384 AHCI_HFLAG_MV_PATA), 385 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 386 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA, 387 
.link_flags = AHCI_LFLAG_COMMON, 388 .pio_mask = 0x1f, /* pio0-4 */ 389 .udma_mask = ATA_UDMA6, 390 .port_ops = &ahci_ops, 391 }, 392 }; 393 394 static const struct pci_device_id ahci_pci_tbl[] = { 395 /* Intel */ 396 { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */ 397 { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */ 398 { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */ 399 { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */ 400 { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */ 401 { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */ 402 { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */ 403 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */ 404 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */ 405 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */ 406 { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */ 407 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */ 408 { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */ 409 { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */ 410 { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */ 411 { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */ 412 { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */ 413 { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */ 414 { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */ 415 { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */ 416 { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */ 417 { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */ 418 { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */ 419 { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */ 420 { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */ 421 { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */ 422 { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */ 423 { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */ 424 { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */ 425 426 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 427 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 428 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr }, 429 430 /* ATI */ 431 { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */ 432 { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb600 }, /* ATI SB700/800 */ 433 { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb600 }, /* ATI SB700/800 */ 434 { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb600 }, /* ATI SB700/800 */ 435 { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb600 }, /* ATI SB700/800 */ 436 { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb600 }, /* ATI SB700/800 */ 437 { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb600 }, /* ATI SB700/800 */ 438 439 /* VIA */ 440 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */ 441 { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */ 442 443 /* NVIDIA */ 444 { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci }, /* MCP65 */ 445 { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci }, /* MCP65 */ 446 { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci }, /* MCP65 */ 447 { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci }, /* MCP65 */ 448 { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci }, /* MCP65 */ 449 { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci }, /* MCP65 */ 450 { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci }, /* MCP65 */ 451 { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci }, /* MCP65 */ 452 { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci }, /* MCP67 */ 453 { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci }, /* MCP67 */ 454 { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci }, /* MCP67 */ 455 { PCI_VDEVICE(NVIDIA, 
0x0553), board_ahci }, /* MCP67 */ 456 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci }, /* MCP67 */ 457 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci }, /* MCP67 */ 458 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci }, /* MCP67 */ 459 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci }, /* MCP67 */ 460 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci }, /* MCP67 */ 461 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci }, /* MCP67 */ 462 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci }, /* MCP67 */ 463 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci }, /* MCP67 */ 464 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci }, /* MCP73 */ 465 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci }, /* MCP73 */ 466 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci }, /* MCP73 */ 467 { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci }, /* MCP73 */ 468 { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci }, /* MCP73 */ 469 { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci }, /* MCP73 */ 470 { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci }, /* MCP73 */ 471 { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci }, /* MCP73 */ 472 { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci }, /* MCP73 */ 473 { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci }, /* MCP73 */ 474 { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci }, /* MCP73 */ 475 { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci }, /* MCP73 */ 476 { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */ 477 { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */ 478 { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */ 479 { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */ 480 { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */ 481 { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */ 482 { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */ 483 { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */ 484 { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */ 485 { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */ 486 { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */ 487 { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */ 488 { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */ 489 { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */ 490 { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */ 491 { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */ 492 { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */ 493 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */ 494 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */ 495 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */ 496 497 /* SiS */ 498 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */ 499 { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 966 */ 500 { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */ 501 502 /* Marvell */ 503 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */ 504 505 /* Generic, PCI class code for AHCI */ 506 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 507 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci }, 508 509 { } /* terminate list */ 510 }; 511 512 513 static struct pci_driver ahci_pci_driver = { 514 .name = DRV_NAME, 515 .id_table = ahci_pci_tbl, 516 .probe = ahci_init_one, 517 .remove = ata_pci_remove_one, 518 #ifdef CONFIG_PM 519 .suspend = ahci_pci_device_suspend, 520 .resume = ahci_pci_device_resume, 521 #endif 522 }; 523 524 525 static inline int ahci_nr_ports(u32 cap) 526 { 527 return (cap & 0x1f) + 1; 528 } 529 530 static inline void __iomem *__ahci_port_base(struct ata_host *host, 531 unsigned int port_no) 532 { 533 void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; 534 535 return mmio + 0x100 + (port_no * 0x80); 536 } 537 
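/*
 * Per-port registers live in 0x80-byte windows starting at ABAR offset
 * 0x100, which is what the arithmetic in __ahci_port_base() encodes: for
 * example, port 2's SStatus register sits at
 * mmio + 0x100 + 2 * 0x80 + PORT_SCR_STAT = mmio + 0x228.
 */
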
static inline void __iomem *ahci_port_base(struct ata_port *ap)
{
        return __ahci_port_base(ap->host, ap->port_no);
}

/**
 *      ahci_save_initial_config - Save and fixup initial config values
 *      @pdev: target PCI device
 *      @hpriv: host private area to store config values
 *
 *      Some registers containing configuration info might be set up by
 *      the BIOS and might be cleared on reset.  This function saves the
 *      initial values of those registers into @hpriv such that they
 *      can be restored after controller reset.
 *
 *      If inconsistent, config values are fixed up by this function.
 *
 *      LOCKING:
 *      None.
 */
static void ahci_save_initial_config(struct pci_dev *pdev,
                                     struct ahci_host_priv *hpriv)
{
        void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
        u32 cap, port_map;
        int i;

        /* Values prefixed with saved_ are written back to host after
         * reset.  Values without are used for driver operation.
         */
        hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
        hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);

        /* some chips have errata preventing 64bit use */
        if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
                dev_printk(KERN_INFO, &pdev->dev,
                           "controller can't do 64bit DMA, forcing 32bit\n");
                cap &= ~HOST_CAP_64;
        }

        if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
                dev_printk(KERN_INFO, &pdev->dev,
                           "controller can't do NCQ, turning off CAP_NCQ\n");
                cap &= ~HOST_CAP_NCQ;
        }

        if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
                dev_printk(KERN_INFO, &pdev->dev,
                           "controller can't do PMP, turning off CAP_PMP\n");
                cap &= ~HOST_CAP_PMP;
        }

        /*
         * Temporary Marvell 6145 hack: PATA port presence
         * is asserted through the standard AHCI port
         * presence register, as bit 4 (counting from 0)
         */
        if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
                dev_printk(KERN_ERR, &pdev->dev,
                           "MV_AHCI HACK: port_map %x -> %x\n",
                           hpriv->port_map,
                           hpriv->port_map & 0xf);

                port_map &= 0xf;
        }

        /* cross check port_map and cap.n_ports */
        if (port_map) {
                u32 tmp_port_map = port_map;
                int n_ports = ahci_nr_ports(cap);

                for (i = 0; i < AHCI_MAX_PORTS && n_ports; i++) {
                        if (tmp_port_map & (1 << i)) {
                                n_ports--;
                                tmp_port_map &= ~(1 << i);
                        }
                }

                /* If n_ports and port_map are inconsistent, whine and
                 * clear port_map and let it be generated from n_ports.
                 */
                if (n_ports || tmp_port_map) {
                        dev_printk(KERN_WARNING, &pdev->dev,
                                   "nr_ports (%u) and implemented port map "
                                   "(0x%x) don't match, using nr_ports\n",
                                   ahci_nr_ports(cap), port_map);
                        port_map = 0;
                }
        }

        /* fabricate port_map from cap.nr_ports */
        if (!port_map) {
                port_map = (1 << ahci_nr_ports(cap)) - 1;
                dev_printk(KERN_WARNING, &pdev->dev,
                           "forcing PORTS_IMPL to 0x%x\n", port_map);

                /* write the fixed up value to the PI register */
                hpriv->saved_port_map = port_map;
        }

        /* record values to use during operation */
        hpriv->cap = cap;
        hpriv->port_map = port_map;
}

/**
 *      ahci_restore_initial_config - Restore initial config
 *      @host: target ATA host
 *
 *      Restore initial config stored by ahci_save_initial_config().
 *
 *      LOCKING:
 *      None.
651 */ 652 static void ahci_restore_initial_config(struct ata_host *host) 653 { 654 struct ahci_host_priv *hpriv = host->private_data; 655 void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; 656 657 writel(hpriv->saved_cap, mmio + HOST_CAP); 658 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL); 659 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */ 660 } 661 662 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg) 663 { 664 static const int offset[] = { 665 [SCR_STATUS] = PORT_SCR_STAT, 666 [SCR_CONTROL] = PORT_SCR_CTL, 667 [SCR_ERROR] = PORT_SCR_ERR, 668 [SCR_ACTIVE] = PORT_SCR_ACT, 669 [SCR_NOTIFICATION] = PORT_SCR_NTF, 670 }; 671 struct ahci_host_priv *hpriv = ap->host->private_data; 672 673 if (sc_reg < ARRAY_SIZE(offset) && 674 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF))) 675 return offset[sc_reg]; 676 return 0; 677 } 678 679 static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 680 { 681 void __iomem *port_mmio = ahci_port_base(ap); 682 int offset = ahci_scr_offset(ap, sc_reg); 683 684 if (offset) { 685 *val = readl(port_mmio + offset); 686 return 0; 687 } 688 return -EINVAL; 689 } 690 691 static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 692 { 693 void __iomem *port_mmio = ahci_port_base(ap); 694 int offset = ahci_scr_offset(ap, sc_reg); 695 696 if (offset) { 697 writel(val, port_mmio + offset); 698 return 0; 699 } 700 return -EINVAL; 701 } 702 703 static void ahci_start_engine(struct ata_port *ap) 704 { 705 void __iomem *port_mmio = ahci_port_base(ap); 706 u32 tmp; 707 708 /* start DMA */ 709 tmp = readl(port_mmio + PORT_CMD); 710 tmp |= PORT_CMD_START; 711 writel(tmp, port_mmio + PORT_CMD); 712 readl(port_mmio + PORT_CMD); /* flush */ 713 } 714 715 static int ahci_stop_engine(struct ata_port *ap) 716 { 717 void __iomem *port_mmio = ahci_port_base(ap); 718 u32 tmp; 719 720 tmp = readl(port_mmio + PORT_CMD); 721 722 /* check if the HBA is idle */ 723 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0) 724 return 0; 725 726 /* setting HBA to idle */ 727 tmp &= ~PORT_CMD_START; 728 writel(tmp, port_mmio + PORT_CMD); 729 730 /* wait for engine to stop. 
This could be as long as 500 msec */
        tmp = ata_wait_register(port_mmio + PORT_CMD,
                                PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
        if (tmp & PORT_CMD_LIST_ON)
                return -EIO;

        return 0;
}

static void ahci_start_fis_rx(struct ata_port *ap)
{
        void __iomem *port_mmio = ahci_port_base(ap);
        struct ahci_host_priv *hpriv = ap->host->private_data;
        struct ahci_port_priv *pp = ap->private_data;
        u32 tmp;

        /* set FIS registers */
        if (hpriv->cap & HOST_CAP_64)
                writel((pp->cmd_slot_dma >> 16) >> 16,
                       port_mmio + PORT_LST_ADDR_HI);
        writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);

        if (hpriv->cap & HOST_CAP_64)
                writel((pp->rx_fis_dma >> 16) >> 16,
                       port_mmio + PORT_FIS_ADDR_HI);
        writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);

        /* enable FIS reception */
        tmp = readl(port_mmio + PORT_CMD);
        tmp |= PORT_CMD_FIS_RX;
        writel(tmp, port_mmio + PORT_CMD);

        /* flush */
        readl(port_mmio + PORT_CMD);
}

static int ahci_stop_fis_rx(struct ata_port *ap)
{
        void __iomem *port_mmio = ahci_port_base(ap);
        u32 tmp;

        /* disable FIS reception */
        tmp = readl(port_mmio + PORT_CMD);
        tmp &= ~PORT_CMD_FIS_RX;
        writel(tmp, port_mmio + PORT_CMD);

        /* wait for completion, spec says 500ms, give it 1000 */
        tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
                                PORT_CMD_FIS_ON, 10, 1000);
        if (tmp & PORT_CMD_FIS_ON)
                return -EBUSY;

        return 0;
}

static void ahci_power_up(struct ata_port *ap)
{
        struct ahci_host_priv *hpriv = ap->host->private_data;
        void __iomem *port_mmio = ahci_port_base(ap);
        u32 cmd;

        cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;

        /* spin up device */
        if (hpriv->cap & HOST_CAP_SSS) {
                cmd |= PORT_CMD_SPIN_UP;
                writel(cmd, port_mmio + PORT_CMD);
        }

        /* wake up link */
        writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
}

#ifdef CONFIG_PM
static void ahci_power_down(struct ata_port *ap)
{
        struct ahci_host_priv *hpriv = ap->host->private_data;
        void __iomem *port_mmio = ahci_port_base(ap);
        u32 cmd, scontrol;

        if (!(hpriv->cap & HOST_CAP_SSS))
                return;

        /* put device into listen mode, first set PxSCTL.DET to 0 */
        scontrol = readl(port_mmio + PORT_SCR_CTL);
        scontrol &= ~0xf;
        writel(scontrol, port_mmio + PORT_SCR_CTL);

        /* then set PxCMD.SUD to 0 */
        cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
        cmd &= ~PORT_CMD_SPIN_UP;
        writel(cmd, port_mmio + PORT_CMD);
}
#endif

static void ahci_start_port(struct ata_port *ap)
{
        /* enable FIS reception */
        ahci_start_fis_rx(ap);

        /* enable DMA */
        ahci_start_engine(ap);
}

static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
{
        int rc;

        /* disable DMA */
        rc = ahci_stop_engine(ap);
        if (rc) {
                *emsg = "failed to stop engine";
                return rc;
        }

        /* disable FIS reception */
        rc = ahci_stop_fis_rx(ap);
        if (rc) {
                *emsg = "failed to stop FIS RX";
                return rc;
        }

        return 0;
}

static int ahci_reset_controller(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
        u32 tmp;

        /* we must be in AHCI mode, before using anything
         * AHCI-specific, such as HOST_RESET.
863 */ 864 tmp = readl(mmio + HOST_CTL); 865 if (!(tmp & HOST_AHCI_EN)) 866 writel(tmp | HOST_AHCI_EN, mmio + HOST_CTL); 867 868 /* global controller reset */ 869 if ((tmp & HOST_RESET) == 0) { 870 writel(tmp | HOST_RESET, mmio + HOST_CTL); 871 readl(mmio + HOST_CTL); /* flush */ 872 } 873 874 /* reset must complete within 1 second, or 875 * the hardware should be considered fried. 876 */ 877 ssleep(1); 878 879 tmp = readl(mmio + HOST_CTL); 880 if (tmp & HOST_RESET) { 881 dev_printk(KERN_ERR, host->dev, 882 "controller reset failed (0x%x)\n", tmp); 883 return -EIO; 884 } 885 886 /* turn on AHCI mode */ 887 writel(HOST_AHCI_EN, mmio + HOST_CTL); 888 (void) readl(mmio + HOST_CTL); /* flush */ 889 890 /* some registers might be cleared on reset. restore initial values */ 891 ahci_restore_initial_config(host); 892 893 if (pdev->vendor == PCI_VENDOR_ID_INTEL) { 894 u16 tmp16; 895 896 /* configure PCS */ 897 pci_read_config_word(pdev, 0x92, &tmp16); 898 tmp16 |= 0xf; 899 pci_write_config_word(pdev, 0x92, tmp16); 900 } 901 902 return 0; 903 } 904 905 static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap, 906 int port_no, void __iomem *mmio, 907 void __iomem *port_mmio) 908 { 909 const char *emsg = NULL; 910 int rc; 911 u32 tmp; 912 913 /* make sure port is not active */ 914 rc = ahci_deinit_port(ap, &emsg); 915 if (rc) 916 dev_printk(KERN_WARNING, &pdev->dev, 917 "%s (%d)\n", emsg, rc); 918 919 /* clear SError */ 920 tmp = readl(port_mmio + PORT_SCR_ERR); 921 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp); 922 writel(tmp, port_mmio + PORT_SCR_ERR); 923 924 /* clear port IRQ */ 925 tmp = readl(port_mmio + PORT_IRQ_STAT); 926 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp); 927 if (tmp) 928 writel(tmp, port_mmio + PORT_IRQ_STAT); 929 930 writel(1 << port_no, mmio + HOST_IRQ_STAT); 931 } 932 933 static void ahci_init_controller(struct ata_host *host) 934 { 935 struct ahci_host_priv *hpriv = host->private_data; 936 struct pci_dev *pdev = to_pci_dev(host->dev); 937 void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; 938 int i; 939 void __iomem *port_mmio; 940 u32 tmp; 941 942 if (hpriv->flags & AHCI_HFLAG_MV_PATA) { 943 port_mmio = __ahci_port_base(host, 4); 944 945 writel(0, port_mmio + PORT_IRQ_MASK); 946 947 /* clear port IRQ */ 948 tmp = readl(port_mmio + PORT_IRQ_STAT); 949 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp); 950 if (tmp) 951 writel(tmp, port_mmio + PORT_IRQ_STAT); 952 } 953 954 for (i = 0; i < host->n_ports; i++) { 955 struct ata_port *ap = host->ports[i]; 956 957 port_mmio = ahci_port_base(ap); 958 if (ata_port_is_dummy(ap)) 959 continue; 960 961 ahci_port_init(pdev, ap, i, mmio, port_mmio); 962 } 963 964 tmp = readl(mmio + HOST_CTL); 965 VPRINTK("HOST_CTL 0x%x\n", tmp); 966 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL); 967 tmp = readl(mmio + HOST_CTL); 968 VPRINTK("HOST_CTL 0x%x\n", tmp); 969 } 970 971 static unsigned int ahci_dev_classify(struct ata_port *ap) 972 { 973 void __iomem *port_mmio = ahci_port_base(ap); 974 struct ata_taskfile tf; 975 u32 tmp; 976 977 tmp = readl(port_mmio + PORT_SIG); 978 tf.lbah = (tmp >> 24) & 0xff; 979 tf.lbam = (tmp >> 16) & 0xff; 980 tf.lbal = (tmp >> 8) & 0xff; 981 tf.nsect = (tmp) & 0xff; 982 983 return ata_dev_classify(&tf); 984 } 985 986 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, 987 u32 opts) 988 { 989 dma_addr_t cmd_tbl_dma; 990 991 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ; 992 993 pp->cmd_slot[tag].opts = cpu_to_le32(opts); 994 pp->cmd_slot[tag].status = 0; 995 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 
0xffffffff); 996 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16); 997 } 998 999 static int ahci_kick_engine(struct ata_port *ap, int force_restart) 1000 { 1001 void __iomem *port_mmio = ap->ioaddr.cmd_addr; 1002 struct ahci_host_priv *hpriv = ap->host->private_data; 1003 u32 tmp; 1004 int busy, rc; 1005 1006 /* do we need to kick the port? */ 1007 busy = ahci_check_status(ap) & (ATA_BUSY | ATA_DRQ); 1008 if (!busy && !force_restart) 1009 return 0; 1010 1011 /* stop engine */ 1012 rc = ahci_stop_engine(ap); 1013 if (rc) 1014 goto out_restart; 1015 1016 /* need to do CLO? */ 1017 if (!busy) { 1018 rc = 0; 1019 goto out_restart; 1020 } 1021 1022 if (!(hpriv->cap & HOST_CAP_CLO)) { 1023 rc = -EOPNOTSUPP; 1024 goto out_restart; 1025 } 1026 1027 /* perform CLO */ 1028 tmp = readl(port_mmio + PORT_CMD); 1029 tmp |= PORT_CMD_CLO; 1030 writel(tmp, port_mmio + PORT_CMD); 1031 1032 rc = 0; 1033 tmp = ata_wait_register(port_mmio + PORT_CMD, 1034 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500); 1035 if (tmp & PORT_CMD_CLO) 1036 rc = -EIO; 1037 1038 /* restart engine */ 1039 out_restart: 1040 ahci_start_engine(ap); 1041 return rc; 1042 } 1043 1044 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp, 1045 struct ata_taskfile *tf, int is_cmd, u16 flags, 1046 unsigned long timeout_msec) 1047 { 1048 const u32 cmd_fis_len = 5; /* five dwords */ 1049 struct ahci_port_priv *pp = ap->private_data; 1050 void __iomem *port_mmio = ahci_port_base(ap); 1051 u8 *fis = pp->cmd_tbl; 1052 u32 tmp; 1053 1054 /* prep the command */ 1055 ata_tf_to_fis(tf, pmp, is_cmd, fis); 1056 ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12)); 1057 1058 /* issue & wait */ 1059 writel(1, port_mmio + PORT_CMD_ISSUE); 1060 1061 if (timeout_msec) { 1062 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1063 1, timeout_msec); 1064 if (tmp & 0x1) { 1065 ahci_kick_engine(ap, 1); 1066 return -EBUSY; 1067 } 1068 } else 1069 readl(port_mmio + PORT_CMD_ISSUE); /* flush */ 1070 1071 return 0; 1072 } 1073 1074 static int ahci_do_softreset(struct ata_link *link, unsigned int *class, 1075 int pmp, unsigned long deadline) 1076 { 1077 struct ata_port *ap = link->ap; 1078 const char *reason = NULL; 1079 unsigned long now, msecs; 1080 struct ata_taskfile tf; 1081 int rc; 1082 1083 DPRINTK("ENTER\n"); 1084 1085 if (ata_link_offline(link)) { 1086 DPRINTK("PHY reports no device\n"); 1087 *class = ATA_DEV_NONE; 1088 return 0; 1089 } 1090 1091 /* prepare for SRST (AHCI-1.1 10.4.1) */ 1092 rc = ahci_kick_engine(ap, 1); 1093 if (rc) 1094 ata_link_printk(link, KERN_WARNING, 1095 "failed to reset engine (errno=%d)", rc); 1096 1097 ata_tf_init(link->device, &tf); 1098 1099 /* issue the first D2H Register FIS */ 1100 msecs = 0; 1101 now = jiffies; 1102 if (time_after(now, deadline)) 1103 msecs = jiffies_to_msecs(deadline - now); 1104 1105 tf.ctl |= ATA_SRST; 1106 if (ahci_exec_polled_cmd(ap, pmp, &tf, 0, 1107 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) { 1108 rc = -EIO; 1109 reason = "1st FIS failed"; 1110 goto fail; 1111 } 1112 1113 /* spec says at least 5us, but be generous and sleep for 1ms */ 1114 msleep(1); 1115 1116 /* issue the second D2H Register FIS */ 1117 tf.ctl &= ~ATA_SRST; 1118 ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0); 1119 1120 /* spec mandates ">= 2ms" before checking status. 1121 * We wait 150ms, because that was the magic delay used for 1122 * ATAPI devices in Hale Landis's ATADRVR, for the period of time 1123 * between when the ATA command register is written, and then 1124 * status is checked. 
Because waiting for "a while" before 1125 * checking status is fine, post SRST, we perform this magic 1126 * delay here as well. 1127 */ 1128 msleep(150); 1129 1130 rc = ata_wait_ready(ap, deadline); 1131 /* link occupied, -ENODEV too is an error */ 1132 if (rc) { 1133 reason = "device not ready"; 1134 goto fail; 1135 } 1136 *class = ahci_dev_classify(ap); 1137 1138 DPRINTK("EXIT, class=%u\n", *class); 1139 return 0; 1140 1141 fail: 1142 ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason); 1143 return rc; 1144 } 1145 1146 static int ahci_softreset(struct ata_link *link, unsigned int *class, 1147 unsigned long deadline) 1148 { 1149 int pmp = 0; 1150 1151 if (link->ap->flags & ATA_FLAG_PMP) 1152 pmp = SATA_PMP_CTRL_PORT; 1153 1154 return ahci_do_softreset(link, class, pmp, deadline); 1155 } 1156 1157 static int ahci_hardreset(struct ata_link *link, unsigned int *class, 1158 unsigned long deadline) 1159 { 1160 struct ata_port *ap = link->ap; 1161 struct ahci_port_priv *pp = ap->private_data; 1162 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; 1163 struct ata_taskfile tf; 1164 int rc; 1165 1166 DPRINTK("ENTER\n"); 1167 1168 ahci_stop_engine(ap); 1169 1170 /* clear D2H reception area to properly wait for D2H FIS */ 1171 ata_tf_init(link->device, &tf); 1172 tf.command = 0x80; 1173 ata_tf_to_fis(&tf, 0, 0, d2h_fis); 1174 1175 rc = sata_std_hardreset(link, class, deadline); 1176 1177 ahci_start_engine(ap); 1178 1179 if (rc == 0 && ata_link_online(link)) 1180 *class = ahci_dev_classify(ap); 1181 if (rc != -EAGAIN && *class == ATA_DEV_UNKNOWN) 1182 *class = ATA_DEV_NONE; 1183 1184 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class); 1185 return rc; 1186 } 1187 1188 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, 1189 unsigned long deadline) 1190 { 1191 struct ata_port *ap = link->ap; 1192 u32 serror; 1193 int rc; 1194 1195 DPRINTK("ENTER\n"); 1196 1197 ahci_stop_engine(ap); 1198 1199 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), 1200 deadline); 1201 1202 /* vt8251 needs SError cleared for the port to operate */ 1203 ahci_scr_read(ap, SCR_ERROR, &serror); 1204 ahci_scr_write(ap, SCR_ERROR, serror); 1205 1206 ahci_start_engine(ap); 1207 1208 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class); 1209 1210 /* vt8251 doesn't clear BSY on signature FIS reception, 1211 * request follow-up softreset. 
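         *
         * (The "return rc ?: -EAGAIN" below uses the GNU C conditional
         * with the middle operand omitted: it yields rc when rc is
         * non-zero and -EAGAIN when the hardreset itself succeeded,
         * which is how the follow-up softreset is requested from EH.)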
1212 */ 1213 return rc ?: -EAGAIN; 1214 } 1215 1216 static void ahci_postreset(struct ata_link *link, unsigned int *class) 1217 { 1218 struct ata_port *ap = link->ap; 1219 void __iomem *port_mmio = ahci_port_base(ap); 1220 u32 new_tmp, tmp; 1221 1222 ata_std_postreset(link, class); 1223 1224 /* Make sure port's ATAPI bit is set appropriately */ 1225 new_tmp = tmp = readl(port_mmio + PORT_CMD); 1226 if (*class == ATA_DEV_ATAPI) 1227 new_tmp |= PORT_CMD_ATAPI; 1228 else 1229 new_tmp &= ~PORT_CMD_ATAPI; 1230 if (new_tmp != tmp) { 1231 writel(new_tmp, port_mmio + PORT_CMD); 1232 readl(port_mmio + PORT_CMD); /* flush */ 1233 } 1234 } 1235 1236 static int ahci_pmp_softreset(struct ata_link *link, unsigned int *class, 1237 unsigned long deadline) 1238 { 1239 return ahci_do_softreset(link, class, link->pmp, deadline); 1240 } 1241 1242 static u8 ahci_check_status(struct ata_port *ap) 1243 { 1244 void __iomem *mmio = ap->ioaddr.cmd_addr; 1245 1246 return readl(mmio + PORT_TFDATA) & 0xFF; 1247 } 1248 1249 static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf) 1250 { 1251 struct ahci_port_priv *pp = ap->private_data; 1252 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; 1253 1254 ata_tf_from_fis(d2h_fis, tf); 1255 } 1256 1257 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl) 1258 { 1259 struct scatterlist *sg; 1260 struct ahci_sg *ahci_sg; 1261 unsigned int n_sg = 0; 1262 1263 VPRINTK("ENTER\n"); 1264 1265 /* 1266 * Next, the S/G list. 1267 */ 1268 ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ; 1269 ata_for_each_sg(sg, qc) { 1270 dma_addr_t addr = sg_dma_address(sg); 1271 u32 sg_len = sg_dma_len(sg); 1272 1273 ahci_sg->addr = cpu_to_le32(addr & 0xffffffff); 1274 ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16); 1275 ahci_sg->flags_size = cpu_to_le32(sg_len - 1); 1276 1277 ahci_sg++; 1278 n_sg++; 1279 } 1280 1281 return n_sg; 1282 } 1283 1284 static void ahci_qc_prep(struct ata_queued_cmd *qc) 1285 { 1286 struct ata_port *ap = qc->ap; 1287 struct ahci_port_priv *pp = ap->private_data; 1288 int is_atapi = is_atapi_taskfile(&qc->tf); 1289 void *cmd_tbl; 1290 u32 opts; 1291 const u32 cmd_fis_len = 5; /* five dwords */ 1292 unsigned int n_elem; 1293 1294 /* 1295 * Fill in command table information. First, the header, 1296 * a SATA Register - Host to Device command FIS. 1297 */ 1298 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ; 1299 1300 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl); 1301 if (is_atapi) { 1302 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32); 1303 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len); 1304 } 1305 1306 n_elem = 0; 1307 if (qc->flags & ATA_QCFLAG_DMAMAP) 1308 n_elem = ahci_fill_sg(qc, cmd_tbl); 1309 1310 /* 1311 * Fill in command slot information. 
1312 */ 1313 opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12); 1314 if (qc->tf.flags & ATA_TFLAG_WRITE) 1315 opts |= AHCI_CMD_WRITE; 1316 if (is_atapi) 1317 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; 1318 1319 ahci_fill_cmd_slot(pp, qc->tag, opts); 1320 } 1321 1322 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat) 1323 { 1324 struct ahci_host_priv *hpriv = ap->host->private_data; 1325 struct ahci_port_priv *pp = ap->private_data; 1326 struct ata_eh_info *host_ehi = &ap->link.eh_info; 1327 struct ata_link *link = NULL; 1328 struct ata_queued_cmd *active_qc; 1329 struct ata_eh_info *active_ehi; 1330 u32 serror; 1331 1332 /* determine active link */ 1333 ata_port_for_each_link(link, ap) 1334 if (ata_link_active(link)) 1335 break; 1336 if (!link) 1337 link = &ap->link; 1338 1339 active_qc = ata_qc_from_tag(ap, link->active_tag); 1340 active_ehi = &link->eh_info; 1341 1342 /* record irq stat */ 1343 ata_ehi_clear_desc(host_ehi); 1344 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat); 1345 1346 /* AHCI needs SError cleared; otherwise, it might lock up */ 1347 ahci_scr_read(ap, SCR_ERROR, &serror); 1348 ahci_scr_write(ap, SCR_ERROR, serror); 1349 host_ehi->serror |= serror; 1350 1351 /* some controllers set IRQ_IF_ERR on device errors, ignore it */ 1352 if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR) 1353 irq_stat &= ~PORT_IRQ_IF_ERR; 1354 1355 if (irq_stat & PORT_IRQ_TF_ERR) { 1356 /* If qc is active, charge it; otherwise, the active 1357 * link. There's no active qc on NCQ errors. It will 1358 * be determined by EH by reading log page 10h. 1359 */ 1360 if (active_qc) 1361 active_qc->err_mask |= AC_ERR_DEV; 1362 else 1363 active_ehi->err_mask |= AC_ERR_DEV; 1364 1365 if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL) 1366 host_ehi->serror &= ~SERR_INTERNAL; 1367 } 1368 1369 if (irq_stat & PORT_IRQ_UNK_FIS) { 1370 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK); 1371 1372 active_ehi->err_mask |= AC_ERR_HSM; 1373 active_ehi->action |= ATA_EH_SOFTRESET; 1374 ata_ehi_push_desc(active_ehi, 1375 "unknown FIS %08x %08x %08x %08x" , 1376 unk[0], unk[1], unk[2], unk[3]); 1377 } 1378 1379 if (ap->nr_pmp_links && (irq_stat & PORT_IRQ_BAD_PMP)) { 1380 active_ehi->err_mask |= AC_ERR_HSM; 1381 active_ehi->action |= ATA_EH_SOFTRESET; 1382 ata_ehi_push_desc(active_ehi, "incorrect PMP"); 1383 } 1384 1385 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) { 1386 host_ehi->err_mask |= AC_ERR_HOST_BUS; 1387 host_ehi->action |= ATA_EH_SOFTRESET; 1388 ata_ehi_push_desc(host_ehi, "host bus error"); 1389 } 1390 1391 if (irq_stat & PORT_IRQ_IF_ERR) { 1392 host_ehi->err_mask |= AC_ERR_ATA_BUS; 1393 host_ehi->action |= ATA_EH_SOFTRESET; 1394 ata_ehi_push_desc(host_ehi, "interface fatal error"); 1395 } 1396 1397 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) { 1398 ata_ehi_hotplugged(host_ehi); 1399 ata_ehi_push_desc(host_ehi, "%s", 1400 irq_stat & PORT_IRQ_CONNECT ? 
                        "connection status changed" : "PHY RDY changed");
        }

        /* okay, let's hand over to EH */

        if (irq_stat & PORT_IRQ_FREEZE)
                ata_port_freeze(ap);
        else
                ata_port_abort(ap);
}

static void ahci_port_intr(struct ata_port *ap)
{
        void __iomem *port_mmio = ap->ioaddr.cmd_addr;
        struct ata_eh_info *ehi = &ap->link.eh_info;
        struct ahci_port_priv *pp = ap->private_data;
        struct ahci_host_priv *hpriv = ap->host->private_data;
        int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
        u32 status, qc_active;
        int rc, known_irq = 0;

        status = readl(port_mmio + PORT_IRQ_STAT);
        writel(status, port_mmio + PORT_IRQ_STAT);

        /* ignore BAD_PMP while resetting */
        if (unlikely(resetting))
                status &= ~PORT_IRQ_BAD_PMP;

        if (unlikely(status & PORT_IRQ_ERROR)) {
                ahci_error_intr(ap, status);
                return;
        }

        if (status & PORT_IRQ_SDB_FIS) {
                /* If SNotification is available, leave notification
                 * handling to sata_async_notification().  If not,
                 * emulate it by snooping SDB FIS RX area.
                 *
                 * Snooping FIS RX area is probably cheaper than
                 * poking SNotification but some controllers which
                 * implement SNotification, ICH9 for example, don't
                 * store AN SDB FIS into receive area.
                 */
                if (hpriv->cap & HOST_CAP_SNTF)
                        sata_async_notification(ap);
                else {
                        /* If the 'N' bit in word 0 of the FIS is set,
                         * we just received asynchronous notification.
                         * Tell libata about it.
                         */
                        const __le32 *f = pp->rx_fis + RX_FIS_SDB;
                        u32 f0 = le32_to_cpu(f[0]);

                        if (f0 & (1 << 15))
                                sata_async_notification(ap);
                }
        }

        /* pp->active_link is valid iff any command is in flight */
        if (ap->qc_active && pp->active_link->sactive)
                qc_active = readl(port_mmio + PORT_SCR_ACT);
        else
                qc_active = readl(port_mmio + PORT_CMD_ISSUE);

        rc = ata_qc_complete_multiple(ap, qc_active, NULL);

        /* If resetting, spurious or invalid completions are expected,
         * return unconditionally.
         */
        if (resetting)
                return;

        if (rc > 0)
                return;
        if (rc < 0) {
                ehi->err_mask |= AC_ERR_HSM;
                ehi->action |= ATA_EH_SOFTRESET;
                ata_port_freeze(ap);
                return;
        }

        /* hmmm... a spurious interrupt */

        /* if !NCQ, ignore.  No modern ATA device has broken HSM
         * implementation for non-NCQ commands.
         */
        if (!ap->link.sactive)
                return;

        if (status & PORT_IRQ_D2H_REG_FIS) {
                if (!pp->ncq_saw_d2h)
                        ata_port_printk(ap, KERN_INFO,
                                "D2H reg with I during NCQ, "
                                "this message won't be printed again\n");
                pp->ncq_saw_d2h = 1;
                known_irq = 1;
        }

        if (status & PORT_IRQ_DMAS_FIS) {
                if (!pp->ncq_saw_dmas)
                        ata_port_printk(ap, KERN_INFO,
                                "DMAS FIS during NCQ, "
                                "this message won't be printed again\n");
                pp->ncq_saw_dmas = 1;
                known_irq = 1;
        }

        if (status & PORT_IRQ_SDB_FIS) {
                const __le32 *f = pp->rx_fis + RX_FIS_SDB;

                if (le32_to_cpu(f[1])) {
                        /* SDB FIS containing spurious completions
                         * might be dangerous, whine and fail commands
                         * with HSM violation.  EH will turn off NCQ
                         * after several such failures.
1516 */ 1517 ata_ehi_push_desc(ehi, 1518 "spurious completions during NCQ " 1519 "issue=0x%x SAct=0x%x FIS=%08x:%08x", 1520 readl(port_mmio + PORT_CMD_ISSUE), 1521 readl(port_mmio + PORT_SCR_ACT), 1522 le32_to_cpu(f[0]), le32_to_cpu(f[1])); 1523 ehi->err_mask |= AC_ERR_HSM; 1524 ehi->action |= ATA_EH_SOFTRESET; 1525 ata_port_freeze(ap); 1526 } else { 1527 if (!pp->ncq_saw_sdb) 1528 ata_port_printk(ap, KERN_INFO, 1529 "spurious SDB FIS %08x:%08x during NCQ, " 1530 "this message won't be printed again\n", 1531 le32_to_cpu(f[0]), le32_to_cpu(f[1])); 1532 pp->ncq_saw_sdb = 1; 1533 } 1534 known_irq = 1; 1535 } 1536 1537 if (!known_irq) 1538 ata_port_printk(ap, KERN_INFO, "spurious interrupt " 1539 "(irq_stat 0x%x active_tag 0x%x sactive 0x%x)\n", 1540 status, ap->link.active_tag, ap->link.sactive); 1541 } 1542 1543 static void ahci_irq_clear(struct ata_port *ap) 1544 { 1545 /* TODO */ 1546 } 1547 1548 static irqreturn_t ahci_interrupt(int irq, void *dev_instance) 1549 { 1550 struct ata_host *host = dev_instance; 1551 struct ahci_host_priv *hpriv; 1552 unsigned int i, handled = 0; 1553 void __iomem *mmio; 1554 u32 irq_stat, irq_ack = 0; 1555 1556 VPRINTK("ENTER\n"); 1557 1558 hpriv = host->private_data; 1559 mmio = host->iomap[AHCI_PCI_BAR]; 1560 1561 /* sigh. 0xffffffff is a valid return from h/w */ 1562 irq_stat = readl(mmio + HOST_IRQ_STAT); 1563 irq_stat &= hpriv->port_map; 1564 if (!irq_stat) 1565 return IRQ_NONE; 1566 1567 spin_lock(&host->lock); 1568 1569 for (i = 0; i < host->n_ports; i++) { 1570 struct ata_port *ap; 1571 1572 if (!(irq_stat & (1 << i))) 1573 continue; 1574 1575 ap = host->ports[i]; 1576 if (ap) { 1577 ahci_port_intr(ap); 1578 VPRINTK("port %u\n", i); 1579 } else { 1580 VPRINTK("port %u (no irq)\n", i); 1581 if (ata_ratelimit()) 1582 dev_printk(KERN_WARNING, host->dev, 1583 "interrupt on disabled port %u\n", i); 1584 } 1585 1586 irq_ack |= (1 << i); 1587 } 1588 1589 if (irq_ack) { 1590 writel(irq_ack, mmio + HOST_IRQ_STAT); 1591 handled = 1; 1592 } 1593 1594 spin_unlock(&host->lock); 1595 1596 VPRINTK("EXIT\n"); 1597 1598 return IRQ_RETVAL(handled); 1599 } 1600 1601 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) 1602 { 1603 struct ata_port *ap = qc->ap; 1604 void __iomem *port_mmio = ahci_port_base(ap); 1605 struct ahci_port_priv *pp = ap->private_data; 1606 1607 /* Keep track of the currently active link. It will be used 1608 * in completion path to determine whether NCQ phase is in 1609 * progress. 
1610 */ 1611 pp->active_link = qc->dev->link; 1612 1613 if (qc->tf.protocol == ATA_PROT_NCQ) 1614 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT); 1615 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE); 1616 readl(port_mmio + PORT_CMD_ISSUE); /* flush */ 1617 1618 return 0; 1619 } 1620 1621 static void ahci_freeze(struct ata_port *ap) 1622 { 1623 void __iomem *port_mmio = ahci_port_base(ap); 1624 1625 /* turn IRQ off */ 1626 writel(0, port_mmio + PORT_IRQ_MASK); 1627 } 1628 1629 static void ahci_thaw(struct ata_port *ap) 1630 { 1631 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR]; 1632 void __iomem *port_mmio = ahci_port_base(ap); 1633 u32 tmp; 1634 struct ahci_port_priv *pp = ap->private_data; 1635 1636 /* clear IRQ */ 1637 tmp = readl(port_mmio + PORT_IRQ_STAT); 1638 writel(tmp, port_mmio + PORT_IRQ_STAT); 1639 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT); 1640 1641 /* turn IRQ back on */ 1642 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); 1643 } 1644 1645 static void ahci_error_handler(struct ata_port *ap) 1646 { 1647 if (!(ap->pflags & ATA_PFLAG_FROZEN)) { 1648 /* restart engine */ 1649 ahci_stop_engine(ap); 1650 ahci_start_engine(ap); 1651 } 1652 1653 /* perform recovery */ 1654 sata_pmp_do_eh(ap, ata_std_prereset, ahci_softreset, 1655 ahci_hardreset, ahci_postreset, 1656 sata_pmp_std_prereset, ahci_pmp_softreset, 1657 sata_pmp_std_hardreset, sata_pmp_std_postreset); 1658 } 1659 1660 static void ahci_vt8251_error_handler(struct ata_port *ap) 1661 { 1662 if (!(ap->pflags & ATA_PFLAG_FROZEN)) { 1663 /* restart engine */ 1664 ahci_stop_engine(ap); 1665 ahci_start_engine(ap); 1666 } 1667 1668 /* perform recovery */ 1669 ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_vt8251_hardreset, 1670 ahci_postreset); 1671 } 1672 1673 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc) 1674 { 1675 struct ata_port *ap = qc->ap; 1676 1677 /* make DMA engine forget about the failed command */ 1678 if (qc->flags & ATA_QCFLAG_FAILED) 1679 ahci_kick_engine(ap, 1); 1680 } 1681 1682 static void ahci_pmp_attach(struct ata_port *ap) 1683 { 1684 void __iomem *port_mmio = ahci_port_base(ap); 1685 struct ahci_port_priv *pp = ap->private_data; 1686 u32 cmd; 1687 1688 cmd = readl(port_mmio + PORT_CMD); 1689 cmd |= PORT_CMD_PMP; 1690 writel(cmd, port_mmio + PORT_CMD); 1691 1692 pp->intr_mask |= PORT_IRQ_BAD_PMP; 1693 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); 1694 } 1695 1696 static void ahci_pmp_detach(struct ata_port *ap) 1697 { 1698 void __iomem *port_mmio = ahci_port_base(ap); 1699 struct ahci_port_priv *pp = ap->private_data; 1700 u32 cmd; 1701 1702 cmd = readl(port_mmio + PORT_CMD); 1703 cmd &= ~PORT_CMD_PMP; 1704 writel(cmd, port_mmio + PORT_CMD); 1705 1706 pp->intr_mask &= ~PORT_IRQ_BAD_PMP; 1707 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); 1708 } 1709 1710 static int ahci_port_resume(struct ata_port *ap) 1711 { 1712 ahci_power_up(ap); 1713 ahci_start_port(ap); 1714 1715 if (ap->nr_pmp_links) 1716 ahci_pmp_attach(ap); 1717 else 1718 ahci_pmp_detach(ap); 1719 1720 return 0; 1721 } 1722 1723 #ifdef CONFIG_PM 1724 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg) 1725 { 1726 const char *emsg = NULL; 1727 int rc; 1728 1729 rc = ahci_deinit_port(ap, &emsg); 1730 if (rc == 0) 1731 ahci_power_down(ap); 1732 else { 1733 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc); 1734 ahci_start_port(ap); 1735 } 1736 1737 return rc; 1738 } 1739 1740 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 1741 { 1742 struct ata_host *host = 
dev_get_drvdata(&pdev->dev); 1743 void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; 1744 u32 ctl; 1745 1746 if (mesg.event == PM_EVENT_SUSPEND) { 1747 /* AHCI spec rev1.1 section 8.3.3: 1748 * Software must disable interrupts prior to requesting a 1749 * transition of the HBA to D3 state. 1750 */ 1751 ctl = readl(mmio + HOST_CTL); 1752 ctl &= ~HOST_IRQ_EN; 1753 writel(ctl, mmio + HOST_CTL); 1754 readl(mmio + HOST_CTL); /* flush */ 1755 } 1756 1757 return ata_pci_device_suspend(pdev, mesg); 1758 } 1759 1760 static int ahci_pci_device_resume(struct pci_dev *pdev) 1761 { 1762 struct ata_host *host = dev_get_drvdata(&pdev->dev); 1763 int rc; 1764 1765 rc = ata_pci_device_do_resume(pdev); 1766 if (rc) 1767 return rc; 1768 1769 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { 1770 rc = ahci_reset_controller(host); 1771 if (rc) 1772 return rc; 1773 1774 ahci_init_controller(host); 1775 } 1776 1777 ata_host_resume(host); 1778 1779 return 0; 1780 } 1781 #endif 1782 1783 static int ahci_port_start(struct ata_port *ap) 1784 { 1785 struct device *dev = ap->host->dev; 1786 struct ahci_port_priv *pp; 1787 void *mem; 1788 dma_addr_t mem_dma; 1789 int rc; 1790 1791 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 1792 if (!pp) 1793 return -ENOMEM; 1794 1795 rc = ata_pad_alloc(ap, dev); 1796 if (rc) 1797 return rc; 1798 1799 mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, 1800 GFP_KERNEL); 1801 if (!mem) 1802 return -ENOMEM; 1803 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ); 1804 1805 /* 1806 * First item in chunk of DMA memory: 32-slot command table, 1807 * 32 bytes each in size 1808 */ 1809 pp->cmd_slot = mem; 1810 pp->cmd_slot_dma = mem_dma; 1811 1812 mem += AHCI_CMD_SLOT_SZ; 1813 mem_dma += AHCI_CMD_SLOT_SZ; 1814 1815 /* 1816 * Second item: Received-FIS area 1817 */ 1818 pp->rx_fis = mem; 1819 pp->rx_fis_dma = mem_dma; 1820 1821 mem += AHCI_RX_FIS_SZ; 1822 mem_dma += AHCI_RX_FIS_SZ; 1823 1824 /* 1825 * Third item: data area for storing a single command 1826 * and its scatter-gather table 1827 */ 1828 pp->cmd_tbl = mem; 1829 pp->cmd_tbl_dma = mem_dma; 1830 1831 /* 1832 * Save off initial list of interrupts to be enabled. 
1833 * This could be changed later 1834 */ 1835 pp->intr_mask = DEF_PORT_IRQ; 1836 1837 ap->private_data = pp; 1838 1839 /* engage engines, captain */ 1840 return ahci_port_resume(ap); 1841 } 1842 1843 static void ahci_port_stop(struct ata_port *ap) 1844 { 1845 const char *emsg = NULL; 1846 int rc; 1847 1848 /* de-initialize port */ 1849 rc = ahci_deinit_port(ap, &emsg); 1850 if (rc) 1851 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc); 1852 } 1853 1854 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac) 1855 { 1856 int rc; 1857 1858 if (using_dac && 1859 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 1860 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 1861 if (rc) { 1862 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 1863 if (rc) { 1864 dev_printk(KERN_ERR, &pdev->dev, 1865 "64-bit DMA enable failed\n"); 1866 return rc; 1867 } 1868 } 1869 } else { 1870 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 1871 if (rc) { 1872 dev_printk(KERN_ERR, &pdev->dev, 1873 "32-bit DMA enable failed\n"); 1874 return rc; 1875 } 1876 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 1877 if (rc) { 1878 dev_printk(KERN_ERR, &pdev->dev, 1879 "32-bit consistent DMA enable failed\n"); 1880 return rc; 1881 } 1882 } 1883 return 0; 1884 } 1885 1886 static void ahci_print_info(struct ata_host *host) 1887 { 1888 struct ahci_host_priv *hpriv = host->private_data; 1889 struct pci_dev *pdev = to_pci_dev(host->dev); 1890 void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; 1891 u32 vers, cap, impl, speed; 1892 const char *speed_s; 1893 u16 cc; 1894 const char *scc_s; 1895 1896 vers = readl(mmio + HOST_VERSION); 1897 cap = hpriv->cap; 1898 impl = hpriv->port_map; 1899 1900 speed = (cap >> 20) & 0xf; 1901 if (speed == 1) 1902 speed_s = "1.5"; 1903 else if (speed == 2) 1904 speed_s = "3"; 1905 else 1906 speed_s = "?"; 1907 1908 pci_read_config_word(pdev, 0x0a, &cc); 1909 if (cc == PCI_CLASS_STORAGE_IDE) 1910 scc_s = "IDE"; 1911 else if (cc == PCI_CLASS_STORAGE_SATA) 1912 scc_s = "SATA"; 1913 else if (cc == PCI_CLASS_STORAGE_RAID) 1914 scc_s = "RAID"; 1915 else 1916 scc_s = "unknown"; 1917 1918 dev_printk(KERN_INFO, &pdev->dev, 1919 "AHCI %02x%02x.%02x%02x " 1920 "%u slots %u ports %s Gbps 0x%x impl %s mode\n" 1921 , 1922 1923 (vers >> 24) & 0xff, 1924 (vers >> 16) & 0xff, 1925 (vers >> 8) & 0xff, 1926 vers & 0xff, 1927 1928 ((cap >> 8) & 0x1f) + 1, 1929 (cap & 0x1f) + 1, 1930 speed_s, 1931 impl, 1932 scc_s); 1933 1934 dev_printk(KERN_INFO, &pdev->dev, 1935 "flags: " 1936 "%s%s%s%s%s%s%s" 1937 "%s%s%s%s%s%s%s\n" 1938 , 1939 1940 cap & (1 << 31) ? "64bit " : "", 1941 cap & (1 << 30) ? "ncq " : "", 1942 cap & (1 << 29) ? "sntf " : "", 1943 cap & (1 << 28) ? "ilck " : "", 1944 cap & (1 << 27) ? "stag " : "", 1945 cap & (1 << 26) ? "pm " : "", 1946 cap & (1 << 25) ? "led " : "", 1947 1948 cap & (1 << 24) ? "clo " : "", 1949 cap & (1 << 19) ? "nz " : "", 1950 cap & (1 << 18) ? "only " : "", 1951 cap & (1 << 17) ? "pmp " : "", 1952 cap & (1 << 15) ? "pio " : "", 1953 cap & (1 << 14) ? "slum " : "", 1954 cap & (1 << 13) ? 
"part " : "" 1955 ); 1956 } 1957 1958 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1959 { 1960 static int printed_version; 1961 struct ata_port_info pi = ahci_port_info[ent->driver_data]; 1962 const struct ata_port_info *ppi[] = { &pi, NULL }; 1963 struct device *dev = &pdev->dev; 1964 struct ahci_host_priv *hpriv; 1965 struct ata_host *host; 1966 int i, rc; 1967 1968 VPRINTK("ENTER\n"); 1969 1970 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS); 1971 1972 if (!printed_version++) 1973 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 1974 1975 /* acquire resources */ 1976 rc = pcim_enable_device(pdev); 1977 if (rc) 1978 return rc; 1979 1980 rc = pcim_iomap_regions(pdev, 1 << AHCI_PCI_BAR, DRV_NAME); 1981 if (rc == -EBUSY) 1982 pcim_pin_device(pdev); 1983 if (rc) 1984 return rc; 1985 1986 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); 1987 if (!hpriv) 1988 return -ENOMEM; 1989 hpriv->flags |= (unsigned long)pi.private_data; 1990 1991 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev)) 1992 pci_intx(pdev, 1); 1993 1994 /* save initial config */ 1995 ahci_save_initial_config(pdev, hpriv); 1996 1997 /* prepare host */ 1998 if (hpriv->cap & HOST_CAP_NCQ) 1999 pi.flags |= ATA_FLAG_NCQ; 2000 2001 if (hpriv->cap & HOST_CAP_PMP) 2002 pi.flags |= ATA_FLAG_PMP; 2003 2004 host = ata_host_alloc_pinfo(&pdev->dev, ppi, fls(hpriv->port_map)); 2005 if (!host) 2006 return -ENOMEM; 2007 host->iomap = pcim_iomap_table(pdev); 2008 host->private_data = hpriv; 2009 2010 for (i = 0; i < host->n_ports; i++) { 2011 struct ata_port *ap = host->ports[i]; 2012 void __iomem *port_mmio = ahci_port_base(ap); 2013 2014 ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar"); 2015 ata_port_pbar_desc(ap, AHCI_PCI_BAR, 2016 0x100 + ap->port_no * 0x80, "port"); 2017 2018 /* standard SATA port setup */ 2019 if (hpriv->port_map & (1 << i)) 2020 ap->ioaddr.cmd_addr = port_mmio; 2021 2022 /* disabled/not-implemented port */ 2023 else 2024 ap->ops = &ata_dummy_port_ops; 2025 } 2026 2027 /* initialize adapter */ 2028 rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64); 2029 if (rc) 2030 return rc; 2031 2032 rc = ahci_reset_controller(host); 2033 if (rc) 2034 return rc; 2035 2036 ahci_init_controller(host); 2037 ahci_print_info(host); 2038 2039 pci_set_master(pdev); 2040 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED, 2041 &ahci_sht); 2042 } 2043 2044 static int __init ahci_init(void) 2045 { 2046 return pci_register_driver(&ahci_pci_driver); 2047 } 2048 2049 static void __exit ahci_exit(void) 2050 { 2051 pci_unregister_driver(&ahci_pci_driver); 2052 } 2053 2054 2055 MODULE_AUTHOR("Jeff Garzik"); 2056 MODULE_DESCRIPTION("AHCI SATA low-level driver"); 2057 MODULE_LICENSE("GPL"); 2058 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl); 2059 MODULE_VERSION(DRV_VERSION); 2060 2061 module_init(ahci_init); 2062 module_exit(ahci_exit); 2063