/*
 *  ahci.c - AHCI SATA support
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2004-2005 Red Hat, Inc.
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * AHCI hardware documentation:
 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"ahci"
#define DRV_VERSION	"3.0"

static int ahci_skip_host_reset;
module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");

static int ahci_enable_alpm(struct ata_port *ap,
		enum link_pm policy);
static void ahci_disable_alpm(struct ata_port *ap);

enum {
	AHCI_PCI_BAR		= 5,
	AHCI_MAX_PORTS		= 32,
	AHCI_MAX_SG		= 168, /* hardware max is 64K */
	AHCI_DMA_BOUNDARY	= 0xffffffff,
	AHCI_USE_CLUSTERING	= 1,
	AHCI_MAX_CMDS		= 32,
	AHCI_CMD_SZ		= 32,
	AHCI_CMD_SLOT_SZ	= AHCI_MAX_CMDS * AHCI_CMD_SZ,
	AHCI_RX_FIS_SZ		= 256,
	AHCI_CMD_TBL_CDB	= 0x40,
	AHCI_CMD_TBL_HDR_SZ	= 0x80,
	AHCI_CMD_TBL_SZ		= AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
	AHCI_CMD_TBL_AR_SZ	= AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
	AHCI_PORT_PRIV_DMA_SZ	= AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
				  AHCI_RX_FIS_SZ,
	AHCI_IRQ_ON_SG		= (1 << 31),
	AHCI_CMD_ATAPI		= (1 << 5),
	AHCI_CMD_WRITE		= (1 << 6),
	AHCI_CMD_PREFETCH	= (1 << 7),
	AHCI_CMD_RESET		= (1 << 8),
	AHCI_CMD_CLR_BUSY	= (1 << 10),

	RX_FIS_D2H_REG		= 0x40,	/* offset of D2H Register FIS data */
	RX_FIS_SDB		= 0x58, /* offset of SDB FIS data */
	RX_FIS_UNK		= 0x60, /* offset of Unknown FIS data */

	board_ahci		= 0,
	board_ahci_vt8251	= 1,
	board_ahci_ign_iferr	= 2,
	board_ahci_sb600	= 3,
	board_ahci_mv		= 4,
	board_ahci_sb700	= 5,

	/* global controller registers */
	HOST_CAP		= 0x00, /* host capabilities */
	HOST_CTL		= 0x04, /* global host control */
	HOST_IRQ_STAT		= 0x08, /* interrupt status */
	HOST_PORTS_IMPL		= 0x0c, /* bitmap of implemented ports */
	HOST_VERSION		= 0x10, /* AHCI spec. version compliancy */

	/* HOST_CTL bits */
	HOST_RESET		= (1 << 0),  /* reset controller; self-clear */
	HOST_IRQ_EN		= (1 << 1),  /* global IRQ enable */
	HOST_AHCI_EN		= (1 << 31), /* AHCI enabled */

	/* HOST_CAP bits */
	HOST_CAP_SSC		= (1 << 14), /* Slumber capable */
	HOST_CAP_PMP		= (1 << 17), /* Port Multiplier support */
	HOST_CAP_CLO		= (1 << 24), /* Command List Override support */
	HOST_CAP_ALPM		= (1 << 26), /* Aggressive Link PM support */
	HOST_CAP_SSS		= (1 << 27), /* Staggered Spin-up */
	HOST_CAP_SNTF		= (1 << 29), /* SNotification register */
	HOST_CAP_NCQ		= (1 << 30), /* Native Command Queueing */
	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */

	/* registers for each SATA port */
	PORT_LST_ADDR		= 0x00, /* command list DMA addr */
	PORT_LST_ADDR_HI	= 0x04, /* command list DMA addr hi */
	PORT_FIS_ADDR		= 0x08, /* FIS rx buf addr */
	PORT_FIS_ADDR_HI	= 0x0c, /* FIS rx buf addr hi */
	PORT_IRQ_STAT		= 0x10, /* interrupt status */
	PORT_IRQ_MASK		= 0x14, /* interrupt enable/disable mask */
	PORT_CMD		= 0x18, /* port command */
	PORT_TFDATA		= 0x20,	/* taskfile data */
	PORT_SIG		= 0x24,	/* device TF signature */
	PORT_CMD_ISSUE		= 0x38, /* command issue */
	PORT_SCR_STAT		= 0x28, /* SATA phy register: SStatus */
	PORT_SCR_CTL		= 0x2c, /* SATA phy register: SControl */
	PORT_SCR_ERR		= 0x30, /* SATA phy register: SError */
	PORT_SCR_ACT		= 0x34, /* SATA phy register: SActive */
	PORT_SCR_NTF		= 0x3c, /* SATA phy register: SNotification */

	/* PORT_IRQ_{STAT,MASK} bits */
	PORT_IRQ_COLD_PRES	= (1 << 31), /* cold presence detect */
	PORT_IRQ_TF_ERR		= (1 << 30), /* task file error */
	PORT_IRQ_HBUS_ERR	= (1 << 29), /* host bus fatal error */
	PORT_IRQ_HBUS_DATA_ERR	= (1 << 28), /* host bus data error */
	PORT_IRQ_IF_ERR		= (1 << 27), /* interface fatal error */
	PORT_IRQ_IF_NONFATAL	= (1 << 26), /* interface non-fatal error */
	PORT_IRQ_OVERFLOW	= (1 << 24), /* xfer exhausted available S/G */
	PORT_IRQ_BAD_PMP	= (1 << 23), /* incorrect port multiplier */

	PORT_IRQ_PHYRDY		= (1 << 22), /* PhyRdy changed */
	PORT_IRQ_DEV_ILCK	= (1 << 7),  /* device interlock */
	PORT_IRQ_CONNECT	= (1 << 6),  /* port connect change status */
	PORT_IRQ_SG_DONE	= (1 << 5),  /* descriptor processed */
	PORT_IRQ_UNK_FIS	= (1 << 4),  /* unknown FIS rx'd */
	PORT_IRQ_SDB_FIS	= (1 << 3),  /* Set Device Bits FIS rx'd */
	PORT_IRQ_DMAS_FIS	= (1 << 2),  /* DMA Setup FIS rx'd */
	PORT_IRQ_PIOS_FIS	= (1 << 1),  /* PIO Setup FIS rx'd */
	PORT_IRQ_D2H_REG_FIS	= (1 << 0),  /* D2H Register FIS rx'd */

	PORT_IRQ_FREEZE		= PORT_IRQ_HBUS_ERR |
				  PORT_IRQ_IF_ERR |
				  PORT_IRQ_CONNECT |
				  PORT_IRQ_PHYRDY |
				  PORT_IRQ_UNK_FIS |
				  PORT_IRQ_BAD_PMP,
	PORT_IRQ_ERROR		= PORT_IRQ_FREEZE |
				  PORT_IRQ_TF_ERR |
				  PORT_IRQ_HBUS_DATA_ERR,
	DEF_PORT_IRQ		= PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
				  PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
				  PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,

	/* PORT_CMD bits */
	PORT_CMD_ASP		= (1 << 27), /* Aggressive Slumber/Partial */
	PORT_CMD_ALPE		= (1 << 26), /* Aggressive Link PM enable */
	PORT_CMD_ATAPI		= (1 << 24), /* Device is ATAPI */
	PORT_CMD_PMP		= (1 << 17), /* PMP attached */
	PORT_CMD_LIST_ON	= (1 << 15), /* cmd list DMA engine running */
	PORT_CMD_FIS_ON		= (1 << 14), /* FIS DMA engine running */
	PORT_CMD_FIS_RX		= (1 << 4),  /* Enable FIS receive DMA engine */
	PORT_CMD_CLO		= (1 << 3),  /* Command list override */
	PORT_CMD_POWER_ON	= (1 << 2),  /* Power up device */
	PORT_CMD_SPIN_UP	= (1 << 1),  /* Spin up device */
	PORT_CMD_START		= (1 << 0),  /* Enable port DMA engine */

	PORT_CMD_ICC_MASK	= (0xf << 28), /* i/f ICC state mask */
	PORT_CMD_ICC_ACTIVE	= (0x1 << 28), /* Put i/f in active state */
	PORT_CMD_ICC_PARTIAL	= (0x2 << 28), /* Put i/f in partial state */
	PORT_CMD_ICC_SLUMBER	= (0x6 << 28), /* Put i/f in slumber state */

	/* hpriv->flags bits */
	AHCI_HFLAG_NO_NCQ		= (1 << 0),
	AHCI_HFLAG_IGN_IRQ_IF_ERR	= (1 << 1), /* ignore IRQ_IF_ERR */
	AHCI_HFLAG_IGN_SERR_INTERNAL	= (1 << 2), /* ignore SERR_INTERNAL */
	AHCI_HFLAG_32BIT_ONLY		= (1 << 3), /* force 32bit */
	AHCI_HFLAG_MV_PATA		= (1 << 4), /* PATA port */
	AHCI_HFLAG_NO_MSI		= (1 << 5), /* no PCI MSI */
	AHCI_HFLAG_NO_PMP		= (1 << 6), /* no PMP */
	AHCI_HFLAG_NO_HOTPLUG		= (1 << 7), /* ignore PxSERR.DIAG.N */
	AHCI_HFLAG_SECT255		= (1 << 8), /* max 255 sectors */

	/* ap->flags bits */

	AHCI_FLAG_COMMON		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
					  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
					  ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
					  ATA_FLAG_IPM,
	AHCI_LFLAG_COMMON		= ATA_LFLAG_SKIP_D2H_BSY,

	ICH_MAP				= 0x90, /* ICH MAP register */
};

struct ahci_cmd_hdr {
	__le32			opts;
	__le32			status;
	__le32			tbl_addr;
	__le32			tbl_addr_hi;
	__le32			reserved[4];
};

struct ahci_sg {
	__le32			addr;
	__le32			addr_hi;
	__le32			reserved;
	__le32			flags_size;
};

struct ahci_host_priv {
	unsigned int		flags;		/* AHCI_HFLAG_* */
	u32			cap;		/* cap to use */
	u32			port_map;	/* port map to use */
	u32			saved_cap;	/* saved initial cap */
	u32			saved_port_map;	/* saved initial port_map */
};

struct ahci_port_priv {
	struct ata_link		*active_link;
	struct ahci_cmd_hdr	*cmd_slot;
	dma_addr_t		cmd_slot_dma;
	void			*cmd_tbl;
	dma_addr_t		cmd_tbl_dma;
	void			*rx_fis;
	dma_addr_t		rx_fis_dma;
	/* for NCQ spurious interrupt analysis */
	unsigned int		ncq_saw_d2h:1;
	unsigned int		ncq_saw_dmas:1;
	unsigned int		ncq_saw_sdb:1;
	u32			intr_mask;	/* interrupts to enable */
};

static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
static void ahci_irq_clear(struct ata_port *ap);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static void ahci_qc_prep(struct ata_queued_cmd *qc);
static u8 ahci_check_status(struct ata_port *ap);
static void ahci_freeze(struct ata_port *ap);
static void ahci_thaw(struct ata_port *ap);
static void ahci_pmp_attach(struct ata_port *ap);
static void ahci_pmp_detach(struct ata_port *ap);
static void ahci_error_handler(struct ata_port *ap);
static void ahci_vt8251_error_handler(struct ata_port *ap);
static void ahci_p5wdh_error_handler(struct ata_port *ap);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
static int ahci_port_resume(struct ata_port *ap);
static void ahci_dev_config(struct ata_device *dev);
static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl);
static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
			       u32 opts);
#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int ahci_pci_device_resume(struct pci_dev *pdev);
#endif

static struct class_device_attribute *ahci_shost_attrs[] = {
	&class_device_attr_link_power_management_policy,
	NULL
};

static struct scsi_host_template ahci_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.can_queue		= AHCI_MAX_CMDS - 1,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= AHCI_MAX_SG,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= AHCI_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= AHCI_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.shost_attrs		= ahci_shost_attrs,
};

static const struct ata_port_operations ahci_ops = {
	.check_status		= ahci_check_status,
	.check_altstatus	= ahci_check_status,
	.dev_select		= ata_noop_dev_select,

	.dev_config		= ahci_dev_config,

	.tf_read		= ahci_tf_read,

	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
	.qc_prep		= ahci_qc_prep,
	.qc_issue		= ahci_qc_issue,

	.irq_clear		= ahci_irq_clear,

	.scr_read		= ahci_scr_read,
	.scr_write		= ahci_scr_write,

	.freeze			= ahci_freeze,
	.thaw			= ahci_thaw,

	.error_handler		= ahci_error_handler,
	.post_internal_cmd	= ahci_post_internal_cmd,

	.pmp_attach		= ahci_pmp_attach,
	.pmp_detach		= ahci_pmp_detach,

#ifdef CONFIG_PM
	.port_suspend		= ahci_port_suspend,
	.port_resume		= ahci_port_resume,
#endif
	.enable_pm		= ahci_enable_alpm,
	.disable_pm		= ahci_disable_alpm,

	.port_start		= ahci_port_start,
	.port_stop		= ahci_port_stop,
};

static const struct ata_port_operations ahci_vt8251_ops = {
	.check_status		= ahci_check_status,
	.check_altstatus	= ahci_check_status,
	.dev_select		= ata_noop_dev_select,

	.tf_read		= ahci_tf_read,

	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
	.qc_prep		= ahci_qc_prep,
	.qc_issue		= ahci_qc_issue,

	.irq_clear		= ahci_irq_clear,

	.scr_read		= ahci_scr_read,
	.scr_write		= ahci_scr_write,

	.freeze			= ahci_freeze,
	.thaw			= ahci_thaw,

	.error_handler		= ahci_vt8251_error_handler,
	.post_internal_cmd	= ahci_post_internal_cmd,

	.pmp_attach		= ahci_pmp_attach,
	.pmp_detach		= ahci_pmp_detach,

#ifdef CONFIG_PM
	.port_suspend		= ahci_port_suspend,
	.port_resume		= ahci_port_resume,
#endif

	.port_start		= ahci_port_start,
	.port_stop		= ahci_port_stop,
};

static const struct ata_port_operations ahci_p5wdh_ops = {
	.check_status		= ahci_check_status,
	.check_altstatus	= ahci_check_status,
	.dev_select		= ata_noop_dev_select,

	.tf_read		= ahci_tf_read,

	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
	.qc_prep		= ahci_qc_prep,
	.qc_issue		= ahci_qc_issue,

	.irq_clear		= ahci_irq_clear,

	.scr_read		= ahci_scr_read,
	.scr_write		= ahci_scr_write,

	.freeze			= ahci_freeze,
	.thaw			= ahci_thaw,

	.error_handler		= ahci_p5wdh_error_handler,
	.post_internal_cmd	= ahci_post_internal_cmd,

	.pmp_attach		= ahci_pmp_attach,
	.pmp_detach		= ahci_pmp_detach,

#ifdef CONFIG_PM
	.port_suspend		= ahci_port_suspend,
	.port_resume		= ahci_port_resume,
#endif

	.port_start		= ahci_port_start,
	.port_stop		= ahci_port_stop,
};

#define AHCI_HFLAGS(flags)	.private_data	= (void *)(flags)

static const struct ata_port_info ahci_port_info[] = {
	/* board_ahci */
	{
		.flags		= AHCI_FLAG_COMMON,
		.link_flags	= AHCI_LFLAG_COMMON,
		.pio_mask	= 0x1f, /* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ahci_ops,
	},
	/* board_ahci_vt8251 */
	{
		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
		.flags		= AHCI_FLAG_COMMON,
		.link_flags	= AHCI_LFLAG_COMMON | ATA_LFLAG_HRST_TO_RESUME,
		.pio_mask	= 0x1f, /* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ahci_vt8251_ops,
	},
	/* board_ahci_ign_iferr */
	{
		AHCI_HFLAGS	(AHCI_HFLAG_IGN_IRQ_IF_ERR),
		.flags		= AHCI_FLAG_COMMON,
		.link_flags	= AHCI_LFLAG_COMMON,
		.pio_mask	= 0x1f, /* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ahci_ops,
	},
	/* board_ahci_sb600 */
	{
		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL |
				 AHCI_HFLAG_32BIT_ONLY |
				 AHCI_HFLAG_SECT255 | AHCI_HFLAG_NO_PMP),
		.flags		= AHCI_FLAG_COMMON,
		.link_flags	= AHCI_LFLAG_COMMON,
		.pio_mask	= 0x1f, /* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ahci_ops,
	},
	/* board_ahci_mv */
	{
		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
				 AHCI_HFLAG_MV_PATA),
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
		.link_flags	= AHCI_LFLAG_COMMON,
		.pio_mask	= 0x1f, /* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ahci_ops,
	},
	/* board_ahci_sb700 */
	{
		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL |
				 AHCI_HFLAG_NO_PMP),
		.flags		= AHCI_FLAG_COMMON,
		.link_flags	= AHCI_LFLAG_COMMON,
		.pio_mask	= 0x1f, /* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ahci_ops,
	},
};

static const struct pci_device_id ahci_pci_tbl[] = {
	/* Intel */
	{ PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
	{ PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
	{ PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
	{ PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
	{ PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
	{ PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
	{ PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
	{ PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
	{ PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
	{ PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
	{ PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
	{ PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
	{ PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
	{ PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
	{ PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
	{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
	{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
	{ PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */

	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },

	/* ATI */
	{ PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
	{ PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
	{ PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
	{ PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
	{ PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
	{ PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
	{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */

	/* VIA */
	{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
	{ PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */

	/* NVIDIA */
	{ PCI_VDEVICE(NVIDIA, 0x044c), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x044d), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x044e), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x044f), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045c), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x0550), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0551), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0552), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0553), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0554), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0555), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0556), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0557), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0558), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0559), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x055a), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x055b), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0bc8), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bc9), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bca), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bcb), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bcc), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bcd), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bce), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bcf), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bd0), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bd1), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bd2), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bd3), board_ahci }, /* MCP7B */

	/* SiS */
	{ PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
	{ PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 966 */
	{ PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */

	/* Marvell */
	{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
	{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */

	/* Generic, PCI class code for AHCI */
	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },

	{ }	/* terminate list */
};


static struct pci_driver ahci_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= ahci_pci_tbl,
	.probe			= ahci_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend		= ahci_pci_device_suspend,
	.resume			= ahci_pci_device_resume,
#endif
};


static inline int ahci_nr_ports(u32 cap)
{
	return (cap & 0x1f) + 1;
}

static inline void __iomem *__ahci_port_base(struct ata_host *host,
					     unsigned int port_no)
{
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];

	return mmio + 0x100 + (port_no * 0x80);
}

static inline void __iomem *ahci_port_base(struct ata_port *ap)
{
	return __ahci_port_base(ap->host, ap->port_no);
}

static void ahci_enable_ahci(void __iomem *mmio)
{
	u32 tmp;

	/* turn on AHCI_EN */
	tmp = readl(mmio + HOST_CTL);
	if (!(tmp & HOST_AHCI_EN)) {
		tmp |= HOST_AHCI_EN;
		writel(tmp, mmio + HOST_CTL);
		tmp = readl(mmio + HOST_CTL);	/* flush && sanity check */
		WARN_ON(!(tmp & HOST_AHCI_EN));
	}
}

/**
 * ahci_save_initial_config - Save and fixup initial config values
 * @pdev: target PCI device
 * @hpriv: host private area to store config values
 *
 * Some registers containing configuration info might be setup by
 * BIOS and might be cleared on reset.  This function saves the
 * initial values of those registers into @hpriv such that they
 * can be restored after controller reset.
 *
 * If inconsistent, config values are fixed up by this function.
 *
 * LOCKING:
 * None.
 */
static void ahci_save_initial_config(struct pci_dev *pdev,
				     struct ahci_host_priv *hpriv)
{
	void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
	u32 cap, port_map;
	int i;
	int mv;

	/* make sure AHCI mode is enabled before accessing CAP */
	ahci_enable_ahci(mmio);

	/* Values prefixed with saved_ are written back to host after
	 * reset.  Values without are used for driver operation.
	 */
	hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
	hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);

	/* some chips have errata preventing 64bit use */
	if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
		dev_printk(KERN_INFO, &pdev->dev,
			   "controller can't do 64bit DMA, forcing 32bit\n");
		cap &= ~HOST_CAP_64;
	}

	if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
		dev_printk(KERN_INFO, &pdev->dev,
			   "controller can't do NCQ, turning off CAP_NCQ\n");
		cap &= ~HOST_CAP_NCQ;
	}

	if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
		dev_printk(KERN_INFO, &pdev->dev,
			   "controller can't do PMP, turning off CAP_PMP\n");
		cap &= ~HOST_CAP_PMP;
	}

	/*
	 * Temporary Marvell 6145 hack: PATA port presence
	 * is asserted through the standard AHCI port
	 * presence register, as bit 4 (counting from 0)
	 */
	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
		if (pdev->device == 0x6121)
			mv = 0x3;
		else
			mv = 0xf;
		dev_printk(KERN_ERR, &pdev->dev,
			   "MV_AHCI HACK: port_map %x -> %x\n",
			   port_map,
			   port_map & mv);

		port_map &= mv;
	}

	/* cross check port_map and cap.n_ports */
	if (port_map) {
		int map_ports = 0;

		for (i = 0; i < AHCI_MAX_PORTS; i++)
			if (port_map & (1 << i))
				map_ports++;

		/* If PI has more ports than n_ports, whine, clear
		 * port_map and let it be generated from n_ports.
		 */
		if (map_ports > ahci_nr_ports(cap)) {
			dev_printk(KERN_WARNING, &pdev->dev,
				   "implemented port map (0x%x) contains more "
				   "ports than nr_ports (%u), using nr_ports\n",
				   port_map, ahci_nr_ports(cap));
			port_map = 0;
		}
	}

	/* fabricate port_map from cap.nr_ports */
	if (!port_map) {
		port_map = (1 << ahci_nr_ports(cap)) - 1;
		dev_printk(KERN_WARNING, &pdev->dev,
			   "forcing PORTS_IMPL to 0x%x\n", port_map);

		/* write the fixed up value to the PI register */
		hpriv->saved_port_map = port_map;
	}

	/* record values to use during operation */
	hpriv->cap = cap;
	hpriv->port_map = port_map;
}

/**
 * ahci_restore_initial_config - Restore initial config
 * @host: target ATA host
 *
 * Restore initial config stored by ahci_save_initial_config().
 *
 * LOCKING:
 * None.
 */
static void ahci_restore_initial_config(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];

	writel(hpriv->saved_cap, mmio + HOST_CAP);
	writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
	(void) readl(mmio + HOST_PORTS_IMPL);	/* flush */
}

static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
{
	static const int offset[] = {
		[SCR_STATUS]		= PORT_SCR_STAT,
		[SCR_CONTROL]		= PORT_SCR_CTL,
		[SCR_ERROR]		= PORT_SCR_ERR,
		[SCR_ACTIVE]		= PORT_SCR_ACT,
		[SCR_NOTIFICATION]	= PORT_SCR_NTF,
	};
	struct ahci_host_priv *hpriv = ap->host->private_data;

	if (sc_reg < ARRAY_SIZE(offset) &&
	    (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
		return offset[sc_reg];
	return 0;
}

static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	int offset = ahci_scr_offset(ap, sc_reg);

	if (offset) {
		*val = readl(port_mmio + offset);
		return 0;
	}
	return -EINVAL;
}

static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	int offset = ahci_scr_offset(ap, sc_reg);

	if (offset) {
		writel(val, port_mmio + offset);
		return 0;
	}
	return -EINVAL;
}

static void ahci_start_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	/* start DMA */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);
	readl(port_mmio + PORT_CMD); /* flush */
}

static int ahci_stop_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	tmp = readl(port_mmio + PORT_CMD);

	/* check if the HBA is idle */
	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
		return 0;

	/* setting HBA to idle */
	tmp &= ~PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for engine to stop.  This could be as long as 500 msec */
	tmp = ata_wait_register(port_mmio + PORT_CMD,
				PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
	if (tmp & PORT_CMD_LIST_ON)
		return -EIO;

	return 0;
}

static void ahci_start_fis_rx(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	u32 tmp;

	/* set FIS registers */
	if (hpriv->cap & HOST_CAP_64)
		writel((pp->cmd_slot_dma >> 16) >> 16,
		       port_mmio + PORT_LST_ADDR_HI);
	writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);

	if (hpriv->cap & HOST_CAP_64)
		writel((pp->rx_fis_dma >> 16) >> 16,
		       port_mmio + PORT_FIS_ADDR_HI);
	writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);

	/* enable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* flush */
	readl(port_mmio + PORT_CMD);
}

static int ahci_stop_fis_rx(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	/* disable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp &= ~PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for completion, spec says 500ms, give it 1000 */
	tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
				PORT_CMD_FIS_ON, 10, 1000);
	if (tmp & PORT_CMD_FIS_ON)
		return -EBUSY;

	return 0;
}

static void ahci_power_up(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;

	/* spin up device */
	if (hpriv->cap & HOST_CAP_SSS) {
		cmd |= PORT_CMD_SPIN_UP;
		writel(cmd, port_mmio + PORT_CMD);
	}

	/* wake up link */
	writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
}

static void ahci_disable_alpm(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;
	struct ahci_port_priv *pp = ap->private_data;

	/* IPM bits should be disabled by libata-core */
	/* get the existing command bits */
	cmd = readl(port_mmio + PORT_CMD);

	/* disable ALPM and ASP */
	cmd &= ~PORT_CMD_ASP;
	cmd &= ~PORT_CMD_ALPE;

	/* force the interface back to active */
	cmd |= PORT_CMD_ICC_ACTIVE;

	/* write out new cmd value */
	writel(cmd, port_mmio + PORT_CMD);
	cmd = readl(port_mmio + PORT_CMD);

	/* wait 10ms to be sure we've come out of any low power state */
	msleep(10);

	/* clear out any PhyRdy stuff from interrupt status */
	writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);

	/* go ahead and clean out PhyRdy Change from Serror too */
	ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));

	/*
	 * Clear flag to indicate that we should ignore all PhyRdy
	 * state changes
	 */
	hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;

	/*
	 * Enable interrupts on Phy Ready.
	 */
	pp->intr_mask |= PORT_IRQ_PHYRDY;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);

	/*
	 * don't change the link pm policy - we can be called
	 * just to turn off link pm temporarily
	 */
}

static int ahci_enable_alpm(struct ata_port *ap,
	enum link_pm policy)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;
	struct ahci_port_priv *pp = ap->private_data;
	u32 asp;

	/* Make sure the host is capable of link power management */
	if (!(hpriv->cap & HOST_CAP_ALPM))
		return -EINVAL;

	switch (policy) {
	case MAX_PERFORMANCE:
	case NOT_AVAILABLE:
		/*
		 * if we came here with NOT_AVAILABLE,
		 * it just means this is the first time we
		 * have tried to enable - default to max performance,
		 * and let the user go to lower power modes on request.
		 */
		ahci_disable_alpm(ap);
		return 0;
	case MIN_POWER:
		/* configure HBA to enter SLUMBER */
		asp = PORT_CMD_ASP;
		break;
	case MEDIUM_POWER:
		/* configure HBA to enter PARTIAL */
		asp = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Disable interrupts on Phy Ready. This keeps us from
	 * getting woken up due to spurious phy ready interrupts
	 * TBD - Hot plug should be done via polling now, is
	 * that even supported?
	 */
	pp->intr_mask &= ~PORT_IRQ_PHYRDY;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);

	/*
	 * Set a flag to indicate that we should ignore all PhyRdy
	 * state changes since these can happen now whenever we
	 * change link state
	 */
	hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;

	/* get the existing command bits */
	cmd = readl(port_mmio + PORT_CMD);

	/*
	 * Set ASP based on Policy
	 */
	cmd |= asp;

	/*
	 * Setting this bit will instruct the HBA to aggressively
	 * enter a lower power link state when it's appropriate and
	 * based on the value set above for ASP
	 */
	cmd |= PORT_CMD_ALPE;

	/* write out new cmd value */
	writel(cmd, port_mmio + PORT_CMD);
	cmd = readl(port_mmio + PORT_CMD);

	/* IPM bits should be set by libata-core */
	return 0;
}

#ifdef CONFIG_PM
static void ahci_power_down(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd, scontrol;

	if (!(hpriv->cap & HOST_CAP_SSS))
		return;

	/* put device into listen mode, first set PxSCTL.DET to 0 */
	scontrol = readl(port_mmio + PORT_SCR_CTL);
	scontrol &= ~0xf;
	writel(scontrol, port_mmio + PORT_SCR_CTL);

	/* then set PxCMD.SUD to 0 */
	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
	cmd &= ~PORT_CMD_SPIN_UP;
	writel(cmd, port_mmio + PORT_CMD);
}
#endif

static void ahci_start_port(struct ata_port *ap)
{
	/* enable FIS reception */
	ahci_start_fis_rx(ap);

	/* enable DMA */
	ahci_start_engine(ap);
}

static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
{
	int rc;

	/* disable DMA */
	rc = ahci_stop_engine(ap);
	if (rc) {
		*emsg = "failed to stop engine";
		return rc;
	}

	/* disable FIS reception */
	rc = ahci_stop_fis_rx(ap);
	if (rc) {
		*emsg = "failed to stop FIS RX";
		return rc;
	}

	return 0;
}

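/* Reset the whole HBA: perform the global HOST_RESET handshake (unless the
 * skip_host_reset module parameter is set), re-enable AHCI mode and restore
 * the CAP/PI values saved by ahci_save_initial_config().
 */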
static int ahci_reset_controller(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	u32 tmp;

	/* we must be in AHCI mode, before using anything
	 * AHCI-specific, such as HOST_RESET.
	 */
	ahci_enable_ahci(mmio);

	/* global controller reset */
	if (!ahci_skip_host_reset) {
		tmp = readl(mmio + HOST_CTL);
		if ((tmp & HOST_RESET) == 0) {
			writel(tmp | HOST_RESET, mmio + HOST_CTL);
			readl(mmio + HOST_CTL); /* flush */
		}

		/* reset must complete within 1 second, or
		 * the hardware should be considered fried.
		 */
		ssleep(1);

		tmp = readl(mmio + HOST_CTL);
		if (tmp & HOST_RESET) {
			dev_printk(KERN_ERR, host->dev,
				   "controller reset failed (0x%x)\n", tmp);
			return -EIO;
		}

		/* turn on AHCI mode */
		ahci_enable_ahci(mmio);

		/* Some registers might be cleared on reset.  Restore
		 * initial values.
		 */
		ahci_restore_initial_config(host);
	} else
		dev_printk(KERN_INFO, host->dev,
			   "skipping global host reset\n");

	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
		u16 tmp16;

		/* configure PCS */
		pci_read_config_word(pdev, 0x92, &tmp16);
		if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
			tmp16 |= hpriv->port_map;
			pci_write_config_word(pdev, 0x92, tmp16);
		}
	}

	return 0;
}

static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
			   int port_no, void __iomem *mmio,
			   void __iomem *port_mmio)
{
	const char *emsg = NULL;
	int rc;
	u32 tmp;

	/* make sure port is not active */
	rc = ahci_deinit_port(ap, &emsg);
	if (rc)
		dev_printk(KERN_WARNING, &pdev->dev,
			   "%s (%d)\n", emsg, rc);

	/* clear SError */
	tmp = readl(port_mmio + PORT_SCR_ERR);
	VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
	writel(tmp, port_mmio + PORT_SCR_ERR);

	/* clear port IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
	if (tmp)
		writel(tmp, port_mmio + PORT_IRQ_STAT);

	writel(1 << port_no, mmio + HOST_IRQ_STAT);
}

static void ahci_init_controller(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	int i;
	void __iomem *port_mmio;
	u32 tmp;
	int mv;

	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
		if (pdev->device == 0x6121)
			mv = 2;
		else
			mv = 4;
		port_mmio = __ahci_port_base(host, mv);

		writel(0, port_mmio + PORT_IRQ_MASK);

		/* clear port IRQ */
		tmp = readl(port_mmio + PORT_IRQ_STAT);
		VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
		if (tmp)
			writel(tmp, port_mmio + PORT_IRQ_STAT);
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		port_mmio = ahci_port_base(ap);
		if (ata_port_is_dummy(ap))
			continue;

		ahci_port_init(pdev, ap, i, mmio, port_mmio);
	}

	tmp = readl(mmio + HOST_CTL);
	VPRINTK("HOST_CTL 0x%x\n", tmp);
	writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
	tmp = readl(mmio + HOST_CTL);
	VPRINTK("HOST_CTL 0x%x\n", tmp);
}

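/* Apply per-device quirks flagged in hpriv->flags; currently only the
 * SB600 limit of 255 sectors per command.
 */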
static void ahci_dev_config(struct ata_device *dev)
{
	struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;

	if (hpriv->flags & AHCI_HFLAG_SECT255) {
		dev->max_sectors = 255;
		ata_dev_printk(dev, KERN_INFO,
			       "SB600 AHCI: limiting to 255 sectors per cmd\n");
	}
}

static unsigned int ahci_dev_classify(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_taskfile tf;
	u32 tmp;

	tmp = readl(port_mmio + PORT_SIG);
	tf.lbah		= (tmp >> 24)	& 0xff;
	tf.lbam		= (tmp >> 16)	& 0xff;
	tf.lbal		= (tmp >> 8)	& 0xff;
	tf.nsect	= (tmp)		& 0xff;

	return ata_dev_classify(&tf);
}

static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
			       u32 opts)
{
	dma_addr_t cmd_tbl_dma;

	cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;

	pp->cmd_slot[tag].opts = cpu_to_le32(opts);
	pp->cmd_slot[tag].status = 0;
	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
}

static int ahci_kick_engine(struct ata_port *ap, int force_restart)
{
	void __iomem *port_mmio = ap->ioaddr.cmd_addr;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u32 tmp;
	int busy, rc;

	/* do we need to kick the port? */
	busy = ahci_check_status(ap) & (ATA_BUSY | ATA_DRQ);
	if (!busy && !force_restart)
		return 0;

	/* stop engine */
	rc = ahci_stop_engine(ap);
	if (rc)
		goto out_restart;

	/* need to do CLO? */
	if (!busy) {
		rc = 0;
		goto out_restart;
	}

	if (!(hpriv->cap & HOST_CAP_CLO)) {
		rc = -EOPNOTSUPP;
		goto out_restart;
	}

	/* perform CLO */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_CLO;
	writel(tmp, port_mmio + PORT_CMD);

	rc = 0;
	tmp = ata_wait_register(port_mmio + PORT_CMD,
				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
	if (tmp & PORT_CMD_CLO)
		rc = -EIO;

	/* restart engine */
 out_restart:
	ahci_start_engine(ap);
	return rc;
}

static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
				struct ata_taskfile *tf, int is_cmd, u16 flags,
				unsigned long timeout_msec)
{
	const u32 cmd_fis_len = 5; /* five dwords */
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u8 *fis = pp->cmd_tbl;
	u32 tmp;

	/* prep the command */
	ata_tf_to_fis(tf, pmp, is_cmd, fis);
	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));

	/* issue & wait */
	writel(1, port_mmio + PORT_CMD_ISSUE);

	if (timeout_msec) {
		tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
					1, timeout_msec);
		if (tmp & 0x1) {
			ahci_kick_engine(ap, 1);
			return -EBUSY;
		}
	} else
		readl(port_mmio + PORT_CMD_ISSUE);	/* flush */

	return 0;
}

static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
			     int pmp, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	const char *reason = NULL;
	unsigned long now, msecs;
	struct ata_taskfile tf;
	int rc;

	DPRINTK("ENTER\n");

	if (ata_link_offline(link)) {
		DPRINTK("PHY reports no device\n");
		*class = ATA_DEV_NONE;
		return 0;
	}

	/* prepare for SRST (AHCI-1.1 10.4.1) */
	rc = ahci_kick_engine(ap, 1);
	if (rc && rc != -EOPNOTSUPP)
		ata_link_printk(link, KERN_WARNING,
				"failed to reset engine (errno=%d)\n", rc);

	ata_tf_init(link->device, &tf);

	/* issue the first D2H Register FIS */
	msecs = 0;
	now = jiffies;
	if (time_after(deadline, now))
		msecs = jiffies_to_msecs(deadline - now);

	tf.ctl |= ATA_SRST;
	if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
				 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
		rc = -EIO;
		reason = "1st FIS failed";
		goto fail;
	}

	/* spec says at least 5us, but be generous and sleep for 1ms */
	msleep(1);

	/* issue the second D2H Register FIS */
	tf.ctl &= ~ATA_SRST;
	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);

	/* wait a while before checking status */
	ata_wait_after_reset(ap, deadline);

	rc = ata_wait_ready(ap, deadline);
	/* link occupied, -ENODEV too is an error */
	if (rc) {
		reason = "device not ready";
		goto fail;
	}
	*class = ahci_dev_classify(ap);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;

 fail:
	ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
	return rc;
}

static int ahci_softreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	int pmp = 0;

	if (link->ap->flags & ATA_FLAG_PMP)
		pmp = SATA_PMP_CTRL_PORT;

	return ahci_do_softreset(link, class, pmp, deadline);
}

static int ahci_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = 0x80;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	rc = sata_std_hardreset(link, class, deadline);

	ahci_start_engine(ap);

	if (rc == 0 && ata_link_online(link))
		*class = ahci_dev_classify(ap);
	if (rc != -EAGAIN && *class == ATA_DEV_UNKNOWN)
		*class = ATA_DEV_NONE;

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
	return rc;
}

static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
				 unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	u32 serror;
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(ap);

	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
				 deadline);

	/* vt8251 needs SError cleared for the port to operate */
	ahci_scr_read(ap, SCR_ERROR, &serror);
	ahci_scr_write(ap, SCR_ERROR, serror);

	ahci_start_engine(ap);

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);

	/* vt8251 doesn't clear BSY on signature FIS reception,
	 * request follow-up softreset.
	 */
	return rc ?: -EAGAIN;
}

static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	int rc;

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = 0x80;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
				 deadline);

	ahci_start_engine(ap);

	if (rc || ata_link_offline(link))
		return rc;

	/* spec mandates ">= 2ms" before checking status */
	msleep(150);

	/* The pseudo configuration device on SIMG4726 attached to
	 * ASUS P5W-DH Deluxe doesn't send signature FIS after
	 * hardreset if no device is attached to the first downstream
	 * port && the pseudo device locks up on SRST w/ PMP==0.  To
	 * work around this, wait for !BSY only briefly.  If BSY isn't
	 * cleared, perform CLO and proceed to IDENTIFY (achieved by
	 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
	 *
	 * Wait for two seconds.  Devices attached to downstream port
	 * which can't process the following IDENTIFY after this will
	 * have to be reset again.  For most cases, this should
	 * suffice while making probing snappish enough.
	 */
	rc = ata_wait_ready(ap, jiffies + 2 * HZ);
	if (rc)
		ahci_kick_engine(ap, 0);

	return 0;
}

static void ahci_postreset(struct ata_link *link, unsigned int *class)
{
	struct ata_port *ap = link->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 new_tmp, tmp;

	ata_std_postreset(link, class);

	/* Make sure port's ATAPI bit is set appropriately */
	new_tmp = tmp = readl(port_mmio + PORT_CMD);
	if (*class == ATA_DEV_ATAPI)
		new_tmp |= PORT_CMD_ATAPI;
	else
		new_tmp &= ~PORT_CMD_ATAPI;
	if (new_tmp != tmp) {
		writel(new_tmp, port_mmio + PORT_CMD);
		readl(port_mmio + PORT_CMD); /* flush */
	}
}

static int ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
{
	return ahci_do_softreset(link, class, link->pmp, deadline);
}

static u8 ahci_check_status(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr;

	return readl(mmio + PORT_TFDATA) & 0xFF;
}

static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;

	ata_tf_from_fis(d2h_fis, tf);
}

static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
{
	struct scatterlist *sg;
	struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
	unsigned int si;

	VPRINTK("ENTER\n");

	/*
	 * Next, the S/G list.
	 */
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
		ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
		ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
	}

	return si;
}

static void ahci_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ahci_port_priv *pp = ap->private_data;
	int is_atapi = ata_is_atapi(qc->tf.protocol);
	void *cmd_tbl;
	u32 opts;
	const u32 cmd_fis_len = 5; /* five dwords */
	unsigned int n_elem;

	/*
	 * Fill in command table information.  First, the header,
	 * a SATA Register - Host to Device command FIS.
	 */
	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;

	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
	if (is_atapi) {
		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
	}

	n_elem = 0;
	if (qc->flags & ATA_QCFLAG_DMAMAP)
		n_elem = ahci_fill_sg(qc, cmd_tbl);

	/*
	 * Fill in command slot information.
	 */
	opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		opts |= AHCI_CMD_WRITE;
	if (is_atapi)
		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;

	ahci_fill_cmd_slot(pp, qc->tag, opts);
}

static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_eh_info *host_ehi = &ap->link.eh_info;
	struct ata_link *link = NULL;
	struct ata_queued_cmd *active_qc;
	struct ata_eh_info *active_ehi;
	u32 serror;

	/* determine active link */
	ata_port_for_each_link(link, ap)
		if (ata_link_active(link))
			break;
	if (!link)
		link = &ap->link;

	active_qc = ata_qc_from_tag(ap, link->active_tag);
	active_ehi = &link->eh_info;

	/* record irq stat */
	ata_ehi_clear_desc(host_ehi);
	ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	ahci_scr_read(ap, SCR_ERROR, &serror);
	ahci_scr_write(ap, SCR_ERROR, serror);
	host_ehi->serror |= serror;

	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
	if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
		irq_stat &= ~PORT_IRQ_IF_ERR;

	if (irq_stat & PORT_IRQ_TF_ERR) {
		/* If qc is active, charge it; otherwise, the active
		 * link.  There's no active qc on NCQ errors.  It will
		 * be determined by EH by reading log page 10h.
		 */
		if (active_qc)
			active_qc->err_mask |= AC_ERR_DEV;
		else
			active_ehi->err_mask |= AC_ERR_DEV;

		if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
			host_ehi->serror &= ~SERR_INTERNAL;
	}

	if (irq_stat & PORT_IRQ_UNK_FIS) {
		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);

		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_SOFTRESET;
		ata_ehi_push_desc(active_ehi,
				  "unknown FIS %08x %08x %08x %08x",
				  unk[0], unk[1], unk[2], unk[3]);
	}

	if (ap->nr_pmp_links && (irq_stat & PORT_IRQ_BAD_PMP)) {
		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_SOFTRESET;
		ata_ehi_push_desc(active_ehi, "incorrect PMP");
	}

	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
		host_ehi->err_mask |= AC_ERR_HOST_BUS;
		host_ehi->action |= ATA_EH_SOFTRESET;
		ata_ehi_push_desc(host_ehi, "host bus error");
	}

	if (irq_stat & PORT_IRQ_IF_ERR) {
		host_ehi->err_mask |= AC_ERR_ATA_BUS;
		host_ehi->action |= ATA_EH_SOFTRESET;
		ata_ehi_push_desc(host_ehi, "interface fatal error");
	}

	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
		ata_ehi_hotplugged(host_ehi);
		ata_ehi_push_desc(host_ehi, "%s",
			irq_stat & PORT_IRQ_CONNECT ?
			"connection status changed" : "PHY RDY changed");
	}

	/* okay, let's hand over to EH */

	if (irq_stat & PORT_IRQ_FREEZE)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void ahci_port_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = ap->ioaddr.cmd_addr;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
	u32 status, qc_active;
	int rc;

	status = readl(port_mmio + PORT_IRQ_STAT);
	writel(status, port_mmio + PORT_IRQ_STAT);

	/* ignore BAD_PMP while resetting */
	if (unlikely(resetting))
		status &= ~PORT_IRQ_BAD_PMP;

	/* If we are getting PhyRdy, this is
	 * just a power state change, we should
	 * clear out this, plus the PhyRdy/Comm
	 * Wake bits from Serror
	 */
	if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
		(status & PORT_IRQ_PHYRDY)) {
		status &= ~PORT_IRQ_PHYRDY;
		ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
	}

	if (unlikely(status & PORT_IRQ_ERROR)) {
		ahci_error_intr(ap, status);
		return;
	}

	if (status & PORT_IRQ_SDB_FIS) {
		/* If SNotification is available, leave notification
		 * handling to sata_async_notification().  If not,
		 * emulate it by snooping SDB FIS RX area.
		 *
		 * Snooping FIS RX area is probably cheaper than
		 * poking SNotification but some controllers which
		 * implement SNotification, ICH9 for example, don't
		 * store AN SDB FIS into receive area.
		 */
		if (hpriv->cap & HOST_CAP_SNTF)
			sata_async_notification(ap);
		else {
			/* If the 'N' bit in word 0 of the FIS is set,
			 * we just received asynchronous notification.
			 * Tell libata about it.
			 */
			const __le32 *f = pp->rx_fis + RX_FIS_SDB;
			u32 f0 = le32_to_cpu(f[0]);

			if (f0 & (1 << 15))
				sata_async_notification(ap);
		}
	}

	/* pp->active_link is valid iff any command is in flight */
	if (ap->qc_active && pp->active_link->sactive)
		qc_active = readl(port_mmio + PORT_SCR_ACT);
	else
		qc_active = readl(port_mmio + PORT_CMD_ISSUE);

	rc = ata_qc_complete_multiple(ap, qc_active, NULL);

	/* while resetting, invalid completions are expected */
	if (unlikely(rc < 0 && !resetting)) {
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_SOFTRESET;
		ata_port_freeze(ap);
	}
}

static void ahci_irq_clear(struct ata_port *ap)
{
	/* TODO */
}

static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ahci_host_priv *hpriv;
	unsigned int i, handled = 0;
	void __iomem *mmio;
	u32 irq_stat, irq_ack = 0;

	VPRINTK("ENTER\n");

	hpriv = host->private_data;
	mmio = host->iomap[AHCI_PCI_BAR];

	/* sigh.  0xffffffff is a valid return from h/w */
	irq_stat = readl(mmio + HOST_IRQ_STAT);
	irq_stat &= hpriv->port_map;
	if (!irq_stat)
		return IRQ_NONE;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		if (!(irq_stat & (1 << i)))
			continue;

		ap = host->ports[i];
		if (ap) {
			ahci_port_intr(ap);
			VPRINTK("port %u\n", i);
		} else {
			VPRINTK("port %u (no irq)\n", i);
			if (ata_ratelimit())
				dev_printk(KERN_WARNING, host->dev,
					   "interrupt on disabled port %u\n", i);
		}

		irq_ack |= (1 << i);
	}

	if (irq_ack) {
		writel(irq_ack, mmio + HOST_IRQ_STAT);
		handled = 1;
	}

	spin_unlock(&host->lock);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}

static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;

	/* Keep track of the currently active link.  It will be used
	 * in completion path to determine whether NCQ phase is in
	 * progress.
static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;

	/* Keep track of the currently active link.  It will be used
	 * in completion path to determine whether NCQ phase is in
	 * progress.
	 */
	pp->active_link = qc->dev->link;

	if (qc->tf.protocol == ATA_PROT_NCQ)
		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
	readl(port_mmio + PORT_CMD_ISSUE);	/* flush */

	return 0;
}

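/* EH freeze/thaw hooks.  Freezing a port only masks its interrupts;
 * the controller may keep latching events in PORT_IRQ_STAT while the
 * port is frozen.  Thaw therefore clears whatever accumulated there,
 * acks the port's bit in HOST_IRQ_STAT, and only then restores the
 * saved interrupt mask so stale events don't fire immediately.
 */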
static void ahci_freeze(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);

	/* turn IRQ off */
	writel(0, port_mmio + PORT_IRQ_MASK);
}

static void ahci_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;
	struct ahci_port_priv *pp = ap->private_data;

	/* clear IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	writel(tmp, port_mmio + PORT_IRQ_STAT);
	writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);

	/* turn IRQ back on */
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static void ahci_error_handler(struct ata_port *ap)
{
	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
		/* restart engine */
		ahci_stop_engine(ap);
		ahci_start_engine(ap);
	}

	/* perform recovery */
	sata_pmp_do_eh(ap, ata_std_prereset, ahci_softreset,
		       ahci_hardreset, ahci_postreset,
		       sata_pmp_std_prereset, ahci_pmp_softreset,
		       sata_pmp_std_hardreset, sata_pmp_std_postreset);
}

static void ahci_vt8251_error_handler(struct ata_port *ap)
{
	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
		/* restart engine */
		ahci_stop_engine(ap);
		ahci_start_engine(ap);
	}

	/* perform recovery */
	ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_vt8251_hardreset,
		  ahci_postreset);
}

static void ahci_p5wdh_error_handler(struct ata_port *ap)
{
	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
		/* restart engine */
		ahci_stop_engine(ap);
		ahci_start_engine(ap);
	}

	/* perform recovery */
	ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_p5wdh_hardreset,
		  ahci_postreset);
}

static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		ahci_kick_engine(ap, 1);
}

static void ahci_pmp_attach(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD);
	cmd |= PORT_CMD_PMP;
	writel(cmd, port_mmio + PORT_CMD);

	pp->intr_mask |= PORT_IRQ_BAD_PMP;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static void ahci_pmp_detach(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD);
	cmd &= ~PORT_CMD_PMP;
	writel(cmd, port_mmio + PORT_CMD);

	pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static int ahci_port_resume(struct ata_port *ap)
{
	ahci_power_up(ap);
	ahci_start_port(ap);

	if (ap->nr_pmp_links)
		ahci_pmp_attach(ap);
	else
		ahci_pmp_detach(ap);

	return 0;
}

#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	const char *emsg = NULL;
	int rc;

	rc = ahci_deinit_port(ap, &emsg);
	if (rc == 0)
		ahci_power_down(ap);
	else {
		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
		ahci_start_port(ap);
	}

	return rc;
}

static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	u32 ctl;

	if (mesg.event & PM_EVENT_SLEEP) {
		/* AHCI spec rev1.1 section 8.3.3:
		 * Software must disable interrupts prior to requesting a
		 * transition of the HBA to D3 state.
		 */
		ctl = readl(mmio + HOST_CTL);
		ctl &= ~HOST_IRQ_EN;
		writel(ctl, mmio + HOST_CTL);
		readl(mmio + HOST_CTL); /* flush */
	}

	return ata_pci_device_suspend(pdev, mesg);
}

static int ahci_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		rc = ahci_reset_controller(host);
		if (rc)
			return rc;

		ahci_init_controller(host);
	}

	ata_host_resume(host);

	return 0;
}
#endif

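/* Per-port setup: allocate one coherent DMA buffer holding, in order,
 * the command list (AHCI_MAX_CMDS slots of AHCI_CMD_SZ bytes each,
 * 32 * 32 = 1024 bytes), the received-FIS area (AHCI_RX_FIS_SZ = 256
 * bytes) and the command tables (AHCI_MAX_CMDS tables of
 * AHCI_CMD_TBL_SZ = 0x80 + 168 * 16 = 2816 bytes each), which is what
 * AHCI_PORT_PRIV_DMA_SZ adds up to.  The command list and FIS area
 * addresses are the ones programmed into PORT_LST_ADDR and
 * PORT_FIS_ADDR when the port is started elsewhere in this driver.
 */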
static int ahci_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct ahci_port_priv *pp;
	void *mem;
	dma_addr_t mem_dma;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory: 32-slot command table,
	 * 32 bytes each in size
	 */
	pp->cmd_slot = mem;
	pp->cmd_slot_dma = mem_dma;

	mem += AHCI_CMD_SLOT_SZ;
	mem_dma += AHCI_CMD_SLOT_SZ;

	/*
	 * Second item: Received-FIS area
	 */
	pp->rx_fis = mem;
	pp->rx_fis_dma = mem_dma;

	mem += AHCI_RX_FIS_SZ;
	mem_dma += AHCI_RX_FIS_SZ;

	/*
	 * Third item: data area for storing a single command
	 * and its scatter-gather table
	 */
	pp->cmd_tbl = mem;
	pp->cmd_tbl_dma = mem_dma;

	/*
	 * Save off initial list of interrupts to be enabled.
	 * This could be changed later.
	 */
	pp->intr_mask = DEF_PORT_IRQ;

	ap->private_data = pp;

	/* engage engines, captain */
	return ahci_port_resume(ap);
}

static void ahci_port_stop(struct ata_port *ap)
{
	const char *emsg = NULL;
	int rc;

	/* de-initialize port */
	rc = ahci_deinit_port(ap, &emsg);
	if (rc)
		ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
}

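/* Set the PCI DMA masks.  If the HBA advertises 64-bit addressing
 * (HOST_CAP_64) and a 64-bit streaming mask is accepted, also try a
 * 64-bit consistent mask but quietly fall back to 32-bit for the
 * consistent allocations if that fails; otherwise limit both masks
 * to 32 bits.
 */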
static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
{
	int rc;

	if (using_dac &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}
	return 0;
}

static void ahci_print_info(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	u32 vers, cap, impl, speed;
	const char *speed_s;
	u16 cc;
	const char *scc_s;

	vers = readl(mmio + HOST_VERSION);
	cap = hpriv->cap;
	impl = hpriv->port_map;

	speed = (cap >> 20) & 0xf;
	if (speed == 1)
		speed_s = "1.5";
	else if (speed == 2)
		speed_s = "3";
	else
		speed_s = "?";

	pci_read_config_word(pdev, 0x0a, &cc);
	if (cc == PCI_CLASS_STORAGE_IDE)
		scc_s = "IDE";
	else if (cc == PCI_CLASS_STORAGE_SATA)
		scc_s = "SATA";
	else if (cc == PCI_CLASS_STORAGE_RAID)
		scc_s = "RAID";
	else
		scc_s = "unknown";

	dev_printk(KERN_INFO, &pdev->dev,
		"AHCI %02x%02x.%02x%02x "
		"%u slots %u ports %s Gbps 0x%x impl %s mode\n"
		,

		(vers >> 24) & 0xff,
		(vers >> 16) & 0xff,
		(vers >> 8) & 0xff,
		vers & 0xff,

		((cap >> 8) & 0x1f) + 1,
		(cap & 0x1f) + 1,
		speed_s,
		impl,
		scc_s);

	dev_printk(KERN_INFO, &pdev->dev,
		"flags: "
		"%s%s%s%s%s%s%s"
		"%s%s%s%s%s%s%s\n"
		,

		cap & (1 << 31) ? "64bit " : "",
		cap & (1 << 30) ? "ncq " : "",
		cap & (1 << 29) ? "sntf " : "",
		cap & (1 << 28) ? "ilck " : "",
		cap & (1 << 27) ? "stag " : "",
		cap & (1 << 26) ? "pm " : "",
		cap & (1 << 25) ? "led " : "",

		cap & (1 << 24) ? "clo " : "",
		cap & (1 << 19) ? "nz " : "",
		cap & (1 << 18) ? "only " : "",
		cap & (1 << 17) ? "pmp " : "",
		cap & (1 << 15) ? "pio " : "",
		cap & (1 << 14) ? "slum " : "",
		cap & (1 << 13) ? "part " : ""
		);
}

/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
 * hardwired to on-board SIMG 4726.  The chipset is ICH8 and doesn't
 * support PMP, and the 4726 either directly exports the device
 * attached to the first downstream port or acts as a hardware storage
 * controller and emulates a single ATA device (can be RAID 0/1 or some
 * other configuration).
 *
 * When there's no device attached to the first downstream port of the
 * 4726, "Config Disk" appears, which is a pseudo ATA device to
 * configure the 4726.  However, ATA emulation of the device is very
 * lame.  It doesn't send a signature D2H Reg FIS after the initial
 * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
 *
 * The following function works around the problem by always using
 * hardreset on the port and not depending on receiving a signature FIS
 * afterward.  If a signature FIS isn't received soon, ATA class is
 * assumed without follow-up softreset.
 */
static void ahci_p5wdh_workaround(struct ata_host *host)
{
	static struct dmi_system_id sysids[] = {
		{
			.ident = "P5W DH Deluxe",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR,
					  "ASUSTEK COMPUTER INC"),
				DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
			},
		},
		{ }
	};
	struct pci_dev *pdev = to_pci_dev(host->dev);

	if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
	    dmi_check_system(sysids)) {
		struct ata_port *ap = host->ports[1];

		dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
			   "Deluxe on-board SIMG4726 workaround\n");

		ap->ops = &ahci_p5wdh_ops;
		ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
	}
}

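/* PCI probe.  Rough order of operations: enable the device and map its
 * BARs, bail out on ICH6 parts still in combined mode, allocate host
 * private data, enable MSI (or fall back to INTx), snapshot the
 * initial CAP/port_map configuration, size and allocate the ata_host,
 * set up per-port MMIO and link PM policy, apply the P5W DH
 * workaround, configure DMA masks, reset and initialize the
 * controller, and finally register the interrupt handler via
 * ata_host_activate().
 */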
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	struct ata_port_info pi = ahci_port_info[ent->driver_data];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct device *dev = &pdev->dev;
	struct ahci_host_priv *hpriv;
	struct ata_host *host;
	int n_ports, i, rc;

	VPRINTK("ENTER\n");

	WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* AHCI controllers often implement SFF compatible interface.
	 * Grab all PCI BARs just in case.
	 */
	rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;

	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
	    (pdev->device == 0x2652 || pdev->device == 0x2653)) {
		u8 map;

		/* ICH6s share the same PCI ID for both piix and ahci
		 * modes.  Enabling ahci mode while MAP indicates
		 * combined mode is a bad idea.  Yield to ata_piix.
		 */
		pci_read_config_byte(pdev, ICH_MAP, &map);
		if (map & 0x3) {
			dev_printk(KERN_INFO, &pdev->dev, "controller is in "
				   "combined mode, can't enable AHCI mode\n");
			return -ENODEV;
		}
	}

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->flags |= (unsigned long)pi.private_data;

	if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	/* save initial config */
	ahci_save_initial_config(pdev, hpriv);

	/* prepare host */
	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	/* CAP.NP sometimes indicates the index of the last enabled
	 * port, at other times, that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	if (!host)
		return -ENOMEM;
	host->iomap = pcim_iomap_table(pdev);
	host->private_data = hpriv;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		void __iomem *port_mmio = ahci_port_base(ap);

		ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
		ata_port_pbar_desc(ap, AHCI_PCI_BAR,
				   0x100 + ap->port_no * 0x80, "port");

		/* set initial link pm policy */
		ap->pm_policy = NOT_AVAILABLE;

		/* standard SATA port setup */
		if (hpriv->port_map & (1 << i))
			ap->ioaddr.cmd_addr = port_mmio;

		/* disabled/not-implemented port */
		else
			ap->ops = &ata_dummy_port_ops;
	}

	/* apply workaround for ASUS P5W DH Deluxe mainboard */
	ahci_p5wdh_workaround(host);

	/* initialize adapter */
	rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
	if (rc)
		return rc;

	rc = ahci_reset_controller(host);
	if (rc)
		return rc;

	ahci_init_controller(host);
	ahci_print_info(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
				 &ahci_sht);
}

static int __init ahci_init(void)
{
	return pci_register_driver(&ahci_pci_driver);
}

static void __exit ahci_exit(void)
{
	pci_unregister_driver(&ahci_pci_driver);
}


MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("AHCI SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(ahci_init);
module_exit(ahci_exit);