/*
 *  ahci.c - AHCI SATA support
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2004-2005 Red Hat, Inc.
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * AHCI hardware documentation:
 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"ahci"
#define DRV_VERSION	"3.0"

/* Enclosure Management Control */
#define EM_CTRL_MSG_TYPE		0x000f0000

/* Enclosure Management LED Message Type */
#define EM_MSG_LED_HBA_PORT		0x0000000f
#define EM_MSG_LED_PMP_SLOT		0x0000ff00
#define EM_MSG_LED_VALUE		0xffff0000
#define EM_MSG_LED_VALUE_ACTIVITY	0x00070000
#define EM_MSG_LED_VALUE_OFF		0xfff80000
#define EM_MSG_LED_VALUE_ON		0x00010000

static int ahci_skip_host_reset;
static int ahci_ignore_sss;

module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");

module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");

static int ahci_enable_alpm(struct ata_port *ap,
			    enum link_pm policy);
static void ahci_disable_alpm(struct ata_port *ap);
static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
			      size_t size);
static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
					 ssize_t size);

enum {
	AHCI_PCI_BAR		= 5,
	AHCI_MAX_PORTS		= 32,
	AHCI_MAX_SG		= 168, /* hardware max is 64K */
	AHCI_DMA_BOUNDARY	= 0xffffffff,
	AHCI_MAX_CMDS		= 32,
	AHCI_CMD_SZ		= 32,
	AHCI_CMD_SLOT_SZ	= AHCI_MAX_CMDS * AHCI_CMD_SZ,
	AHCI_RX_FIS_SZ		= 256,
	AHCI_CMD_TBL_CDB	= 0x40,
	AHCI_CMD_TBL_HDR_SZ	= 0x80,
	AHCI_CMD_TBL_SZ		= AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
	AHCI_CMD_TBL_AR_SZ	= AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
	AHCI_PORT_PRIV_DMA_SZ	= AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
				  AHCI_RX_FIS_SZ,
	AHCI_IRQ_ON_SG		= (1 << 31),
	AHCI_CMD_ATAPI		= (1 << 5),
	AHCI_CMD_WRITE		= (1 << 6),
	AHCI_CMD_PREFETCH	= (1 << 7),
	AHCI_CMD_RESET		= (1 << 8),
	AHCI_CMD_CLR_BUSY	= (1 << 10),

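	/* offsets into the 256-byte received-FIS area (AHCI_RX_FIS_SZ above)
	 * for the FIS types this driver inspects directly */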
	RX_FIS_D2H_REG		= 0x40,	/* offset of D2H Register FIS data */
	RX_FIS_SDB		= 0x58, /* offset of SDB FIS data */
	RX_FIS_UNK		= 0x60, /* offset of Unknown FIS data */

	board_ahci		= 0,
	board_ahci_vt8251	= 1,
	board_ahci_ign_iferr	= 2,
	board_ahci_sb600	= 3,
	board_ahci_mv		= 4,
	board_ahci_sb700	= 5, /* for SB700 and SB800 */
	board_ahci_mcp65	= 6,
	board_ahci_nopmp	= 7,
	board_ahci_yesncq	= 8,

	/* global controller registers */
	HOST_CAP		= 0x00, /* host capabilities */
	HOST_CTL		= 0x04, /* global host control */
	HOST_IRQ_STAT		= 0x08, /* interrupt status */
	HOST_PORTS_IMPL		= 0x0c, /* bitmap of implemented ports */
	HOST_VERSION		= 0x10, /* AHCI spec. version compliancy */
	HOST_EM_LOC		= 0x1c, /* Enclosure Management location */
	HOST_EM_CTL		= 0x20, /* Enclosure Management Control */

	/* HOST_CTL bits */
	HOST_RESET		= (1 << 0),  /* reset controller; self-clear */
	HOST_IRQ_EN		= (1 << 1),  /* global IRQ enable */
	HOST_AHCI_EN		= (1 << 31), /* AHCI enabled */

	/* HOST_CAP bits */
	HOST_CAP_EMS		= (1 << 6),  /* Enclosure Management support */
	HOST_CAP_SSC		= (1 << 14), /* Slumber capable */
	HOST_CAP_PMP		= (1 << 17), /* Port Multiplier support */
	HOST_CAP_CLO		= (1 << 24), /* Command List Override support */
	HOST_CAP_ALPM		= (1 << 26), /* Aggressive Link PM support */
	HOST_CAP_SSS		= (1 << 27), /* Staggered Spin-up */
	HOST_CAP_SNTF		= (1 << 29), /* SNotification register */
	HOST_CAP_NCQ		= (1 << 30), /* Native Command Queueing */
	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */

	/* registers for each SATA port */
	PORT_LST_ADDR		= 0x00, /* command list DMA addr */
	PORT_LST_ADDR_HI	= 0x04, /* command list DMA addr hi */
	PORT_FIS_ADDR		= 0x08, /* FIS rx buf addr */
	PORT_FIS_ADDR_HI	= 0x0c, /* FIS rx buf addr hi */
	PORT_IRQ_STAT		= 0x10, /* interrupt status */
	PORT_IRQ_MASK		= 0x14, /* interrupt enable/disable mask */
	PORT_CMD		= 0x18, /* port command */
	PORT_TFDATA		= 0x20,	/* taskfile data */
	PORT_SIG		= 0x24, /* device TF signature */
	PORT_CMD_ISSUE		= 0x38, /* command issue */
	PORT_SCR_STAT		= 0x28, /* SATA phy register: SStatus */
	PORT_SCR_CTL		= 0x2c, /* SATA phy register: SControl */
	PORT_SCR_ERR		= 0x30, /* SATA phy register: SError */
	PORT_SCR_ACT		= 0x34, /* SATA phy register: SActive */
	PORT_SCR_NTF		= 0x3c, /* SATA phy register: SNotification */

	/* PORT_IRQ_{STAT,MASK} bits */
	PORT_IRQ_COLD_PRES	= (1 << 31), /* cold presence detect */
	PORT_IRQ_TF_ERR		= (1 << 30), /* task file error */
	PORT_IRQ_HBUS_ERR	= (1 << 29), /* host bus fatal error */
	PORT_IRQ_HBUS_DATA_ERR	= (1 << 28), /* host bus data error */
	PORT_IRQ_IF_ERR		= (1 << 27), /* interface fatal error */
	PORT_IRQ_IF_NONFATAL	= (1 << 26), /* interface non-fatal error */
	PORT_IRQ_OVERFLOW	= (1 << 24), /* xfer exhausted available S/G */
	PORT_IRQ_BAD_PMP	= (1 << 23), /* incorrect port multiplier */

	PORT_IRQ_PHYRDY		= (1 << 22), /* PhyRdy changed */
	PORT_IRQ_DEV_ILCK	= (1 << 7),  /* device interlock */
	PORT_IRQ_CONNECT	= (1 << 6),  /* port connect change status */
	PORT_IRQ_SG_DONE	= (1 << 5),  /* descriptor processed */
	PORT_IRQ_UNK_FIS	= (1 << 4),  /* unknown FIS rx'd */
	PORT_IRQ_SDB_FIS	= (1 << 3),  /* Set Device Bits FIS rx'd */
	PORT_IRQ_DMAS_FIS	= (1 << 2),  /* DMA Setup FIS rx'd */
	PORT_IRQ_PIOS_FIS	= (1 << 1),  /* PIO Setup FIS rx'd */
	PORT_IRQ_D2H_REG_FIS	= (1 << 0),  /* D2H Register FIS rx'd */

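	/* groups built from the PORT_IRQ_* bits above: FREEZE and ERROR are
	 * the conditions handed to libata EH, DEF_PORT_IRQ is the per-port
	 * interrupt mask used during normal operation */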
	PORT_IRQ_FREEZE		= PORT_IRQ_HBUS_ERR |
				  PORT_IRQ_IF_ERR |
				  PORT_IRQ_CONNECT |
				  PORT_IRQ_PHYRDY |
				  PORT_IRQ_UNK_FIS |
				  PORT_IRQ_BAD_PMP,
	PORT_IRQ_ERROR		= PORT_IRQ_FREEZE |
				  PORT_IRQ_TF_ERR |
				  PORT_IRQ_HBUS_DATA_ERR,
	DEF_PORT_IRQ		= PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
				  PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
				  PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,

	/* PORT_CMD bits */
	PORT_CMD_ASP		= (1 << 27), /* Aggressive Slumber/Partial */
	PORT_CMD_ALPE		= (1 << 26), /* Aggressive Link PM enable */
	PORT_CMD_ATAPI		= (1 << 24), /* Device is ATAPI */
	PORT_CMD_PMP		= (1 << 17), /* PMP attached */
	PORT_CMD_LIST_ON	= (1 << 15), /* cmd list DMA engine running */
	PORT_CMD_FIS_ON		= (1 << 14), /* FIS DMA engine running */
	PORT_CMD_FIS_RX		= (1 << 4),  /* Enable FIS receive DMA engine */
	PORT_CMD_CLO		= (1 << 3),  /* Command list override */
	PORT_CMD_POWER_ON	= (1 << 2),  /* Power up device */
	PORT_CMD_SPIN_UP	= (1 << 1),  /* Spin up device */
	PORT_CMD_START		= (1 << 0),  /* Enable port DMA engine */

	PORT_CMD_ICC_MASK	= (0xf << 28), /* i/f ICC state mask */
	PORT_CMD_ICC_ACTIVE	= (0x1 << 28), /* Put i/f in active state */
	PORT_CMD_ICC_PARTIAL	= (0x2 << 28), /* Put i/f in partial state */
	PORT_CMD_ICC_SLUMBER	= (0x6 << 28), /* Put i/f in slumber state */

	/* hpriv->flags bits */
	AHCI_HFLAG_NO_NCQ		= (1 << 0),
	AHCI_HFLAG_IGN_IRQ_IF_ERR	= (1 << 1), /* ignore IRQ_IF_ERR */
	AHCI_HFLAG_IGN_SERR_INTERNAL	= (1 << 2), /* ignore SERR_INTERNAL */
	AHCI_HFLAG_32BIT_ONLY		= (1 << 3), /* force 32bit */
	AHCI_HFLAG_MV_PATA		= (1 << 4), /* PATA port */
	AHCI_HFLAG_NO_MSI		= (1 << 5), /* no PCI MSI */
	AHCI_HFLAG_NO_PMP		= (1 << 6), /* no PMP */
	AHCI_HFLAG_NO_HOTPLUG		= (1 << 7), /* ignore PxSERR.DIAG.N */
	AHCI_HFLAG_SECT255		= (1 << 8), /* max 255 sectors */
	AHCI_HFLAG_YES_NCQ		= (1 << 9), /* force NCQ cap on */
	AHCI_HFLAG_NO_SUSPEND		= (1 << 10), /* don't suspend */

	/* ap->flags bits */

	AHCI_FLAG_COMMON		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
					  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
					  ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
					  ATA_FLAG_IPM,

	ICH_MAP				= 0x90, /* ICH MAP register */

	/* em constants */
	EM_MAX_SLOTS			= 8,
	EM_MAX_RETRY			= 5,

	/* em_ctl bits */
	EM_CTL_RST			= (1 << 9),  /* Reset */
	EM_CTL_TM			= (1 << 8),  /* Transmit Message */
	EM_CTL_ALHD			= (1 << 26), /* Activity LED */
};

struct ahci_cmd_hdr {
	__le32			opts;
	__le32			status;
	__le32			tbl_addr;
	__le32			tbl_addr_hi;
	__le32			reserved[4];
};

struct ahci_sg {
	__le32			addr;
	__le32			addr_hi;
	__le32			reserved;
	__le32			flags_size;
};

struct ahci_em_priv {
	enum sw_activity	blink_policy;
	struct timer_list	timer;
	unsigned long		saved_activity;
	unsigned long		activity;
	unsigned long		led_state;
};

struct ahci_host_priv {
	unsigned int		flags;		/* AHCI_HFLAG_* */
	u32			cap;		/* cap to use */
	u32			port_map;	/* port map to use */
	u32			saved_cap;	/* saved initial cap */
	u32			saved_port_map;	/* saved initial port_map */
	u32			em_loc;		/* enclosure management location */
};

struct ahci_port_priv {
	struct ata_link		*active_link;
	struct ahci_cmd_hdr	*cmd_slot;
	dma_addr_t		cmd_slot_dma;
	void			*cmd_tbl;
	dma_addr_t		cmd_tbl_dma;
	void			*rx_fis;
	dma_addr_t		rx_fis_dma;
	/* for NCQ spurious interrupt analysis */
	unsigned int		ncq_saw_d2h:1;
	unsigned int		ncq_saw_dmas:1;
	unsigned int		ncq_saw_sdb:1;
	u32			intr_mask;	/* interrupts to enable */
	/* enclosure management info per PM slot */
	struct ahci_em_priv	em_priv[EM_MAX_SLOTS];
};

static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
static void ahci_qc_prep(struct ata_queued_cmd *qc);
static void ahci_freeze(struct ata_port *ap);
static void ahci_thaw(struct ata_port *ap);
static void ahci_pmp_attach(struct ata_port *ap);
static void ahci_pmp_detach(struct ata_port *ap);
static int ahci_softreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline);
static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline);
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
				 unsigned long deadline);
static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline);
static void ahci_postreset(struct ata_link *link, unsigned int *class);
static void ahci_error_handler(struct ata_port *ap);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
static int ahci_port_resume(struct ata_port *ap);
static void ahci_dev_config(struct ata_device *dev);
static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
			       u32 opts);
#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int ahci_pci_device_resume(struct pci_dev *pdev);
#endif
static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
static ssize_t ahci_activity_store(struct ata_device *dev,
				   enum sw_activity val);
static void ahci_init_sw_activity(struct ata_link *link);

static struct device_attribute *ahci_shost_attrs[] = {
	&dev_attr_link_power_management_policy,
	&dev_attr_em_message_type,
	&dev_attr_em_message,
	NULL
};

static struct device_attribute *ahci_sdev_attrs[] = {
	&dev_attr_sw_activity,
	&dev_attr_unload_heads,
	NULL
};

static struct scsi_host_template ahci_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= AHCI_MAX_CMDS - 1,
	.sg_tablesize		= AHCI_MAX_SG,
	.dma_boundary		= AHCI_DMA_BOUNDARY,
	.shost_attrs		= ahci_shost_attrs,
	.sdev_attrs		= ahci_sdev_attrs,
};

static struct ata_port_operations ahci_ops = {
	.inherits		= &sata_pmp_port_ops,

	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
	.qc_prep		= ahci_qc_prep,
	.qc_issue		= ahci_qc_issue,
	.qc_fill_rtf		= ahci_qc_fill_rtf,

	.freeze			= ahci_freeze,
	.thaw			= ahci_thaw,
	.softreset		= ahci_softreset,
	.hardreset		= ahci_hardreset,
	.postreset		= ahci_postreset,
	.pmp_softreset		= ahci_softreset,
	.error_handler		= ahci_error_handler,
	.post_internal_cmd	= ahci_post_internal_cmd,
	.dev_config		= ahci_dev_config,

	.scr_read		= ahci_scr_read,
	.scr_write		= ahci_scr_write,
	.pmp_attach		= ahci_pmp_attach,
	.pmp_detach		= ahci_pmp_detach,

	.enable_pm		= ahci_enable_alpm,
	.disable_pm		= ahci_disable_alpm,
	.em_show		= ahci_led_show,
	.em_store		= ahci_led_store,
	.sw_activity_show	= ahci_activity_show,
	.sw_activity_store	= ahci_activity_store,
#ifdef CONFIG_PM
	.port_suspend		= ahci_port_suspend,
	.port_resume		= ahci_port_resume,
#endif
	.port_start		= ahci_port_start,
	.port_stop		= ahci_port_stop,
};

static struct ata_port_operations ahci_vt8251_ops = {
	.inherits		= &ahci_ops,
	.hardreset		= ahci_vt8251_hardreset,
};

static struct ata_port_operations ahci_p5wdh_ops = {
	.inherits		= &ahci_ops,
	.hardreset		= ahci_p5wdh_hardreset,
};

static struct ata_port_operations ahci_sb600_ops = {
	.inherits		= &ahci_ops,
	.softreset		= ahci_sb600_softreset,
	.pmp_softreset		= ahci_sb600_softreset,
};

#define AHCI_HFLAGS(flags)	.private_data	= (void *)(flags)

static const struct ata_port_info ahci_port_info[] = {
	[board_ahci] =
	{
		.flags		= AHCI_FLAG_COMMON,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ahci_ops,
	},
	[board_ahci_vt8251] =
	{
		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
		.flags		= AHCI_FLAG_COMMON,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ahci_vt8251_ops,
	},
	[board_ahci_ign_iferr] =
	{
		AHCI_HFLAGS	(AHCI_HFLAG_IGN_IRQ_IF_ERR),
		.flags		= AHCI_FLAG_COMMON,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ahci_ops,
	},
	[board_ahci_sb600] =
	{
		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL |
				 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255),
		.flags		= AHCI_FLAG_COMMON,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ahci_sb600_ops,
	},
	[board_ahci_mv] =
	{
		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
				 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ahci_ops,
	},
	[board_ahci_sb700] =	/* for SB700 and SB800 */
	{
		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL),
		.flags		= AHCI_FLAG_COMMON,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ahci_sb600_ops,
	},
	[board_ahci_mcp65] =
	{
		AHCI_HFLAGS	(AHCI_HFLAG_YES_NCQ),
		.flags		= AHCI_FLAG_COMMON,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ahci_ops,
	},
	[board_ahci_nopmp] =
	{
		AHCI_HFLAGS	(AHCI_HFLAG_NO_PMP),
		.flags		= AHCI_FLAG_COMMON,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ahci_ops,
	},
	/* board_ahci_yesncq */
	{
		AHCI_HFLAGS	(AHCI_HFLAG_YES_NCQ),
		.flags		= AHCI_FLAG_COMMON,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ahci_ops,
	},
};

static const struct pci_device_id ahci_pci_tbl[] = {
	/* Intel */
	{ PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
	{ PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
	{ PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
	{ PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
	{ PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
	{ PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
	{ PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
	{ PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
	{ PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
	{ PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
	{ PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
	{ PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
	{ PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
	{ PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
	{ PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
	{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
	{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
	{ PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
	{ PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
	{ PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
	{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
	{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */

	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },

	/* ATI */
	{ PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
	{ PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
	{ PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
	{ PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
	{ PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
	{ PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
	{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */

	/* VIA */
	{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
	{ PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */

	/* NVIDIA */
	{ PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci }, /* MCP89 */
	{ PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci }, /* MCP89 */
	{ PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci }, /* MCP89 */
	{ PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci }, /* MCP89 */
	{ PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci }, /* MCP89 */
	{ PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci }, /* MCP89 */
	{ PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci }, /* MCP89 */
	{ PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci }, /* MCP89 */
	{ PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci }, /* MCP89 */
	{ PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci }, /* MCP89 */
	{ PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci }, /* MCP89 */
	{ PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci }, /* MCP89 */

	/* SiS */
	{ PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
	{ PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
	{ PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */

	/* Marvell */
	{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
	{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */

	/* Promise */
	{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */

	/* Generic, PCI class code for AHCI */
	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },

	{ }	/* terminate list */
};


static struct pci_driver ahci_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= ahci_pci_tbl,
	.probe			= ahci_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend		= ahci_pci_device_suspend,
	.resume			= ahci_pci_device_resume,
#endif
};

static int ahci_em_messages = 1;
module_param(ahci_em_messages, int, 0444);
/* add other LED protocol types when they become supported */
MODULE_PARM_DESC(ahci_em_messages,
	"Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED)");

#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
static int marvell_enable;
#else
static int marvell_enable = 1;
#endif
module_param(marvell_enable, int, 0644);
MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");


static inline int ahci_nr_ports(u32 cap)
{
	return (cap & 0x1f) + 1;
}

static inline void __iomem *__ahci_port_base(struct ata_host *host,
					     unsigned int port_no)
{
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];

	return mmio + 0x100 + (port_no * 0x80);
}

static inline void __iomem *ahci_port_base(struct ata_port *ap)
{
	return __ahci_port_base(ap->host, ap->port_no);
}

static void ahci_enable_ahci(void __iomem *mmio)
{
	int i;
	u32 tmp;

	/* turn on AHCI_EN */
	tmp = readl(mmio + HOST_CTL);
	if (tmp & HOST_AHCI_EN)
		return;

	/* Some controllers need AHCI_EN to be written multiple times.
	 * Try a few times before giving up.
	 */
	for (i = 0; i < 5; i++) {
		tmp |= HOST_AHCI_EN;
		writel(tmp, mmio + HOST_CTL);
		tmp = readl(mmio + HOST_CTL);	/* flush && sanity check */
		if (tmp & HOST_AHCI_EN)
			return;
		msleep(10);
	}

	WARN_ON(1);
}

/**
 *	ahci_save_initial_config - Save and fixup initial config values
 *	@pdev: target PCI device
 *	@hpriv: host private area to store config values
 *
 *	Some registers containing configuration info might be setup by
 *	BIOS and might be cleared on reset.  This function saves the
 *	initial values of those registers into @hpriv such that they
 *	can be restored after controller reset.
 *
 *	If inconsistent, config values are fixed up by this function.
 *
 *	LOCKING:
 *	None.
 */
static void ahci_save_initial_config(struct pci_dev *pdev,
				     struct ahci_host_priv *hpriv)
{
	void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
	u32 cap, port_map;
	int i;
	int mv;

	/* make sure AHCI mode is enabled before accessing CAP */
	ahci_enable_ahci(mmio);

	/* Values prefixed with saved_ are written back to host after
	 * reset.  Values without are used for driver operation.
	 */
	hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
	hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);

	/* some chips have errata preventing 64bit use */
	if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
		dev_printk(KERN_INFO, &pdev->dev,
			   "controller can't do 64bit DMA, forcing 32bit\n");
		cap &= ~HOST_CAP_64;
	}

	if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
		dev_printk(KERN_INFO, &pdev->dev,
			   "controller can't do NCQ, turning off CAP_NCQ\n");
		cap &= ~HOST_CAP_NCQ;
	}

	if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
		dev_printk(KERN_INFO, &pdev->dev,
			   "controller can do NCQ, turning on CAP_NCQ\n");
		cap |= HOST_CAP_NCQ;
	}

	if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
		dev_printk(KERN_INFO, &pdev->dev,
			   "controller can't do PMP, turning off CAP_PMP\n");
		cap &= ~HOST_CAP_PMP;
	}

	if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
	    port_map != 1) {
		dev_printk(KERN_INFO, &pdev->dev,
			   "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
			   port_map, 1);
		port_map = 1;
	}

	/*
	 * Temporary Marvell 6145 hack: PATA port presence
	 * is asserted through the standard AHCI port
	 * presence register, as bit 4 (counting from 0)
	 */
	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
		if (pdev->device == 0x6121)
			mv = 0x3;
		else
			mv = 0xf;
		dev_printk(KERN_ERR, &pdev->dev,
			   "MV_AHCI HACK: port_map %x -> %x\n",
			   port_map, port_map & mv);
		dev_printk(KERN_ERR, &pdev->dev,
			   "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");

		port_map &= mv;
	}

	/* cross check port_map and cap.n_ports */
	if (port_map) {
		int map_ports = 0;

		for (i = 0; i < AHCI_MAX_PORTS; i++)
			if (port_map & (1 << i))
				map_ports++;

		/* If PI has more ports than n_ports, whine, clear
		 * port_map and let it be generated from n_ports.
		 */
		if (map_ports > ahci_nr_ports(cap)) {
			dev_printk(KERN_WARNING, &pdev->dev,
				   "implemented port map (0x%x) contains more "
				   "ports than nr_ports (%u), using nr_ports\n",
				   port_map, ahci_nr_ports(cap));
			port_map = 0;
		}
	}

	/* fabricate port_map from cap.nr_ports */
	if (!port_map) {
		port_map = (1 << ahci_nr_ports(cap)) - 1;
		dev_printk(KERN_WARNING, &pdev->dev,
			   "forcing PORTS_IMPL to 0x%x\n", port_map);

		/* write the fixed up value to the PI register */
		hpriv->saved_port_map = port_map;
	}

	/* record values to use during operation */
	hpriv->cap = cap;
	hpriv->port_map = port_map;
}

/**
 *	ahci_restore_initial_config - Restore initial config
 *	@host: target ATA host
 *
 *	Restore initial config stored by ahci_save_initial_config().
 *
 *	LOCKING:
 *	None.
 */
static void ahci_restore_initial_config(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];

	writel(hpriv->saved_cap, mmio + HOST_CAP);
	writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
	(void) readl(mmio + HOST_PORTS_IMPL);	/* flush */
}

static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
{
	static const int offset[] = {
		[SCR_STATUS]		= PORT_SCR_STAT,
		[SCR_CONTROL]		= PORT_SCR_CTL,
		[SCR_ERROR]		= PORT_SCR_ERR,
		[SCR_ACTIVE]		= PORT_SCR_ACT,
		[SCR_NOTIFICATION]	= PORT_SCR_NTF,
	};
	struct ahci_host_priv *hpriv = ap->host->private_data;

	if (sc_reg < ARRAY_SIZE(offset) &&
	    (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
		return offset[sc_reg];
	return 0;
}

static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	void __iomem *port_mmio = ahci_port_base(link->ap);
	int offset = ahci_scr_offset(link->ap, sc_reg);

	if (offset) {
		*val = readl(port_mmio + offset);
		return 0;
	}
	return -EINVAL;
}

static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	void __iomem *port_mmio = ahci_port_base(link->ap);
	int offset = ahci_scr_offset(link->ap, sc_reg);

	if (offset) {
		writel(val, port_mmio + offset);
		return 0;
	}
	return -EINVAL;
}

static void ahci_start_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	/* start DMA */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);
	readl(port_mmio + PORT_CMD); /* flush */
}

static int ahci_stop_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	tmp = readl(port_mmio + PORT_CMD);

	/* check if the HBA is idle */
	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
		return 0;

	/* setting HBA to idle */
	tmp &= ~PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for engine to stop. This could be as long as 500 msec */
	tmp = ata_wait_register(port_mmio + PORT_CMD,
				PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
	if (tmp & PORT_CMD_LIST_ON)
		return -EIO;

	return 0;
}

static void ahci_start_fis_rx(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	u32 tmp;

	/* set FIS registers */
	if (hpriv->cap & HOST_CAP_64)
		writel((pp->cmd_slot_dma >> 16) >> 16,
		       port_mmio + PORT_LST_ADDR_HI);
	writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);

	if (hpriv->cap & HOST_CAP_64)
		writel((pp->rx_fis_dma >> 16) >> 16,
		       port_mmio + PORT_FIS_ADDR_HI);
	writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);

	/* enable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* flush */
	readl(port_mmio + PORT_CMD);
}

static int ahci_stop_fis_rx(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	/* disable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp &= ~PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for completion, spec says 500ms, give it 1000 */
	tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
				PORT_CMD_FIS_ON, 10, 1000);
	if (tmp & PORT_CMD_FIS_ON)
		return -EBUSY;

	return 0;
}

static void ahci_power_up(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;

	/* spin up device */
	if (hpriv->cap & HOST_CAP_SSS) {
		cmd |= PORT_CMD_SPIN_UP;
		writel(cmd, port_mmio + PORT_CMD);
	}

	/* wake up link */
	writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
}

static void ahci_disable_alpm(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;
	struct ahci_port_priv *pp = ap->private_data;

	/* IPM bits should be disabled by libata-core */
	/* get the existing command bits */
	cmd = readl(port_mmio + PORT_CMD);

	/* disable ALPM and ASP */
	cmd &= ~PORT_CMD_ASP;
	cmd &= ~PORT_CMD_ALPE;

	/* force the interface back to active */
	cmd |= PORT_CMD_ICC_ACTIVE;

	/* write out new cmd value */
	writel(cmd, port_mmio + PORT_CMD);
	cmd = readl(port_mmio + PORT_CMD);

	/* wait 10ms to be sure we've come out of any low power state */
	msleep(10);

	/* clear out any PhyRdy stuff from interrupt status */
	writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);

	/* go ahead and clean out PhyRdy Change from Serror too */
	ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));

	/*
	 * Clear flag to indicate that we should ignore all PhyRdy
	 * state changes
	 */
	hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;

	/*
	 * Enable interrupts on Phy Ready.
	 */
	pp->intr_mask |= PORT_IRQ_PHYRDY;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);

	/*
	 * don't change the link pm policy - we can be called
	 * just to turn off link pm temporarily
	 */
}

static int ahci_enable_alpm(struct ata_port *ap,
			    enum link_pm policy)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;
	struct ahci_port_priv *pp = ap->private_data;
	u32 asp;

	/* Make sure the host is capable of link power management */
	if (!(hpriv->cap & HOST_CAP_ALPM))
		return -EINVAL;

	switch (policy) {
	case MAX_PERFORMANCE:
	case NOT_AVAILABLE:
		/*
		 * if we came here with NOT_AVAILABLE,
		 * it just means this is the first time we
		 * have tried to enable - default to max performance,
		 * and let the user go to lower power modes on request.
		 */
		ahci_disable_alpm(ap);
		return 0;
	case MIN_POWER:
		/* configure HBA to enter SLUMBER */
		asp = PORT_CMD_ASP;
		break;
	case MEDIUM_POWER:
		/* configure HBA to enter PARTIAL */
		asp = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Disable interrupts on Phy Ready. This keeps us from
	 * getting woken up due to spurious phy ready interrupts
	 * TBD - Hot plug should be done via polling now, is
	 * that even supported?
	 */
	pp->intr_mask &= ~PORT_IRQ_PHYRDY;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);

	/*
	 * Set a flag to indicate that we should ignore all PhyRdy
	 * state changes since these can happen now whenever we
	 * change link state
	 */
	hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;

	/* get the existing command bits */
	cmd = readl(port_mmio + PORT_CMD);

	/*
	 * Set ASP based on Policy
	 */
	cmd |= asp;

	/*
	 * Setting this bit will instruct the HBA to aggressively
	 * enter a lower power link state when it's appropriate and
	 * based on the value set above for ASP
	 */
	cmd |= PORT_CMD_ALPE;

	/* write out new cmd value */
	writel(cmd, port_mmio + PORT_CMD);
	cmd = readl(port_mmio + PORT_CMD);

	/* IPM bits should be set by libata-core */
	return 0;
}

#ifdef CONFIG_PM
static void ahci_power_down(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd, scontrol;

	if (!(hpriv->cap & HOST_CAP_SSS))
		return;

	/* put device into listen mode, first set PxSCTL.DET to 0 */
	scontrol = readl(port_mmio + PORT_SCR_CTL);
	scontrol &= ~0xf;
	writel(scontrol, port_mmio + PORT_SCR_CTL);

	/* then set PxCMD.SUD to 0 */
	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
	cmd &= ~PORT_CMD_SPIN_UP;
	writel(cmd, port_mmio + PORT_CMD);
}
#endif

static void ahci_start_port(struct ata_port *ap)
{
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_link *link;
	struct ahci_em_priv *emp;
	ssize_t rc;
	int i;

	/* enable FIS reception */
	ahci_start_fis_rx(ap);

	/* enable DMA */
	ahci_start_engine(ap);

	/* turn on LEDs */
	if (ap->flags & ATA_FLAG_EM) {
		ata_for_each_link(link, ap, EDGE) {
			emp = &pp->em_priv[link->pmp];

			/* EM Transmit bit may be busy during init */
			for (i = 0; i < EM_MAX_RETRY; i++) {
				rc = ahci_transmit_led_message(ap,
							       emp->led_state,
							       4);
				if (rc == -EBUSY)
					msleep(1);
				else
					break;
			}
		}
	}

	if (ap->flags & ATA_FLAG_SW_ACTIVITY)
		ata_for_each_link(link, ap, EDGE)
			ahci_init_sw_activity(link);

}

static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
{
	int rc;

	/* disable DMA */
	rc = ahci_stop_engine(ap);
	if (rc) {
		*emsg = "failed to stop engine";
		return rc;
	}

	/* disable FIS reception */
	rc = ahci_stop_fis_rx(ap);
	if (rc) {
		*emsg = "failed stop FIS RX";
		return rc;
	}

	return 0;
}

static int ahci_reset_controller(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	u32 tmp;

	/* we must be in AHCI mode, before using anything
	 * AHCI-specific, such as HOST_RESET.
	 */
	ahci_enable_ahci(mmio);

	/* global controller reset */
	if (!ahci_skip_host_reset) {
		tmp = readl(mmio + HOST_CTL);
		if ((tmp & HOST_RESET) == 0) {
			writel(tmp | HOST_RESET, mmio + HOST_CTL);
			readl(mmio + HOST_CTL); /* flush */
		}

		/*
		 * to perform host reset, OS should set HOST_RESET
		 * and poll until this bit is read to be "0".
		 * reset must complete within 1 second, or
		 * the hardware should be considered fried.
		 */
		tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
					HOST_RESET, 10, 1000);

		if (tmp & HOST_RESET) {
			dev_printk(KERN_ERR, host->dev,
				   "controller reset failed (0x%x)\n", tmp);
			return -EIO;
		}

		/* turn on AHCI mode */
		ahci_enable_ahci(mmio);

		/* Some registers might be cleared on reset.  Restore
		 * initial values.
		 */
		ahci_restore_initial_config(host);
	} else
		dev_printk(KERN_INFO, host->dev,
			   "skipping global host reset\n");

	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
		u16 tmp16;

		/* configure PCS */
		pci_read_config_word(pdev, 0x92, &tmp16);
		if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
			tmp16 |= hpriv->port_map;
			pci_write_config_word(pdev, 0x92, tmp16);
		}
	}

	return 0;
}

static void ahci_sw_activity(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];

	if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
		return;

	emp->activity++;
	if (!timer_pending(&emp->timer))
		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
}

static void ahci_sw_activity_blink(unsigned long arg)
{
	struct ata_link *link = (struct ata_link *)arg;
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
	unsigned long led_message = emp->led_state;
	u32 activity_led_state;
	unsigned long flags;

	led_message &= EM_MSG_LED_VALUE;
	led_message |= ap->port_no | (link->pmp << 8);

	/* check to see if we've had activity.  If so,
	 * toggle state of LED and reset timer.  If not,
	 * turn LED to desired idle state.
	 */
	spin_lock_irqsave(ap->lock, flags);
	if (emp->saved_activity != emp->activity) {
		emp->saved_activity = emp->activity;
		/* get the current LED state */
		activity_led_state = led_message & EM_MSG_LED_VALUE_ON;

		if (activity_led_state)
			activity_led_state = 0;
		else
			activity_led_state = 1;

		/* clear old state */
		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;

		/* toggle state */
		led_message |= (activity_led_state << 16);
		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
	} else {
		/* switch to idle */
		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
		if (emp->blink_policy == BLINK_OFF)
			led_message |= (1 << 16);
	}
	spin_unlock_irqrestore(ap->lock, flags);
	ahci_transmit_led_message(ap, led_message, 4);
}

static void ahci_init_sw_activity(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];

	/* init activity stats, setup timer */
	emp->saved_activity = emp->activity = 0;
	setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);

	/* check our blink policy and set flag for link if it's enabled */
	if (emp->blink_policy)
		link->flags |= ATA_LFLAG_SW_ACTIVITY;
}

static int ahci_reset_em(struct ata_host *host)
{
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	u32 em_ctl;

	em_ctl = readl(mmio + HOST_EM_CTL);
	if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
		return -EINVAL;

	writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
	return 0;
}

static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
					 ssize_t size)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
	u32 em_ctl;
	u32 message[] = {0, 0};
	unsigned long flags;
	int pmp;
	struct ahci_em_priv *emp;

	/* get the slot number from the message */
	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
	if (pmp < EM_MAX_SLOTS)
		emp = &pp->em_priv[pmp];
	else
		return -EINVAL;

	spin_lock_irqsave(ap->lock, flags);

	/*
	 * if we are still busy transmitting a previous message,
	 * do not allow
	 */
	em_ctl = readl(mmio + HOST_EM_CTL);
	if (em_ctl & EM_CTL_TM) {
		spin_unlock_irqrestore(ap->lock, flags);
		return -EBUSY;
	}

	/*
	 * create message header - this is all zero except for
	 * the message size, which is 4 bytes.
	 */
	message[0] |= (4 << 8);

	/* ignore 0:4 of byte zero, fill in port info yourself */
	message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);

	/* write message to EM_LOC */
	writel(message[0], mmio + hpriv->em_loc);
	writel(message[1], mmio + hpriv->em_loc+4);

	/* save off new led state for port/slot */
	emp->led_state = state;

	/*
	 * tell hardware to transmit the message
	 */
	writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);

	spin_unlock_irqrestore(ap->lock, flags);
	return size;
}

static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
{
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_link *link;
	struct ahci_em_priv *emp;
	int rc = 0;

	ata_for_each_link(link, ap, EDGE) {
		emp = &pp->em_priv[link->pmp];
		rc += sprintf(buf, "%lx\n", emp->led_state);
	}
	return rc;
}

static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
			      size_t size)
{
	int state;
	int pmp;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp;

	state = simple_strtoul(buf, NULL, 0);

	/* get the slot number from the message */
	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
	if (pmp < EM_MAX_SLOTS)
		emp = &pp->em_priv[pmp];
	else
		return -EINVAL;

	/* mask off the activity bits if we are in sw_activity
	 * mode, user should turn off sw_activity before setting
	 * activity led through em_message
	 */
	if (emp->blink_policy)
		state &= ~EM_MSG_LED_VALUE_ACTIVITY;

	return ahci_transmit_led_message(ap, state, size);
}

static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
	u32 port_led_state = emp->led_state;

	/* save the desired Activity LED behavior */
	if (val == OFF) {
		/* clear LFLAG */
		link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);

		/* set the LED to OFF */
		port_led_state &= EM_MSG_LED_VALUE_OFF;
		port_led_state |= (ap->port_no | (link->pmp << 8));
		ahci_transmit_led_message(ap, port_led_state, 4);
	} else {
		link->flags |= ATA_LFLAG_SW_ACTIVITY;
		if (val == BLINK_OFF) {
			/* set LED to ON for idle */
			port_led_state &= EM_MSG_LED_VALUE_OFF;
			port_led_state |= (ap->port_no | (link->pmp << 8));
			port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
			ahci_transmit_led_message(ap, port_led_state, 4);
		}
	}
	emp->blink_policy = val;
	return 0;
}

static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];

	/* display the saved value of activity behavior for this
	 * disk.
	 */
	return sprintf(buf, "%d\n", emp->blink_policy);
}

static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
			   int port_no, void __iomem *mmio,
			   void __iomem *port_mmio)
{
	const char *emsg = NULL;
	int rc;
	u32 tmp;

	/* make sure port is not active */
	rc = ahci_deinit_port(ap, &emsg);
	if (rc)
		dev_printk(KERN_WARNING, &pdev->dev,
			   "%s (%d)\n", emsg, rc);

	/* clear SError */
	tmp = readl(port_mmio + PORT_SCR_ERR);
	VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
	writel(tmp, port_mmio + PORT_SCR_ERR);

	/* clear port IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
	if (tmp)
		writel(tmp, port_mmio + PORT_IRQ_STAT);

	writel(1 << port_no, mmio + HOST_IRQ_STAT);
}

static void ahci_init_controller(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	int i;
	void __iomem *port_mmio;
	u32 tmp;
	int mv;

	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
		if (pdev->device == 0x6121)
			mv = 2;
		else
			mv = 4;
		port_mmio = __ahci_port_base(host, mv);

		writel(0, port_mmio + PORT_IRQ_MASK);

		/* clear port IRQ */
		tmp = readl(port_mmio + PORT_IRQ_STAT);
		VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
		if (tmp)
			writel(tmp, port_mmio + PORT_IRQ_STAT);
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		port_mmio = ahci_port_base(ap);
		if (ata_port_is_dummy(ap))
			continue;

		ahci_port_init(pdev, ap, i, mmio, port_mmio);
	}

	tmp = readl(mmio + HOST_CTL);
	VPRINTK("HOST_CTL 0x%x\n", tmp);
	writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
	tmp = readl(mmio + HOST_CTL);
	VPRINTK("HOST_CTL 0x%x\n", tmp);
}

static void ahci_dev_config(struct ata_device *dev)
{
	struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;

	if (hpriv->flags & AHCI_HFLAG_SECT255) {
		dev->max_sectors = 255;
		ata_dev_printk(dev, KERN_INFO,
			       "SB600 AHCI: limiting to 255 sectors per cmd\n");
	}
}

static unsigned int ahci_dev_classify(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_taskfile tf;
	u32 tmp;

	tmp = readl(port_mmio + PORT_SIG);
	tf.lbah = (tmp >> 24) & 0xff;
	tf.lbam = (tmp >> 16) & 0xff;
	tf.lbal = (tmp >> 8) & 0xff;
	tf.nsect = (tmp) & 0xff;

	return ata_dev_classify(&tf);
}

static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
			       u32 opts)
{
	dma_addr_t cmd_tbl_dma;

	cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;

	pp->cmd_slot[tag].opts = cpu_to_le32(opts);
	pp->cmd_slot[tag].status = 0;
	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
}

static int ahci_kick_engine(struct ata_port *ap, int force_restart)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
	u32 tmp;
	int busy, rc;

	/* do we need to kick the port? */
	busy = status & (ATA_BUSY | ATA_DRQ);
	if (!busy && !force_restart)
		return 0;

	/* stop engine */
	rc = ahci_stop_engine(ap);
	if (rc)
		goto out_restart;

	/* need to do CLO? */
	if (!busy) {
		rc = 0;
		goto out_restart;
	}

	if (!(hpriv->cap & HOST_CAP_CLO)) {
		rc = -EOPNOTSUPP;
		goto out_restart;
	}

	/* perform CLO */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_CLO;
	writel(tmp, port_mmio + PORT_CMD);

	rc = 0;
	tmp = ata_wait_register(port_mmio + PORT_CMD,
				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
	if (tmp & PORT_CMD_CLO)
		rc = -EIO;

	/* restart engine */
 out_restart:
	ahci_start_engine(ap);
	return rc;
}

static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
				struct ata_taskfile *tf, int is_cmd, u16 flags,
				unsigned long timeout_msec)
{
	const u32 cmd_fis_len = 5; /* five dwords */
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u8 *fis = pp->cmd_tbl;
	u32 tmp;

	/* prep the command */
	ata_tf_to_fis(tf, pmp, is_cmd, fis);
	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));

	/* issue & wait */
	writel(1, port_mmio + PORT_CMD_ISSUE);

	if (timeout_msec) {
		tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
					1, timeout_msec);
		if (tmp & 0x1) {
			ahci_kick_engine(ap, 1);
			return -EBUSY;
		}
	} else
		readl(port_mmio + PORT_CMD_ISSUE);	/* flush */

	return 0;
}

static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
			     int pmp, unsigned long deadline,
			     int (*check_ready)(struct ata_link *link))
{
	struct ata_port *ap = link->ap;
	const char *reason = NULL;
	unsigned long now, msecs;
	struct ata_taskfile tf;
	int rc;

	DPRINTK("ENTER\n");

	/* prepare for SRST (AHCI-1.1 10.4.1) */
	rc = ahci_kick_engine(ap, 1);
	if (rc && rc != -EOPNOTSUPP)
		ata_link_printk(link, KERN_WARNING,
				"failed to reset engine (errno=%d)\n", rc);

	ata_tf_init(link->device, &tf);

	/* issue the first D2H Register FIS */
	msecs = 0;
	now = jiffies;
	if (time_after(deadline, now))
		msecs = jiffies_to_msecs(deadline - now);

	tf.ctl |= ATA_SRST;
	if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
				 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
		rc = -EIO;
		reason = "1st FIS failed";
		goto fail;
	}

	/* spec says at least 5us, but be generous and sleep for 1ms */
	msleep(1);

	/* issue the second D2H Register FIS */
	tf.ctl &= ~ATA_SRST;
	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);

	/* wait for link to become ready */
	rc = ata_wait_after_reset(link, deadline, check_ready);
	/* link occupied, -ENODEV too is an error */
	if (rc) {
		reason = "device not ready";
		goto fail;
	}
	*class = ahci_dev_classify(ap);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;

 fail:
	ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
	return rc;
}

static int ahci_check_ready(struct ata_link *link)
{
	void __iomem *port_mmio = ahci_port_base(link->ap);
	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;

	return ata_check_ready(status);
}

static int ahci_softreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	int pmp = sata_srst_pmp(link);

	DPRINTK("ENTER\n");

	return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
}

static int ahci_sb600_check_ready(struct ata_link *link)
{
	void __iomem *port_mmio = ahci_port_base(link->ap);
	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
	u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);

	/*
	 * There is no need to check TFDATA if BAD PMP is found due to HW bug,
	 * which can save timeout delay.
	 */
	if (irq_status & PORT_IRQ_BAD_PMP)
		return -EIO;

	return ata_check_ready(status);
}

static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	int pmp = sata_srst_pmp(link);
	int rc;
	u32 irq_sts;

	DPRINTK("ENTER\n");

	rc = ahci_do_softreset(link, class, pmp, deadline,
			       ahci_sb600_check_ready);

	/*
	 * Soft reset fails on some ATI chips with IPMS set when PMP
	 * is enabled but SATA HDD/ODD is connected to SATA port,
	 * do soft reset again to port 0.
	 */
	if (rc == -EIO) {
		irq_sts = readl(port_mmio + PORT_IRQ_STAT);
		if (irq_sts & PORT_IRQ_BAD_PMP) {
			ata_link_printk(link, KERN_WARNING,
					"failed due to HW bug, retry pmp=0\n");
			rc = ahci_do_softreset(link, class, 0, deadline,
					       ahci_check_ready);
		}
	}

	return rc;
}

static int ahci_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = 0x80;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	rc = sata_link_hardreset(link, timing, deadline, &online,
				 ahci_check_ready);

	ahci_start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
	return rc;
}

static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
				 unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	bool online;
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(ap);

	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
				 deadline, &online, NULL);

	ahci_start_engine(ap);

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);

	/* vt8251 doesn't clear BSY on signature FIS reception,
	 * request follow-up softreset.
	 */
	return online ? -EAGAIN : rc;
}

-EAGAIN : rc; 1834 } 1835 1836 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, 1837 unsigned long deadline) 1838 { 1839 struct ata_port *ap = link->ap; 1840 struct ahci_port_priv *pp = ap->private_data; 1841 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; 1842 struct ata_taskfile tf; 1843 bool online; 1844 int rc; 1845 1846 ahci_stop_engine(ap); 1847 1848 /* clear D2H reception area to properly wait for D2H FIS */ 1849 ata_tf_init(link->device, &tf); 1850 tf.command = 0x80; 1851 ata_tf_to_fis(&tf, 0, 0, d2h_fis); 1852 1853 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), 1854 deadline, &online, NULL); 1855 1856 ahci_start_engine(ap); 1857 1858 /* The pseudo configuration device on SIMG4726 attached to 1859 * ASUS P5W-DH Deluxe doesn't send signature FIS after 1860 * hardreset if no device is attached to the first downstream 1861 * port && the pseudo device locks up on SRST w/ PMP==0. To 1862 * work around this, wait for !BSY only briefly. If BSY isn't 1863 * cleared, perform CLO and proceed to IDENTIFY (achieved by 1864 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA). 1865 * 1866 * Wait for two seconds. Devices attached to downstream port 1867 * which can't process the following IDENTIFY after this will 1868 * have to be reset again. For most cases, this should 1869 * suffice while making probing snappish enough. 1870 */ 1871 if (online) { 1872 rc = ata_wait_after_reset(link, jiffies + 2 * HZ, 1873 ahci_check_ready); 1874 if (rc) 1875 ahci_kick_engine(ap, 0); 1876 } 1877 return rc; 1878 } 1879 1880 static void ahci_postreset(struct ata_link *link, unsigned int *class) 1881 { 1882 struct ata_port *ap = link->ap; 1883 void __iomem *port_mmio = ahci_port_base(ap); 1884 u32 new_tmp, tmp; 1885 1886 ata_std_postreset(link, class); 1887 1888 /* Make sure port's ATAPI bit is set appropriately */ 1889 new_tmp = tmp = readl(port_mmio + PORT_CMD); 1890 if (*class == ATA_DEV_ATAPI) 1891 new_tmp |= PORT_CMD_ATAPI; 1892 else 1893 new_tmp &= ~PORT_CMD_ATAPI; 1894 if (new_tmp != tmp) { 1895 writel(new_tmp, port_mmio + PORT_CMD); 1896 readl(port_mmio + PORT_CMD); /* flush */ 1897 } 1898 } 1899 1900 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl) 1901 { 1902 struct scatterlist *sg; 1903 struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ; 1904 unsigned int si; 1905 1906 VPRINTK("ENTER\n"); 1907 1908 /* 1909 * Next, the S/G list. 1910 */ 1911 for_each_sg(qc->sg, sg, qc->n_elem, si) { 1912 dma_addr_t addr = sg_dma_address(sg); 1913 u32 sg_len = sg_dma_len(sg); 1914 1915 ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff); 1916 ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16); 1917 ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1); 1918 } 1919 1920 return si; 1921 } 1922 1923 static void ahci_qc_prep(struct ata_queued_cmd *qc) 1924 { 1925 struct ata_port *ap = qc->ap; 1926 struct ahci_port_priv *pp = ap->private_data; 1927 int is_atapi = ata_is_atapi(qc->tf.protocol); 1928 void *cmd_tbl; 1929 u32 opts; 1930 const u32 cmd_fis_len = 5; /* five dwords */ 1931 unsigned int n_elem; 1932 1933 /* 1934 * Fill in command table information. First, the header, 1935 * a SATA Register - Host to Device command FIS. 
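* The H2D Register FIS sits at the start of the command table; the ATAPI CDB area begins at offset AHCI_CMD_TBL_CDB and the scatter/gather entries built by ahci_fill_sg() begin at offset AHCI_CMD_TBL_HDR_SZ.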
1936 */ 1937 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ; 1938 1939 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl); 1940 if (is_atapi) { 1941 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32); 1942 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len); 1943 } 1944 1945 n_elem = 0; 1946 if (qc->flags & ATA_QCFLAG_DMAMAP) 1947 n_elem = ahci_fill_sg(qc, cmd_tbl); 1948 1949 /* 1950 * Fill in command slot information. 1951 */ 1952 opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12); 1953 if (qc->tf.flags & ATA_TFLAG_WRITE) 1954 opts |= AHCI_CMD_WRITE; 1955 if (is_atapi) 1956 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; 1957 1958 ahci_fill_cmd_slot(pp, qc->tag, opts); 1959 } 1960 1961 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat) 1962 { 1963 struct ahci_host_priv *hpriv = ap->host->private_data; 1964 struct ahci_port_priv *pp = ap->private_data; 1965 struct ata_eh_info *host_ehi = &ap->link.eh_info; 1966 struct ata_link *link = NULL; 1967 struct ata_queued_cmd *active_qc; 1968 struct ata_eh_info *active_ehi; 1969 u32 serror; 1970 1971 /* determine active link */ 1972 ata_for_each_link(link, ap, EDGE) 1973 if (ata_link_active(link)) 1974 break; 1975 if (!link) 1976 link = &ap->link; 1977 1978 active_qc = ata_qc_from_tag(ap, link->active_tag); 1979 active_ehi = &link->eh_info; 1980 1981 /* record irq stat */ 1982 ata_ehi_clear_desc(host_ehi); 1983 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat); 1984 1985 /* AHCI needs SError cleared; otherwise, it might lock up */ 1986 ahci_scr_read(&ap->link, SCR_ERROR, &serror); 1987 ahci_scr_write(&ap->link, SCR_ERROR, serror); 1988 host_ehi->serror |= serror; 1989 1990 /* some controllers set IRQ_IF_ERR on device errors, ignore it */ 1991 if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR) 1992 irq_stat &= ~PORT_IRQ_IF_ERR; 1993 1994 if (irq_stat & PORT_IRQ_TF_ERR) { 1995 /* If qc is active, charge it; otherwise, the active 1996 * link. There's no active qc on NCQ errors. It will 1997 * be determined by EH by reading log page 10h. 1998 */ 1999 if (active_qc) 2000 active_qc->err_mask |= AC_ERR_DEV; 2001 else 2002 active_ehi->err_mask |= AC_ERR_DEV; 2003 2004 if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL) 2005 host_ehi->serror &= ~SERR_INTERNAL; 2006 } 2007 2008 if (irq_stat & PORT_IRQ_UNK_FIS) { 2009 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK); 2010 2011 active_ehi->err_mask |= AC_ERR_HSM; 2012 active_ehi->action |= ATA_EH_RESET; 2013 ata_ehi_push_desc(active_ehi, 2014 "unknown FIS %08x %08x %08x %08x" , 2015 unk[0], unk[1], unk[2], unk[3]); 2016 } 2017 2018 if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) { 2019 active_ehi->err_mask |= AC_ERR_HSM; 2020 active_ehi->action |= ATA_EH_RESET; 2021 ata_ehi_push_desc(active_ehi, "incorrect PMP"); 2022 } 2023 2024 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) { 2025 host_ehi->err_mask |= AC_ERR_HOST_BUS; 2026 host_ehi->action |= ATA_EH_RESET; 2027 ata_ehi_push_desc(host_ehi, "host bus error"); 2028 } 2029 2030 if (irq_stat & PORT_IRQ_IF_ERR) { 2031 host_ehi->err_mask |= AC_ERR_ATA_BUS; 2032 host_ehi->action |= ATA_EH_RESET; 2033 ata_ehi_push_desc(host_ehi, "interface fatal error"); 2034 } 2035 2036 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) { 2037 ata_ehi_hotplugged(host_ehi); 2038 ata_ehi_push_desc(host_ehi, "%s", 2039 irq_stat & PORT_IRQ_CONNECT ? 
2040 "connection status changed" : "PHY RDY changed"); 2041 } 2042 2043 /* okay, let's hand over to EH */ 2044 2045 if (irq_stat & PORT_IRQ_FREEZE) 2046 ata_port_freeze(ap); 2047 else 2048 ata_port_abort(ap); 2049 } 2050 2051 static void ahci_port_intr(struct ata_port *ap) 2052 { 2053 void __iomem *port_mmio = ahci_port_base(ap); 2054 struct ata_eh_info *ehi = &ap->link.eh_info; 2055 struct ahci_port_priv *pp = ap->private_data; 2056 struct ahci_host_priv *hpriv = ap->host->private_data; 2057 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING); 2058 u32 status, qc_active; 2059 int rc; 2060 2061 status = readl(port_mmio + PORT_IRQ_STAT); 2062 writel(status, port_mmio + PORT_IRQ_STAT); 2063 2064 /* ignore BAD_PMP while resetting */ 2065 if (unlikely(resetting)) 2066 status &= ~PORT_IRQ_BAD_PMP; 2067 2068 /* If hotplug is disabled and we are getting PhyRdy, this is 2069 * just a power state change; clear the PhyRdy bit here, 2070 * plus the PhyRdy/Comm 2071 * Wake bits from SError 2072 */ 2073 if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) && 2074 (status & PORT_IRQ_PHYRDY)) { 2075 status &= ~PORT_IRQ_PHYRDY; 2076 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18))); 2077 } 2078 2079 if (unlikely(status & PORT_IRQ_ERROR)) { 2080 ahci_error_intr(ap, status); 2081 return; 2082 } 2083 2084 if (status & PORT_IRQ_SDB_FIS) { 2085 /* If SNotification is available, leave notification 2086 * handling to sata_async_notification(). If not, 2087 * emulate it by snooping SDB FIS RX area. 2088 * 2089 * Snooping the FIS RX area is probably cheaper than 2090 * poking SNotification, but some controllers which 2091 * implement SNotification, ICH9 for example, don't 2092 * store the AN SDB FIS in the receive area. 2093 */ 2094 if (hpriv->cap & HOST_CAP_SNTF) 2095 sata_async_notification(ap); 2096 else { 2097 /* If the 'N' bit in word 0 of the FIS is set, 2098 * we just received asynchronous notification. 2099 * Tell libata about it. 2100 */ 2101 const __le32 *f = pp->rx_fis + RX_FIS_SDB; 2102 u32 f0 = le32_to_cpu(f[0]); 2103 2104 if (f0 & (1 << 15)) 2105 sata_async_notification(ap); 2106 } 2107 } 2108 2109 /* pp->active_link is valid iff any command is in flight */ 2110 if (ap->qc_active && pp->active_link->sactive) 2111 qc_active = readl(port_mmio + PORT_SCR_ACT); 2112 else 2113 qc_active = readl(port_mmio + PORT_CMD_ISSUE); 2114 2115 rc = ata_qc_complete_multiple(ap, qc_active); 2116 2117 /* while resetting, invalid completions are expected */ 2118 if (unlikely(rc < 0 && !resetting)) { 2119 ehi->err_mask |= AC_ERR_HSM; 2120 ehi->action |= ATA_EH_RESET; 2121 ata_port_freeze(ap); 2122 } 2123 } 2124 2125 static irqreturn_t ahci_interrupt(int irq, void *dev_instance) 2126 { 2127 struct ata_host *host = dev_instance; 2128 struct ahci_host_priv *hpriv; 2129 unsigned int i, handled = 0; 2130 void __iomem *mmio; 2131 u32 irq_stat, irq_masked; 2132 2133 VPRINTK("ENTER\n"); 2134 2135 hpriv = host->private_data; 2136 mmio = host->iomap[AHCI_PCI_BAR]; 2137 2138 /* sigh.
0xffffffff is a valid return from h/w */ 2139 irq_stat = readl(mmio + HOST_IRQ_STAT); 2140 if (!irq_stat) 2141 return IRQ_NONE; 2142 2143 irq_masked = irq_stat & hpriv->port_map; 2144 2145 spin_lock(&host->lock); 2146 2147 for (i = 0; i < host->n_ports; i++) { 2148 struct ata_port *ap; 2149 2150 if (!(irq_masked & (1 << i))) 2151 continue; 2152 2153 ap = host->ports[i]; 2154 if (ap) { 2155 ahci_port_intr(ap); 2156 VPRINTK("port %u\n", i); 2157 } else { 2158 VPRINTK("port %u (no irq)\n", i); 2159 if (ata_ratelimit()) 2160 dev_printk(KERN_WARNING, host->dev, 2161 "interrupt on disabled port %u\n", i); 2162 } 2163 2164 handled = 1; 2165 } 2166 2167 /* HOST_IRQ_STAT behaves as level triggered latch meaning that 2168 * it should be cleared after all the port events are cleared; 2169 * otherwise, it will raise a spurious interrupt after each 2170 * valid one. Please read section 10.6.2 of ahci 1.1 for more 2171 * information. 2172 * 2173 * Also, use the unmasked value to clear interrupt as spurious 2174 * pending event on a dummy port might cause screaming IRQ. 2175 */ 2176 writel(irq_stat, mmio + HOST_IRQ_STAT); 2177 2178 spin_unlock(&host->lock); 2179 2180 VPRINTK("EXIT\n"); 2181 2182 return IRQ_RETVAL(handled); 2183 } 2184 2185 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) 2186 { 2187 struct ata_port *ap = qc->ap; 2188 void __iomem *port_mmio = ahci_port_base(ap); 2189 struct ahci_port_priv *pp = ap->private_data; 2190 2191 /* Keep track of the currently active link. It will be used 2192 * in completion path to determine whether NCQ phase is in 2193 * progress. 2194 */ 2195 pp->active_link = qc->dev->link; 2196 2197 if (qc->tf.protocol == ATA_PROT_NCQ) 2198 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT); 2199 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE); 2200 2201 ahci_sw_activity(qc->dev->link); 2202 2203 return 0; 2204 } 2205 2206 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc) 2207 { 2208 struct ahci_port_priv *pp = qc->ap->private_data; 2209 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; 2210 2211 ata_tf_from_fis(d2h_fis, &qc->result_tf); 2212 return true; 2213 } 2214 2215 static void ahci_freeze(struct ata_port *ap) 2216 { 2217 void __iomem *port_mmio = ahci_port_base(ap); 2218 2219 /* turn IRQ off */ 2220 writel(0, port_mmio + PORT_IRQ_MASK); 2221 } 2222 2223 static void ahci_thaw(struct ata_port *ap) 2224 { 2225 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR]; 2226 void __iomem *port_mmio = ahci_port_base(ap); 2227 u32 tmp; 2228 struct ahci_port_priv *pp = ap->private_data; 2229 2230 /* clear IRQ */ 2231 tmp = readl(port_mmio + PORT_IRQ_STAT); 2232 writel(tmp, port_mmio + PORT_IRQ_STAT); 2233 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT); 2234 2235 /* turn IRQ back on */ 2236 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); 2237 } 2238 2239 static void ahci_error_handler(struct ata_port *ap) 2240 { 2241 if (!(ap->pflags & ATA_PFLAG_FROZEN)) { 2242 /* restart engine */ 2243 ahci_stop_engine(ap); 2244 ahci_start_engine(ap); 2245 } 2246 2247 sata_pmp_error_handler(ap); 2248 } 2249 2250 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc) 2251 { 2252 struct ata_port *ap = qc->ap; 2253 2254 /* make DMA engine forget about the failed command */ 2255 if (qc->flags & ATA_QCFLAG_FAILED) 2256 ahci_kick_engine(ap, 1); 2257 } 2258 2259 static void ahci_pmp_attach(struct ata_port *ap) 2260 { 2261 void __iomem *port_mmio = ahci_port_base(ap); 2262 struct ahci_port_priv *pp = ap->private_data; 2263 u32 cmd; 2264 2265 cmd = readl(port_mmio + PORT_CMD); 2266 cmd |= 
PORT_CMD_PMP; 2267 writel(cmd, port_mmio + PORT_CMD); 2268 2269 pp->intr_mask |= PORT_IRQ_BAD_PMP; 2270 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); 2271 } 2272 2273 static void ahci_pmp_detach(struct ata_port *ap) 2274 { 2275 void __iomem *port_mmio = ahci_port_base(ap); 2276 struct ahci_port_priv *pp = ap->private_data; 2277 u32 cmd; 2278 2279 cmd = readl(port_mmio + PORT_CMD); 2280 cmd &= ~PORT_CMD_PMP; 2281 writel(cmd, port_mmio + PORT_CMD); 2282 2283 pp->intr_mask &= ~PORT_IRQ_BAD_PMP; 2284 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); 2285 } 2286 2287 static int ahci_port_resume(struct ata_port *ap) 2288 { 2289 ahci_power_up(ap); 2290 ahci_start_port(ap); 2291 2292 if (sata_pmp_attached(ap)) 2293 ahci_pmp_attach(ap); 2294 else 2295 ahci_pmp_detach(ap); 2296 2297 return 0; 2298 } 2299 2300 #ifdef CONFIG_PM 2301 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg) 2302 { 2303 const char *emsg = NULL; 2304 int rc; 2305 2306 rc = ahci_deinit_port(ap, &emsg); 2307 if (rc == 0) 2308 ahci_power_down(ap); 2309 else { 2310 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc); 2311 ahci_start_port(ap); 2312 } 2313 2314 return rc; 2315 } 2316 2317 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 2318 { 2319 struct ata_host *host = dev_get_drvdata(&pdev->dev); 2320 struct ahci_host_priv *hpriv = host->private_data; 2321 void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; 2322 u32 ctl; 2323 2324 if (mesg.event & PM_EVENT_SUSPEND && 2325 hpriv->flags & AHCI_HFLAG_NO_SUSPEND) { 2326 dev_printk(KERN_ERR, &pdev->dev, 2327 "BIOS update required for suspend/resume\n"); 2328 return -EIO; 2329 } 2330 2331 if (mesg.event & PM_EVENT_SLEEP) { 2332 /* AHCI spec rev1.1 section 8.3.3: 2333 * Software must disable interrupts prior to requesting a 2334 * transition of the HBA to D3 state. 
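* Interrupts are re-enabled on the resume path via ahci_reset_controller()/ahci_init_controller().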
2335 */ 2336 ctl = readl(mmio + HOST_CTL); 2337 ctl &= ~HOST_IRQ_EN; 2338 writel(ctl, mmio + HOST_CTL); 2339 readl(mmio + HOST_CTL); /* flush */ 2340 } 2341 2342 return ata_pci_device_suspend(pdev, mesg); 2343 } 2344 2345 static int ahci_pci_device_resume(struct pci_dev *pdev) 2346 { 2347 struct ata_host *host = dev_get_drvdata(&pdev->dev); 2348 int rc; 2349 2350 rc = ata_pci_device_do_resume(pdev); 2351 if (rc) 2352 return rc; 2353 2354 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { 2355 rc = ahci_reset_controller(host); 2356 if (rc) 2357 return rc; 2358 2359 ahci_init_controller(host); 2360 } 2361 2362 ata_host_resume(host); 2363 2364 return 0; 2365 } 2366 #endif 2367 2368 static int ahci_port_start(struct ata_port *ap) 2369 { 2370 struct device *dev = ap->host->dev; 2371 struct ahci_port_priv *pp; 2372 void *mem; 2373 dma_addr_t mem_dma; 2374 2375 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 2376 if (!pp) 2377 return -ENOMEM; 2378 2379 mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, 2380 GFP_KERNEL); 2381 if (!mem) 2382 return -ENOMEM; 2383 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ); 2384 2385 /* 2386 * First item in chunk of DMA memory: 32-slot command table, 2387 * 32 bytes each in size 2388 */ 2389 pp->cmd_slot = mem; 2390 pp->cmd_slot_dma = mem_dma; 2391 2392 mem += AHCI_CMD_SLOT_SZ; 2393 mem_dma += AHCI_CMD_SLOT_SZ; 2394 2395 /* 2396 * Second item: Received-FIS area 2397 */ 2398 pp->rx_fis = mem; 2399 pp->rx_fis_dma = mem_dma; 2400 2401 mem += AHCI_RX_FIS_SZ; 2402 mem_dma += AHCI_RX_FIS_SZ; 2403 2404 /* 2405 * Third item: data area for storing a single command 2406 * and its scatter-gather table 2407 */ 2408 pp->cmd_tbl = mem; 2409 pp->cmd_tbl_dma = mem_dma; 2410 2411 /* 2412 * Save off initial list of interrupts to be enabled. 
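* The mask is written to PORT_IRQ_MASK when the port is thawed (see ahci_thaw()).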
2413 * This could be changed later 2414 */ 2415 pp->intr_mask = DEF_PORT_IRQ; 2416 2417 ap->private_data = pp; 2418 2419 /* engage engines, captain */ 2420 return ahci_port_resume(ap); 2421 } 2422 2423 static void ahci_port_stop(struct ata_port *ap) 2424 { 2425 const char *emsg = NULL; 2426 int rc; 2427 2428 /* de-initialize port */ 2429 rc = ahci_deinit_port(ap, &emsg); 2430 if (rc) 2431 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc); 2432 } 2433 2434 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac) 2435 { 2436 int rc; 2437 2438 if (using_dac && 2439 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 2440 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 2441 if (rc) { 2442 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 2443 if (rc) { 2444 dev_printk(KERN_ERR, &pdev->dev, 2445 "64-bit DMA enable failed\n"); 2446 return rc; 2447 } 2448 } 2449 } else { 2450 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2451 if (rc) { 2452 dev_printk(KERN_ERR, &pdev->dev, 2453 "32-bit DMA enable failed\n"); 2454 return rc; 2455 } 2456 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 2457 if (rc) { 2458 dev_printk(KERN_ERR, &pdev->dev, 2459 "32-bit consistent DMA enable failed\n"); 2460 return rc; 2461 } 2462 } 2463 return 0; 2464 } 2465 2466 static void ahci_print_info(struct ata_host *host) 2467 { 2468 struct ahci_host_priv *hpriv = host->private_data; 2469 struct pci_dev *pdev = to_pci_dev(host->dev); 2470 void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; 2471 u32 vers, cap, impl, speed; 2472 const char *speed_s; 2473 u16 cc; 2474 const char *scc_s; 2475 2476 vers = readl(mmio + HOST_VERSION); 2477 cap = hpriv->cap; 2478 impl = hpriv->port_map; 2479 2480 speed = (cap >> 20) & 0xf; 2481 if (speed == 1) 2482 speed_s = "1.5"; 2483 else if (speed == 2) 2484 speed_s = "3"; 2485 else if (speed == 3) 2486 speed_s = "6"; 2487 else 2488 speed_s = "?"; 2489 2490 pci_read_config_word(pdev, 0x0a, &cc); 2491 if (cc == PCI_CLASS_STORAGE_IDE) 2492 scc_s = "IDE"; 2493 else if (cc == PCI_CLASS_STORAGE_SATA) 2494 scc_s = "SATA"; 2495 else if (cc == PCI_CLASS_STORAGE_RAID) 2496 scc_s = "RAID"; 2497 else 2498 scc_s = "unknown"; 2499 2500 dev_printk(KERN_INFO, &pdev->dev, 2501 "AHCI %02x%02x.%02x%02x " 2502 "%u slots %u ports %s Gbps 0x%x impl %s mode\n" 2503 , 2504 2505 (vers >> 24) & 0xff, 2506 (vers >> 16) & 0xff, 2507 (vers >> 8) & 0xff, 2508 vers & 0xff, 2509 2510 ((cap >> 8) & 0x1f) + 1, 2511 (cap & 0x1f) + 1, 2512 speed_s, 2513 impl, 2514 scc_s); 2515 2516 dev_printk(KERN_INFO, &pdev->dev, 2517 "flags: " 2518 "%s%s%s%s%s%s%s" 2519 "%s%s%s%s%s%s%s" 2520 "%s\n" 2521 , 2522 2523 cap & (1 << 31) ? "64bit " : "", 2524 cap & (1 << 30) ? "ncq " : "", 2525 cap & (1 << 29) ? "sntf " : "", 2526 cap & (1 << 28) ? "ilck " : "", 2527 cap & (1 << 27) ? "stag " : "", 2528 cap & (1 << 26) ? "pm " : "", 2529 cap & (1 << 25) ? "led " : "", 2530 2531 cap & (1 << 24) ? "clo " : "", 2532 cap & (1 << 19) ? "nz " : "", 2533 cap & (1 << 18) ? "only " : "", 2534 cap & (1 << 17) ? "pmp " : "", 2535 cap & (1 << 15) ? "pio " : "", 2536 cap & (1 << 14) ? "slum " : "", 2537 cap & (1 << 13) ? "part " : "", 2538 cap & (1 << 6) ? "ems ": "" 2539 ); 2540 } 2541 2542 /* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is 2543 * hardwired to on-board SIMG 4726. 
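(the SIMG 4726 is a Silicon Image SteelVine-family storage processor / port multiplier chip)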
The chipset is ICH8, which doesn't 2544 * support PMP; the 4726 either directly exports the device 2545 * attached to its first downstream port or acts as a hardware storage 2546 * controller and emulates a single ATA device (which can be RAID 0/1 or some 2547 * other configuration). 2548 * 2549 * When there's no device attached to the first downstream port of the 2550 * 4726, "Config Disk" appears, which is a pseudo ATA device used to 2551 * configure the 4726. However, ATA emulation of the device is very 2552 * poor. It doesn't send a signature D2H Reg FIS after the initial 2553 * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues. 2554 * 2555 * The following function works around the problem by always using 2556 * hardreset on the port and not depending on receiving a signature FIS 2557 * afterward. If a signature FIS isn't received soon, the ATA class is 2558 * assumed without a follow-up softreset. 2559 */ 2560 static void ahci_p5wdh_workaround(struct ata_host *host) 2561 { 2562 static struct dmi_system_id sysids[] = { 2563 { 2564 .ident = "P5W DH Deluxe", 2565 .matches = { 2566 DMI_MATCH(DMI_SYS_VENDOR, 2567 "ASUSTEK COMPUTER INC"), 2568 DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"), 2569 }, 2570 }, 2571 { } 2572 }; 2573 struct pci_dev *pdev = to_pci_dev(host->dev); 2574 2575 if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) && 2576 dmi_check_system(sysids)) { 2577 struct ata_port *ap = host->ports[1]; 2578 2579 dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH " 2580 "Deluxe on-board SIMG4726 workaround\n"); 2581 2582 ap->ops = &ahci_p5wdh_ops; 2583 ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA; 2584 } 2585 } 2586 2587 /* 2588 * The SB600 AHCI controller on the ASUS M2A-VM can't do 64bit DMA with an 2589 * older BIOS. The oldest version known to be broken is 0901; the oldest known 2590 * to work is 1501, which was released on 2007-10-26. Force 32bit DMA on 2591 * anything older than 1501. Please read bko#9412 for more info. 2592 */ 2593 static bool ahci_asus_m2a_vm_32bit_only(struct pci_dev *pdev) 2594 { 2595 static const struct dmi_system_id sysids[] = { 2596 { 2597 .ident = "ASUS M2A-VM", 2598 .matches = { 2599 DMI_MATCH(DMI_BOARD_VENDOR, 2600 "ASUSTeK Computer INC."), 2601 DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"), 2602 }, 2603 }, 2604 { } 2605 }; 2606 const char *cutoff_mmdd = "10/26"; 2607 const char *date; 2608 int year; 2609 2610 if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) || 2611 !dmi_check_system(sysids)) 2612 return false; 2613 2614 /* 2615 * Argh.... both version and date are free-form strings. 2616 * Let's hope they're using the same date format across 2617 * different versions.
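* The check below assumes an mm/dd/yyyy date format ('/' at offsets 2 and 5).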
2618 */ 2619 date = dmi_get_system_info(DMI_BIOS_DATE); 2620 year = dmi_get_year(DMI_BIOS_DATE); 2621 if (date && strlen(date) >= 10 && date[2] == '/' && date[5] == '/' && 2622 (year > 2007 || 2623 (year == 2007 && strncmp(date, cutoff_mmdd, 5) >= 0))) 2624 return false; 2625 2626 dev_printk(KERN_WARNING, &pdev->dev, "ASUS M2A-VM: BIOS too old, " 2627 "forcing 32bit DMA, update BIOS\n"); 2628 2629 return true; 2630 } 2631 2632 static bool ahci_broken_system_poweroff(struct pci_dev *pdev) 2633 { 2634 static const struct dmi_system_id broken_systems[] = { 2635 { 2636 .ident = "HP Compaq nx6310", 2637 .matches = { 2638 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 2639 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"), 2640 }, 2641 /* PCI slot number of the controller */ 2642 .driver_data = (void *)0x1FUL, 2643 }, 2644 { 2645 .ident = "HP Compaq 6720s", 2646 .matches = { 2647 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 2648 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"), 2649 }, 2650 /* PCI slot number of the controller */ 2651 .driver_data = (void *)0x1FUL, 2652 }, 2653 2654 { } /* terminate list */ 2655 }; 2656 const struct dmi_system_id *dmi = dmi_first_match(broken_systems); 2657 2658 if (dmi) { 2659 unsigned long slot = (unsigned long)dmi->driver_data; 2660 /* apply the quirk only to on-board controllers */ 2661 return slot == PCI_SLOT(pdev->devfn); 2662 } 2663 2664 return false; 2665 } 2666 2667 static bool ahci_broken_suspend(struct pci_dev *pdev) 2668 { 2669 static const struct dmi_system_id sysids[] = { 2670 /* 2671 * On HP dv[4-6] and HDX18 with earlier BIOSen, link 2672 * to the harddisk doesn't become online after 2673 * resuming from STR. Warn and fail suspend. 2674 */ 2675 { 2676 .ident = "dv4", 2677 .matches = { 2678 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 2679 DMI_MATCH(DMI_PRODUCT_NAME, 2680 "HP Pavilion dv4 Notebook PC"), 2681 }, 2682 .driver_data = "F.30", /* cutoff BIOS version */ 2683 }, 2684 { 2685 .ident = "dv5", 2686 .matches = { 2687 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 2688 DMI_MATCH(DMI_PRODUCT_NAME, 2689 "HP Pavilion dv5 Notebook PC"), 2690 }, 2691 .driver_data = "F.16", /* cutoff BIOS version */ 2692 }, 2693 { 2694 .ident = "dv6", 2695 .matches = { 2696 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 2697 DMI_MATCH(DMI_PRODUCT_NAME, 2698 "HP Pavilion dv6 Notebook PC"), 2699 }, 2700 .driver_data = "F.21", /* cutoff BIOS version */ 2701 }, 2702 { 2703 .ident = "HDX18", 2704 .matches = { 2705 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 2706 DMI_MATCH(DMI_PRODUCT_NAME, 2707 "HP HDX18 Notebook PC"), 2708 }, 2709 .driver_data = "F.23", /* cutoff BIOS version */ 2710 }, 2711 { } /* terminate list */ 2712 }; 2713 const struct dmi_system_id *dmi = dmi_first_match(sysids); 2714 const char *ver; 2715 2716 if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2)) 2717 return false; 2718 2719 ver = dmi_get_system_info(DMI_BIOS_VERSION); 2720 2721 return !ver || strcmp(ver, dmi->driver_data) < 0; 2722 } 2723 2724 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 2725 { 2726 static int printed_version; 2727 unsigned int board_id = ent->driver_data; 2728 struct ata_port_info pi = ahci_port_info[board_id]; 2729 const struct ata_port_info *ppi[] = { &pi, NULL }; 2730 struct device *dev = &pdev->dev; 2731 struct ahci_host_priv *hpriv; 2732 struct ata_host *host; 2733 int n_ports, i, rc; 2734 2735 VPRINTK("ENTER\n"); 2736 2737 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS); 2738 2739 if (!printed_version++) 2740 dev_printk(KERN_DEBUG, 
&pdev->dev, "version " DRV_VERSION "\n"); 2741 2742 /* The AHCI driver can only drive the SATA ports; the PATA driver 2743 can drive them all, so if both drivers are selected make sure 2744 AHCI stays out of the way */ 2745 if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable) 2746 return -ENODEV; 2747 2748 /* acquire resources */ 2749 rc = pcim_enable_device(pdev); 2750 if (rc) 2751 return rc; 2752 2753 /* AHCI controllers often implement an SFF-compatible interface. 2754 * Grab all PCI BARs just in case. 2755 */ 2756 rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME); 2757 if (rc == -EBUSY) 2758 pcim_pin_device(pdev); 2759 if (rc) 2760 return rc; 2761 2762 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 2763 (pdev->device == 0x2652 || pdev->device == 0x2653)) { 2764 u8 map; 2765 2766 /* ICH6s share the same PCI ID for both piix and ahci 2767 * modes. Enabling ahci mode while MAP indicates 2768 * combined mode is a bad idea. Yield to ata_piix. 2769 */ 2770 pci_read_config_byte(pdev, ICH_MAP, &map); 2771 if (map & 0x3) { 2772 dev_printk(KERN_INFO, &pdev->dev, "controller is in " 2773 "combined mode, can't enable AHCI mode\n"); 2774 return -ENODEV; 2775 } 2776 } 2777 2778 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); 2779 if (!hpriv) 2780 return -ENOMEM; 2781 hpriv->flags |= (unsigned long)pi.private_data; 2782 2783 /* MCP65 revisions A1 and A2 can't do MSI */ 2784 if (board_id == board_ahci_mcp65 && 2785 (pdev->revision == 0xa1 || pdev->revision == 0xa2)) 2786 hpriv->flags |= AHCI_HFLAG_NO_MSI; 2787 2788 /* SB800 does NOT need the workaround to ignore SERR_INTERNAL */ 2789 if (board_id == board_ahci_sb700 && pdev->revision >= 0x40) 2790 hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL; 2791 2792 /* apply ASUS M2A-VM quirk */ 2793 if (ahci_asus_m2a_vm_32bit_only(pdev)) 2794 hpriv->flags |= AHCI_HFLAG_32BIT_ONLY; 2795 2796 if (!(hpriv->flags & AHCI_HFLAG_NO_MSI)) 2797 pci_enable_msi(pdev); 2798 2799 /* save initial config */ 2800 ahci_save_initial_config(pdev, hpriv); 2801 2802 /* prepare host */ 2803 if (hpriv->cap & HOST_CAP_NCQ) 2804 pi.flags |= ATA_FLAG_NCQ; 2805 2806 if (hpriv->cap & HOST_CAP_PMP) 2807 pi.flags |= ATA_FLAG_PMP; 2808 2809 if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) { 2810 u8 messages; 2811 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR]; 2812 u32 em_loc = readl(mmio + HOST_EM_LOC); 2813 u32 em_ctl = readl(mmio + HOST_EM_CTL); 2814 2815 messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16; 2816 2817 /* we only support the LED message type right now */ 2818 if ((messages & 0x01) && (ahci_em_messages == 1)) { 2819 /* store em_loc */ 2820 hpriv->em_loc = ((em_loc >> 16) * 4); 2821 pi.flags |= ATA_FLAG_EM; 2822 if (!(em_ctl & EM_CTL_ALHD)) 2823 pi.flags |= ATA_FLAG_SW_ACTIVITY; 2824 } 2825 } 2826 2827 if (ahci_broken_system_poweroff(pdev)) { 2828 pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN; 2829 dev_info(&pdev->dev, 2830 "quirky BIOS, skipping spindown on poweroff\n"); 2831 } 2832 2833 if (ahci_broken_suspend(pdev)) { 2834 hpriv->flags |= AHCI_HFLAG_NO_SUSPEND; 2835 dev_printk(KERN_WARNING, &pdev->dev, 2836 "BIOS update required for suspend/resume\n"); 2837 } 2838 2839 /* CAP.NP sometimes indicates the index of the last enabled 2840 * port, at other times that of the last possible port, so 2841 * determining the maximum port number requires looking at 2842 * both CAP.NP and port_map.
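* Use whichever of the two is larger as the number of ports to allocate.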
2843 */ 2844 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map)); 2845 2846 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); 2847 if (!host) 2848 return -ENOMEM; 2849 host->iomap = pcim_iomap_table(pdev); 2850 host->private_data = hpriv; 2851 2852 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) 2853 host->flags |= ATA_HOST_PARALLEL_SCAN; 2854 else 2855 printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n"); 2856 2857 if (pi.flags & ATA_FLAG_EM) 2858 ahci_reset_em(host); 2859 2860 for (i = 0; i < host->n_ports; i++) { 2861 struct ata_port *ap = host->ports[i]; 2862 2863 ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar"); 2864 ata_port_pbar_desc(ap, AHCI_PCI_BAR, 2865 0x100 + ap->port_no * 0x80, "port"); 2866 2867 /* set initial link pm policy */ 2868 ap->pm_policy = NOT_AVAILABLE; 2869 2870 /* set enclosure management message type */ 2871 if (ap->flags & ATA_FLAG_EM) 2872 ap->em_message_type = ahci_em_messages; 2873 2874 2875 /* disabled/not-implemented port */ 2876 if (!(hpriv->port_map & (1 << i))) 2877 ap->ops = &ata_dummy_port_ops; 2878 } 2879 2880 /* apply workaround for ASUS P5W DH Deluxe mainboard */ 2881 ahci_p5wdh_workaround(host); 2882 2883 /* initialize adapter */ 2884 rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64); 2885 if (rc) 2886 return rc; 2887 2888 rc = ahci_reset_controller(host); 2889 if (rc) 2890 return rc; 2891 2892 ahci_init_controller(host); 2893 ahci_print_info(host); 2894 2895 pci_set_master(pdev); 2896 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED, 2897 &ahci_sht); 2898 } 2899 2900 static int __init ahci_init(void) 2901 { 2902 return pci_register_driver(&ahci_pci_driver); 2903 } 2904 2905 static void __exit ahci_exit(void) 2906 { 2907 pci_unregister_driver(&ahci_pci_driver); 2908 } 2909 2910 2911 MODULE_AUTHOR("Jeff Garzik"); 2912 MODULE_DESCRIPTION("AHCI SATA low-level driver"); 2913 MODULE_LICENSE("GPL"); 2914 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl); 2915 MODULE_VERSION(DRV_VERSION); 2916 2917 module_init(ahci_init); 2918 module_exit(ahci_exit); 2919