/*
 * ahci.c - AHCI SATA support
 *
 * Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		   Please ALWAYS copy linux-ide@vger.kernel.org
 *		   on emails.
 *
 * Copyright 2004-2005 Red Hat, Inc.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * AHCI hardware documentation:
 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"ahci"
#define DRV_VERSION	"3.0"

static int ahci_skip_host_reset;
module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");

static int ahci_enable_alpm(struct ata_port *ap,
		enum link_pm policy);
static void ahci_disable_alpm(struct ata_port *ap);

enum {
	AHCI_PCI_BAR = 5,
	AHCI_MAX_PORTS = 32,
	AHCI_MAX_SG = 168, /* hardware max is 64K */
	AHCI_DMA_BOUNDARY = 0xffffffff,
	AHCI_MAX_CMDS = 32,
	AHCI_CMD_SZ = 32,
	AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
	AHCI_RX_FIS_SZ = 256,
	AHCI_CMD_TBL_CDB = 0x40,
	AHCI_CMD_TBL_HDR_SZ = 0x80,
	AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
	AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
	AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
				AHCI_RX_FIS_SZ,
	AHCI_IRQ_ON_SG = (1 << 31),
	AHCI_CMD_ATAPI = (1 << 5),
	AHCI_CMD_WRITE = (1 << 6),
	AHCI_CMD_PREFETCH = (1 << 7),
	AHCI_CMD_RESET = (1 << 8),
	AHCI_CMD_CLR_BUSY = (1 << 10),

	RX_FIS_D2H_REG = 0x40,	/* offset of D2H Register FIS data */
	RX_FIS_SDB = 0x58,	/* offset of SDB FIS data */
	RX_FIS_UNK = 0x60,	/* offset of Unknown FIS data */

	board_ahci = 0,
	board_ahci_vt8251 = 1,
	board_ahci_ign_iferr = 2,
	board_ahci_sb600 = 3,
	board_ahci_mv = 4,
	board_ahci_sb700 = 5,

	/* global controller registers */
	HOST_CAP = 0x00,	/* host capabilities */
	HOST_CTL = 0x04,	/* global host control */
	HOST_IRQ_STAT = 0x08,	/* interrupt status */
	HOST_PORTS_IMPL = 0x0c,	/* bitmap of implemented ports */
	HOST_VERSION = 0x10,	/* AHCI spec. version compliancy */

	/* HOST_CTL bits */
	HOST_RESET = (1 << 0),		/* reset controller; self-clear */
	HOST_IRQ_EN = (1 << 1),		/* global IRQ enable */
	HOST_AHCI_EN = (1 << 31),	/* AHCI enabled */

	/* HOST_CAP bits */
	HOST_CAP_SSC = (1 << 14),	/* Slumber capable */
	HOST_CAP_PMP = (1 << 17),	/* Port Multiplier support */
	HOST_CAP_CLO = (1 << 24),	/* Command List Override support */
	HOST_CAP_ALPM = (1 << 26),	/* Aggressive Link PM support */
	HOST_CAP_SSS = (1 << 27),	/* Staggered Spin-up */
	HOST_CAP_SNTF = (1 << 29),	/* SNotification register */
	HOST_CAP_NCQ = (1 << 30),	/* Native Command Queueing */
	HOST_CAP_64 = (1 << 31),	/* PCI DAC (64-bit DMA) support */

	/* registers for each SATA port */
	PORT_LST_ADDR = 0x00,		/* command list DMA addr */
	PORT_LST_ADDR_HI = 0x04,	/* command list DMA addr hi */
	PORT_FIS_ADDR = 0x08,		/* FIS rx buf addr */
	PORT_FIS_ADDR_HI = 0x0c,	/* FIS rx buf addr hi */
	PORT_IRQ_STAT = 0x10,		/* interrupt status */
	PORT_IRQ_MASK = 0x14,		/* interrupt enable/disable mask */
	PORT_CMD = 0x18,		/* port command */
	PORT_TFDATA = 0x20,		/* taskfile data */
	PORT_SIG = 0x24,		/* device TF signature */
	PORT_CMD_ISSUE = 0x38,		/* command issue */
	PORT_SCR_STAT = 0x28,		/* SATA phy register: SStatus */
	PORT_SCR_CTL = 0x2c,		/* SATA phy register: SControl */
	PORT_SCR_ERR = 0x30,		/* SATA phy register: SError */
	PORT_SCR_ACT = 0x34,		/* SATA phy register: SActive */
	PORT_SCR_NTF = 0x3c,		/* SATA phy register: SNotification */

	/* PORT_IRQ_{STAT,MASK} bits */
	PORT_IRQ_COLD_PRES = (1 << 31),		/* cold presence detect */
	PORT_IRQ_TF_ERR = (1 << 30),		/* task file error */
	PORT_IRQ_HBUS_ERR = (1 << 29),		/* host bus fatal error */
	PORT_IRQ_HBUS_DATA_ERR = (1 << 28),	/* host bus data error */
	PORT_IRQ_IF_ERR = (1 << 27),		/* interface fatal error */
	PORT_IRQ_IF_NONFATAL = (1 << 26),	/* interface non-fatal error */
	PORT_IRQ_OVERFLOW = (1 << 24),		/* xfer exhausted available S/G */
	PORT_IRQ_BAD_PMP = (1 << 23),		/* incorrect port multiplier */

	PORT_IRQ_PHYRDY = (1 << 22),		/* PhyRdy changed */
	PORT_IRQ_DEV_ILCK = (1 << 7),		/* device interlock */
	PORT_IRQ_CONNECT = (1 << 6),		/* port connect change status */
	PORT_IRQ_SG_DONE = (1 << 5),		/* descriptor processed */
	PORT_IRQ_UNK_FIS = (1 << 4),		/* unknown FIS rx'd */
	PORT_IRQ_SDB_FIS = (1 << 3),		/* Set Device Bits FIS rx'd */
	PORT_IRQ_DMAS_FIS = (1 << 2),		/* DMA Setup FIS rx'd */
	PORT_IRQ_PIOS_FIS = (1 << 1),		/* PIO Setup FIS rx'd */
	PORT_IRQ_D2H_REG_FIS = (1 << 0),	/* D2H Register FIS rx'd */

	PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
			  PORT_IRQ_IF_ERR |
			  PORT_IRQ_CONNECT |
			  PORT_IRQ_PHYRDY |
			  PORT_IRQ_UNK_FIS |
			  PORT_IRQ_BAD_PMP,
	PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
			 PORT_IRQ_TF_ERR |
			 PORT_IRQ_HBUS_DATA_ERR,
	DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
		       PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
		       PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,

	/* PORT_CMD bits */
	PORT_CMD_ASP = (1 << 27),	/* Aggressive Slumber/Partial */
	PORT_CMD_ALPE = (1 << 26),	/* Aggressive Link PM enable */
	PORT_CMD_ATAPI = (1 << 24),	/* Device is ATAPI */
	PORT_CMD_PMP = (1 << 17),	/* PMP attached */
	PORT_CMD_LIST_ON = (1 << 15),	/* cmd list DMA engine running */
	PORT_CMD_FIS_ON = (1 << 14),	/* FIS DMA engine running */
	PORT_CMD_FIS_RX = (1 << 4),	/* Enable FIS receive DMA engine */
	PORT_CMD_CLO = (1 << 3),	/* Command list override */
	PORT_CMD_POWER_ON = (1 << 2),	/* Power up device */
	PORT_CMD_SPIN_UP = (1 << 1),	/* Spin up device */
	PORT_CMD_START = (1 << 0),	/* Enable port DMA engine */

	PORT_CMD_ICC_MASK = (0xf << 28),	/* i/f ICC state mask */
	PORT_CMD_ICC_ACTIVE = (0x1 << 28),	/* Put i/f in active state */
	PORT_CMD_ICC_PARTIAL = (0x2 << 28),	/* Put i/f in partial state */
	PORT_CMD_ICC_SLUMBER = (0x6 << 28),	/* Put i/f in slumber state */

	/* hpriv->flags bits */
	AHCI_HFLAG_NO_NCQ = (1 << 0),
	AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1),	/* ignore IRQ_IF_ERR */
	AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2),/* ignore SERR_INTERNAL */
	AHCI_HFLAG_32BIT_ONLY = (1 << 3),	/* force 32bit */
	AHCI_HFLAG_MV_PATA = (1 << 4),		/* PATA port */
	AHCI_HFLAG_NO_MSI = (1 << 5),		/* no PCI MSI */
	AHCI_HFLAG_NO_PMP = (1 << 6),		/* no PMP */
	AHCI_HFLAG_NO_HOTPLUG = (1 << 7),	/* ignore PxSERR.DIAG.N */
	AHCI_HFLAG_SECT255 = (1 << 8),		/* max 255 sectors */

	/* ap->flags bits */

	AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			   ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
			   ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
			   ATA_FLAG_IPM,

	ICH_MAP = 0x90,		/* ICH MAP register */
};

struct ahci_cmd_hdr {
	__le32 opts;
	__le32 status;
	__le32 tbl_addr;
	__le32 tbl_addr_hi;
	__le32 reserved[4];
};

struct ahci_sg {
	__le32 addr;
	__le32 addr_hi;
	__le32 reserved;
	__le32 flags_size;
};

struct ahci_host_priv {
	unsigned int flags;	/* AHCI_HFLAG_* */
	u32 cap;		/* cap to use */
	u32 port_map;		/* port map to use */
	u32 saved_cap;		/* saved initial cap */
	u32 saved_port_map;	/* saved initial port_map */
};

struct ahci_port_priv {
	struct ata_link *active_link;
	struct ahci_cmd_hdr *cmd_slot;
	dma_addr_t cmd_slot_dma;
	void *cmd_tbl;
	dma_addr_t cmd_tbl_dma;
	void *rx_fis;
	dma_addr_t rx_fis_dma;
	/* for NCQ spurious interrupt analysis */
	unsigned int ncq_saw_d2h:1;
	unsigned int ncq_saw_dmas:1;
	unsigned int ncq_saw_sdb:1;
	u32 intr_mask;		/* interrupts to enable */
};

static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
static void ahci_qc_prep(struct ata_queued_cmd *qc);
static void ahci_freeze(struct ata_port *ap);
static void ahci_thaw(struct ata_port *ap);
static void ahci_pmp_attach(struct ata_port *ap);
static void ahci_pmp_detach(struct ata_port *ap);
static int ahci_softreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline);
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
				 unsigned long deadline);
static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline);
static void ahci_postreset(struct ata_link *link, unsigned int *class);
static void ahci_error_handler(struct ata_port *ap);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
static int ahci_port_resume(struct ata_port *ap);
static void ahci_dev_config(struct ata_device *dev);
static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl);
static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
			       u32 opts);
#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int ahci_pci_device_resume(struct pci_dev *pdev);
#endif

static struct device_attribute *ahci_shost_attrs[] = {
	&dev_attr_link_power_management_policy,
	NULL
};

static struct scsi_host_template ahci_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue = AHCI_MAX_CMDS - 1,
	.sg_tablesize = AHCI_MAX_SG,
	.dma_boundary = AHCI_DMA_BOUNDARY,
	.shost_attrs = ahci_shost_attrs,
};

static struct ata_port_operations ahci_ops = {
	.inherits = &sata_pmp_port_ops,

	.qc_defer = sata_pmp_qc_defer_cmd_switch,
	.qc_prep = ahci_qc_prep,
	.qc_issue = ahci_qc_issue,
	.qc_fill_rtf = ahci_qc_fill_rtf,

	.freeze = ahci_freeze,
	.thaw = ahci_thaw,
	.softreset = ahci_softreset,
	.hardreset = ahci_hardreset,
	.postreset = ahci_postreset,
	.pmp_softreset = ahci_softreset,
	.error_handler = ahci_error_handler,
	.post_internal_cmd = ahci_post_internal_cmd,
	.dev_config = ahci_dev_config,

	.scr_read = ahci_scr_read,
	.scr_write = ahci_scr_write,
	.pmp_attach = ahci_pmp_attach,
	.pmp_detach = ahci_pmp_detach,

	.enable_pm = ahci_enable_alpm,
	.disable_pm = ahci_disable_alpm,
#ifdef CONFIG_PM
	.port_suspend = ahci_port_suspend,
	.port_resume = ahci_port_resume,
#endif
	.port_start = ahci_port_start,
	.port_stop = ahci_port_stop,
};

static struct ata_port_operations ahci_vt8251_ops = {
	.inherits = &ahci_ops,
	.hardreset = ahci_vt8251_hardreset,
};

static struct ata_port_operations ahci_p5wdh_ops = {
	.inherits = &ahci_ops,
	.hardreset = ahci_p5wdh_hardreset,
};

#define AHCI_HFLAGS(flags)	.private_data = (void *)(flags)

static const struct ata_port_info ahci_port_info[] = {
	/* board_ahci */
	{
		.flags = AHCI_FLAG_COMMON,
		.pio_mask = 0x1f, /* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &ahci_ops,
	},
	/* board_ahci_vt8251 */
	{
		AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
		.flags = AHCI_FLAG_COMMON,
		.pio_mask = 0x1f, /* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &ahci_vt8251_ops,
	},
	/* board_ahci_ign_iferr */
	{
		AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
		.flags = AHCI_FLAG_COMMON,
		.pio_mask = 0x1f, /* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &ahci_ops,
	},
	/* board_ahci_sb600 */
	{
		AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
			     AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
			     AHCI_HFLAG_SECT255 | AHCI_HFLAG_NO_PMP),
		.flags = AHCI_FLAG_COMMON,
		.pio_mask = 0x1f, /* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &ahci_ops,
	},
	/* board_ahci_mv */
	{
		AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
			     AHCI_HFLAG_MV_PATA),
		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
		.pio_mask = 0x1f, /* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &ahci_ops,
	},
	/* board_ahci_sb700 */
	{
		AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
			     AHCI_HFLAG_NO_PMP),
		.flags = AHCI_FLAG_COMMON,
		.pio_mask = 0x1f, /* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &ahci_ops,
	},
};

static const struct pci_device_id ahci_pci_tbl[] = {
	/* Intel */
	{ PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
	{ PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
	{ PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
	{ PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
	{ PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
	{ PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
	{ PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
	{ PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
	{ PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
	{ PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
	{ PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
	{ PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
	{ PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
	{ PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
	{ PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
	{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
	{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
	{ PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */

	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },

	/* ATI */
	{ PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
	{ PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
	{ PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
	{ PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
	{ PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
	{ PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
	{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */

	/* VIA */
	{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
	{ PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */

	/* NVIDIA */
	{ PCI_VDEVICE(NVIDIA, 0x044c), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x044d), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x044e), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x044f), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045c), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x0550), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0551), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0552), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0553), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0554), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0555), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0556), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0557), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0558), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0559), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x055a), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x055b), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0bc8), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bc9), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bca), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bcb), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bcc), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bcd), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bce), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bcf), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bd0), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bd1), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bd2), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bd3), board_ahci }, /* MCP7B */

	/* SiS */
	{ PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
	{ PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 966 */
	{ PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */

	/* Marvell */
	{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
	{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */

	/* Generic, PCI class code for AHCI */
	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },

	{ }	/* terminate list */
};


static struct pci_driver ahci_pci_driver = {
	.name = DRV_NAME,
	.id_table = ahci_pci_tbl,
	.probe = ahci_init_one,
	.remove = ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend = ahci_pci_device_suspend,
	.resume = ahci_pci_device_resume,
#endif
};


static inline int ahci_nr_ports(u32 cap)
{
	return (cap & 0x1f) + 1;
}

static inline void __iomem *__ahci_port_base(struct ata_host *host,
					     unsigned int port_no)
{
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];

	return mmio + 0x100 + (port_no * 0x80);
}

static inline void __iomem *ahci_port_base(struct ata_port *ap)
{
	return __ahci_port_base(ap->host, ap->port_no);
}

static void ahci_enable_ahci(void __iomem *mmio)
{
	int i;
	u32 tmp;

	/* turn on AHCI_EN */
	tmp = readl(mmio + HOST_CTL);
	if (tmp & HOST_AHCI_EN)
		return;

	/* Some controllers need AHCI_EN to be written multiple times.
	 * Try a few times before giving up.
	 */
	for (i = 0; i < 5; i++) {
		tmp |= HOST_AHCI_EN;
		writel(tmp, mmio + HOST_CTL);
		tmp = readl(mmio + HOST_CTL);	/* flush && sanity check */
		if (tmp & HOST_AHCI_EN)
			return;
		msleep(10);
	}

	WARN_ON(1);
}

/**
 * ahci_save_initial_config - Save and fixup initial config values
 * @pdev: target PCI device
 * @hpriv: host private area to store config values
 *
 * Some registers containing configuration info might be setup by
 * BIOS and might be cleared on reset.  This function saves the
 * initial values of those registers into @hpriv such that they
 * can be restored after controller reset.
 *
 * If inconsistent, config values are fixed up by this function.
 *
 * LOCKING:
 * None.
 */
static void ahci_save_initial_config(struct pci_dev *pdev,
				     struct ahci_host_priv *hpriv)
{
	void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
	u32 cap, port_map;
	int i;
	int mv;

	/* make sure AHCI mode is enabled before accessing CAP */
	ahci_enable_ahci(mmio);

	/* Values prefixed with saved_ are written back to host after
	 * reset.  Values without are used for driver operation.
	 */
	hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
	hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);

	/* some chips have errata preventing 64bit use */
	if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
		dev_printk(KERN_INFO, &pdev->dev,
			   "controller can't do 64bit DMA, forcing 32bit\n");
		cap &= ~HOST_CAP_64;
	}

	if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
		dev_printk(KERN_INFO, &pdev->dev,
			   "controller can't do NCQ, turning off CAP_NCQ\n");
		cap &= ~HOST_CAP_NCQ;
	}

	if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
		dev_printk(KERN_INFO, &pdev->dev,
			   "controller can't do PMP, turning off CAP_PMP\n");
		cap &= ~HOST_CAP_PMP;
	}

	/*
	 * Temporary Marvell 6145 hack: PATA port presence
	 * is asserted through the standard AHCI port
	 * presence register, as bit 4 (counting from 0)
	 */
	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
		if (pdev->device == 0x6121)
			mv = 0x3;
		else
			mv = 0xf;
		dev_printk(KERN_ERR, &pdev->dev,
			   "MV_AHCI HACK: port_map %x -> %x\n",
			   port_map,
			   port_map & mv);

		port_map &= mv;
	}

	/* cross check port_map and cap.n_ports */
	if (port_map) {
		int map_ports = 0;

		for (i = 0; i < AHCI_MAX_PORTS; i++)
			if (port_map & (1 << i))
				map_ports++;

		/* If PI has more ports than n_ports, whine, clear
		 * port_map and let it be generated from n_ports.
		 */
		if (map_ports > ahci_nr_ports(cap)) {
			dev_printk(KERN_WARNING, &pdev->dev,
				   "implemented port map (0x%x) contains more "
				   "ports than nr_ports (%u), using nr_ports\n",
				   port_map, ahci_nr_ports(cap));
			port_map = 0;
		}
	}

	/* fabricate port_map from cap.nr_ports */
	if (!port_map) {
		port_map = (1 << ahci_nr_ports(cap)) - 1;
		dev_printk(KERN_WARNING, &pdev->dev,
			   "forcing PORTS_IMPL to 0x%x\n", port_map);

		/* write the fixed up value to the PI register */
		hpriv->saved_port_map = port_map;
	}

	/* record values to use during operation */
	hpriv->cap = cap;
	hpriv->port_map = port_map;
}

/**
 * ahci_restore_initial_config - Restore initial config
 * @host: target ATA host
 *
 * Restore initial config stored by ahci_save_initial_config().
 *
 * LOCKING:
 * None.
 */
static void ahci_restore_initial_config(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];

	writel(hpriv->saved_cap, mmio + HOST_CAP);
	writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
	(void) readl(mmio + HOST_PORTS_IMPL);	/* flush */
}

static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
{
	static const int offset[] = {
		[SCR_STATUS]		= PORT_SCR_STAT,
		[SCR_CONTROL]		= PORT_SCR_CTL,
		[SCR_ERROR]		= PORT_SCR_ERR,
		[SCR_ACTIVE]		= PORT_SCR_ACT,
		[SCR_NOTIFICATION]	= PORT_SCR_NTF,
	};
	struct ahci_host_priv *hpriv = ap->host->private_data;

	if (sc_reg < ARRAY_SIZE(offset) &&
	    (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
		return offset[sc_reg];
	return 0;
}

static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	int offset = ahci_scr_offset(ap, sc_reg);

	if (offset) {
		*val = readl(port_mmio + offset);
		return 0;
	}
	return -EINVAL;
}

static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	int offset = ahci_scr_offset(ap, sc_reg);

	if (offset) {
		writel(val, port_mmio + offset);
		return 0;
	}
	return -EINVAL;
}

static void ahci_start_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	/* start DMA */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);
	readl(port_mmio + PORT_CMD); /* flush */
}

static int ahci_stop_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	tmp = readl(port_mmio + PORT_CMD);

	/* check if the HBA is idle */
	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
		return 0;

	/* setting HBA to idle */
	tmp &= ~PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for engine to stop.  This could be as long as 500 msec */
	tmp = ata_wait_register(port_mmio + PORT_CMD,
				PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
	if (tmp & PORT_CMD_LIST_ON)
		return -EIO;

	return 0;
}

static void ahci_start_fis_rx(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	u32 tmp;

	/* set FIS registers */
	if (hpriv->cap & HOST_CAP_64)
		writel((pp->cmd_slot_dma >> 16) >> 16,
		       port_mmio + PORT_LST_ADDR_HI);
	writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);

	if (hpriv->cap & HOST_CAP_64)
		writel((pp->rx_fis_dma >> 16) >> 16,
		       port_mmio + PORT_FIS_ADDR_HI);
	writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);

	/* enable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* flush */
	readl(port_mmio + PORT_CMD);
}

static int ahci_stop_fis_rx(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	/* disable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp &= ~PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for completion, spec says 500ms, give it 1000 */
	tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
				PORT_CMD_FIS_ON, 10, 1000);
	if (tmp & PORT_CMD_FIS_ON)
		return -EBUSY;

	return 0;
}

static void ahci_power_up(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;

	/* spin up device */
	if (hpriv->cap & HOST_CAP_SSS) {
		cmd |= PORT_CMD_SPIN_UP;
		writel(cmd, port_mmio + PORT_CMD);
	}

	/* wake up link */
	writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
}

static void ahci_disable_alpm(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;
	struct ahci_port_priv *pp = ap->private_data;

	/* IPM bits should be disabled by libata-core */
	/* get the existing command bits */
	cmd = readl(port_mmio + PORT_CMD);

	/* disable ALPM and ASP */
	cmd &= ~PORT_CMD_ASP;
	cmd &= ~PORT_CMD_ALPE;

	/* force the interface back to active */
	cmd |= PORT_CMD_ICC_ACTIVE;

	/* write out new cmd value */
	writel(cmd, port_mmio + PORT_CMD);
	cmd = readl(port_mmio + PORT_CMD);

	/* wait 10ms to be sure we've come out of any low power state */
	msleep(10);

	/* clear out any PhyRdy stuff from interrupt status */
	writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);

	/* go ahead and clean out PhyRdy Change from Serror too */
	ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));

	/*
	 * Clear flag to indicate that we should ignore all PhyRdy
	 * state changes
	 */
	hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;

	/*
	 * Enable interrupts on Phy Ready.
	 */
	pp->intr_mask |= PORT_IRQ_PHYRDY;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);

	/*
	 * don't change the link pm policy - we can be called
	 * just to turn off link pm temporarily
	 */
}

static int ahci_enable_alpm(struct ata_port *ap,
	enum link_pm policy)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;
	struct ahci_port_priv *pp = ap->private_data;
	u32 asp;

	/* Make sure the host is capable of link power management */
	if (!(hpriv->cap & HOST_CAP_ALPM))
		return -EINVAL;

	switch (policy) {
	case MAX_PERFORMANCE:
	case NOT_AVAILABLE:
		/*
		 * if we came here with NOT_AVAILABLE,
		 * it just means this is the first time we
		 * have tried to enable - default to max performance,
		 * and let the user go to lower power modes on request.
		 */
		ahci_disable_alpm(ap);
		return 0;
	case MIN_POWER:
		/* configure HBA to enter SLUMBER */
		asp = PORT_CMD_ASP;
		break;
	case MEDIUM_POWER:
		/* configure HBA to enter PARTIAL */
		asp = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Disable interrupts on Phy Ready. This keeps us from
	 * getting woken up due to spurious phy ready interrupts
	 * TBD - Hot plug should be done via polling now, is
	 * that even supported?
	 */
	pp->intr_mask &= ~PORT_IRQ_PHYRDY;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);

	/*
	 * Set a flag to indicate that we should ignore all PhyRdy
	 * state changes since these can happen now whenever we
	 * change link state
	 */
	hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;

	/* get the existing command bits */
	cmd = readl(port_mmio + PORT_CMD);

	/*
	 * Set ASP based on Policy
	 */
	cmd |= asp;

	/*
	 * Setting this bit will instruct the HBA to aggressively
	 * enter a lower power link state when it's appropriate and
	 * based on the value set above for ASP
	 */
	cmd |= PORT_CMD_ALPE;

	/* write out new cmd value */
	writel(cmd, port_mmio + PORT_CMD);
	cmd = readl(port_mmio + PORT_CMD);

	/* IPM bits should be set by libata-core */
	return 0;
}

#ifdef CONFIG_PM
static void ahci_power_down(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd, scontrol;

	if (!(hpriv->cap & HOST_CAP_SSS))
		return;

	/* put device into listen mode, first set PxSCTL.DET to 0 */
	scontrol = readl(port_mmio + PORT_SCR_CTL);
	scontrol &= ~0xf;
	writel(scontrol, port_mmio + PORT_SCR_CTL);

	/* then set PxCMD.SUD to 0 */
	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
	cmd &= ~PORT_CMD_SPIN_UP;
	writel(cmd, port_mmio + PORT_CMD);
}
#endif

static void ahci_start_port(struct ata_port *ap)
{
	/* enable FIS reception */
	ahci_start_fis_rx(ap);

	/* enable DMA */
	ahci_start_engine(ap);
}

static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
{
	int rc;

	/* disable DMA */
	rc = ahci_stop_engine(ap);
	if (rc) {
		*emsg = "failed to stop engine";
		return rc;
	}

	/* disable FIS reception */
	rc = ahci_stop_fis_rx(ap);
	if (rc) {
		*emsg = "failed stop FIS RX";
		return rc;
	}

	return 0;
}

static int ahci_reset_controller(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	u32 tmp;

	/* we must be in AHCI mode, before using anything
	 * AHCI-specific, such as HOST_RESET.
	 */
	ahci_enable_ahci(mmio);

	/* global controller reset */
	if (!ahci_skip_host_reset) {
		tmp = readl(mmio + HOST_CTL);
		if ((tmp & HOST_RESET) == 0) {
			writel(tmp | HOST_RESET, mmio + HOST_CTL);
			readl(mmio + HOST_CTL); /* flush */
		}

		/* reset must complete within 1 second, or
		 * the hardware should be considered fried.
		 */
		ssleep(1);

		tmp = readl(mmio + HOST_CTL);
		if (tmp & HOST_RESET) {
			dev_printk(KERN_ERR, host->dev,
				   "controller reset failed (0x%x)\n", tmp);
			return -EIO;
		}

		/* turn on AHCI mode */
		ahci_enable_ahci(mmio);

		/* Some registers might be cleared on reset.  Restore
		 * initial values.
		 */
		ahci_restore_initial_config(host);
	} else
		dev_printk(KERN_INFO, host->dev,
			   "skipping global host reset\n");

	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
		u16 tmp16;

		/* configure PCS */
		pci_read_config_word(pdev, 0x92, &tmp16);
		if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
			tmp16 |= hpriv->port_map;
			pci_write_config_word(pdev, 0x92, tmp16);
		}
	}

	return 0;
}

static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
			   int port_no, void __iomem *mmio,
			   void __iomem *port_mmio)
{
	const char *emsg = NULL;
	int rc;
	u32 tmp;

	/* make sure port is not active */
	rc = ahci_deinit_port(ap, &emsg);
	if (rc)
		dev_printk(KERN_WARNING, &pdev->dev,
			   "%s (%d)\n", emsg, rc);

	/* clear SError */
	tmp = readl(port_mmio + PORT_SCR_ERR);
	VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
	writel(tmp, port_mmio + PORT_SCR_ERR);

	/* clear port IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
	if (tmp)
		writel(tmp, port_mmio + PORT_IRQ_STAT);

	writel(1 << port_no, mmio + HOST_IRQ_STAT);
}

static void ahci_init_controller(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	int i;
	void __iomem *port_mmio;
	u32 tmp;
	int mv;

	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
		if (pdev->device == 0x6121)
			mv = 2;
		else
			mv = 4;
		port_mmio = __ahci_port_base(host, mv);

		writel(0, port_mmio + PORT_IRQ_MASK);

		/* clear port IRQ */
		tmp = readl(port_mmio + PORT_IRQ_STAT);
		VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
		if (tmp)
			writel(tmp, port_mmio + PORT_IRQ_STAT);
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		port_mmio = ahci_port_base(ap);
		if (ata_port_is_dummy(ap))
			continue;

		ahci_port_init(pdev, ap, i, mmio, port_mmio);
	}

	tmp = readl(mmio + HOST_CTL);
	VPRINTK("HOST_CTL 0x%x\n", tmp);
	writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
	tmp = readl(mmio + HOST_CTL);
	VPRINTK("HOST_CTL 0x%x\n", tmp);
}

static void ahci_dev_config(struct ata_device *dev)
{
	struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;

	if (hpriv->flags & AHCI_HFLAG_SECT255) {
		dev->max_sectors = 255;
		ata_dev_printk(dev, KERN_INFO,
			       "SB600 AHCI: limiting to 255 sectors per cmd\n");
	}
}

static unsigned int ahci_dev_classify(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_taskfile tf;
	u32 tmp;

	tmp = readl(port_mmio + PORT_SIG);
	tf.lbah = (tmp >> 24) & 0xff;
	tf.lbam = (tmp >> 16) & 0xff;
	tf.lbal = (tmp >> 8) & 0xff;
	tf.nsect = (tmp) & 0xff;

	return ata_dev_classify(&tf);
}

static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
			       u32 opts)
{
	dma_addr_t cmd_tbl_dma;

	cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;

	pp->cmd_slot[tag].opts = cpu_to_le32(opts);
	pp->cmd_slot[tag].status = 0;
	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
}

static int ahci_kick_engine(struct ata_port *ap, int force_restart)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
	u32 tmp;
	int busy, rc;

	/* do we need to kick the port? */
	busy = status & (ATA_BUSY | ATA_DRQ);
	if (!busy && !force_restart)
		return 0;

	/* stop engine */
	rc = ahci_stop_engine(ap);
	if (rc)
		goto out_restart;

	/* need to do CLO? */
	if (!busy) {
		rc = 0;
		goto out_restart;
	}

	if (!(hpriv->cap & HOST_CAP_CLO)) {
		rc = -EOPNOTSUPP;
		goto out_restart;
	}

	/* perform CLO */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_CLO;
	writel(tmp, port_mmio + PORT_CMD);

	rc = 0;
	tmp = ata_wait_register(port_mmio + PORT_CMD,
				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
	if (tmp & PORT_CMD_CLO)
		rc = -EIO;

	/* restart engine */
 out_restart:
	ahci_start_engine(ap);
	return rc;
}

static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
				struct ata_taskfile *tf, int is_cmd, u16 flags,
				unsigned long timeout_msec)
{
	const u32 cmd_fis_len = 5; /* five dwords */
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u8 *fis = pp->cmd_tbl;
	u32 tmp;

	/* prep the command */
	ata_tf_to_fis(tf, pmp, is_cmd, fis);
	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));

	/* issue & wait */
	writel(1, port_mmio + PORT_CMD_ISSUE);

	if (timeout_msec) {
		tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
					1, timeout_msec);
		if (tmp & 0x1) {
			ahci_kick_engine(ap, 1);
			return -EBUSY;
		}
	} else
		readl(port_mmio + PORT_CMD_ISSUE);	/* flush */

	return 0;
}

static int ahci_check_ready(struct ata_link *link)
{
	void __iomem *port_mmio = ahci_port_base(link->ap);
	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;

	return ata_check_ready(status);
}

static int ahci_softreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	int pmp = sata_srst_pmp(link);
	const char *reason = NULL;
	unsigned long now, msecs;
	struct ata_taskfile tf;
	int rc;

	DPRINTK("ENTER\n");

	/* prepare for SRST (AHCI-1.1 10.4.1) */
	rc = ahci_kick_engine(ap, 1);
	if (rc && rc != -EOPNOTSUPP)
		ata_link_printk(link, KERN_WARNING,
				"failed to reset engine (errno=%d)\n", rc);

	ata_tf_init(link->device, &tf);

	/* issue the first D2H Register FIS */
	msecs = 0;
	now = jiffies;
	if (time_after(deadline, now))
		msecs = jiffies_to_msecs(deadline - now);

	tf.ctl |= ATA_SRST;
	if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
				 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
		rc = -EIO;
		reason = "1st FIS failed";
		goto fail;
	}

	/* spec says at least 5us, but be generous and sleep for 1ms */
	msleep(1);

	/* issue the second D2H Register FIS */
	tf.ctl &= ~ATA_SRST;
	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);

	/* wait for link to become ready */
	rc = ata_wait_after_reset(link, deadline, ahci_check_ready);
	/* link occupied, -ENODEV too is an error */
	if (rc) {
		reason = "device not ready";
		goto fail;
	}
	*class = ahci_dev_classify(ap);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;

 fail:
	ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
	return rc;
}

static int ahci_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = 0x80;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	rc = sata_link_hardreset(link, timing, deadline, &online,
				 ahci_check_ready);

	ahci_start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
	return rc;
}

static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
				 unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	bool online;
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(ap);

	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
				 deadline, &online, NULL);

	ahci_start_engine(ap);

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);

	/* vt8251 doesn't clear BSY on signature FIS reception,
	 * request follow-up softreset.
	 */
	return online ? -EAGAIN : rc;
}

static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	int rc;

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = 0x80;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
				 deadline, &online, NULL);

	ahci_start_engine(ap);

	/* The pseudo configuration device on SIMG4726 attached to
	 * ASUS P5W-DH Deluxe doesn't send signature FIS after
	 * hardreset if no device is attached to the first downstream
	 * port && the pseudo device locks up on SRST w/ PMP==0.  To
	 * work around this, wait for !BSY only briefly.  If BSY isn't
	 * cleared, perform CLO and proceed to IDENTIFY (achieved by
	 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
	 *
	 * Wait for two seconds.  Devices attached to downstream port
	 * which can't process the following IDENTIFY after this will
	 * have to be reset again.  For most cases, this should
	 * suffice while making probing snappish enough.
	 */
	if (online) {
		rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
					  ahci_check_ready);
		if (rc)
			ahci_kick_engine(ap, 0);
	}
	return rc;
}

static void ahci_postreset(struct ata_link *link, unsigned int *class)
{
	struct ata_port *ap = link->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 new_tmp, tmp;

	ata_std_postreset(link, class);

	/* Make sure port's ATAPI bit is set appropriately */
	new_tmp = tmp = readl(port_mmio + PORT_CMD);
	if (*class == ATA_DEV_ATAPI)
		new_tmp |= PORT_CMD_ATAPI;
	else
		new_tmp &= ~PORT_CMD_ATAPI;
	if (new_tmp != tmp) {
		writel(new_tmp, port_mmio + PORT_CMD);
		readl(port_mmio + PORT_CMD); /* flush */
	}
}

static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
{
	struct scatterlist *sg;
	struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
	unsigned int si;

	VPRINTK("ENTER\n");

	/*
	 * Next, the S/G list.
	 */
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
		ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
		ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
	}

	return si;
}

static void ahci_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ahci_port_priv *pp = ap->private_data;
	int is_atapi = ata_is_atapi(qc->tf.protocol);
	void *cmd_tbl;
	u32 opts;
	const u32 cmd_fis_len = 5; /* five dwords */
	unsigned int n_elem;

	/*
	 * Fill in command table information.  First, the header,
	 * a SATA Register - Host to Device command FIS.
	 */
	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;

	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
	if (is_atapi) {
		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
	}

	n_elem = 0;
	if (qc->flags & ATA_QCFLAG_DMAMAP)
		n_elem = ahci_fill_sg(qc, cmd_tbl);

	/*
	 * Fill in command slot information.
	 */
	opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		opts |= AHCI_CMD_WRITE;
	if (is_atapi)
		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;

	ahci_fill_cmd_slot(pp, qc->tag, opts);
}

static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_eh_info *host_ehi = &ap->link.eh_info;
	struct ata_link *link = NULL;
	struct ata_queued_cmd *active_qc;
	struct ata_eh_info *active_ehi;
	u32 serror;

	/* determine active link */
	ata_port_for_each_link(link, ap)
		if (ata_link_active(link))
			break;
	if (!link)
		link = &ap->link;

	active_qc = ata_qc_from_tag(ap, link->active_tag);
	active_ehi = &link->eh_info;

	/* record irq stat */
	ata_ehi_clear_desc(host_ehi);
	ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	ahci_scr_read(ap, SCR_ERROR, &serror);
	ahci_scr_write(ap, SCR_ERROR, serror);
	host_ehi->serror |= serror;

	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
	if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
		irq_stat &= ~PORT_IRQ_IF_ERR;

	if (irq_stat & PORT_IRQ_TF_ERR) {
		/* If qc is active, charge it; otherwise, the active
		 * link.  There's no active qc on NCQ errors.  It will
		 * be determined by EH by reading log page 10h.
		 */
		if (active_qc)
			active_qc->err_mask |= AC_ERR_DEV;
		else
			active_ehi->err_mask |= AC_ERR_DEV;

		if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
			host_ehi->serror &= ~SERR_INTERNAL;
	}

	if (irq_stat & PORT_IRQ_UNK_FIS) {
		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);

		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi,
				  "unknown FIS %08x %08x %08x %08x",
				  unk[0], unk[1], unk[2], unk[3]);
	}

	if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi, "incorrect PMP");
	}

	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
		host_ehi->err_mask |= AC_ERR_HOST_BUS;
		host_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(host_ehi, "host bus error");
	}

	if (irq_stat & PORT_IRQ_IF_ERR) {
		host_ehi->err_mask |= AC_ERR_ATA_BUS;
		host_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(host_ehi, "interface fatal error");
	}

	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
		ata_ehi_hotplugged(host_ehi);
		ata_ehi_push_desc(host_ehi, "%s",
				  irq_stat & PORT_IRQ_CONNECT ?
				  "connection status changed" : "PHY RDY changed");
	}

	/* okay, let's hand over to EH */

	if (irq_stat & PORT_IRQ_FREEZE)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void ahci_port_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
	u32 status, qc_active;
	int rc;

	status = readl(port_mmio + PORT_IRQ_STAT);
	writel(status, port_mmio + PORT_IRQ_STAT);

	/* ignore BAD_PMP while resetting */
	if (unlikely(resetting))
		status &= ~PORT_IRQ_BAD_PMP;

	/* If we are getting PhyRdy, this is
	 * just a power state change, we should
	 * clear out this, plus the PhyRdy/Comm
	 * Wake bits from Serror
	 */
	if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
	    (status & PORT_IRQ_PHYRDY)) {
		status &= ~PORT_IRQ_PHYRDY;
		ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
	}

	if (unlikely(status & PORT_IRQ_ERROR)) {
		ahci_error_intr(ap, status);
		return;
	}

	if (status & PORT_IRQ_SDB_FIS) {
		/* If SNotification is available, leave notification
		 * handling to sata_async_notification().  If not,
		 * emulate it by snooping SDB FIS RX area.
		 *
		 * Snooping FIS RX area is probably cheaper than
		 * poking SNotification but some controllers which
		 * implement SNotification, ICH9 for example, don't
		 * store AN SDB FIS into receive area.
		 */
		if (hpriv->cap & HOST_CAP_SNTF)
			sata_async_notification(ap);
		else {
			/* If the 'N' bit in word 0 of the FIS is set,
			 * we just received asynchronous notification.
			 * Tell libata about it.
			 */
			const __le32 *f = pp->rx_fis + RX_FIS_SDB;
			u32 f0 = le32_to_cpu(f[0]);

			if (f0 & (1 << 15))
				sata_async_notification(ap);
		}
	}

	/* pp->active_link is valid iff any command is in flight */
	if (ap->qc_active && pp->active_link->sactive)
		qc_active = readl(port_mmio + PORT_SCR_ACT);
	else
		qc_active = readl(port_mmio + PORT_CMD_ISSUE);

	rc = ata_qc_complete_multiple(ap, qc_active);

	/* while resetting, invalid completions are expected */
	if (unlikely(rc < 0 && !resetting)) {
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
	}
}

static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ahci_host_priv *hpriv;
	unsigned int i, handled = 0;
	void __iomem *mmio;
	u32 irq_stat, irq_ack = 0;

	VPRINTK("ENTER\n");

	hpriv = host->private_data;
	mmio = host->iomap[AHCI_PCI_BAR];

	/* sigh.  0xffffffff is a valid return from h/w */
	irq_stat = readl(mmio + HOST_IRQ_STAT);
	irq_stat &= hpriv->port_map;
	if (!irq_stat)
		return IRQ_NONE;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		if (!(irq_stat & (1 << i)))
			continue;

		ap = host->ports[i];
		if (ap) {
			ahci_port_intr(ap);
			VPRINTK("port %u\n", i);
		} else {
			VPRINTK("port %u (no irq)\n", i);
			if (ata_ratelimit())
				dev_printk(KERN_WARNING, host->dev,
					   "interrupt on disabled port %u\n", i);
		}

		irq_ack |= (1 << i);
	}

	if (irq_ack) {
		writel(irq_ack, mmio + HOST_IRQ_STAT);
		handled = 1;
	}

	spin_unlock(&host->lock);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}

static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;

	/* Keep track of the currently active link.  It will be used
	 * in completion path to determine whether NCQ phase is in
	 * progress.
	 */
	pp->active_link = qc->dev->link;

	if (qc->tf.protocol == ATA_PROT_NCQ)
		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
	readl(port_mmio + PORT_CMD_ISSUE);	/* flush */

	return 0;
}

static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct ahci_port_priv *pp = qc->ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;

	ata_tf_from_fis(d2h_fis, &qc->result_tf);
	return true;
}

static void ahci_freeze(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);

	/* turn IRQ off */
	writel(0, port_mmio + PORT_IRQ_MASK);
}

static void ahci_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;
	struct ahci_port_priv *pp = ap->private_data;

	/* clear IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	writel(tmp, port_mmio + PORT_IRQ_STAT);
	writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);

	/* turn IRQ back on */
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static void ahci_error_handler(struct ata_port *ap)
{
	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
		/* restart engine */
		ahci_stop_engine(ap);
		ahci_start_engine(ap);
	}

	sata_pmp_error_handler(ap);
}

static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		ahci_kick_engine(ap, 1);
}

static void ahci_pmp_attach(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD);
	cmd |= PORT_CMD_PMP;
	writel(cmd, port_mmio + PORT_CMD);

	pp->intr_mask |= PORT_IRQ_BAD_PMP;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static void ahci_pmp_detach(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD);
static void ahci_pmp_detach(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD);
	cmd &= ~PORT_CMD_PMP;
	writel(cmd, port_mmio + PORT_CMD);

	pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static int ahci_port_resume(struct ata_port *ap)
{
	ahci_power_up(ap);
	ahci_start_port(ap);

	if (sata_pmp_attached(ap))
		ahci_pmp_attach(ap);
	else
		ahci_pmp_detach(ap);

	return 0;
}

#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	const char *emsg = NULL;
	int rc;

	rc = ahci_deinit_port(ap, &emsg);
	if (rc == 0)
		ahci_power_down(ap);
	else {
		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
		ahci_start_port(ap);
	}

	return rc;
}

static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	u32 ctl;

	if (mesg.event & PM_EVENT_SLEEP) {
		/* AHCI spec rev1.1 section 8.3.3:
		 * Software must disable interrupts prior to requesting a
		 * transition of the HBA to D3 state.
		 */
		ctl = readl(mmio + HOST_CTL);
		ctl &= ~HOST_IRQ_EN;
		writel(ctl, mmio + HOST_CTL);
		readl(mmio + HOST_CTL); /* flush */
	}

	return ata_pci_device_suspend(pdev, mesg);
}

static int ahci_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		rc = ahci_reset_controller(host);
		if (rc)
			return rc;

		ahci_init_controller(host);
	}

	ata_host_resume(host);

	return 0;
}
#endif
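/*
 * ahci_port_start() - allocate and lay out per-port DMA memory
 *
 * A single coherent allocation of AHCI_PORT_PRIV_DMA_SZ bytes is
 * carved up in place:
 *
 *	0				command list (AHCI_MAX_CMDS slots
 *					of AHCI_CMD_SZ bytes each)
 *	AHCI_CMD_SLOT_SZ		received-FIS area (AHCI_RX_FIS_SZ)
 *	AHCI_CMD_SLOT_SZ +
 *	AHCI_RX_FIS_SZ			command table area
 *
 * The port is then powered up and started via ahci_port_resume().
 */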
static int ahci_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct ahci_port_priv *pp;
	void *mem;
	dma_addr_t mem_dma;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory: 32-slot command table,
	 * 32 bytes each in size
	 */
	pp->cmd_slot = mem;
	pp->cmd_slot_dma = mem_dma;

	mem += AHCI_CMD_SLOT_SZ;
	mem_dma += AHCI_CMD_SLOT_SZ;

	/*
	 * Second item: Received-FIS area
	 */
	pp->rx_fis = mem;
	pp->rx_fis_dma = mem_dma;

	mem += AHCI_RX_FIS_SZ;
	mem_dma += AHCI_RX_FIS_SZ;

	/*
	 * Third item: data area for storing a single command
	 * and its scatter-gather table
	 */
	pp->cmd_tbl = mem;
	pp->cmd_tbl_dma = mem_dma;

	/*
	 * Save off initial list of interrupts to be enabled.
	 * This could be changed later
	 */
	pp->intr_mask = DEF_PORT_IRQ;

	ap->private_data = pp;

	/* engage engines, captain */
	return ahci_port_resume(ap);
}

static void ahci_port_stop(struct ata_port *ap)
{
	const char *emsg = NULL;
	int rc;

	/* de-initialize port */
	rc = ahci_deinit_port(ap, &emsg);
	if (rc)
		ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
}

static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
{
	int rc;

	if (using_dac &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}
	return 0;
}
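/*
 * ahci_print_info() - log a one-time probe banner
 *
 * Decodes HOST_VERSION, the command slot and port counts and the
 * interface speed from CAP, plus the PCI class code, and prints them
 * together with a textual dump of the capability flags.
 */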
static void ahci_print_info(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	u32 vers, cap, impl, speed;
	const char *speed_s;
	u16 cc;
	const char *scc_s;

	vers = readl(mmio + HOST_VERSION);
	cap = hpriv->cap;
	impl = hpriv->port_map;

	speed = (cap >> 20) & 0xf;
	if (speed == 1)
		speed_s = "1.5";
	else if (speed == 2)
		speed_s = "3";
	else
		speed_s = "?";

	pci_read_config_word(pdev, 0x0a, &cc);
	if (cc == PCI_CLASS_STORAGE_IDE)
		scc_s = "IDE";
	else if (cc == PCI_CLASS_STORAGE_SATA)
		scc_s = "SATA";
	else if (cc == PCI_CLASS_STORAGE_RAID)
		scc_s = "RAID";
	else
		scc_s = "unknown";

	dev_printk(KERN_INFO, &pdev->dev,
		"AHCI %02x%02x.%02x%02x "
		"%u slots %u ports %s Gbps 0x%x impl %s mode\n"
		,

		(vers >> 24) & 0xff,
		(vers >> 16) & 0xff,
		(vers >> 8) & 0xff,
		vers & 0xff,

		((cap >> 8) & 0x1f) + 1,
		(cap & 0x1f) + 1,
		speed_s,
		impl,
		scc_s);

	dev_printk(KERN_INFO, &pdev->dev,
		"flags: "
		"%s%s%s%s%s%s%s"
		"%s%s%s%s%s%s%s\n"
		,

		cap & (1 << 31) ? "64bit " : "",
		cap & (1 << 30) ? "ncq " : "",
		cap & (1 << 29) ? "sntf " : "",
		cap & (1 << 28) ? "ilck " : "",
		cap & (1 << 27) ? "stag " : "",
		cap & (1 << 26) ? "pm " : "",
		cap & (1 << 25) ? "led " : "",

		cap & (1 << 24) ? "clo " : "",
		cap & (1 << 19) ? "nz " : "",
		cap & (1 << 18) ? "only " : "",
		cap & (1 << 17) ? "pmp " : "",
		cap & (1 << 15) ? "pio " : "",
		cap & (1 << 14) ? "slum " : "",
		cap & (1 << 13) ? "part " : ""
		);
}

/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
 * hardwired to the on-board SIMG 4726.  The chipset is ICH8, which
 * doesn't support PMP, and the 4726 either directly exports the device
 * attached to its first downstream port or acts as a hardware storage
 * controller and emulates a single ATA device (which can be RAID 0/1
 * or some other configuration).
 *
 * When there's no device attached to the first downstream port of the
 * 4726, "Config Disk" appears, which is a pseudo ATA device used to
 * configure the 4726.  However, ATA emulation of the device is very
 * lame.  It doesn't send a signature D2H Reg FIS after the initial
 * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
 *
 * The following function works around the problem by always using
 * hardreset on the port and not depending on receiving a signature FIS
 * afterward.  If a signature FIS isn't received soon, ATA class is
 * assumed without a follow-up softreset.
 */
static void ahci_p5wdh_workaround(struct ata_host *host)
{
	static struct dmi_system_id sysids[] = {
		{
			.ident = "P5W DH Deluxe",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR,
					  "ASUSTEK COMPUTER INC"),
				DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
			},
		},
		{ }
	};
	struct pci_dev *pdev = to_pci_dev(host->dev);

	if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
	    dmi_check_system(sysids)) {
		struct ata_port *ap = host->ports[1];

		dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
			   "Deluxe on-board SIMG4726 workaround\n");

		ap->ops = &ahci_p5wdh_ops;
		ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
	}
}
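/*
 * ahci_init_one() - PCI probe
 *
 * Rough sequence: enable the device and map BAR 5 (ABAR), bail out on
 * ICH6 parts still in combined mode, allocate host-private data, try
 * MSI (falling back to INTx), save the initial CAP/port_map config,
 * size and allocate the ata_host, apply board-specific workarounds,
 * configure the DMA masks, reset and initialize the controller, print
 * the probe banner and finally activate the host with ahci_interrupt()
 * as the shared IRQ handler.
 */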
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	struct ata_port_info pi = ahci_port_info[ent->driver_data];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct device *dev = &pdev->dev;
	struct ahci_host_priv *hpriv;
	struct ata_host *host;
	int n_ports, i, rc;

	VPRINTK("ENTER\n");

	WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* AHCI controllers often implement an SFF-compatible interface.
	 * Grab all PCI BARs just in case.
	 */
	rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;

	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
	    (pdev->device == 0x2652 || pdev->device == 0x2653)) {
		u8 map;

		/* ICH6s share the same PCI ID for both piix and ahci
		 * modes.  Enabling ahci mode while MAP indicates
		 * combined mode is a bad idea.  Yield to ata_piix.
		 */
		pci_read_config_byte(pdev, ICH_MAP, &map);
		if (map & 0x3) {
			dev_printk(KERN_INFO, &pdev->dev, "controller is in "
				   "combined mode, can't enable AHCI mode\n");
			return -ENODEV;
		}
	}

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->flags |= (unsigned long)pi.private_data;

	if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	/* save initial config */
	ahci_save_initial_config(pdev, hpriv);

	/* prepare host */
	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	/* CAP.NP sometimes indicates the index of the last enabled
	 * port and at other times that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	if (!host)
		return -ENOMEM;
	host->iomap = pcim_iomap_table(pdev);
	host->private_data = hpriv;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
		ata_port_pbar_desc(ap, AHCI_PCI_BAR,
				   0x100 + ap->port_no * 0x80, "port");

		/* set initial link pm policy */
		ap->pm_policy = NOT_AVAILABLE;

		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	/* apply workaround for ASUS P5W DH Deluxe mainboard */
	ahci_p5wdh_workaround(host);

	/* initialize adapter */
	rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
	if (rc)
		return rc;

	rc = ahci_reset_controller(host);
	if (rc)
		return rc;

	ahci_init_controller(host);
	ahci_print_info(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
				 &ahci_sht);
}

static int __init ahci_init(void)
{
	return pci_register_driver(&ahci_pci_driver);
}

static void __exit ahci_exit(void)
{
	pci_unregister_driver(&ahci_pci_driver);
}


MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("AHCI SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(ahci_init);
module_exit(ahci_exit);