1 /* 2 * ahci.c - AHCI SATA support 3 * 4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 5 * Please ALWAYS copy linux-ide@vger.kernel.org 6 * on emails. 7 * 8 * Copyright 2004-2005 Red Hat, Inc. 9 * 10 * 11 * This program is free software; you can redistribute it and/or modify 12 * it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation; either version 2, or (at your option) 14 * any later version. 15 * 16 * This program is distributed in the hope that it will be useful, 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * GNU General Public License for more details. 20 * 21 * You should have received a copy of the GNU General Public License 22 * along with this program; see the file COPYING. If not, write to 23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 24 * 25 * 26 * libata documentation is available via 'make {ps|pdf}docs', 27 * as Documentation/DocBook/libata.* 28 * 29 * AHCI hardware documentation: 30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf 31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf 32 * 33 */ 34 35 #include <linux/kernel.h> 36 #include <linux/module.h> 37 #include <linux/pci.h> 38 #include <linux/init.h> 39 #include <linux/blkdev.h> 40 #include <linux/delay.h> 41 #include <linux/interrupt.h> 42 #include <linux/dma-mapping.h> 43 #include <linux/device.h> 44 #include <linux/dmi.h> 45 #include <scsi/scsi_host.h> 46 #include <scsi/scsi_cmnd.h> 47 #include <linux/libata.h> 48 49 #define DRV_NAME "ahci" 50 #define DRV_VERSION "3.0" 51 52 static int ahci_skip_host_reset; 53 module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444); 54 MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)"); 55 56 static int ahci_enable_alpm(struct ata_port *ap, 57 enum link_pm policy); 58 static void ahci_disable_alpm(struct ata_port *ap); 59 60 enum { 61 AHCI_PCI_BAR = 5, 62 AHCI_MAX_PORTS = 32, 63 AHCI_MAX_SG = 168, /* hardware max is 64K */ 64 AHCI_DMA_BOUNDARY = 0xffffffff, 65 AHCI_MAX_CMDS = 32, 66 AHCI_CMD_SZ = 32, 67 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ, 68 AHCI_RX_FIS_SZ = 256, 69 AHCI_CMD_TBL_CDB = 0x40, 70 AHCI_CMD_TBL_HDR_SZ = 0x80, 71 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16), 72 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS, 73 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ + 74 AHCI_RX_FIS_SZ, 75 AHCI_IRQ_ON_SG = (1 << 31), 76 AHCI_CMD_ATAPI = (1 << 5), 77 AHCI_CMD_WRITE = (1 << 6), 78 AHCI_CMD_PREFETCH = (1 << 7), 79 AHCI_CMD_RESET = (1 << 8), 80 AHCI_CMD_CLR_BUSY = (1 << 10), 81 82 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */ 83 RX_FIS_SDB = 0x58, /* offset of SDB FIS data */ 84 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */ 85 86 board_ahci = 0, 87 board_ahci_vt8251 = 1, 88 board_ahci_ign_iferr = 2, 89 board_ahci_sb600 = 3, 90 board_ahci_mv = 4, 91 board_ahci_sb700 = 5, 92 93 /* global controller registers */ 94 HOST_CAP = 0x00, /* host capabilities */ 95 HOST_CTL = 0x04, /* global host control */ 96 HOST_IRQ_STAT = 0x08, /* interrupt status */ 97 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */ 98 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */ 99 100 /* HOST_CTL bits */ 101 HOST_RESET = (1 << 0), /* reset controller; self-clear */ 102 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */ 103 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */ 104 105 /* HOST_CAP bits */ 106 HOST_CAP_SSC = (1 << 14), /* Slumber capable */ 107 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */ 108 HOST_CAP_CLO = (1 << 24), /* Command List Override support */ 109 HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */ 110 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */ 111 HOST_CAP_SNTF = (1 << 29), /* SNotification register */ 112 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */ 113 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */ 114 115 /* registers for each SATA port */ 116 PORT_LST_ADDR = 0x00, /* command list DMA addr */ 117 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */ 118 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */ 119 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */ 120 PORT_IRQ_STAT = 0x10, /* interrupt status */ 121 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */ 122 PORT_CMD = 0x18, /* port command */ 123 PORT_TFDATA = 0x20, /* taskfile data */ 124 PORT_SIG = 0x24, /* device TF signature */ 125 PORT_CMD_ISSUE = 0x38, /* command issue */ 126 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */ 127 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */ 128 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */ 129 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */ 130 PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */ 131 132 /* PORT_IRQ_{STAT,MASK} bits */ 133 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */ 134 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */ 135 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */ 136 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */ 137 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */ 138 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */ 139 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */ 140 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */ 141 142 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */ 143 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */ 144 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */ 145 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */ 146 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */ 147 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */ 148 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */ 149 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */ 150 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */ 151 152 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR | 153 PORT_IRQ_IF_ERR | 154 PORT_IRQ_CONNECT | 155 PORT_IRQ_PHYRDY | 156 PORT_IRQ_UNK_FIS | 157 PORT_IRQ_BAD_PMP, 158 PORT_IRQ_ERROR = PORT_IRQ_FREEZE | 159 PORT_IRQ_TF_ERR | 160 PORT_IRQ_HBUS_DATA_ERR, 161 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE | 162 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS | 163 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS, 164 165 /* PORT_CMD bits */ 166 PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */ 167 PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */ 168 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */ 169 PORT_CMD_PMP = (1 << 17), /* PMP attached */ 170 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */ 171 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */ 172 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */ 173 PORT_CMD_CLO = (1 << 3), /* Command list override */ 174 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */ 175 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */ 176 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */ 177 178 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */ 179 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */ 180 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */ 181 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */ 182 183 /* hpriv->flags bits */ 184 AHCI_HFLAG_NO_NCQ = (1 << 0), 185 AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */ 186 AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */ 187 AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */ 188 AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */ 189 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */ 190 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ 191 AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */ 192 AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */ 193 194 /* ap->flags bits */ 195 196 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 197 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | 198 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN | 199 ATA_FLAG_IPM, 200 201 ICH_MAP = 0x90, /* ICH MAP register */ 202 }; 203 204 struct ahci_cmd_hdr { 205 __le32 opts; 206 __le32 status; 207 __le32 tbl_addr; 208 __le32 tbl_addr_hi; 209 __le32 reserved[4]; 210 }; 211 212 struct ahci_sg { 213 __le32 addr; 214 __le32 addr_hi; 215 __le32 reserved; 216 __le32 flags_size; 217 }; 218 219 struct ahci_host_priv { 220 unsigned int flags; /* AHCI_HFLAG_* */ 221 u32 cap; /* cap to use */ 222 u32 port_map; /* port map to use */ 223 u32 saved_cap; /* saved initial cap */ 224 u32 saved_port_map; /* saved initial port_map */ 225 }; 226 227 struct ahci_port_priv { 228 struct ata_link *active_link; 229 struct ahci_cmd_hdr *cmd_slot; 230 dma_addr_t cmd_slot_dma; 231 void *cmd_tbl; 232 dma_addr_t cmd_tbl_dma; 233 void *rx_fis; 234 dma_addr_t rx_fis_dma; 235 /* for NCQ spurious interrupt analysis */ 236 unsigned int ncq_saw_d2h:1; 237 unsigned int ncq_saw_dmas:1; 238 unsigned int ncq_saw_sdb:1; 239 u32 intr_mask; /* interrupts to enable */ 240 }; 241 242 static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 243 static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 244 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 245 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc); 246 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc); 247 static int ahci_port_start(struct ata_port *ap); 248 static void ahci_port_stop(struct ata_port *ap); 249 static void ahci_qc_prep(struct ata_queued_cmd *qc); 250 static void ahci_freeze(struct ata_port *ap); 251 static void ahci_thaw(struct ata_port *ap); 252 static void ahci_pmp_attach(struct ata_port *ap); 253 static void ahci_pmp_detach(struct ata_port *ap); 254 static int ahci_softreset(struct ata_link *link, unsigned int *class, 255 unsigned long deadline); 256 static int ahci_hardreset(struct ata_link *link, unsigned int *class, 257 unsigned long deadline); 258 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, 259 unsigned long deadline); 260 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, 261 unsigned long deadline); 262 static void ahci_postreset(struct ata_link *link, unsigned int *class); 263 static void ahci_error_handler(struct ata_port *ap); 264 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); 265 static int ahci_port_resume(struct ata_port *ap); 266 static void ahci_dev_config(struct ata_device *dev); 267 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl); 268 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, 269 u32 opts); 270 #ifdef CONFIG_PM 271 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg); 272 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); 273 static int ahci_pci_device_resume(struct pci_dev *pdev); 274 #endif 275 276 static struct class_device_attribute *ahci_shost_attrs[] = { 277 &class_device_attr_link_power_management_policy, 278 NULL 279 }; 280 281 static struct scsi_host_template ahci_sht = { 282 ATA_NCQ_SHT(DRV_NAME), 283 .can_queue = AHCI_MAX_CMDS - 1, 284 .sg_tablesize = AHCI_MAX_SG, 285 .dma_boundary = AHCI_DMA_BOUNDARY, 286 .shost_attrs = ahci_shost_attrs, 287 }; 288 289 static struct ata_port_operations ahci_ops = { 290 .inherits = &sata_pmp_port_ops, 291 292 .qc_defer = sata_pmp_qc_defer_cmd_switch, 293 .qc_prep = ahci_qc_prep, 294 .qc_issue = ahci_qc_issue, 295 .qc_fill_rtf = ahci_qc_fill_rtf, 296 297 .freeze = ahci_freeze, 298 .thaw = ahci_thaw, 299 .softreset = ahci_softreset, 300 .hardreset = ahci_hardreset, 301 .postreset = ahci_postreset, 302 .pmp_softreset = ahci_softreset, 303 .error_handler = ahci_error_handler, 304 .post_internal_cmd = ahci_post_internal_cmd, 305 .dev_config = ahci_dev_config, 306 307 .scr_read = ahci_scr_read, 308 .scr_write = ahci_scr_write, 309 .pmp_attach = ahci_pmp_attach, 310 .pmp_detach = ahci_pmp_detach, 311 312 .enable_pm = ahci_enable_alpm, 313 .disable_pm = ahci_disable_alpm, 314 #ifdef CONFIG_PM 315 .port_suspend = ahci_port_suspend, 316 .port_resume = ahci_port_resume, 317 #endif 318 .port_start = ahci_port_start, 319 .port_stop = ahci_port_stop, 320 }; 321 322 static struct ata_port_operations ahci_vt8251_ops = { 323 .inherits = &ahci_ops, 324 .hardreset = ahci_vt8251_hardreset, 325 }; 326 327 static struct ata_port_operations ahci_p5wdh_ops = { 328 .inherits = &ahci_ops, 329 .hardreset = ahci_p5wdh_hardreset, 330 }; 331 332 #define AHCI_HFLAGS(flags) .private_data = (void *)(flags) 333 334 static const struct ata_port_info ahci_port_info[] = { 335 /* board_ahci */ 336 { 337 .flags = AHCI_FLAG_COMMON, 338 .pio_mask = 0x1f, /* pio0-4 */ 339 .udma_mask = ATA_UDMA6, 340 .port_ops = &ahci_ops, 341 }, 342 /* board_ahci_vt8251 */ 343 { 344 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP), 345 .flags = AHCI_FLAG_COMMON, 346 .pio_mask = 0x1f, /* pio0-4 */ 347 .udma_mask = ATA_UDMA6, 348 .port_ops = &ahci_vt8251_ops, 349 }, 350 /* board_ahci_ign_iferr */ 351 { 352 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR), 353 .flags = AHCI_FLAG_COMMON, 354 .pio_mask = 0x1f, /* pio0-4 */ 355 .udma_mask = ATA_UDMA6, 356 .port_ops = &ahci_ops, 357 }, 358 /* board_ahci_sb600 */ 359 { 360 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | 361 AHCI_HFLAG_32BIT_ONLY | 362 AHCI_HFLAG_SECT255 | AHCI_HFLAG_NO_PMP), 363 .flags = AHCI_FLAG_COMMON, 364 .pio_mask = 0x1f, /* pio0-4 */ 365 .udma_mask = ATA_UDMA6, 366 .port_ops = &ahci_ops, 367 }, 368 /* board_ahci_mv */ 369 { 370 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI | 371 AHCI_HFLAG_MV_PATA), 372 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 373 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA, 374 .pio_mask = 0x1f, /* pio0-4 */ 375 .udma_mask = ATA_UDMA6, 376 .port_ops = &ahci_ops, 377 }, 378 /* board_ahci_sb700 */ 379 { 380 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | 381 AHCI_HFLAG_NO_PMP), 382 .flags = AHCI_FLAG_COMMON, 383 .pio_mask = 0x1f, /* pio0-4 */ 384 .udma_mask = ATA_UDMA6, 385 .port_ops = &ahci_ops, 386 }, 387 }; 388 389 static const struct pci_device_id ahci_pci_tbl[] = { 390 /* Intel */ 391 { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */ 392 { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */ 393 { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */ 394 { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */ 395 { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */ 396 { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */ 397 { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */ 398 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */ 399 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */ 400 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */ 401 { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */ 402 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */ 403 { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */ 404 { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */ 405 { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */ 406 { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */ 407 { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */ 408 { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */ 409 { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */ 410 { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */ 411 { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */ 412 { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */ 413 { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */ 414 { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */ 415 { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */ 416 { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */ 417 { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */ 418 { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */ 419 { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */ 420 { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */ 421 { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */ 422 423 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 424 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 425 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr }, 426 427 /* ATI */ 428 { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */ 429 { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */ 430 { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */ 431 { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */ 432 { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */ 433 { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */ 434 { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */ 435 436 /* VIA */ 437 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */ 438 { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */ 439 440 /* NVIDIA */ 441 { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci }, /* MCP65 */ 442 { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci }, /* MCP65 */ 443 { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci }, /* MCP65 */ 444 { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci }, /* MCP65 */ 445 { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci }, /* MCP65 */ 446 { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci }, /* MCP65 */ 447 { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci }, /* MCP65 */ 448 { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci }, /* MCP65 */ 449 { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci }, /* MCP67 */ 450 { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci }, /* MCP67 */ 451 { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci }, /* MCP67 */ 452 { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci }, /* MCP67 */ 453 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci }, /* MCP67 */ 454 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci }, /* MCP67 */ 455 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci }, /* MCP67 */ 456 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci }, /* MCP67 */ 457 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci }, /* MCP67 */ 458 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci }, /* MCP67 */ 459 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci }, /* MCP67 */ 460 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci }, /* MCP67 */ 461 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci }, /* MCP73 */ 462 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci }, /* MCP73 */ 463 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci }, /* MCP73 */ 464 { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci }, /* MCP73 */ 465 { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci }, /* MCP73 */ 466 { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci }, /* MCP73 */ 467 { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci }, /* MCP73 */ 468 { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci }, /* MCP73 */ 469 { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci }, /* MCP73 */ 470 { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci }, /* MCP73 */ 471 { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci }, /* MCP73 */ 472 { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci }, /* MCP73 */ 473 { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */ 474 { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */ 475 { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */ 476 { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */ 477 { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */ 478 { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */ 479 { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */ 480 { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */ 481 { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */ 482 { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */ 483 { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */ 484 { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */ 485 { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */ 486 { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */ 487 { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */ 488 { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */ 489 { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */ 490 { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */ 491 { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */ 492 { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */ 493 { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */ 494 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */ 495 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */ 496 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */ 497 { PCI_VDEVICE(NVIDIA, 0x0bc8), board_ahci }, /* MCP7B */ 498 { PCI_VDEVICE(NVIDIA, 0x0bc9), board_ahci }, /* MCP7B */ 499 { PCI_VDEVICE(NVIDIA, 0x0bca), board_ahci }, /* MCP7B */ 500 { PCI_VDEVICE(NVIDIA, 0x0bcb), board_ahci }, /* MCP7B */ 501 { PCI_VDEVICE(NVIDIA, 0x0bcc), board_ahci }, /* MCP7B */ 502 { PCI_VDEVICE(NVIDIA, 0x0bcd), board_ahci }, /* MCP7B */ 503 { PCI_VDEVICE(NVIDIA, 0x0bce), board_ahci }, /* MCP7B */ 504 { PCI_VDEVICE(NVIDIA, 0x0bcf), board_ahci }, /* MCP7B */ 505 { PCI_VDEVICE(NVIDIA, 0x0bd0), board_ahci }, /* MCP7B */ 506 { PCI_VDEVICE(NVIDIA, 0x0bd1), board_ahci }, /* MCP7B */ 507 { PCI_VDEVICE(NVIDIA, 0x0bd2), board_ahci }, /* MCP7B */ 508 { PCI_VDEVICE(NVIDIA, 0x0bd3), board_ahci }, /* MCP7B */ 509 510 /* SiS */ 511 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */ 512 { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 966 */ 513 { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */ 514 515 /* Marvell */ 516 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */ 517 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */ 518 519 /* Generic, PCI class code for AHCI */ 520 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 521 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci }, 522 523 { } /* terminate list */ 524 }; 525 526 527 static struct pci_driver ahci_pci_driver = { 528 .name = DRV_NAME, 529 .id_table = ahci_pci_tbl, 530 .probe = ahci_init_one, 531 .remove = ata_pci_remove_one, 532 #ifdef CONFIG_PM 533 .suspend = ahci_pci_device_suspend, 534 .resume = ahci_pci_device_resume, 535 #endif 536 }; 537 538 539 static inline int ahci_nr_ports(u32 cap) 540 { 541 return (cap & 0x1f) + 1; 542 } 543 544 static inline void __iomem *__ahci_port_base(struct ata_host *host, 545 unsigned int port_no) 546 { 547 void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; 548 549 return mmio + 0x100 + (port_no * 0x80); 550 } 551 552 static inline void __iomem *ahci_port_base(struct ata_port *ap) 553 { 554 return __ahci_port_base(ap->host, ap->port_no); 555 } 556 557 static void ahci_enable_ahci(void __iomem *mmio) 558 { 559 u32 tmp; 560 561 /* turn on AHCI_EN */ 562 tmp = readl(mmio + HOST_CTL); 563 if (!(tmp & HOST_AHCI_EN)) { 564 tmp |= HOST_AHCI_EN; 565 writel(tmp, mmio + HOST_CTL); 566 tmp = readl(mmio + HOST_CTL); /* flush && sanity check */ 567 WARN_ON(!(tmp & HOST_AHCI_EN)); 568 } 569 } 570 571 /** 572 * ahci_save_initial_config - Save and fixup initial config values 573 * @pdev: target PCI device 574 * @hpriv: host private area to store config values 575 * 576 * Some registers containing configuration info might be setup by 577 * BIOS and might be cleared on reset. This function saves the 578 * initial values of those registers into @hpriv such that they 579 * can be restored after controller reset. 580 * 581 * If inconsistent, config values are fixed up by this function. 582 * 583 * LOCKING: 584 * None. 585 */ 586 static void ahci_save_initial_config(struct pci_dev *pdev, 587 struct ahci_host_priv *hpriv) 588 { 589 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR]; 590 u32 cap, port_map; 591 int i; 592 int mv; 593 594 /* make sure AHCI mode is enabled before accessing CAP */ 595 ahci_enable_ahci(mmio); 596 597 /* Values prefixed with saved_ are written back to host after 598 * reset. Values without are used for driver operation. 599 */ 600 hpriv->saved_cap = cap = readl(mmio + HOST_CAP); 601 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL); 602 603 /* some chips have errata preventing 64bit use */ 604 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) { 605 dev_printk(KERN_INFO, &pdev->dev, 606 "controller can't do 64bit DMA, forcing 32bit\n"); 607 cap &= ~HOST_CAP_64; 608 } 609 610 if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) { 611 dev_printk(KERN_INFO, &pdev->dev, 612 "controller can't do NCQ, turning off CAP_NCQ\n"); 613 cap &= ~HOST_CAP_NCQ; 614 } 615 616 if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) { 617 dev_printk(KERN_INFO, &pdev->dev, 618 "controller can't do PMP, turning off CAP_PMP\n"); 619 cap &= ~HOST_CAP_PMP; 620 } 621 622 /* 623 * Temporary Marvell 6145 hack: PATA port presence 624 * is asserted through the standard AHCI port 625 * presence register, as bit 4 (counting from 0) 626 */ 627 if (hpriv->flags & AHCI_HFLAG_MV_PATA) { 628 if (pdev->device == 0x6121) 629 mv = 0x3; 630 else 631 mv = 0xf; 632 dev_printk(KERN_ERR, &pdev->dev, 633 "MV_AHCI HACK: port_map %x -> %x\n", 634 port_map, 635 port_map & mv); 636 637 port_map &= mv; 638 } 639 640 /* cross check port_map and cap.n_ports */ 641 if (port_map) { 642 int map_ports = 0; 643 644 for (i = 0; i < AHCI_MAX_PORTS; i++) 645 if (port_map & (1 << i)) 646 map_ports++; 647 648 /* If PI has more ports than n_ports, whine, clear 649 * port_map and let it be generated from n_ports. 650 */ 651 if (map_ports > ahci_nr_ports(cap)) { 652 dev_printk(KERN_WARNING, &pdev->dev, 653 "implemented port map (0x%x) contains more " 654 "ports than nr_ports (%u), using nr_ports\n", 655 port_map, ahci_nr_ports(cap)); 656 port_map = 0; 657 } 658 } 659 660 /* fabricate port_map from cap.nr_ports */ 661 if (!port_map) { 662 port_map = (1 << ahci_nr_ports(cap)) - 1; 663 dev_printk(KERN_WARNING, &pdev->dev, 664 "forcing PORTS_IMPL to 0x%x\n", port_map); 665 666 /* write the fixed up value to the PI register */ 667 hpriv->saved_port_map = port_map; 668 } 669 670 /* record values to use during operation */ 671 hpriv->cap = cap; 672 hpriv->port_map = port_map; 673 } 674 675 /** 676 * ahci_restore_initial_config - Restore initial config 677 * @host: target ATA host 678 * 679 * Restore initial config stored by ahci_save_initial_config(). 680 * 681 * LOCKING: 682 * None. 683 */ 684 static void ahci_restore_initial_config(struct ata_host *host) 685 { 686 struct ahci_host_priv *hpriv = host->private_data; 687 void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; 688 689 writel(hpriv->saved_cap, mmio + HOST_CAP); 690 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL); 691 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */ 692 } 693 694 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg) 695 { 696 static const int offset[] = { 697 [SCR_STATUS] = PORT_SCR_STAT, 698 [SCR_CONTROL] = PORT_SCR_CTL, 699 [SCR_ERROR] = PORT_SCR_ERR, 700 [SCR_ACTIVE] = PORT_SCR_ACT, 701 [SCR_NOTIFICATION] = PORT_SCR_NTF, 702 }; 703 struct ahci_host_priv *hpriv = ap->host->private_data; 704 705 if (sc_reg < ARRAY_SIZE(offset) && 706 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF))) 707 return offset[sc_reg]; 708 return 0; 709 } 710 711 static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 712 { 713 void __iomem *port_mmio = ahci_port_base(ap); 714 int offset = ahci_scr_offset(ap, sc_reg); 715 716 if (offset) { 717 *val = readl(port_mmio + offset); 718 return 0; 719 } 720 return -EINVAL; 721 } 722 723 static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 724 { 725 void __iomem *port_mmio = ahci_port_base(ap); 726 int offset = ahci_scr_offset(ap, sc_reg); 727 728 if (offset) { 729 writel(val, port_mmio + offset); 730 return 0; 731 } 732 return -EINVAL; 733 } 734 735 static void ahci_start_engine(struct ata_port *ap) 736 { 737 void __iomem *port_mmio = ahci_port_base(ap); 738 u32 tmp; 739 740 /* start DMA */ 741 tmp = readl(port_mmio + PORT_CMD); 742 tmp |= PORT_CMD_START; 743 writel(tmp, port_mmio + PORT_CMD); 744 readl(port_mmio + PORT_CMD); /* flush */ 745 } 746 747 static int ahci_stop_engine(struct ata_port *ap) 748 { 749 void __iomem *port_mmio = ahci_port_base(ap); 750 u32 tmp; 751 752 tmp = readl(port_mmio + PORT_CMD); 753 754 /* check if the HBA is idle */ 755 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0) 756 return 0; 757 758 /* setting HBA to idle */ 759 tmp &= ~PORT_CMD_START; 760 writel(tmp, port_mmio + PORT_CMD); 761 762 /* wait for engine to stop. This could be as long as 500 msec */ 763 tmp = ata_wait_register(port_mmio + PORT_CMD, 764 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500); 765 if (tmp & PORT_CMD_LIST_ON) 766 return -EIO; 767 768 return 0; 769 } 770 771 static void ahci_start_fis_rx(struct ata_port *ap) 772 { 773 void __iomem *port_mmio = ahci_port_base(ap); 774 struct ahci_host_priv *hpriv = ap->host->private_data; 775 struct ahci_port_priv *pp = ap->private_data; 776 u32 tmp; 777 778 /* set FIS registers */ 779 if (hpriv->cap & HOST_CAP_64) 780 writel((pp->cmd_slot_dma >> 16) >> 16, 781 port_mmio + PORT_LST_ADDR_HI); 782 writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR); 783 784 if (hpriv->cap & HOST_CAP_64) 785 writel((pp->rx_fis_dma >> 16) >> 16, 786 port_mmio + PORT_FIS_ADDR_HI); 787 writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR); 788 789 /* enable FIS reception */ 790 tmp = readl(port_mmio + PORT_CMD); 791 tmp |= PORT_CMD_FIS_RX; 792 writel(tmp, port_mmio + PORT_CMD); 793 794 /* flush */ 795 readl(port_mmio + PORT_CMD); 796 } 797 798 static int ahci_stop_fis_rx(struct ata_port *ap) 799 { 800 void __iomem *port_mmio = ahci_port_base(ap); 801 u32 tmp; 802 803 /* disable FIS reception */ 804 tmp = readl(port_mmio + PORT_CMD); 805 tmp &= ~PORT_CMD_FIS_RX; 806 writel(tmp, port_mmio + PORT_CMD); 807 808 /* wait for completion, spec says 500ms, give it 1000 */ 809 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON, 810 PORT_CMD_FIS_ON, 10, 1000); 811 if (tmp & PORT_CMD_FIS_ON) 812 return -EBUSY; 813 814 return 0; 815 } 816 817 static void ahci_power_up(struct ata_port *ap) 818 { 819 struct ahci_host_priv *hpriv = ap->host->private_data; 820 void __iomem *port_mmio = ahci_port_base(ap); 821 u32 cmd; 822 823 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK; 824 825 /* spin up device */ 826 if (hpriv->cap & HOST_CAP_SSS) { 827 cmd |= PORT_CMD_SPIN_UP; 828 writel(cmd, port_mmio + PORT_CMD); 829 } 830 831 /* wake up link */ 832 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD); 833 } 834 835 static void ahci_disable_alpm(struct ata_port *ap) 836 { 837 struct ahci_host_priv *hpriv = ap->host->private_data; 838 void __iomem *port_mmio = ahci_port_base(ap); 839 u32 cmd; 840 struct ahci_port_priv *pp = ap->private_data; 841 842 /* IPM bits should be disabled by libata-core */ 843 /* get the existing command bits */ 844 cmd = readl(port_mmio + PORT_CMD); 845 846 /* disable ALPM and ASP */ 847 cmd &= ~PORT_CMD_ASP; 848 cmd &= ~PORT_CMD_ALPE; 849 850 /* force the interface back to active */ 851 cmd |= PORT_CMD_ICC_ACTIVE; 852 853 /* write out new cmd value */ 854 writel(cmd, port_mmio + PORT_CMD); 855 cmd = readl(port_mmio + PORT_CMD); 856 857 /* wait 10ms to be sure we've come out of any low power state */ 858 msleep(10); 859 860 /* clear out any PhyRdy stuff from interrupt status */ 861 writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT); 862 863 /* go ahead and clean out PhyRdy Change from Serror too */ 864 ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18))); 865 866 /* 867 * Clear flag to indicate that we should ignore all PhyRdy 868 * state changes 869 */ 870 hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG; 871 872 /* 873 * Enable interrupts on Phy Ready. 874 */ 875 pp->intr_mask |= PORT_IRQ_PHYRDY; 876 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); 877 878 /* 879 * don't change the link pm policy - we can be called 880 * just to turn of link pm temporarily 881 */ 882 } 883 884 static int ahci_enable_alpm(struct ata_port *ap, 885 enum link_pm policy) 886 { 887 struct ahci_host_priv *hpriv = ap->host->private_data; 888 void __iomem *port_mmio = ahci_port_base(ap); 889 u32 cmd; 890 struct ahci_port_priv *pp = ap->private_data; 891 u32 asp; 892 893 /* Make sure the host is capable of link power management */ 894 if (!(hpriv->cap & HOST_CAP_ALPM)) 895 return -EINVAL; 896 897 switch (policy) { 898 case MAX_PERFORMANCE: 899 case NOT_AVAILABLE: 900 /* 901 * if we came here with NOT_AVAILABLE, 902 * it just means this is the first time we 903 * have tried to enable - default to max performance, 904 * and let the user go to lower power modes on request. 905 */ 906 ahci_disable_alpm(ap); 907 return 0; 908 case MIN_POWER: 909 /* configure HBA to enter SLUMBER */ 910 asp = PORT_CMD_ASP; 911 break; 912 case MEDIUM_POWER: 913 /* configure HBA to enter PARTIAL */ 914 asp = 0; 915 break; 916 default: 917 return -EINVAL; 918 } 919 920 /* 921 * Disable interrupts on Phy Ready. This keeps us from 922 * getting woken up due to spurious phy ready interrupts 923 * TBD - Hot plug should be done via polling now, is 924 * that even supported? 925 */ 926 pp->intr_mask &= ~PORT_IRQ_PHYRDY; 927 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); 928 929 /* 930 * Set a flag to indicate that we should ignore all PhyRdy 931 * state changes since these can happen now whenever we 932 * change link state 933 */ 934 hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG; 935 936 /* get the existing command bits */ 937 cmd = readl(port_mmio + PORT_CMD); 938 939 /* 940 * Set ASP based on Policy 941 */ 942 cmd |= asp; 943 944 /* 945 * Setting this bit will instruct the HBA to aggressively 946 * enter a lower power link state when it's appropriate and 947 * based on the value set above for ASP 948 */ 949 cmd |= PORT_CMD_ALPE; 950 951 /* write out new cmd value */ 952 writel(cmd, port_mmio + PORT_CMD); 953 cmd = readl(port_mmio + PORT_CMD); 954 955 /* IPM bits should be set by libata-core */ 956 return 0; 957 } 958 959 #ifdef CONFIG_PM 960 static void ahci_power_down(struct ata_port *ap) 961 { 962 struct ahci_host_priv *hpriv = ap->host->private_data; 963 void __iomem *port_mmio = ahci_port_base(ap); 964 u32 cmd, scontrol; 965 966 if (!(hpriv->cap & HOST_CAP_SSS)) 967 return; 968 969 /* put device into listen mode, first set PxSCTL.DET to 0 */ 970 scontrol = readl(port_mmio + PORT_SCR_CTL); 971 scontrol &= ~0xf; 972 writel(scontrol, port_mmio + PORT_SCR_CTL); 973 974 /* then set PxCMD.SUD to 0 */ 975 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK; 976 cmd &= ~PORT_CMD_SPIN_UP; 977 writel(cmd, port_mmio + PORT_CMD); 978 } 979 #endif 980 981 static void ahci_start_port(struct ata_port *ap) 982 { 983 /* enable FIS reception */ 984 ahci_start_fis_rx(ap); 985 986 /* enable DMA */ 987 ahci_start_engine(ap); 988 } 989 990 static int ahci_deinit_port(struct ata_port *ap, const char **emsg) 991 { 992 int rc; 993 994 /* disable DMA */ 995 rc = ahci_stop_engine(ap); 996 if (rc) { 997 *emsg = "failed to stop engine"; 998 return rc; 999 } 1000 1001 /* disable FIS reception */ 1002 rc = ahci_stop_fis_rx(ap); 1003 if (rc) { 1004 *emsg = "failed stop FIS RX"; 1005 return rc; 1006 } 1007 1008 return 0; 1009 } 1010 1011 static int ahci_reset_controller(struct ata_host *host) 1012 { 1013 struct pci_dev *pdev = to_pci_dev(host->dev); 1014 struct ahci_host_priv *hpriv = host->private_data; 1015 void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; 1016 u32 tmp; 1017 1018 /* we must be in AHCI mode, before using anything 1019 * AHCI-specific, such as HOST_RESET. 1020 */ 1021 ahci_enable_ahci(mmio); 1022 1023 /* global controller reset */ 1024 if (!ahci_skip_host_reset) { 1025 tmp = readl(mmio + HOST_CTL); 1026 if ((tmp & HOST_RESET) == 0) { 1027 writel(tmp | HOST_RESET, mmio + HOST_CTL); 1028 readl(mmio + HOST_CTL); /* flush */ 1029 } 1030 1031 /* reset must complete within 1 second, or 1032 * the hardware should be considered fried. 1033 */ 1034 ssleep(1); 1035 1036 tmp = readl(mmio + HOST_CTL); 1037 if (tmp & HOST_RESET) { 1038 dev_printk(KERN_ERR, host->dev, 1039 "controller reset failed (0x%x)\n", tmp); 1040 return -EIO; 1041 } 1042 1043 /* turn on AHCI mode */ 1044 ahci_enable_ahci(mmio); 1045 1046 /* Some registers might be cleared on reset. Restore 1047 * initial values. 1048 */ 1049 ahci_restore_initial_config(host); 1050 } else 1051 dev_printk(KERN_INFO, host->dev, 1052 "skipping global host reset\n"); 1053 1054 if (pdev->vendor == PCI_VENDOR_ID_INTEL) { 1055 u16 tmp16; 1056 1057 /* configure PCS */ 1058 pci_read_config_word(pdev, 0x92, &tmp16); 1059 if ((tmp16 & hpriv->port_map) != hpriv->port_map) { 1060 tmp16 |= hpriv->port_map; 1061 pci_write_config_word(pdev, 0x92, tmp16); 1062 } 1063 } 1064 1065 return 0; 1066 } 1067 1068 static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap, 1069 int port_no, void __iomem *mmio, 1070 void __iomem *port_mmio) 1071 { 1072 const char *emsg = NULL; 1073 int rc; 1074 u32 tmp; 1075 1076 /* make sure port is not active */ 1077 rc = ahci_deinit_port(ap, &emsg); 1078 if (rc) 1079 dev_printk(KERN_WARNING, &pdev->dev, 1080 "%s (%d)\n", emsg, rc); 1081 1082 /* clear SError */ 1083 tmp = readl(port_mmio + PORT_SCR_ERR); 1084 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp); 1085 writel(tmp, port_mmio + PORT_SCR_ERR); 1086 1087 /* clear port IRQ */ 1088 tmp = readl(port_mmio + PORT_IRQ_STAT); 1089 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp); 1090 if (tmp) 1091 writel(tmp, port_mmio + PORT_IRQ_STAT); 1092 1093 writel(1 << port_no, mmio + HOST_IRQ_STAT); 1094 } 1095 1096 static void ahci_init_controller(struct ata_host *host) 1097 { 1098 struct ahci_host_priv *hpriv = host->private_data; 1099 struct pci_dev *pdev = to_pci_dev(host->dev); 1100 void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; 1101 int i; 1102 void __iomem *port_mmio; 1103 u32 tmp; 1104 int mv; 1105 1106 if (hpriv->flags & AHCI_HFLAG_MV_PATA) { 1107 if (pdev->device == 0x6121) 1108 mv = 2; 1109 else 1110 mv = 4; 1111 port_mmio = __ahci_port_base(host, mv); 1112 1113 writel(0, port_mmio + PORT_IRQ_MASK); 1114 1115 /* clear port IRQ */ 1116 tmp = readl(port_mmio + PORT_IRQ_STAT); 1117 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp); 1118 if (tmp) 1119 writel(tmp, port_mmio + PORT_IRQ_STAT); 1120 } 1121 1122 for (i = 0; i < host->n_ports; i++) { 1123 struct ata_port *ap = host->ports[i]; 1124 1125 port_mmio = ahci_port_base(ap); 1126 if (ata_port_is_dummy(ap)) 1127 continue; 1128 1129 ahci_port_init(pdev, ap, i, mmio, port_mmio); 1130 } 1131 1132 tmp = readl(mmio + HOST_CTL); 1133 VPRINTK("HOST_CTL 0x%x\n", tmp); 1134 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL); 1135 tmp = readl(mmio + HOST_CTL); 1136 VPRINTK("HOST_CTL 0x%x\n", tmp); 1137 } 1138 1139 static void ahci_dev_config(struct ata_device *dev) 1140 { 1141 struct ahci_host_priv *hpriv = dev->link->ap->host->private_data; 1142 1143 if (hpriv->flags & AHCI_HFLAG_SECT255) { 1144 dev->max_sectors = 255; 1145 ata_dev_printk(dev, KERN_INFO, 1146 "SB600 AHCI: limiting to 255 sectors per cmd\n"); 1147 } 1148 } 1149 1150 static unsigned int ahci_dev_classify(struct ata_port *ap) 1151 { 1152 void __iomem *port_mmio = ahci_port_base(ap); 1153 struct ata_taskfile tf; 1154 u32 tmp; 1155 1156 tmp = readl(port_mmio + PORT_SIG); 1157 tf.lbah = (tmp >> 24) & 0xff; 1158 tf.lbam = (tmp >> 16) & 0xff; 1159 tf.lbal = (tmp >> 8) & 0xff; 1160 tf.nsect = (tmp) & 0xff; 1161 1162 return ata_dev_classify(&tf); 1163 } 1164 1165 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, 1166 u32 opts) 1167 { 1168 dma_addr_t cmd_tbl_dma; 1169 1170 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ; 1171 1172 pp->cmd_slot[tag].opts = cpu_to_le32(opts); 1173 pp->cmd_slot[tag].status = 0; 1174 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff); 1175 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16); 1176 } 1177 1178 static int ahci_kick_engine(struct ata_port *ap, int force_restart) 1179 { 1180 void __iomem *port_mmio = ahci_port_base(ap); 1181 struct ahci_host_priv *hpriv = ap->host->private_data; 1182 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF; 1183 u32 tmp; 1184 int busy, rc; 1185 1186 /* do we need to kick the port? */ 1187 busy = status & (ATA_BUSY | ATA_DRQ); 1188 if (!busy && !force_restart) 1189 return 0; 1190 1191 /* stop engine */ 1192 rc = ahci_stop_engine(ap); 1193 if (rc) 1194 goto out_restart; 1195 1196 /* need to do CLO? */ 1197 if (!busy) { 1198 rc = 0; 1199 goto out_restart; 1200 } 1201 1202 if (!(hpriv->cap & HOST_CAP_CLO)) { 1203 rc = -EOPNOTSUPP; 1204 goto out_restart; 1205 } 1206 1207 /* perform CLO */ 1208 tmp = readl(port_mmio + PORT_CMD); 1209 tmp |= PORT_CMD_CLO; 1210 writel(tmp, port_mmio + PORT_CMD); 1211 1212 rc = 0; 1213 tmp = ata_wait_register(port_mmio + PORT_CMD, 1214 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500); 1215 if (tmp & PORT_CMD_CLO) 1216 rc = -EIO; 1217 1218 /* restart engine */ 1219 out_restart: 1220 ahci_start_engine(ap); 1221 return rc; 1222 } 1223 1224 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp, 1225 struct ata_taskfile *tf, int is_cmd, u16 flags, 1226 unsigned long timeout_msec) 1227 { 1228 const u32 cmd_fis_len = 5; /* five dwords */ 1229 struct ahci_port_priv *pp = ap->private_data; 1230 void __iomem *port_mmio = ahci_port_base(ap); 1231 u8 *fis = pp->cmd_tbl; 1232 u32 tmp; 1233 1234 /* prep the command */ 1235 ata_tf_to_fis(tf, pmp, is_cmd, fis); 1236 ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12)); 1237 1238 /* issue & wait */ 1239 writel(1, port_mmio + PORT_CMD_ISSUE); 1240 1241 if (timeout_msec) { 1242 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1243 1, timeout_msec); 1244 if (tmp & 0x1) { 1245 ahci_kick_engine(ap, 1); 1246 return -EBUSY; 1247 } 1248 } else 1249 readl(port_mmio + PORT_CMD_ISSUE); /* flush */ 1250 1251 return 0; 1252 } 1253 1254 static int ahci_check_ready(struct ata_link *link) 1255 { 1256 void __iomem *port_mmio = ahci_port_base(link->ap); 1257 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF; 1258 1259 if (!(status & ATA_BUSY)) 1260 return 1; 1261 return 0; 1262 } 1263 1264 static int ahci_softreset(struct ata_link *link, unsigned int *class, 1265 unsigned long deadline) 1266 { 1267 struct ata_port *ap = link->ap; 1268 int pmp = sata_srst_pmp(link); 1269 const char *reason = NULL; 1270 unsigned long now, msecs; 1271 struct ata_taskfile tf; 1272 int rc; 1273 1274 DPRINTK("ENTER\n"); 1275 1276 /* prepare for SRST (AHCI-1.1 10.4.1) */ 1277 rc = ahci_kick_engine(ap, 1); 1278 if (rc && rc != -EOPNOTSUPP) 1279 ata_link_printk(link, KERN_WARNING, 1280 "failed to reset engine (errno=%d)\n", rc); 1281 1282 ata_tf_init(link->device, &tf); 1283 1284 /* issue the first D2H Register FIS */ 1285 msecs = 0; 1286 now = jiffies; 1287 if (time_after(now, deadline)) 1288 msecs = jiffies_to_msecs(deadline - now); 1289 1290 tf.ctl |= ATA_SRST; 1291 if (ahci_exec_polled_cmd(ap, pmp, &tf, 0, 1292 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) { 1293 rc = -EIO; 1294 reason = "1st FIS failed"; 1295 goto fail; 1296 } 1297 1298 /* spec says at least 5us, but be generous and sleep for 1ms */ 1299 msleep(1); 1300 1301 /* issue the second D2H Register FIS */ 1302 tf.ctl &= ~ATA_SRST; 1303 ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0); 1304 1305 /* wait for link to become ready */ 1306 rc = ata_wait_after_reset(link, deadline, ahci_check_ready); 1307 /* link occupied, -ENODEV too is an error */ 1308 if (rc) { 1309 reason = "device not ready"; 1310 goto fail; 1311 } 1312 *class = ahci_dev_classify(ap); 1313 1314 DPRINTK("EXIT, class=%u\n", *class); 1315 return 0; 1316 1317 fail: 1318 ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason); 1319 return rc; 1320 } 1321 1322 static int ahci_hardreset(struct ata_link *link, unsigned int *class, 1323 unsigned long deadline) 1324 { 1325 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); 1326 struct ata_port *ap = link->ap; 1327 struct ahci_port_priv *pp = ap->private_data; 1328 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; 1329 struct ata_taskfile tf; 1330 bool online; 1331 int rc; 1332 1333 DPRINTK("ENTER\n"); 1334 1335 ahci_stop_engine(ap); 1336 1337 /* clear D2H reception area to properly wait for D2H FIS */ 1338 ata_tf_init(link->device, &tf); 1339 tf.command = 0x80; 1340 ata_tf_to_fis(&tf, 0, 0, d2h_fis); 1341 1342 rc = sata_link_hardreset(link, timing, deadline, &online, 1343 ahci_check_ready); 1344 1345 ahci_start_engine(ap); 1346 1347 if (online) 1348 *class = ahci_dev_classify(ap); 1349 1350 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class); 1351 return rc; 1352 } 1353 1354 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, 1355 unsigned long deadline) 1356 { 1357 struct ata_port *ap = link->ap; 1358 bool online; 1359 int rc; 1360 1361 DPRINTK("ENTER\n"); 1362 1363 ahci_stop_engine(ap); 1364 1365 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), 1366 deadline, &online, NULL); 1367 1368 ahci_start_engine(ap); 1369 1370 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class); 1371 1372 /* vt8251 doesn't clear BSY on signature FIS reception, 1373 * request follow-up softreset. 1374 */ 1375 return online ? -EAGAIN : rc; 1376 } 1377 1378 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, 1379 unsigned long deadline) 1380 { 1381 struct ata_port *ap = link->ap; 1382 struct ahci_port_priv *pp = ap->private_data; 1383 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; 1384 struct ata_taskfile tf; 1385 bool online; 1386 int rc; 1387 1388 ahci_stop_engine(ap); 1389 1390 /* clear D2H reception area to properly wait for D2H FIS */ 1391 ata_tf_init(link->device, &tf); 1392 tf.command = 0x80; 1393 ata_tf_to_fis(&tf, 0, 0, d2h_fis); 1394 1395 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), 1396 deadline, &online, NULL); 1397 1398 ahci_start_engine(ap); 1399 1400 /* The pseudo configuration device on SIMG4726 attached to 1401 * ASUS P5W-DH Deluxe doesn't send signature FIS after 1402 * hardreset if no device is attached to the first downstream 1403 * port && the pseudo device locks up on SRST w/ PMP==0. To 1404 * work around this, wait for !BSY only briefly. If BSY isn't 1405 * cleared, perform CLO and proceed to IDENTIFY (achieved by 1406 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA). 1407 * 1408 * Wait for two seconds. Devices attached to downstream port 1409 * which can't process the following IDENTIFY after this will 1410 * have to be reset again. For most cases, this should 1411 * suffice while making probing snappish enough. 1412 */ 1413 if (online) { 1414 rc = ata_wait_after_reset(link, jiffies + 2 * HZ, 1415 ahci_check_ready); 1416 if (rc) 1417 ahci_kick_engine(ap, 0); 1418 } 1419 return rc; 1420 } 1421 1422 static void ahci_postreset(struct ata_link *link, unsigned int *class) 1423 { 1424 struct ata_port *ap = link->ap; 1425 void __iomem *port_mmio = ahci_port_base(ap); 1426 u32 new_tmp, tmp; 1427 1428 ata_std_postreset(link, class); 1429 1430 /* Make sure port's ATAPI bit is set appropriately */ 1431 new_tmp = tmp = readl(port_mmio + PORT_CMD); 1432 if (*class == ATA_DEV_ATAPI) 1433 new_tmp |= PORT_CMD_ATAPI; 1434 else 1435 new_tmp &= ~PORT_CMD_ATAPI; 1436 if (new_tmp != tmp) { 1437 writel(new_tmp, port_mmio + PORT_CMD); 1438 readl(port_mmio + PORT_CMD); /* flush */ 1439 } 1440 } 1441 1442 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl) 1443 { 1444 struct scatterlist *sg; 1445 struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ; 1446 unsigned int si; 1447 1448 VPRINTK("ENTER\n"); 1449 1450 /* 1451 * Next, the S/G list. 1452 */ 1453 for_each_sg(qc->sg, sg, qc->n_elem, si) { 1454 dma_addr_t addr = sg_dma_address(sg); 1455 u32 sg_len = sg_dma_len(sg); 1456 1457 ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff); 1458 ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16); 1459 ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1); 1460 } 1461 1462 return si; 1463 } 1464 1465 static void ahci_qc_prep(struct ata_queued_cmd *qc) 1466 { 1467 struct ata_port *ap = qc->ap; 1468 struct ahci_port_priv *pp = ap->private_data; 1469 int is_atapi = ata_is_atapi(qc->tf.protocol); 1470 void *cmd_tbl; 1471 u32 opts; 1472 const u32 cmd_fis_len = 5; /* five dwords */ 1473 unsigned int n_elem; 1474 1475 /* 1476 * Fill in command table information. First, the header, 1477 * a SATA Register - Host to Device command FIS. 1478 */ 1479 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ; 1480 1481 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl); 1482 if (is_atapi) { 1483 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32); 1484 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len); 1485 } 1486 1487 n_elem = 0; 1488 if (qc->flags & ATA_QCFLAG_DMAMAP) 1489 n_elem = ahci_fill_sg(qc, cmd_tbl); 1490 1491 /* 1492 * Fill in command slot information. 1493 */ 1494 opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12); 1495 if (qc->tf.flags & ATA_TFLAG_WRITE) 1496 opts |= AHCI_CMD_WRITE; 1497 if (is_atapi) 1498 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; 1499 1500 ahci_fill_cmd_slot(pp, qc->tag, opts); 1501 } 1502 1503 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat) 1504 { 1505 struct ahci_host_priv *hpriv = ap->host->private_data; 1506 struct ahci_port_priv *pp = ap->private_data; 1507 struct ata_eh_info *host_ehi = &ap->link.eh_info; 1508 struct ata_link *link = NULL; 1509 struct ata_queued_cmd *active_qc; 1510 struct ata_eh_info *active_ehi; 1511 u32 serror; 1512 1513 /* determine active link */ 1514 ata_port_for_each_link(link, ap) 1515 if (ata_link_active(link)) 1516 break; 1517 if (!link) 1518 link = &ap->link; 1519 1520 active_qc = ata_qc_from_tag(ap, link->active_tag); 1521 active_ehi = &link->eh_info; 1522 1523 /* record irq stat */ 1524 ata_ehi_clear_desc(host_ehi); 1525 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat); 1526 1527 /* AHCI needs SError cleared; otherwise, it might lock up */ 1528 ahci_scr_read(ap, SCR_ERROR, &serror); 1529 ahci_scr_write(ap, SCR_ERROR, serror); 1530 host_ehi->serror |= serror; 1531 1532 /* some controllers set IRQ_IF_ERR on device errors, ignore it */ 1533 if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR) 1534 irq_stat &= ~PORT_IRQ_IF_ERR; 1535 1536 if (irq_stat & PORT_IRQ_TF_ERR) { 1537 /* If qc is active, charge it; otherwise, the active 1538 * link. There's no active qc on NCQ errors. It will 1539 * be determined by EH by reading log page 10h. 1540 */ 1541 if (active_qc) 1542 active_qc->err_mask |= AC_ERR_DEV; 1543 else 1544 active_ehi->err_mask |= AC_ERR_DEV; 1545 1546 if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL) 1547 host_ehi->serror &= ~SERR_INTERNAL; 1548 } 1549 1550 if (irq_stat & PORT_IRQ_UNK_FIS) { 1551 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK); 1552 1553 active_ehi->err_mask |= AC_ERR_HSM; 1554 active_ehi->action |= ATA_EH_RESET; 1555 ata_ehi_push_desc(active_ehi, 1556 "unknown FIS %08x %08x %08x %08x" , 1557 unk[0], unk[1], unk[2], unk[3]); 1558 } 1559 1560 if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) { 1561 active_ehi->err_mask |= AC_ERR_HSM; 1562 active_ehi->action |= ATA_EH_RESET; 1563 ata_ehi_push_desc(active_ehi, "incorrect PMP"); 1564 } 1565 1566 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) { 1567 host_ehi->err_mask |= AC_ERR_HOST_BUS; 1568 host_ehi->action |= ATA_EH_RESET; 1569 ata_ehi_push_desc(host_ehi, "host bus error"); 1570 } 1571 1572 if (irq_stat & PORT_IRQ_IF_ERR) { 1573 host_ehi->err_mask |= AC_ERR_ATA_BUS; 1574 host_ehi->action |= ATA_EH_RESET; 1575 ata_ehi_push_desc(host_ehi, "interface fatal error"); 1576 } 1577 1578 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) { 1579 ata_ehi_hotplugged(host_ehi); 1580 ata_ehi_push_desc(host_ehi, "%s", 1581 irq_stat & PORT_IRQ_CONNECT ? 1582 "connection status changed" : "PHY RDY changed"); 1583 } 1584 1585 /* okay, let's hand over to EH */ 1586 1587 if (irq_stat & PORT_IRQ_FREEZE) 1588 ata_port_freeze(ap); 1589 else 1590 ata_port_abort(ap); 1591 } 1592 1593 static void ahci_port_intr(struct ata_port *ap) 1594 { 1595 void __iomem *port_mmio = ahci_port_base(ap); 1596 struct ata_eh_info *ehi = &ap->link.eh_info; 1597 struct ahci_port_priv *pp = ap->private_data; 1598 struct ahci_host_priv *hpriv = ap->host->private_data; 1599 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING); 1600 u32 status, qc_active; 1601 int rc; 1602 1603 status = readl(port_mmio + PORT_IRQ_STAT); 1604 writel(status, port_mmio + PORT_IRQ_STAT); 1605 1606 /* ignore BAD_PMP while resetting */ 1607 if (unlikely(resetting)) 1608 status &= ~PORT_IRQ_BAD_PMP; 1609 1610 /* If we are getting PhyRdy, this is 1611 * just a power state change, we should 1612 * clear out this, plus the PhyRdy/Comm 1613 * Wake bits from Serror 1614 */ 1615 if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) && 1616 (status & PORT_IRQ_PHYRDY)) { 1617 status &= ~PORT_IRQ_PHYRDY; 1618 ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18))); 1619 } 1620 1621 if (unlikely(status & PORT_IRQ_ERROR)) { 1622 ahci_error_intr(ap, status); 1623 return; 1624 } 1625 1626 if (status & PORT_IRQ_SDB_FIS) { 1627 /* If SNotification is available, leave notification 1628 * handling to sata_async_notification(). If not, 1629 * emulate it by snooping SDB FIS RX area. 1630 * 1631 * Snooping FIS RX area is probably cheaper than 1632 * poking SNotification but some constrollers which 1633 * implement SNotification, ICH9 for example, don't 1634 * store AN SDB FIS into receive area. 1635 */ 1636 if (hpriv->cap & HOST_CAP_SNTF) 1637 sata_async_notification(ap); 1638 else { 1639 /* If the 'N' bit in word 0 of the FIS is set, 1640 * we just received asynchronous notification. 1641 * Tell libata about it. 1642 */ 1643 const __le32 *f = pp->rx_fis + RX_FIS_SDB; 1644 u32 f0 = le32_to_cpu(f[0]); 1645 1646 if (f0 & (1 << 15)) 1647 sata_async_notification(ap); 1648 } 1649 } 1650 1651 /* pp->active_link is valid iff any command is in flight */ 1652 if (ap->qc_active && pp->active_link->sactive) 1653 qc_active = readl(port_mmio + PORT_SCR_ACT); 1654 else 1655 qc_active = readl(port_mmio + PORT_CMD_ISSUE); 1656 1657 rc = ata_qc_complete_multiple(ap, qc_active); 1658 1659 /* while resetting, invalid completions are expected */ 1660 if (unlikely(rc < 0 && !resetting)) { 1661 ehi->err_mask |= AC_ERR_HSM; 1662 ehi->action |= ATA_EH_RESET; 1663 ata_port_freeze(ap); 1664 } 1665 } 1666 1667 static irqreturn_t ahci_interrupt(int irq, void *dev_instance) 1668 { 1669 struct ata_host *host = dev_instance; 1670 struct ahci_host_priv *hpriv; 1671 unsigned int i, handled = 0; 1672 void __iomem *mmio; 1673 u32 irq_stat, irq_ack = 0; 1674 1675 VPRINTK("ENTER\n"); 1676 1677 hpriv = host->private_data; 1678 mmio = host->iomap[AHCI_PCI_BAR]; 1679 1680 /* sigh. 0xffffffff is a valid return from h/w */ 1681 irq_stat = readl(mmio + HOST_IRQ_STAT); 1682 irq_stat &= hpriv->port_map; 1683 if (!irq_stat) 1684 return IRQ_NONE; 1685 1686 spin_lock(&host->lock); 1687 1688 for (i = 0; i < host->n_ports; i++) { 1689 struct ata_port *ap; 1690 1691 if (!(irq_stat & (1 << i))) 1692 continue; 1693 1694 ap = host->ports[i]; 1695 if (ap) { 1696 ahci_port_intr(ap); 1697 VPRINTK("port %u\n", i); 1698 } else { 1699 VPRINTK("port %u (no irq)\n", i); 1700 if (ata_ratelimit()) 1701 dev_printk(KERN_WARNING, host->dev, 1702 "interrupt on disabled port %u\n", i); 1703 } 1704 1705 irq_ack |= (1 << i); 1706 } 1707 1708 if (irq_ack) { 1709 writel(irq_ack, mmio + HOST_IRQ_STAT); 1710 handled = 1; 1711 } 1712 1713 spin_unlock(&host->lock); 1714 1715 VPRINTK("EXIT\n"); 1716 1717 return IRQ_RETVAL(handled); 1718 } 1719 1720 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) 1721 { 1722 struct ata_port *ap = qc->ap; 1723 void __iomem *port_mmio = ahci_port_base(ap); 1724 struct ahci_port_priv *pp = ap->private_data; 1725 1726 /* Keep track of the currently active link. It will be used 1727 * in completion path to determine whether NCQ phase is in 1728 * progress. 1729 */ 1730 pp->active_link = qc->dev->link; 1731 1732 if (qc->tf.protocol == ATA_PROT_NCQ) 1733 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT); 1734 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE); 1735 readl(port_mmio + PORT_CMD_ISSUE); /* flush */ 1736 1737 return 0; 1738 } 1739 1740 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc) 1741 { 1742 struct ahci_port_priv *pp = qc->ap->private_data; 1743 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; 1744 1745 ata_tf_from_fis(d2h_fis, &qc->result_tf); 1746 return true; 1747 } 1748 1749 static void ahci_freeze(struct ata_port *ap) 1750 { 1751 void __iomem *port_mmio = ahci_port_base(ap); 1752 1753 /* turn IRQ off */ 1754 writel(0, port_mmio + PORT_IRQ_MASK); 1755 } 1756 1757 static void ahci_thaw(struct ata_port *ap) 1758 { 1759 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR]; 1760 void __iomem *port_mmio = ahci_port_base(ap); 1761 u32 tmp; 1762 struct ahci_port_priv *pp = ap->private_data; 1763 1764 /* clear IRQ */ 1765 tmp = readl(port_mmio + PORT_IRQ_STAT); 1766 writel(tmp, port_mmio + PORT_IRQ_STAT); 1767 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT); 1768 1769 /* turn IRQ back on */ 1770 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); 1771 } 1772 1773 static void ahci_error_handler(struct ata_port *ap) 1774 { 1775 if (!(ap->pflags & ATA_PFLAG_FROZEN)) { 1776 /* restart engine */ 1777 ahci_stop_engine(ap); 1778 ahci_start_engine(ap); 1779 } 1780 1781 sata_pmp_error_handler(ap); 1782 } 1783 1784 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc) 1785 { 1786 struct ata_port *ap = qc->ap; 1787 1788 /* make DMA engine forget about the failed command */ 1789 if (qc->flags & ATA_QCFLAG_FAILED) 1790 ahci_kick_engine(ap, 1); 1791 } 1792 1793 static void ahci_pmp_attach(struct ata_port *ap) 1794 { 1795 void __iomem *port_mmio = ahci_port_base(ap); 1796 struct ahci_port_priv *pp = ap->private_data; 1797 u32 cmd; 1798 1799 cmd = readl(port_mmio + PORT_CMD); 1800 cmd |= PORT_CMD_PMP; 1801 writel(cmd, port_mmio + PORT_CMD); 1802 1803 pp->intr_mask |= PORT_IRQ_BAD_PMP; 1804 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); 1805 } 1806 1807 static void ahci_pmp_detach(struct ata_port *ap) 1808 { 1809 void __iomem *port_mmio = ahci_port_base(ap); 1810 struct ahci_port_priv *pp = ap->private_data; 1811 u32 cmd; 1812 1813 cmd = readl(port_mmio + PORT_CMD); 1814 cmd &= ~PORT_CMD_PMP; 1815 writel(cmd, port_mmio + PORT_CMD); 1816 1817 pp->intr_mask &= ~PORT_IRQ_BAD_PMP; 1818 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); 1819 } 1820 1821 static int ahci_port_resume(struct ata_port *ap) 1822 { 1823 ahci_power_up(ap); 1824 ahci_start_port(ap); 1825 1826 if (sata_pmp_attached(ap)) 1827 ahci_pmp_attach(ap); 1828 else 1829 ahci_pmp_detach(ap); 1830 1831 return 0; 1832 } 1833 1834 #ifdef CONFIG_PM 1835 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg) 1836 { 1837 const char *emsg = NULL; 1838 int rc; 1839 1840 rc = ahci_deinit_port(ap, &emsg); 1841 if (rc == 0) 1842 ahci_power_down(ap); 1843 else { 1844 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc); 1845 ahci_start_port(ap); 1846 } 1847 1848 return rc; 1849 } 1850 1851 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 1852 { 1853 struct ata_host *host = dev_get_drvdata(&pdev->dev); 1854 void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; 1855 u32 ctl; 1856 1857 if (mesg.event & PM_EVENT_SLEEP) { 1858 /* AHCI spec rev1.1 section 8.3.3: 1859 * Software must disable interrupts prior to requesting a 1860 * transition of the HBA to D3 state. 1861 */ 1862 ctl = readl(mmio + HOST_CTL); 1863 ctl &= ~HOST_IRQ_EN; 1864 writel(ctl, mmio + HOST_CTL); 1865 readl(mmio + HOST_CTL); /* flush */ 1866 } 1867 1868 return ata_pci_device_suspend(pdev, mesg); 1869 } 1870 1871 static int ahci_pci_device_resume(struct pci_dev *pdev) 1872 { 1873 struct ata_host *host = dev_get_drvdata(&pdev->dev); 1874 int rc; 1875 1876 rc = ata_pci_device_do_resume(pdev); 1877 if (rc) 1878 return rc; 1879 1880 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { 1881 rc = ahci_reset_controller(host); 1882 if (rc) 1883 return rc; 1884 1885 ahci_init_controller(host); 1886 } 1887 1888 ata_host_resume(host); 1889 1890 return 0; 1891 } 1892 #endif 1893 1894 static int ahci_port_start(struct ata_port *ap) 1895 { 1896 struct device *dev = ap->host->dev; 1897 struct ahci_port_priv *pp; 1898 void *mem; 1899 dma_addr_t mem_dma; 1900 1901 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 1902 if (!pp) 1903 return -ENOMEM; 1904 1905 mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, 1906 GFP_KERNEL); 1907 if (!mem) 1908 return -ENOMEM; 1909 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ); 1910 1911 /* 1912 * First item in chunk of DMA memory: 32-slot command table, 1913 * 32 bytes each in size 1914 */ 1915 pp->cmd_slot = mem; 1916 pp->cmd_slot_dma = mem_dma; 1917 1918 mem += AHCI_CMD_SLOT_SZ; 1919 mem_dma += AHCI_CMD_SLOT_SZ; 1920 1921 /* 1922 * Second item: Received-FIS area 1923 */ 1924 pp->rx_fis = mem; 1925 pp->rx_fis_dma = mem_dma; 1926 1927 mem += AHCI_RX_FIS_SZ; 1928 mem_dma += AHCI_RX_FIS_SZ; 1929 1930 /* 1931 * Third item: data area for storing a single command 1932 * and its scatter-gather table 1933 */ 1934 pp->cmd_tbl = mem; 1935 pp->cmd_tbl_dma = mem_dma; 1936 1937 /* 1938 * Save off initial list of interrupts to be enabled. 1939 * This could be changed later 1940 */ 1941 pp->intr_mask = DEF_PORT_IRQ; 1942 1943 ap->private_data = pp; 1944 1945 /* engage engines, captain */ 1946 return ahci_port_resume(ap); 1947 } 1948 1949 static void ahci_port_stop(struct ata_port *ap) 1950 { 1951 const char *emsg = NULL; 1952 int rc; 1953 1954 /* de-initialize port */ 1955 rc = ahci_deinit_port(ap, &emsg); 1956 if (rc) 1957 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc); 1958 } 1959 1960 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac) 1961 { 1962 int rc; 1963 1964 if (using_dac && 1965 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 1966 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 1967 if (rc) { 1968 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 1969 if (rc) { 1970 dev_printk(KERN_ERR, &pdev->dev, 1971 "64-bit DMA enable failed\n"); 1972 return rc; 1973 } 1974 } 1975 } else { 1976 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 1977 if (rc) { 1978 dev_printk(KERN_ERR, &pdev->dev, 1979 "32-bit DMA enable failed\n"); 1980 return rc; 1981 } 1982 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 1983 if (rc) { 1984 dev_printk(KERN_ERR, &pdev->dev, 1985 "32-bit consistent DMA enable failed\n"); 1986 return rc; 1987 } 1988 } 1989 return 0; 1990 } 1991 1992 static void ahci_print_info(struct ata_host *host) 1993 { 1994 struct ahci_host_priv *hpriv = host->private_data; 1995 struct pci_dev *pdev = to_pci_dev(host->dev); 1996 void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; 1997 u32 vers, cap, impl, speed; 1998 const char *speed_s; 1999 u16 cc; 2000 const char *scc_s; 2001 2002 vers = readl(mmio + HOST_VERSION); 2003 cap = hpriv->cap; 2004 impl = hpriv->port_map; 2005 2006 speed = (cap >> 20) & 0xf; 2007 if (speed == 1) 2008 speed_s = "1.5"; 2009 else if (speed == 2) 2010 speed_s = "3"; 2011 else 2012 speed_s = "?"; 2013 2014 pci_read_config_word(pdev, 0x0a, &cc); 2015 if (cc == PCI_CLASS_STORAGE_IDE) 2016 scc_s = "IDE"; 2017 else if (cc == PCI_CLASS_STORAGE_SATA) 2018 scc_s = "SATA"; 2019 else if (cc == PCI_CLASS_STORAGE_RAID) 2020 scc_s = "RAID"; 2021 else 2022 scc_s = "unknown"; 2023 2024 dev_printk(KERN_INFO, &pdev->dev, 2025 "AHCI %02x%02x.%02x%02x " 2026 "%u slots %u ports %s Gbps 0x%x impl %s mode\n" 2027 , 2028 2029 (vers >> 24) & 0xff, 2030 (vers >> 16) & 0xff, 2031 (vers >> 8) & 0xff, 2032 vers & 0xff, 2033 2034 ((cap >> 8) & 0x1f) + 1, 2035 (cap & 0x1f) + 1, 2036 speed_s, 2037 impl, 2038 scc_s); 2039 2040 dev_printk(KERN_INFO, &pdev->dev, 2041 "flags: " 2042 "%s%s%s%s%s%s%s" 2043 "%s%s%s%s%s%s%s\n" 2044 , 2045 2046 cap & (1 << 31) ? "64bit " : "", 2047 cap & (1 << 30) ? "ncq " : "", 2048 cap & (1 << 29) ? "sntf " : "", 2049 cap & (1 << 28) ? "ilck " : "", 2050 cap & (1 << 27) ? "stag " : "", 2051 cap & (1 << 26) ? "pm " : "", 2052 cap & (1 << 25) ? "led " : "", 2053 2054 cap & (1 << 24) ? "clo " : "", 2055 cap & (1 << 19) ? "nz " : "", 2056 cap & (1 << 18) ? "only " : "", 2057 cap & (1 << 17) ? "pmp " : "", 2058 cap & (1 << 15) ? "pio " : "", 2059 cap & (1 << 14) ? "slum " : "", 2060 cap & (1 << 13) ? "part " : "" 2061 ); 2062 } 2063 2064 /* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is 2065 * hardwired to on-board SIMG 4726. The chipset is ICH8 and doesn't 2066 * support PMP and the 4726 either directly exports the device 2067 * attached to the first downstream port or acts as a hardware storage 2068 * controller and emulate a single ATA device (can be RAID 0/1 or some 2069 * other configuration). 2070 * 2071 * When there's no device attached to the first downstream port of the 2072 * 4726, "Config Disk" appears, which is a pseudo ATA device to 2073 * configure the 4726. However, ATA emulation of the device is very 2074 * lame. It doesn't send signature D2H Reg FIS after the initial 2075 * hardreset, pukes on SRST w/ PMP==0 and has bunch of other issues. 2076 * 2077 * The following function works around the problem by always using 2078 * hardreset on the port and not depending on receiving signature FIS 2079 * afterward. If signature FIS isn't received soon, ATA class is 2080 * assumed without follow-up softreset. 2081 */ 2082 static void ahci_p5wdh_workaround(struct ata_host *host) 2083 { 2084 static struct dmi_system_id sysids[] = { 2085 { 2086 .ident = "P5W DH Deluxe", 2087 .matches = { 2088 DMI_MATCH(DMI_SYS_VENDOR, 2089 "ASUSTEK COMPUTER INC"), 2090 DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"), 2091 }, 2092 }, 2093 { } 2094 }; 2095 struct pci_dev *pdev = to_pci_dev(host->dev); 2096 2097 if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) && 2098 dmi_check_system(sysids)) { 2099 struct ata_port *ap = host->ports[1]; 2100 2101 dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH " 2102 "Deluxe on-board SIMG4726 workaround\n"); 2103 2104 ap->ops = &ahci_p5wdh_ops; 2105 ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA; 2106 } 2107 } 2108 2109 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 2110 { 2111 static int printed_version; 2112 struct ata_port_info pi = ahci_port_info[ent->driver_data]; 2113 const struct ata_port_info *ppi[] = { &pi, NULL }; 2114 struct device *dev = &pdev->dev; 2115 struct ahci_host_priv *hpriv; 2116 struct ata_host *host; 2117 int n_ports, i, rc; 2118 2119 VPRINTK("ENTER\n"); 2120 2121 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS); 2122 2123 if (!printed_version++) 2124 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 2125 2126 /* acquire resources */ 2127 rc = pcim_enable_device(pdev); 2128 if (rc) 2129 return rc; 2130 2131 /* AHCI controllers often implement SFF compatible interface. 2132 * Grab all PCI BARs just in case. 2133 */ 2134 rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME); 2135 if (rc == -EBUSY) 2136 pcim_pin_device(pdev); 2137 if (rc) 2138 return rc; 2139 2140 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 2141 (pdev->device == 0x2652 || pdev->device == 0x2653)) { 2142 u8 map; 2143 2144 /* ICH6s share the same PCI ID for both piix and ahci 2145 * modes. Enabling ahci mode while MAP indicates 2146 * combined mode is a bad idea. Yield to ata_piix. 2147 */ 2148 pci_read_config_byte(pdev, ICH_MAP, &map); 2149 if (map & 0x3) { 2150 dev_printk(KERN_INFO, &pdev->dev, "controller is in " 2151 "combined mode, can't enable AHCI mode\n"); 2152 return -ENODEV; 2153 } 2154 } 2155 2156 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); 2157 if (!hpriv) 2158 return -ENOMEM; 2159 hpriv->flags |= (unsigned long)pi.private_data; 2160 2161 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev)) 2162 pci_intx(pdev, 1); 2163 2164 /* save initial config */ 2165 ahci_save_initial_config(pdev, hpriv); 2166 2167 /* prepare host */ 2168 if (hpriv->cap & HOST_CAP_NCQ) 2169 pi.flags |= ATA_FLAG_NCQ; 2170 2171 if (hpriv->cap & HOST_CAP_PMP) 2172 pi.flags |= ATA_FLAG_PMP; 2173 2174 /* CAP.NP sometimes indicate the index of the last enabled 2175 * port, at other times, that of the last possible port, so 2176 * determining the maximum port number requires looking at 2177 * both CAP.NP and port_map. 2178 */ 2179 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map)); 2180 2181 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); 2182 if (!host) 2183 return -ENOMEM; 2184 host->iomap = pcim_iomap_table(pdev); 2185 host->private_data = hpriv; 2186 2187 for (i = 0; i < host->n_ports; i++) { 2188 struct ata_port *ap = host->ports[i]; 2189 2190 ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar"); 2191 ata_port_pbar_desc(ap, AHCI_PCI_BAR, 2192 0x100 + ap->port_no * 0x80, "port"); 2193 2194 /* set initial link pm policy */ 2195 ap->pm_policy = NOT_AVAILABLE; 2196 2197 /* disabled/not-implemented port */ 2198 if (!(hpriv->port_map & (1 << i))) 2199 ap->ops = &ata_dummy_port_ops; 2200 } 2201 2202 /* apply workaround for ASUS P5W DH Deluxe mainboard */ 2203 ahci_p5wdh_workaround(host); 2204 2205 /* initialize adapter */ 2206 rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64); 2207 if (rc) 2208 return rc; 2209 2210 rc = ahci_reset_controller(host); 2211 if (rc) 2212 return rc; 2213 2214 ahci_init_controller(host); 2215 ahci_print_info(host); 2216 2217 pci_set_master(pdev); 2218 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED, 2219 &ahci_sht); 2220 } 2221 2222 static int __init ahci_init(void) 2223 { 2224 return pci_register_driver(&ahci_pci_driver); 2225 } 2226 2227 static void __exit ahci_exit(void) 2228 { 2229 pci_unregister_driver(&ahci_pci_driver); 2230 } 2231 2232 2233 MODULE_AUTHOR("Jeff Garzik"); 2234 MODULE_DESCRIPTION("AHCI SATA low-level driver"); 2235 MODULE_LICENSE("GPL"); 2236 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl); 2237 MODULE_VERSION(DRV_VERSION); 2238 2239 module_init(ahci_init); 2240 module_exit(ahci_exit); 2241