/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * sata_mv TODO list:
 *
 * --> Errata workaround for NCQ device errors.
 *
 * --> More errata workarounds for PCI-X.
 *
 * --> Complete a full errata audit for all chipsets to identify others.
 *
 * --> ATAPI support (Marvell claims the 60xx/70xx chips can do it).
 *
 * --> Investigate problems with PCI Message Signalled Interrupts (MSI).
 *
 * --> Cache frequently-accessed registers in mv_port_priv to reduce overhead.
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> [Experiment, low priority] Investigate interrupt coalescing.
 *     Quite often, especially with PCI Message Signalled Interrupts (MSI),
 *     the overhead saved by interrupt mitigation is not worth the
 *     added latency.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *     mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *     creating LibATA target mode support would be very interesting.
 *
 *     Target mode, for those without docs, is the ability to directly
 *     connect two SATA ports.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"

enum {
	/* BARs are enumerated in pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE	= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO	= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI	= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL_OFS	= 0x1046c,
	MV_GPIO_PORT_CTL_OFS	= 0x104f0,
	MV_RESET_CFG_OFS	= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
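	/*
	 * Editorial note (illustrative arithmetic, not from vendor docs):
	 * with MV_MAX_Q_DEPTH == 32, a 32-byte CRQB gives a 1KB request
	 * queue (32 * 32), an 8-byte CRPB gives a 256B response queue
	 * (8 * 32), and 256 ePRDs of 16 bytes give a 4KB SG table --
	 * matching the alignment requirements noted above and the
	 * dma_pools kept in mv_host_priv.
	 */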
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1), /* 3 */

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),	/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),	/* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC		= (1 << 28),

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,
	PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE_OFS		= 0xd00,
	MV_PCI_MODE_MASK	= 0x30,

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT_OFS	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
	PCI_HC_MAIN_IRQ_MASK_OFS  = 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	SOC_HC_MAIN_IRQ_MASK_OFS  = 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by port # */
	DONE_IRQ		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6), /* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC	= (PORTS_0_3_COAL_DONE |
				   HC_MAIN_RSVD_SOC),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,

	LTMODE_OFS		= 0x30c,
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	SATA_IFCTL_OFS		= 0x344,
	SATA_TESTCTL_OFS	= 0x348,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,

	FISCFG_OFS		= 0x360,
	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	MV5_PHY_MODE		= 0x74,
	MV5_LTMODE_OFS		= 0x30,
	MV5_PHY_CTL_OFS		= 0x0C,
	SATA_INTERFACE_CFG_OFS	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT	=
				  EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */

	EDMA_STATUS_OFS		= 0x30,		/* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */

	EDMA_IORDY_TMOUT_OFS	= 0x34,
	EDMA_ARB_CFG_OFS	= 0x38,

	EDMA_HALTCOND_OFS	= 0x60,		/* GenIIe halt conditions */

	GEN_II_NCQ_MAX_SECTORS	= 256,		/* max sects/io on Gen2 w/NCQ */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))

#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
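	/*
	 * Editorial note (illustrative, derived from the definitions in
	 * this file): the low bits cleared by EDMA_REQ_Q_BASE_LO_MASK
	 * (10 bits) and EDMA_RSP_Q_BASE_LO_MASK (8 bits) below are the
	 * same bits used to carry the queue in/out pointer index, which
	 * is why the CRQB and CRPB queues must be 1KB- and 256B-aligned.
	 */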
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
	unsigned int		delayed_eh_pmp_map;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_irq_cause_addr;
	void __iomem		*main_irq_mask_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
				    struct mv_port_priv *pp);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.softreset		= mv_softreset,
	.error_handler		= mv_pmp_error_handler,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
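/*
 * Worked example (editorial illustration, derived from the definitions
 * above): for port == 5, mv_hc_from_port() gives hc 1 and
 * mv_hardport_from_port() gives hardport 1, so shift = 1 * HC_SHIFT +
 * 1 * 2 = 11; ERR_IRQ and DONE_IRQ for that port then land on bits 11
 * and 12 of the main interrupt cause/mask registers.
 */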
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;				\
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
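/*
 * Editorial note (illustrative, based on the register definitions above):
 * the IN/OUT pointer registers double as BASE_LO, so a queue index is
 * written as (base & BASE_LO_MASK) | (idx << PTR_SHIFT).  For example,
 * with a hypothetical crqb_dma of 0x1fff0400 and req_idx == 3, the
 * request IN pointer value would be 0x1fff0400 | (3 << 5) == 0x1fff0460.
 */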
/**
 *      mv_start_dma - Enable eDMA engine
 *      @ap: ATA channel to manipulate
 *      @port_mmio: port base address
 *      @pp: port private data
 *      @protocol: taskfile protocol of the command being started
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hardport = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hardport);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ | DMA_IRQ) << hardport;
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(ap, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}

static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
	int i;

	/*
	 * Wait for the EDMA engine to finish transactions in progress.
	 * No idea what a good "timeout" value might be, but measurements
	 * indicate that it often requires hundreds of microseconds
	 * with two drives in-use.  So we use the 15msec value above
	 * as a rough guess at what even more drives might require.
	 */
	for (i = 0; i < timeout; ++i) {
		u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS);
		if ((edma_stat & empty_idle) == empty_idle)
			break;
		udelay(per_loop);
	}
	/* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
}

/**
 *      mv_stop_edma_engine - Disable eDMA engine
 *      @port_mmio: io base address
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
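	/*
	 * Editorial note (arithmetic only): the loop below polls up to
	 * 10000 times with a 10 usec delay each, i.e. roughly 100 msec
	 * worst case before giving up and returning -EIO.
	 */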
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}

static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	mv_wait_for_edma_empty_idle(ap);
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		return -EIO;
	}
	return 0;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);
	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;	/* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 * (no FIS-based switching).
	 *
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_printk(adev, KERN_INFO,
				"NCQ disabled for command-based switching\n");
		} else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) {
			adev->max_sectors = GEN_II_NCQ_MAX_SECTORS;
			ata_dev_printk(adev, KERN_INFO,
				"max_sectors limited to %u for NCQ\n",
				adev->max_sectors);
		}
	}
}

static int mv_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;

	/*
	 * Don't allow new commands if we're in a delayed EH state
	 * for NCQ and/or FIS-based switching.
	 */
	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
		return ATA_DEFER_PORT;
	/*
	 * If the port is completely idle, then allow the new qc.
	 */
	if (ap->nr_active_links == 0)
		return 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/*
		 * The port is operating in host queuing mode (EDMA).
		 * It can accommodate a new qc if the qc protocol
		 * is compatible with the current host queue mode.
		 */
		if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
			/*
			 * The host queue (EDMA) is in NCQ mode.
			 * If the new qc is also an NCQ command,
			 * then allow the new qc.
			 */
			if (qc->tf.protocol == ATA_PROT_NCQ)
				return 0;
		} else {
			/*
			 * The host queue (EDMA) is in non-NCQ, DMA mode.
			 * If the new qc is also a non-NCQ, DMA command,
			 * then allow the new qc.
			 */
			if (qc->tf.protocol == ATA_PROT_DMA)
				return 0;
		}
	}
	return ATA_DEFER_PORT;
}

static void mv_config_fbs(void __iomem *port_mmio, int want_ncq, int want_fbs)
{
	u32 new_fiscfg, old_fiscfg;
	u32 new_ltmode, old_ltmode;
	u32 new_haltcond, old_haltcond;

	old_fiscfg   = readl(port_mmio + FISCFG_OFS);
	old_ltmode   = readl(port_mmio + LTMODE_OFS);
	old_haltcond = readl(port_mmio + EDMA_HALTCOND_OFS);

	new_fiscfg   = old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
	new_ltmode   = old_ltmode & ~LTMODE_BIT8;
	new_haltcond = old_haltcond | EDMA_ERR_DEV;

	if (want_fbs) {
		new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC;
		new_ltmode = old_ltmode | LTMODE_BIT8;
		if (want_ncq)
			new_haltcond &= ~EDMA_ERR_DEV;
		else
			new_fiscfg |= FISCFG_WAIT_DEV_ERR;
	}

	if (new_fiscfg != old_fiscfg)
		writelfl(new_fiscfg, port_mmio + FISCFG_OFS);
	if (new_ltmode != old_ltmode)
		writelfl(new_ltmode, port_mmio + LTMODE_OFS);
	if (new_haltcond != old_haltcond)
		writelfl(new_haltcond, port_mmio + EDMA_HALTCOND_OFS);
}

static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 old, new;

	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
	old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS);
	if (want_ncq)
		new = old | (1 << 22);
	else
		new = old & ~(1 << 22);
	if (new != old)
		writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS);
}

static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
	pp->pp_flags &= ~MV_PP_FLAG_FBS_EN;

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv)) {
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		mv_60x1_errata_sata25(ap, want_ncq);

	} else if (IS_GEN_IIE(hpriv)) {
		int want_fbs = sata_pmp_attached(ap);
		/*
		 * Possible future enhancement:
		 *
		 * The chip can use FBS with non-NCQ, if we allow it,
		 * but first we need to have the error handling in place
		 * for this mode (datasheet section 7.3.15.4.2.3).
		 * So disallow non-NCQ FBS for now.
		 */
		want_fbs &= want_ncq;

		mv_config_fbs(port_mmio, want_ncq, want_fbs);

		if (want_fbs) {
			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
		}

		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		if (HAS_PCI(ap->host))
			cfg |= (1 << 18);	/* enab early completion */
		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}

/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_port_free_dma_mem(ap);
}

/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}

static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
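/*
 * Editorial illustration (derived from the enum definitions above, not
 * from vendor documentation): each CRQB ata_cmd[] word packed by
 * mv_crqb_pack_cmd() is data | (reg << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS,
 * with CRQB_CMD_LAST set on the final word.  Packing tf->command as the
 * last word therefore yields tf->command | (ATA_REG_CMD << 8) | 0x1000 |
 * 0x8000.
 */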
/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		mv_pmp_select(ap, qc->dev->link->pmp);
		return ata_sff_qc_issue(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
	in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
		return NULL;
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
		qc = NULL;
	return qc;
}

static void mv_pmp_error_handler(struct ata_port *ap)
{
	unsigned int pmp, pmp_map;
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
		/*
		 * Perform NCQ error analysis on failed PMPs
		 * before we freeze the port entirely.
		 *
		 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
		 */
		pmp_map = pp->delayed_eh_pmp_map;
		pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
		for (pmp = 0; pmp_map != 0; pmp++) {
			unsigned int this_pmp = (1 << pmp);
			if (pmp_map & this_pmp) {
				struct ata_link *link = &ap->pmp_link[pmp];
				pmp_map &= ~this_pmp;
				ata_eh_analyze_ncq_error(link);
			}
		}
		ata_port_freeze(ap);
	}
	sata_pmp_error_handler(ap);
}

static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);

	return readl(port_mmio + SATA_TESTCTL_OFS) >> 16;
}

static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
{
	struct ata_eh_info *ehi;
	unsigned int pmp;

	/*
	 * Initialize EH info for PMPs which saw device errors
	 */
	ehi = &ap->link.eh_info;
	for (pmp = 0; pmp_map != 0; pmp++) {
		unsigned int this_pmp = (1 << pmp);
		if (pmp_map & this_pmp) {
			struct ata_link *link = &ap->pmp_link[pmp];

			pmp_map &= ~this_pmp;
			ehi = &link->eh_info;
			ata_ehi_clear_desc(ehi);
			ata_ehi_push_desc(ehi, "dev err");
			ehi->err_mask |= AC_ERR_DEV;
			ehi->action |= ATA_EH_RESET;
			ata_link_abort(link);
		}
	}
}

static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	int failed_links;
	unsigned int old_map, new_map;

	/*
	 * Device error during FBS+NCQ operation:
	 *
	 * Set a port flag to prevent further I/O being enqueued.
	 * Leave the EDMA running to drain outstanding commands from this port.
	 * Perform the post-mortem/EH only when all responses are complete.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
	 */
	if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
		pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
		pp->delayed_eh_pmp_map = 0;
	}
	old_map = pp->delayed_eh_pmp_map;
	new_map = old_map | mv_get_err_pmp_map(ap);

	if (old_map != new_map) {
		pp->delayed_eh_pmp_map = new_map;
		mv_pmp_eh_prep(ap, new_map & ~old_map);
	}
	failed_links = hweight16(new_map);

	ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
			"failed_links=%d nr_active_links=%d\n",
			__func__, pp->delayed_eh_pmp_map,
			ap->qc_active, failed_links,
			ap->nr_active_links);

	if (ap->nr_active_links <= failed_links) {
		mv_process_crpb_entries(ap, pp);
		mv_stop_edma(ap);
		mv_eh_freeze(ap);
		ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
		return 1;	/* handled */
	}
	ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
	return 1;	/* handled */
}

static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
{
	/*
	 * Possible future enhancement:
	 *
	 * FBS+non-NCQ operation is not yet implemented.
	 * See related notes in mv_edma_cfg().
	 *
	 * Device error during FBS+non-NCQ operation:
	 *
	 * We need to snapshot the shadow registers for each failed command.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
	 */
	return 0;	/* not handled */
}

static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
{
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;	/* EDMA was not active: not handled */
	if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
		return 0;	/* FBS was not active: not handled */

	if (!(edma_err_cause & EDMA_ERR_DEV))
		return 0;	/* non DEV error: not handled */
	edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
	if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
		return 0;	/* other problems: not handled */

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
		/*
		 * EDMA should NOT have self-disabled for this case.
		 * If it did, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			ata_port_printk(ap, KERN_WARNING,
				"%s: err_cause=0x%x pp_flags=0x%x\n",
				__func__, edma_err_cause, pp->pp_flags);
			return 0; /* not handled */
		}
		return mv_handle_fbs_ncq_dev_err(ap);
	} else {
		/*
		 * EDMA should have self-disabled for this case.
		 * If it did not, then something is wrong elsewhere,
		 * and we cannot handle it here.
1766 */ 1767 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) { 1768 ata_port_printk(ap, KERN_WARNING, 1769 "%s: err_cause=0x%x pp_flags=0x%x\n", 1770 __func__, edma_err_cause, pp->pp_flags); 1771 return 0; /* not handled */ 1772 } 1773 return mv_handle_fbs_non_ncq_dev_err(ap); 1774 } 1775 return 0; /* not handled */ 1776 } 1777 1778 static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) 1779 { 1780 struct ata_eh_info *ehi = &ap->link.eh_info; 1781 char *when = "idle"; 1782 1783 ata_ehi_clear_desc(ehi); 1784 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) { 1785 when = "disabled"; 1786 } else if (edma_was_enabled) { 1787 when = "EDMA enabled"; 1788 } else { 1789 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); 1790 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) 1791 when = "polling"; 1792 } 1793 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when); 1794 ehi->err_mask |= AC_ERR_OTHER; 1795 ehi->action |= ATA_EH_RESET; 1796 ata_port_freeze(ap); 1797 } 1798 1799 /** 1800 * mv_err_intr - Handle error interrupts on the port 1801 * @ap: ATA channel to manipulate 1802 * @qc: affected command (non-NCQ), or NULL 1803 * 1804 * Most cases require a full reset of the chip's state machine, 1805 * which also performs a COMRESET. 1806 * Also, if the port disabled DMA, update our cached copy to match. 1807 * 1808 * LOCKING: 1809 * Inherited from caller. 1810 */ 1811 static void mv_err_intr(struct ata_port *ap) 1812 { 1813 void __iomem *port_mmio = mv_ap_base(ap); 1814 u32 edma_err_cause, eh_freeze_mask, serr = 0; 1815 struct mv_port_priv *pp = ap->private_data; 1816 struct mv_host_priv *hpriv = ap->host->private_data; 1817 unsigned int action = 0, err_mask = 0; 1818 struct ata_eh_info *ehi = &ap->link.eh_info; 1819 struct ata_queued_cmd *qc; 1820 int abort = 0; 1821 1822 /* 1823 * Read and clear the SError and err_cause bits. 1824 */ 1825 sata_scr_read(&ap->link, SCR_ERROR, &serr); 1826 sata_scr_write_flush(&ap->link, SCR_ERROR, serr); 1827 1828 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1829 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1830 1831 ata_port_printk(ap, KERN_INFO, "%s: err_cause=%08x pp_flags=0x%x\n", 1832 __func__, edma_err_cause, pp->pp_flags); 1833 1834 if (edma_err_cause & EDMA_ERR_DEV) { 1835 /* 1836 * Device errors during FIS-based switching operation 1837 * require special handling. 1838 */ 1839 if (mv_handle_dev_err(ap, edma_err_cause)) 1840 return; 1841 } 1842 1843 qc = mv_get_active_qc(ap); 1844 ata_ehi_clear_desc(ehi); 1845 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x", 1846 edma_err_cause, pp->pp_flags); 1847 /* 1848 * All generations share these EDMA error cause bits: 1849 */ 1850 if (edma_err_cause & EDMA_ERR_DEV) { 1851 err_mask |= AC_ERR_DEV; 1852 action |= ATA_EH_RESET; 1853 ata_ehi_push_desc(ehi, "dev error"); 1854 } 1855 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | 1856 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | 1857 EDMA_ERR_INTRL_PAR)) { 1858 err_mask |= AC_ERR_ATA_BUS; 1859 action |= ATA_EH_RESET; 1860 ata_ehi_push_desc(ehi, "parity error"); 1861 } 1862 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) { 1863 ata_ehi_hotplugged(ehi); 1864 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? 
1865 "dev disconnect" : "dev connect"); 1866 action |= ATA_EH_RESET; 1867 } 1868 1869 /* 1870 * Gen-I has a different SELF_DIS bit, 1871 * different FREEZE bits, and no SERR bit: 1872 */ 1873 if (IS_GEN_I(hpriv)) { 1874 eh_freeze_mask = EDMA_EH_FREEZE_5; 1875 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) { 1876 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 1877 ata_ehi_push_desc(ehi, "EDMA self-disable"); 1878 } 1879 } else { 1880 eh_freeze_mask = EDMA_EH_FREEZE; 1881 if (edma_err_cause & EDMA_ERR_SELF_DIS) { 1882 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 1883 ata_ehi_push_desc(ehi, "EDMA self-disable"); 1884 } 1885 if (edma_err_cause & EDMA_ERR_SERR) { 1886 ata_ehi_push_desc(ehi, "SError=%08x", serr); 1887 err_mask |= AC_ERR_ATA_BUS; 1888 action |= ATA_EH_RESET; 1889 } 1890 } 1891 1892 if (!err_mask) { 1893 err_mask = AC_ERR_OTHER; 1894 action |= ATA_EH_RESET; 1895 } 1896 1897 ehi->serror |= serr; 1898 ehi->action |= action; 1899 1900 if (qc) 1901 qc->err_mask |= err_mask; 1902 else 1903 ehi->err_mask |= err_mask; 1904 1905 if (err_mask == AC_ERR_DEV) { 1906 /* 1907 * Cannot do ata_port_freeze() here, 1908 * because it would kill PIO access, 1909 * which is needed for further diagnosis. 1910 */ 1911 mv_eh_freeze(ap); 1912 abort = 1; 1913 } else if (edma_err_cause & eh_freeze_mask) { 1914 /* 1915 * Note to self: ata_port_freeze() calls ata_port_abort() 1916 */ 1917 ata_port_freeze(ap); 1918 } else { 1919 abort = 1; 1920 } 1921 1922 if (abort) { 1923 if (qc) 1924 ata_link_abort(qc->dev->link); 1925 else 1926 ata_port_abort(ap); 1927 } 1928 } 1929 1930 static void mv_process_crpb_response(struct ata_port *ap, 1931 struct mv_crpb *response, unsigned int tag, int ncq_enabled) 1932 { 1933 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); 1934 1935 if (qc) { 1936 u8 ata_status; 1937 u16 edma_status = le16_to_cpu(response->flags); 1938 /* 1939 * edma_status from a response queue entry: 1940 * LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only). 1941 * MSB is saved ATA status from command completion. 1942 */ 1943 if (!ncq_enabled) { 1944 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV; 1945 if (err_cause) { 1946 /* 1947 * Error will be seen/handled by mv_err_intr(). 1948 * So do nothing at all here. 
1949 */
1950 return;
1951 }
1952 }
1953 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
1954 if (!ac_err_mask(ata_status))
1955 ata_qc_complete(qc);
1956 /* else: leave it for mv_err_intr() */
1957 } else {
1958 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
1959 __func__, tag);
1960 }
1961 }
1962
1963 static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
1964 {
1965 void __iomem *port_mmio = mv_ap_base(ap);
1966 struct mv_host_priv *hpriv = ap->host->private_data;
1967 u32 in_index;
1968 bool work_done = false;
1969 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
1970
1971 /* Get the hardware queue position index */
1972 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1973 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1974
1975 /* Process any new responses since the last time we looked */
1976 while (in_index != pp->resp_idx) {
1977 unsigned int tag;
1978 struct mv_crpb *response = &pp->crpb[pp->resp_idx];
1979
1980 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
1981
1982 if (IS_GEN_I(hpriv)) {
1983 /* 50xx: no NCQ, only one command active at a time */
1984 tag = ap->link.active_tag;
1985 } else {
1986 /* Gen II/IIE: get command tag from CRPB entry */
1987 tag = le16_to_cpu(response->id) & 0x1f;
1988 }
1989 mv_process_crpb_response(ap, response, tag, ncq_enabled);
1990 work_done = true;
1991 }
1992
1993 /* Update the software queue position index in hardware */
1994 if (work_done)
1995 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1996 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
1997 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1998 }
1999
2000 static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2001 {
2002 struct mv_port_priv *pp;
2003 int edma_was_enabled;
2004
2005 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
2006 mv_unexpected_intr(ap, 0);
2007 return;
2008 }
2009 /*
2010 * Grab a snapshot of the EDMA_EN flag setting,
2011 * so that we have a consistent view for this port,
2012 * even if one of the routines we call changes it.
2013 */
2014 pp = ap->private_data;
2015 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2016 /*
2017 * Process completed CRPB response(s) before other events.
2018 */
2019 if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2020 mv_process_crpb_entries(ap, pp);
2021 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2022 mv_handle_fbs_ncq_dev_err(ap);
2023 }
2024 /*
2025 * Handle chip-reported errors, or continue on to handle PIO.
2026 */
2027 if (unlikely(port_cause & ERR_IRQ)) {
2028 mv_err_intr(ap);
2029 } else if (!edma_was_enabled) {
2030 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2031 if (qc)
2032 ata_sff_host_intr(ap, qc);
2033 else
2034 mv_unexpected_intr(ap, edma_was_enabled);
2035 }
2036 }
2037
2038 /**
2039 * mv_host_intr - Handle all interrupts on the given host controller
2040 * @host: host specific structure
2041 * @main_irq_cause: Main interrupt cause register for the chip.
2042 *
2043 * LOCKING:
2044 * Inherited from caller.
2045 */ 2046 static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) 2047 { 2048 struct mv_host_priv *hpriv = host->private_data; 2049 void __iomem *mmio = hpriv->base, *hc_mmio; 2050 unsigned int handled = 0, port; 2051 2052 for (port = 0; port < hpriv->n_ports; port++) { 2053 struct ata_port *ap = host->ports[port]; 2054 unsigned int p, shift, hardport, port_cause; 2055 2056 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); 2057 /* 2058 * Each hc within the host has its own hc_irq_cause register, 2059 * where the interrupting ports bits get ack'd. 2060 */ 2061 if (hardport == 0) { /* first port on this hc ? */ 2062 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND; 2063 u32 port_mask, ack_irqs; 2064 /* 2065 * Skip this entire hc if nothing pending for any ports 2066 */ 2067 if (!hc_cause) { 2068 port += MV_PORTS_PER_HC - 1; 2069 continue; 2070 } 2071 /* 2072 * We don't need/want to read the hc_irq_cause register, 2073 * because doing so hurts performance, and 2074 * main_irq_cause already gives us everything we need. 2075 * 2076 * But we do have to *write* to the hc_irq_cause to ack 2077 * the ports that we are handling this time through. 2078 * 2079 * This requires that we create a bitmap for those 2080 * ports which interrupted us, and use that bitmap 2081 * to ack (only) those ports via hc_irq_cause. 2082 */ 2083 ack_irqs = 0; 2084 for (p = 0; p < MV_PORTS_PER_HC; ++p) { 2085 if ((port + p) >= hpriv->n_ports) 2086 break; 2087 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2); 2088 if (hc_cause & port_mask) 2089 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p; 2090 } 2091 hc_mmio = mv_hc_base_from_port(mmio, port); 2092 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS); 2093 handled = 1; 2094 } 2095 /* 2096 * Handle interrupts signalled for this port: 2097 */ 2098 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ); 2099 if (port_cause) 2100 mv_port_intr(ap, port_cause); 2101 } 2102 return handled; 2103 } 2104 2105 static int mv_pci_error(struct ata_host *host, void __iomem *mmio) 2106 { 2107 struct mv_host_priv *hpriv = host->private_data; 2108 struct ata_port *ap; 2109 struct ata_queued_cmd *qc; 2110 struct ata_eh_info *ehi; 2111 unsigned int i, err_mask, printed = 0; 2112 u32 err_cause; 2113 2114 err_cause = readl(mmio + hpriv->irq_cause_ofs); 2115 2116 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", 2117 err_cause); 2118 2119 DPRINTK("All regs @ PCI error\n"); 2120 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); 2121 2122 writelfl(0, mmio + hpriv->irq_cause_ofs); 2123 2124 for (i = 0; i < host->n_ports; i++) { 2125 ap = host->ports[i]; 2126 if (!ata_link_offline(&ap->link)) { 2127 ehi = &ap->link.eh_info; 2128 ata_ehi_clear_desc(ehi); 2129 if (!printed++) 2130 ata_ehi_push_desc(ehi, 2131 "PCI err cause 0x%08x", err_cause); 2132 err_mask = AC_ERR_HOST_BUS; 2133 ehi->action = ATA_EH_RESET; 2134 qc = ata_qc_from_tag(ap, ap->link.active_tag); 2135 if (qc) 2136 qc->err_mask |= err_mask; 2137 else 2138 ehi->err_mask |= err_mask; 2139 2140 ata_port_freeze(ap); 2141 } 2142 } 2143 return 1; /* handled */ 2144 } 2145 2146 /** 2147 * mv_interrupt - Main interrupt event handler 2148 * @irq: unused 2149 * @dev_instance: private data; in this case the host structure 2150 * 2151 * Read the read only register to determine if any host 2152 * controllers have pending interrupts. If so, call lower level 2153 * routine to handle. Also check for PCI errors which are only 2154 * reported here. 
2155 * 2156 * LOCKING: 2157 * This routine holds the host lock while processing pending 2158 * interrupts. 2159 */ 2160 static irqreturn_t mv_interrupt(int irq, void *dev_instance) 2161 { 2162 struct ata_host *host = dev_instance; 2163 struct mv_host_priv *hpriv = host->private_data; 2164 unsigned int handled = 0; 2165 u32 main_irq_cause, main_irq_mask; 2166 2167 spin_lock(&host->lock); 2168 main_irq_cause = readl(hpriv->main_irq_cause_addr); 2169 main_irq_mask = readl(hpriv->main_irq_mask_addr); 2170 /* 2171 * Deal with cases where we either have nothing pending, or have read 2172 * a bogus register value which can indicate HW removal or PCI fault. 2173 */ 2174 if ((main_irq_cause & main_irq_mask) && (main_irq_cause != 0xffffffffU)) { 2175 if (unlikely((main_irq_cause & PCI_ERR) && HAS_PCI(host))) 2176 handled = mv_pci_error(host, hpriv->base); 2177 else 2178 handled = mv_host_intr(host, main_irq_cause); 2179 } 2180 spin_unlock(&host->lock); 2181 return IRQ_RETVAL(handled); 2182 } 2183 2184 static unsigned int mv5_scr_offset(unsigned int sc_reg_in) 2185 { 2186 unsigned int ofs; 2187 2188 switch (sc_reg_in) { 2189 case SCR_STATUS: 2190 case SCR_ERROR: 2191 case SCR_CONTROL: 2192 ofs = sc_reg_in * sizeof(u32); 2193 break; 2194 default: 2195 ofs = 0xffffffffU; 2196 break; 2197 } 2198 return ofs; 2199 } 2200 2201 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val) 2202 { 2203 struct mv_host_priv *hpriv = ap->host->private_data; 2204 void __iomem *mmio = hpriv->base; 2205 void __iomem *addr = mv5_phy_base(mmio, ap->port_no); 2206 unsigned int ofs = mv5_scr_offset(sc_reg_in); 2207 2208 if (ofs != 0xffffffffU) { 2209 *val = readl(addr + ofs); 2210 return 0; 2211 } else 2212 return -EINVAL; 2213 } 2214 2215 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) 2216 { 2217 struct mv_host_priv *hpriv = ap->host->private_data; 2218 void __iomem *mmio = hpriv->base; 2219 void __iomem *addr = mv5_phy_base(mmio, ap->port_no); 2220 unsigned int ofs = mv5_scr_offset(sc_reg_in); 2221 2222 if (ofs != 0xffffffffU) { 2223 writelfl(val, addr + ofs); 2224 return 0; 2225 } else 2226 return -EINVAL; 2227 } 2228 2229 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio) 2230 { 2231 struct pci_dev *pdev = to_pci_dev(host->dev); 2232 int early_5080; 2233 2234 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0); 2235 2236 if (!early_5080) { 2237 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL); 2238 tmp |= (1 << 0); 2239 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); 2240 } 2241 2242 mv_reset_pci_bus(host, mmio); 2243 } 2244 2245 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) 2246 { 2247 writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS); 2248 } 2249 2250 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, 2251 void __iomem *mmio) 2252 { 2253 void __iomem *phy_mmio = mv5_phy_base(mmio, idx); 2254 u32 tmp; 2255 2256 tmp = readl(phy_mmio + MV5_PHY_MODE); 2257 2258 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */ 2259 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */ 2260 } 2261 2262 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) 2263 { 2264 u32 tmp; 2265 2266 writel(0, mmio + MV_GPIO_PORT_CTL_OFS); 2267 2268 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */ 2269 2270 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL); 2271 tmp |= ~(1 << 0); 2272 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); 2273 } 2274 2275 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 2276 
unsigned int port) 2277 { 2278 void __iomem *phy_mmio = mv5_phy_base(mmio, port); 2279 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5); 2280 u32 tmp; 2281 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0); 2282 2283 if (fix_apm_sq) { 2284 tmp = readl(phy_mmio + MV5_LTMODE_OFS); 2285 tmp |= (1 << 19); 2286 writel(tmp, phy_mmio + MV5_LTMODE_OFS); 2287 2288 tmp = readl(phy_mmio + MV5_PHY_CTL_OFS); 2289 tmp &= ~0x3; 2290 tmp |= 0x1; 2291 writel(tmp, phy_mmio + MV5_PHY_CTL_OFS); 2292 } 2293 2294 tmp = readl(phy_mmio + MV5_PHY_MODE); 2295 tmp &= ~mask; 2296 tmp |= hpriv->signal[port].pre; 2297 tmp |= hpriv->signal[port].amps; 2298 writel(tmp, phy_mmio + MV5_PHY_MODE); 2299 } 2300 2301 2302 #undef ZERO 2303 #define ZERO(reg) writel(0, port_mmio + (reg)) 2304 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, 2305 unsigned int port) 2306 { 2307 void __iomem *port_mmio = mv_port_base(mmio, port); 2308 2309 mv_reset_channel(hpriv, mmio, port); 2310 2311 ZERO(0x028); /* command */ 2312 writel(0x11f, port_mmio + EDMA_CFG_OFS); 2313 ZERO(0x004); /* timer */ 2314 ZERO(0x008); /* irq err cause */ 2315 ZERO(0x00c); /* irq err mask */ 2316 ZERO(0x010); /* rq bah */ 2317 ZERO(0x014); /* rq inp */ 2318 ZERO(0x018); /* rq outp */ 2319 ZERO(0x01c); /* respq bah */ 2320 ZERO(0x024); /* respq outp */ 2321 ZERO(0x020); /* respq inp */ 2322 ZERO(0x02c); /* test control */ 2323 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS); 2324 } 2325 #undef ZERO 2326 2327 #define ZERO(reg) writel(0, hc_mmio + (reg)) 2328 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 2329 unsigned int hc) 2330 { 2331 void __iomem *hc_mmio = mv_hc_base(mmio, hc); 2332 u32 tmp; 2333 2334 ZERO(0x00c); 2335 ZERO(0x010); 2336 ZERO(0x014); 2337 ZERO(0x018); 2338 2339 tmp = readl(hc_mmio + 0x20); 2340 tmp &= 0x1c1c1c1c; 2341 tmp |= 0x03030303; 2342 writel(tmp, hc_mmio + 0x20); 2343 } 2344 #undef ZERO 2345 2346 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 2347 unsigned int n_hc) 2348 { 2349 unsigned int hc, port; 2350 2351 for (hc = 0; hc < n_hc; hc++) { 2352 for (port = 0; port < MV_PORTS_PER_HC; port++) 2353 mv5_reset_hc_port(hpriv, mmio, 2354 (hc * MV_PORTS_PER_HC) + port); 2355 2356 mv5_reset_one_hc(hpriv, mmio, hc); 2357 } 2358 2359 return 0; 2360 } 2361 2362 #undef ZERO 2363 #define ZERO(reg) writel(0, mmio + (reg)) 2364 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio) 2365 { 2366 struct mv_host_priv *hpriv = host->private_data; 2367 u32 tmp; 2368 2369 tmp = readl(mmio + MV_PCI_MODE_OFS); 2370 tmp &= 0xff00ffff; 2371 writel(tmp, mmio + MV_PCI_MODE_OFS); 2372 2373 ZERO(MV_PCI_DISC_TIMER); 2374 ZERO(MV_PCI_MSI_TRIGGER); 2375 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS); 2376 ZERO(PCI_HC_MAIN_IRQ_MASK_OFS); 2377 ZERO(MV_PCI_SERR_MASK); 2378 ZERO(hpriv->irq_cause_ofs); 2379 ZERO(hpriv->irq_mask_ofs); 2380 ZERO(MV_PCI_ERR_LOW_ADDRESS); 2381 ZERO(MV_PCI_ERR_HIGH_ADDRESS); 2382 ZERO(MV_PCI_ERR_ATTRIBUTE); 2383 ZERO(MV_PCI_ERR_COMMAND); 2384 } 2385 #undef ZERO 2386 2387 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) 2388 { 2389 u32 tmp; 2390 2391 mv5_reset_flash(hpriv, mmio); 2392 2393 tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS); 2394 tmp &= 0x3; 2395 tmp |= (1 << 5) | (1 << 6); 2396 writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS); 2397 } 2398 2399 /** 2400 * mv6_reset_hc - Perform the 6xxx global soft reset 2401 * @mmio: base address of the HBA 2402 * 2403 * This routine only applies to 6xxx parts. 
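* The sequence is: stop the PCI master and wait for it to drain, assert
* GLOB_SFT_RST, then clear the reset and re-enable the PCI master.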
2404 * 2405 * LOCKING: 2406 * Inherited from caller. 2407 */ 2408 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 2409 unsigned int n_hc) 2410 { 2411 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS; 2412 int i, rc = 0; 2413 u32 t; 2414 2415 /* Following procedure defined in PCI "main command and status 2416 * register" table. 2417 */ 2418 t = readl(reg); 2419 writel(t | STOP_PCI_MASTER, reg); 2420 2421 for (i = 0; i < 1000; i++) { 2422 udelay(1); 2423 t = readl(reg); 2424 if (PCI_MASTER_EMPTY & t) 2425 break; 2426 } 2427 if (!(PCI_MASTER_EMPTY & t)) { 2428 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n"); 2429 rc = 1; 2430 goto done; 2431 } 2432 2433 /* set reset */ 2434 i = 5; 2435 do { 2436 writel(t | GLOB_SFT_RST, reg); 2437 t = readl(reg); 2438 udelay(1); 2439 } while (!(GLOB_SFT_RST & t) && (i-- > 0)); 2440 2441 if (!(GLOB_SFT_RST & t)) { 2442 printk(KERN_ERR DRV_NAME ": can't set global reset\n"); 2443 rc = 1; 2444 goto done; 2445 } 2446 2447 /* clear reset and *reenable the PCI master* (not mentioned in spec) */ 2448 i = 5; 2449 do { 2450 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg); 2451 t = readl(reg); 2452 udelay(1); 2453 } while ((GLOB_SFT_RST & t) && (i-- > 0)); 2454 2455 if (GLOB_SFT_RST & t) { 2456 printk(KERN_ERR DRV_NAME ": can't clear global reset\n"); 2457 rc = 1; 2458 } 2459 done: 2460 return rc; 2461 } 2462 2463 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, 2464 void __iomem *mmio) 2465 { 2466 void __iomem *port_mmio; 2467 u32 tmp; 2468 2469 tmp = readl(mmio + MV_RESET_CFG_OFS); 2470 if ((tmp & (1 << 0)) == 0) { 2471 hpriv->signal[idx].amps = 0x7 << 8; 2472 hpriv->signal[idx].pre = 0x1 << 5; 2473 return; 2474 } 2475 2476 port_mmio = mv_port_base(mmio, idx); 2477 tmp = readl(port_mmio + PHY_MODE2); 2478 2479 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */ 2480 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */ 2481 } 2482 2483 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) 2484 { 2485 writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS); 2486 } 2487 2488 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 2489 unsigned int port) 2490 { 2491 void __iomem *port_mmio = mv_port_base(mmio, port); 2492 2493 u32 hp_flags = hpriv->hp_flags; 2494 int fix_phy_mode2 = 2495 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); 2496 int fix_phy_mode4 = 2497 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); 2498 u32 m2, tmp; 2499 2500 if (fix_phy_mode2) { 2501 m2 = readl(port_mmio + PHY_MODE2); 2502 m2 &= ~(1 << 16); 2503 m2 |= (1 << 31); 2504 writel(m2, port_mmio + PHY_MODE2); 2505 2506 udelay(200); 2507 2508 m2 = readl(port_mmio + PHY_MODE2); 2509 m2 &= ~((1 << 16) | (1 << 31)); 2510 writel(m2, port_mmio + PHY_MODE2); 2511 2512 udelay(200); 2513 } 2514 2515 /* who knows what this magic does */ 2516 tmp = readl(port_mmio + PHY_MODE3); 2517 tmp &= ~0x7F800000; 2518 tmp |= 0x2A800000; 2519 writel(tmp, port_mmio + PHY_MODE3); 2520 2521 if (fix_phy_mode4) { 2522 u32 m4; 2523 2524 m4 = readl(port_mmio + PHY_MODE4); 2525 2526 if (hp_flags & MV_HP_ERRATA_60X1B2) 2527 tmp = readl(port_mmio + PHY_MODE3); 2528 2529 /* workaround for errata FEr SATA#10 (part 1) */ 2530 m4 = (m4 & ~(1 << 1)) | (1 << 0); 2531 2532 writel(m4, port_mmio + PHY_MODE4); 2533 2534 if (hp_flags & MV_HP_ERRATA_60X1B2) 2535 writel(tmp, port_mmio + PHY_MODE3); 2536 } 2537 2538 /* Revert values of pre-emphasis and signal amps to the saved ones */ 2539 m2 = readl(port_mmio + PHY_MODE2); 2540 2541 m2 &= 
~MV_M2_PREAMP_MASK;
2542 m2 |= hpriv->signal[port].amps;
2543 m2 |= hpriv->signal[port].pre;
2544 m2 &= ~(1 << 16);
2545
2546 /* according to mvSata 3.6.1, some IIE values are fixed */
2547 if (IS_GEN_IIE(hpriv)) {
2548 m2 &= ~0xC30FF01F;
2549 m2 |= 0x0000900F;
2550 }
2551
2552 writel(m2, port_mmio + PHY_MODE2);
2553 }
2554
2555 /* TODO: use the generic LED interface to configure the SATA Presence */
2556 /* & Activity LEDs on the board */
2557 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2558 void __iomem *mmio)
2559 {
2560 return;
2561 }
2562
2563 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2564 void __iomem *mmio)
2565 {
2566 void __iomem *port_mmio;
2567 u32 tmp;
2568
2569 port_mmio = mv_port_base(mmio, idx);
2570 tmp = readl(port_mmio + PHY_MODE2);
2571
2572 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2573 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2574 }
2575
2576 #undef ZERO
2577 #define ZERO(reg) writel(0, port_mmio + (reg))
2578 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2579 void __iomem *mmio, unsigned int port)
2580 {
2581 void __iomem *port_mmio = mv_port_base(mmio, port);
2582
2583 mv_reset_channel(hpriv, mmio, port);
2584
2585 ZERO(0x028); /* command */
2586 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2587 ZERO(0x004); /* timer */
2588 ZERO(0x008); /* irq err cause */
2589 ZERO(0x00c); /* irq err mask */
2590 ZERO(0x010); /* rq bah */
2591 ZERO(0x014); /* rq inp */
2592 ZERO(0x018); /* rq outp */
2593 ZERO(0x01c); /* respq bah */
2594 ZERO(0x024); /* respq outp */
2595 ZERO(0x020); /* respq inp */
2596 ZERO(0x02c); /* test control */
2597 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
2598 }
2599
2600 #undef ZERO
2601
2602 #define ZERO(reg) writel(0, hc_mmio + (reg))
2603 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2604 void __iomem *mmio)
2605 {
2606 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2607
2608 ZERO(0x00c);
2609 ZERO(0x010);
2610 ZERO(0x014);
2611
2612 }
2613
2614 #undef ZERO
2615
2616 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2617 void __iomem *mmio, unsigned int n_hc)
2618 {
2619 unsigned int port;
2620
2621 for (port = 0; port < hpriv->n_ports; port++)
2622 mv_soc_reset_hc_port(hpriv, mmio, port);
2623
2624 mv_soc_reset_one_hc(hpriv, mmio);
2625
2626 return 0;
2627 }
2628
2629 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2630 void __iomem *mmio)
2631 {
2632 return;
2633 }
2634
2635 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2636 {
2637 return;
2638 }
2639
2640 static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
2641 {
2642 u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS);
2643
2644 ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */
2645 if (want_gen2i)
2646 ifcfg |= (1 << 7); /* enable gen2i speed */
2647 writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS);
2648 }
2649
2650 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
2651 unsigned int port_no)
2652 {
2653 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2654
2655 /*
2656 * The datasheet warns against setting EDMA_RESET when EDMA is active
2657 * (but doesn't say what the problem might be). So we first try
2658 * to disable the EDMA engine before doing the EDMA_RESET operation.
2659 */ 2660 mv_stop_edma_engine(port_mmio); 2661 writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS); 2662 2663 if (!IS_GEN_I(hpriv)) { 2664 /* Enable 3.0gb/s link speed: this survives EDMA_RESET */ 2665 mv_setup_ifcfg(port_mmio, 1); 2666 } 2667 /* 2668 * Strobing EDMA_RESET here causes a hard reset of the SATA transport, 2669 * link, and physical layers. It resets all SATA interface registers 2670 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev. 2671 */ 2672 writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS); 2673 udelay(25); /* allow reset propagation */ 2674 writelfl(0, port_mmio + EDMA_CMD_OFS); 2675 2676 hpriv->ops->phy_errata(hpriv, mmio, port_no); 2677 2678 if (IS_GEN_I(hpriv)) 2679 mdelay(1); 2680 } 2681 2682 static void mv_pmp_select(struct ata_port *ap, int pmp) 2683 { 2684 if (sata_pmp_supported(ap)) { 2685 void __iomem *port_mmio = mv_ap_base(ap); 2686 u32 reg = readl(port_mmio + SATA_IFCTL_OFS); 2687 int old = reg & 0xf; 2688 2689 if (old != pmp) { 2690 reg = (reg & ~0xf) | pmp; 2691 writelfl(reg, port_mmio + SATA_IFCTL_OFS); 2692 } 2693 } 2694 } 2695 2696 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, 2697 unsigned long deadline) 2698 { 2699 mv_pmp_select(link->ap, sata_srst_pmp(link)); 2700 return sata_std_hardreset(link, class, deadline); 2701 } 2702 2703 static int mv_softreset(struct ata_link *link, unsigned int *class, 2704 unsigned long deadline) 2705 { 2706 mv_pmp_select(link->ap, sata_srst_pmp(link)); 2707 return ata_sff_softreset(link, class, deadline); 2708 } 2709 2710 static int mv_hardreset(struct ata_link *link, unsigned int *class, 2711 unsigned long deadline) 2712 { 2713 struct ata_port *ap = link->ap; 2714 struct mv_host_priv *hpriv = ap->host->private_data; 2715 struct mv_port_priv *pp = ap->private_data; 2716 void __iomem *mmio = hpriv->base; 2717 int rc, attempts = 0, extra = 0; 2718 u32 sstatus; 2719 bool online; 2720 2721 mv_reset_channel(hpriv, mmio, ap->port_no); 2722 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 2723 2724 /* Workaround for errata FEr SATA#10 (part 2) */ 2725 do { 2726 const unsigned long *timing = 2727 sata_ehc_deb_timing(&link->eh_context); 2728 2729 rc = sata_link_hardreset(link, timing, deadline + extra, 2730 &online, NULL); 2731 if (rc) 2732 return rc; 2733 sata_scr_read(link, SCR_STATUS, &sstatus); 2734 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) { 2735 /* Force 1.5gb/s link speed and try again */ 2736 mv_setup_ifcfg(mv_ap_base(ap), 0); 2737 if (time_after(jiffies + HZ, deadline)) 2738 extra = HZ; /* only extend it once, max */ 2739 } 2740 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123); 2741 2742 return rc; 2743 } 2744 2745 static void mv_eh_freeze(struct ata_port *ap) 2746 { 2747 struct mv_host_priv *hpriv = ap->host->private_data; 2748 unsigned int shift, hardport, port = ap->port_no; 2749 u32 main_irq_mask; 2750 2751 /* FIXME: handle coalescing completion events properly */ 2752 2753 mv_stop_edma(ap); 2754 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); 2755 2756 /* disable assertion of portN err, done events */ 2757 main_irq_mask = readl(hpriv->main_irq_mask_addr); 2758 main_irq_mask &= ~((DONE_IRQ | ERR_IRQ) << shift); 2759 writelfl(main_irq_mask, hpriv->main_irq_mask_addr); 2760 } 2761 2762 static void mv_eh_thaw(struct ata_port *ap) 2763 { 2764 struct mv_host_priv *hpriv = ap->host->private_data; 2765 unsigned int shift, hardport, port = ap->port_no; 2766 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port); 2767 void __iomem *port_mmio = 
mv_ap_base(ap);
2768 u32 main_irq_mask, hc_irq_cause;
2769
2770 /* FIXME: handle coalescing completion events properly */
2771
2772 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
2773
2774 /* clear EDMA errors on this port */
2775 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2776
2777 /* clear pending irq events */
2778 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2779 hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport);
2780 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2781
2782 /* enable assertion of portN err, done events */
2783 main_irq_mask = readl(hpriv->main_irq_mask_addr);
2784 main_irq_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
2785 writelfl(main_irq_mask, hpriv->main_irq_mask_addr);
2786 }
2787
2788 /**
2789 * mv_port_init - Perform some early initialization on a single port.
2790 * @port: libata data structure storing shadow register addresses
2791 * @port_mmio: base address of the port
2792 *
2793 * Initialize shadow register mmio addresses, clear outstanding
2794 * interrupts on the port, and unmask interrupts for the future
2795 * start of the port.
2796 *
2797 * LOCKING:
2798 * Inherited from caller.
2799 */
2800 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2801 {
2802 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2803 unsigned serr_ofs;
2804
2805 /* PIO related setup
2806 */
2807 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2808 port->error_addr =
2809 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2810 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2811 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2812 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2813 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2814 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2815 port->status_addr =
2816 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2817 /* special case: control/altstatus doesn't have ATA_REG_ address */
2818 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2819
2820 /* unused: */
2821 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2822
2823 /* Clear any currently outstanding port interrupt conditions */
2824 serr_ofs = mv_scr_offset(SCR_ERROR);
2825 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2826 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2827
2828 /* unmask all non-transient EDMA error interrupts */
2829 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2830
2831 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2832 readl(port_mmio + EDMA_CFG_OFS),
2833 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2834 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2835 }
2836
2837 static unsigned int mv_in_pcix_mode(struct ata_host *host)
2838 {
2839 struct mv_host_priv *hpriv = host->private_data;
2840 void __iomem *mmio = hpriv->base;
2841 u32 reg;
2842
2843 if (!HAS_PCI(host) || IS_PCIE(hpriv))
2844 return 0; /* not PCI-X capable */
2845 reg = readl(mmio + MV_PCI_MODE_OFS);
2846 if ((reg & MV_PCI_MODE_MASK) == 0)
2847 return 0; /* conventional PCI mode */
2848 return 1; /* chip is in PCI-X mode */
2849 }
2850
2851 static int mv_pci_cut_through_okay(struct ata_host *host)
2852 {
2853 struct mv_host_priv *hpriv = host->private_data;
2854 void __iomem *mmio = hpriv->base;
2855 u32 reg;
2856
2857 if (!mv_in_pcix_mode(host)) {
2858 reg = readl(mmio + PCI_COMMAND_OFS);
2859 if (reg & PCI_COMMAND_MRDTRIG)
2860 return 0; /* not okay */
2861 }
2862
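/* either the chip is in PCI-X mode, or the Master Read Trigger bit is clear */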
return 1; /* okay */ 2863 } 2864 2865 static int mv_chip_id(struct ata_host *host, unsigned int board_idx) 2866 { 2867 struct pci_dev *pdev = to_pci_dev(host->dev); 2868 struct mv_host_priv *hpriv = host->private_data; 2869 u32 hp_flags = hpriv->hp_flags; 2870 2871 switch (board_idx) { 2872 case chip_5080: 2873 hpriv->ops = &mv5xxx_ops; 2874 hp_flags |= MV_HP_GEN_I; 2875 2876 switch (pdev->revision) { 2877 case 0x1: 2878 hp_flags |= MV_HP_ERRATA_50XXB0; 2879 break; 2880 case 0x3: 2881 hp_flags |= MV_HP_ERRATA_50XXB2; 2882 break; 2883 default: 2884 dev_printk(KERN_WARNING, &pdev->dev, 2885 "Applying 50XXB2 workarounds to unknown rev\n"); 2886 hp_flags |= MV_HP_ERRATA_50XXB2; 2887 break; 2888 } 2889 break; 2890 2891 case chip_504x: 2892 case chip_508x: 2893 hpriv->ops = &mv5xxx_ops; 2894 hp_flags |= MV_HP_GEN_I; 2895 2896 switch (pdev->revision) { 2897 case 0x0: 2898 hp_flags |= MV_HP_ERRATA_50XXB0; 2899 break; 2900 case 0x3: 2901 hp_flags |= MV_HP_ERRATA_50XXB2; 2902 break; 2903 default: 2904 dev_printk(KERN_WARNING, &pdev->dev, 2905 "Applying B2 workarounds to unknown rev\n"); 2906 hp_flags |= MV_HP_ERRATA_50XXB2; 2907 break; 2908 } 2909 break; 2910 2911 case chip_604x: 2912 case chip_608x: 2913 hpriv->ops = &mv6xxx_ops; 2914 hp_flags |= MV_HP_GEN_II; 2915 2916 switch (pdev->revision) { 2917 case 0x7: 2918 hp_flags |= MV_HP_ERRATA_60X1B2; 2919 break; 2920 case 0x9: 2921 hp_flags |= MV_HP_ERRATA_60X1C0; 2922 break; 2923 default: 2924 dev_printk(KERN_WARNING, &pdev->dev, 2925 "Applying B2 workarounds to unknown rev\n"); 2926 hp_flags |= MV_HP_ERRATA_60X1B2; 2927 break; 2928 } 2929 break; 2930 2931 case chip_7042: 2932 hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH; 2933 if (pdev->vendor == PCI_VENDOR_ID_TTI && 2934 (pdev->device == 0x2300 || pdev->device == 0x2310)) 2935 { 2936 /* 2937 * Highpoint RocketRAID PCIe 23xx series cards: 2938 * 2939 * Unconfigured drives are treated as "Legacy" 2940 * by the BIOS, and it overwrites sector 8 with 2941 * a "Lgcy" metadata block prior to Linux boot. 2942 * 2943 * Configured drives (RAID or JBOD) leave sector 8 2944 * alone, but instead overwrite a high numbered 2945 * sector for the RAID metadata. This sector can 2946 * be determined exactly, by truncating the physical 2947 * drive capacity to a nice even GB value. 2948 * 2949 * RAID metadata is at: (dev->n_sectors & ~0xfffff) 2950 * 2951 * Warn the user, lest they think we're just buggy. 2952 */ 2953 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID" 2954 " BIOS CORRUPTS DATA on all attached drives," 2955 " regardless of if/how they are configured." 
2956 " BEWARE!\n"); 2957 printk(KERN_WARNING DRV_NAME ": For data safety, do not" 2958 " use sectors 8-9 on \"Legacy\" drives," 2959 " and avoid the final two gigabytes on" 2960 " all RocketRAID BIOS initialized drives.\n"); 2961 } 2962 /* drop through */ 2963 case chip_6042: 2964 hpriv->ops = &mv6xxx_ops; 2965 hp_flags |= MV_HP_GEN_IIE; 2966 if (board_idx == chip_6042 && mv_pci_cut_through_okay(host)) 2967 hp_flags |= MV_HP_CUT_THROUGH; 2968 2969 switch (pdev->revision) { 2970 case 0x0: 2971 hp_flags |= MV_HP_ERRATA_XX42A0; 2972 break; 2973 case 0x1: 2974 hp_flags |= MV_HP_ERRATA_60X1C0; 2975 break; 2976 default: 2977 dev_printk(KERN_WARNING, &pdev->dev, 2978 "Applying 60X1C0 workarounds to unknown rev\n"); 2979 hp_flags |= MV_HP_ERRATA_60X1C0; 2980 break; 2981 } 2982 break; 2983 case chip_soc: 2984 hpriv->ops = &mv_soc_ops; 2985 hp_flags |= MV_HP_ERRATA_60X1C0; 2986 break; 2987 2988 default: 2989 dev_printk(KERN_ERR, host->dev, 2990 "BUG: invalid board index %u\n", board_idx); 2991 return 1; 2992 } 2993 2994 hpriv->hp_flags = hp_flags; 2995 if (hp_flags & MV_HP_PCIE) { 2996 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS; 2997 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS; 2998 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS; 2999 } else { 3000 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS; 3001 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS; 3002 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS; 3003 } 3004 3005 return 0; 3006 } 3007 3008 /** 3009 * mv_init_host - Perform some early initialization of the host. 3010 * @host: ATA host to initialize 3011 * @board_idx: controller index 3012 * 3013 * If possible, do an early global reset of the host. Then do 3014 * our port init and clear/unmask all/relevant host interrupts. 3015 * 3016 * LOCKING: 3017 * Inherited from caller. 3018 */ 3019 static int mv_init_host(struct ata_host *host, unsigned int board_idx) 3020 { 3021 int rc = 0, n_hc, port, hc; 3022 struct mv_host_priv *hpriv = host->private_data; 3023 void __iomem *mmio = hpriv->base; 3024 3025 rc = mv_chip_id(host, board_idx); 3026 if (rc) 3027 goto done; 3028 3029 if (HAS_PCI(host)) { 3030 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS; 3031 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK_OFS; 3032 } else { 3033 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS; 3034 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK_OFS; 3035 } 3036 3037 /* global interrupt mask: 0 == mask everything */ 3038 writel(0, hpriv->main_irq_mask_addr); 3039 3040 n_hc = mv_get_hc_count(host->ports[0]->flags); 3041 3042 for (port = 0; port < host->n_ports; port++) 3043 hpriv->ops->read_preamp(hpriv, port, mmio); 3044 3045 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc); 3046 if (rc) 3047 goto done; 3048 3049 hpriv->ops->reset_flash(hpriv, mmio); 3050 hpriv->ops->reset_bus(host, mmio); 3051 hpriv->ops->enable_leds(hpriv, mmio); 3052 3053 for (port = 0; port < host->n_ports; port++) { 3054 struct ata_port *ap = host->ports[port]; 3055 void __iomem *port_mmio = mv_port_base(mmio, port); 3056 3057 mv_port_init(&ap->ioaddr, port_mmio); 3058 3059 #ifdef CONFIG_PCI 3060 if (HAS_PCI(host)) { 3061 unsigned int offset = port_mmio - mmio; 3062 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio"); 3063 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port"); 3064 } 3065 #endif 3066 } 3067 3068 for (hc = 0; hc < n_hc; hc++) { 3069 void __iomem *hc_mmio = mv_hc_base(mmio, hc); 3070 3071 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause " 3072 "(before clear)=0x%08x\n", hc, 3073 readl(hc_mmio + HC_CFG_OFS), 3074 readl(hc_mmio 
+ HC_IRQ_CAUSE_OFS)); 3075 3076 /* Clear any currently outstanding hc interrupt conditions */ 3077 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS); 3078 } 3079 3080 if (HAS_PCI(host)) { 3081 /* Clear any currently outstanding host interrupt conditions */ 3082 writelfl(0, mmio + hpriv->irq_cause_ofs); 3083 3084 /* and unmask interrupt generation for host regs */ 3085 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs); 3086 if (IS_GEN_I(hpriv)) 3087 writelfl(~HC_MAIN_MASKED_IRQS_5, 3088 hpriv->main_irq_mask_addr); 3089 else 3090 writelfl(~HC_MAIN_MASKED_IRQS, 3091 hpriv->main_irq_mask_addr); 3092 3093 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x " 3094 "PCI int cause/mask=0x%08x/0x%08x\n", 3095 readl(hpriv->main_irq_cause_addr), 3096 readl(hpriv->main_irq_mask_addr), 3097 readl(mmio + hpriv->irq_cause_ofs), 3098 readl(mmio + hpriv->irq_mask_ofs)); 3099 } else { 3100 writelfl(~HC_MAIN_MASKED_IRQS_SOC, 3101 hpriv->main_irq_mask_addr); 3102 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n", 3103 readl(hpriv->main_irq_cause_addr), 3104 readl(hpriv->main_irq_mask_addr)); 3105 } 3106 done: 3107 return rc; 3108 } 3109 3110 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev) 3111 { 3112 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ, 3113 MV_CRQB_Q_SZ, 0); 3114 if (!hpriv->crqb_pool) 3115 return -ENOMEM; 3116 3117 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ, 3118 MV_CRPB_Q_SZ, 0); 3119 if (!hpriv->crpb_pool) 3120 return -ENOMEM; 3121 3122 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ, 3123 MV_SG_TBL_SZ, 0); 3124 if (!hpriv->sg_tbl_pool) 3125 return -ENOMEM; 3126 3127 return 0; 3128 } 3129 3130 static void mv_conf_mbus_windows(struct mv_host_priv *hpriv, 3131 struct mbus_dram_target_info *dram) 3132 { 3133 int i; 3134 3135 for (i = 0; i < 4; i++) { 3136 writel(0, hpriv->base + WINDOW_CTRL(i)); 3137 writel(0, hpriv->base + WINDOW_BASE(i)); 3138 } 3139 3140 for (i = 0; i < dram->num_cs; i++) { 3141 struct mbus_dram_window *cs = dram->cs + i; 3142 3143 writel(((cs->size - 1) & 0xffff0000) | 3144 (cs->mbus_attr << 8) | 3145 (dram->mbus_dram_target_id << 4) | 1, 3146 hpriv->base + WINDOW_CTRL(i)); 3147 writel(cs->base, hpriv->base + WINDOW_BASE(i)); 3148 } 3149 } 3150 3151 /** 3152 * mv_platform_probe - handle a positive probe of an soc Marvell 3153 * host 3154 * @pdev: platform device found 3155 * 3156 * LOCKING: 3157 * Inherited from caller. 3158 */ 3159 static int mv_platform_probe(struct platform_device *pdev) 3160 { 3161 static int printed_version; 3162 const struct mv_sata_platform_data *mv_platform_data; 3163 const struct ata_port_info *ppi[] = 3164 { &mv_port_info[chip_soc], NULL }; 3165 struct ata_host *host; 3166 struct mv_host_priv *hpriv; 3167 struct resource *res; 3168 int n_ports, rc; 3169 3170 if (!printed_version++) 3171 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); 3172 3173 /* 3174 * Simple resource validation .. 
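* (exactly two resources are expected: the MMIO register window and the IRQ)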
3175 */ 3176 if (unlikely(pdev->num_resources != 2)) { 3177 dev_err(&pdev->dev, "invalid number of resources\n"); 3178 return -EINVAL; 3179 } 3180 3181 /* 3182 * Get the register base first 3183 */ 3184 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 3185 if (res == NULL) 3186 return -EINVAL; 3187 3188 /* allocate host */ 3189 mv_platform_data = pdev->dev.platform_data; 3190 n_ports = mv_platform_data->n_ports; 3191 3192 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); 3193 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); 3194 3195 if (!host || !hpriv) 3196 return -ENOMEM; 3197 host->private_data = hpriv; 3198 hpriv->n_ports = n_ports; 3199 3200 host->iomap = NULL; 3201 hpriv->base = devm_ioremap(&pdev->dev, res->start, 3202 res->end - res->start + 1); 3203 hpriv->base -= MV_SATAHC0_REG_BASE; 3204 3205 /* 3206 * (Re-)program MBUS remapping windows if we are asked to. 3207 */ 3208 if (mv_platform_data->dram != NULL) 3209 mv_conf_mbus_windows(hpriv, mv_platform_data->dram); 3210 3211 rc = mv_create_dma_pools(hpriv, &pdev->dev); 3212 if (rc) 3213 return rc; 3214 3215 /* initialize adapter */ 3216 rc = mv_init_host(host, chip_soc); 3217 if (rc) 3218 return rc; 3219 3220 dev_printk(KERN_INFO, &pdev->dev, 3221 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH, 3222 host->n_ports); 3223 3224 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt, 3225 IRQF_SHARED, &mv6_sht); 3226 } 3227 3228 /* 3229 * 3230 * mv_platform_remove - unplug a platform interface 3231 * @pdev: platform device 3232 * 3233 * A platform bus SATA device has been unplugged. Perform the needed 3234 * cleanup. Also called on module unload for any active devices. 3235 */ 3236 static int __devexit mv_platform_remove(struct platform_device *pdev) 3237 { 3238 struct device *dev = &pdev->dev; 3239 struct ata_host *host = dev_get_drvdata(dev); 3240 3241 ata_host_detach(host); 3242 return 0; 3243 } 3244 3245 static struct platform_driver mv_platform_driver = { 3246 .probe = mv_platform_probe, 3247 .remove = __devexit_p(mv_platform_remove), 3248 .driver = { 3249 .name = DRV_NAME, 3250 .owner = THIS_MODULE, 3251 }, 3252 }; 3253 3254 3255 #ifdef CONFIG_PCI 3256 static int mv_pci_init_one(struct pci_dev *pdev, 3257 const struct pci_device_id *ent); 3258 3259 3260 static struct pci_driver mv_pci_driver = { 3261 .name = DRV_NAME, 3262 .id_table = mv_pci_tbl, 3263 .probe = mv_pci_init_one, 3264 .remove = ata_pci_remove_one, 3265 }; 3266 3267 /* 3268 * module options 3269 */ 3270 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */ 3271 3272 3273 /* move to PCI layer or libata core? */ 3274 static int pci_go_64(struct pci_dev *pdev) 3275 { 3276 int rc; 3277 3278 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 3279 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 3280 if (rc) { 3281 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 3282 if (rc) { 3283 dev_printk(KERN_ERR, &pdev->dev, 3284 "64-bit DMA enable failed\n"); 3285 return rc; 3286 } 3287 } 3288 } else { 3289 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 3290 if (rc) { 3291 dev_printk(KERN_ERR, &pdev->dev, 3292 "32-bit DMA enable failed\n"); 3293 return rc; 3294 } 3295 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 3296 if (rc) { 3297 dev_printk(KERN_ERR, &pdev->dev, 3298 "32-bit consistent DMA enable failed\n"); 3299 return rc; 3300 } 3301 } 3302 3303 return rc; 3304 } 3305 3306 /** 3307 * mv_print_info - Dump key info to kernel log for perusal. 
3308 * @host: ATA host to print info about 3309 * 3310 * FIXME: complete this. 3311 * 3312 * LOCKING: 3313 * Inherited from caller. 3314 */ 3315 static void mv_print_info(struct ata_host *host) 3316 { 3317 struct pci_dev *pdev = to_pci_dev(host->dev); 3318 struct mv_host_priv *hpriv = host->private_data; 3319 u8 scc; 3320 const char *scc_s, *gen; 3321 3322 /* Use this to determine the HW stepping of the chip so we know 3323 * what errata to workaround 3324 */ 3325 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc); 3326 if (scc == 0) 3327 scc_s = "SCSI"; 3328 else if (scc == 0x01) 3329 scc_s = "RAID"; 3330 else 3331 scc_s = "?"; 3332 3333 if (IS_GEN_I(hpriv)) 3334 gen = "I"; 3335 else if (IS_GEN_II(hpriv)) 3336 gen = "II"; 3337 else if (IS_GEN_IIE(hpriv)) 3338 gen = "IIE"; 3339 else 3340 gen = "?"; 3341 3342 dev_printk(KERN_INFO, &pdev->dev, 3343 "Gen-%s %u slots %u ports %s mode IRQ via %s\n", 3344 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports, 3345 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx"); 3346 } 3347 3348 /** 3349 * mv_pci_init_one - handle a positive probe of a PCI Marvell host 3350 * @pdev: PCI device found 3351 * @ent: PCI device ID entry for the matched host 3352 * 3353 * LOCKING: 3354 * Inherited from caller. 3355 */ 3356 static int mv_pci_init_one(struct pci_dev *pdev, 3357 const struct pci_device_id *ent) 3358 { 3359 static int printed_version; 3360 unsigned int board_idx = (unsigned int)ent->driver_data; 3361 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL }; 3362 struct ata_host *host; 3363 struct mv_host_priv *hpriv; 3364 int n_ports, rc; 3365 3366 if (!printed_version++) 3367 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); 3368 3369 /* allocate host */ 3370 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC; 3371 3372 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); 3373 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); 3374 if (!host || !hpriv) 3375 return -ENOMEM; 3376 host->private_data = hpriv; 3377 hpriv->n_ports = n_ports; 3378 3379 /* acquire resources */ 3380 rc = pcim_enable_device(pdev); 3381 if (rc) 3382 return rc; 3383 3384 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME); 3385 if (rc == -EBUSY) 3386 pcim_pin_device(pdev); 3387 if (rc) 3388 return rc; 3389 host->iomap = pcim_iomap_table(pdev); 3390 hpriv->base = host->iomap[MV_PRIMARY_BAR]; 3391 3392 rc = pci_go_64(pdev); 3393 if (rc) 3394 return rc; 3395 3396 rc = mv_create_dma_pools(hpriv, &pdev->dev); 3397 if (rc) 3398 return rc; 3399 3400 /* initialize adapter */ 3401 rc = mv_init_host(host, board_idx); 3402 if (rc) 3403 return rc; 3404 3405 /* Enable interrupts */ 3406 if (msi && pci_enable_msi(pdev)) 3407 pci_intx(pdev, 1); 3408 3409 mv_dump_pci_cfg(pdev, 0x68); 3410 mv_print_info(host); 3411 3412 pci_set_master(pdev); 3413 pci_try_set_mwi(pdev); 3414 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED, 3415 IS_GEN_I(hpriv) ? 
&mv5_sht : &mv6_sht); 3416 } 3417 #endif 3418 3419 static int mv_platform_probe(struct platform_device *pdev); 3420 static int __devexit mv_platform_remove(struct platform_device *pdev); 3421 3422 static int __init mv_init(void) 3423 { 3424 int rc = -ENODEV; 3425 #ifdef CONFIG_PCI 3426 rc = pci_register_driver(&mv_pci_driver); 3427 if (rc < 0) 3428 return rc; 3429 #endif 3430 rc = platform_driver_register(&mv_platform_driver); 3431 3432 #ifdef CONFIG_PCI 3433 if (rc < 0) 3434 pci_unregister_driver(&mv_pci_driver); 3435 #endif 3436 return rc; 3437 } 3438 3439 static void __exit mv_exit(void) 3440 { 3441 #ifdef CONFIG_PCI 3442 pci_unregister_driver(&mv_pci_driver); 3443 #endif 3444 platform_driver_unregister(&mv_platform_driver); 3445 } 3446 3447 MODULE_AUTHOR("Brett Russ"); 3448 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers"); 3449 MODULE_LICENSE("GPL"); 3450 MODULE_DEVICE_TABLE(pci, mv_pci_tbl); 3451 MODULE_VERSION(DRV_VERSION); 3452 MODULE_ALIAS("platform:" DRV_NAME); 3453 3454 #ifdef CONFIG_PCI 3455 module_param(msi, int, 0444); 3456 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)"); 3457 #endif 3458 3459 module_init(mv_init); 3460 module_exit(mv_exit); 3461
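/*
 * Illustration only, not compiled into this driver: a sketch of how a
 * board-support file might register the platform ("chip_soc") variant
 * that mv_platform_probe() above binds to.  As the probe routine expects,
 * the device supplies exactly two resources (an MMIO register window and
 * an IRQ) plus an mv_sata_platform_data as platform_data.
 * BOARD_SATA_PHYS_BASE, BOARD_SATA_WIN_SIZE and BOARD_SATA_IRQ are
 * hypothetical, board-specific values, not constants provided by this
 * driver.
 *
 *	static struct mv_sata_platform_data board_sata_data = {
 *		.n_ports	= 2,
 *	};
 *
 *	static struct resource board_sata_resources[] = {
 *		{
 *			.start	= BOARD_SATA_PHYS_BASE,
 *			.end	= BOARD_SATA_PHYS_BASE + BOARD_SATA_WIN_SIZE - 1,
 *			.flags	= IORESOURCE_MEM,
 *		}, {
 *			.start	= BOARD_SATA_IRQ,
 *			.end	= BOARD_SATA_IRQ,
 *			.flags	= IORESOURCE_IRQ,
 *		},
 *	};
 *
 *	static struct platform_device board_sata_device = {
 *		.name		= "sata_mv",
 *		.id		= 0,
 *		.dev		= {
 *			.platform_data	= &board_sata_data,
 *		},
 *		.num_resources	= ARRAY_SIZE(board_sata_resources),
 *		.resource	= board_sata_resources,
 *	};
 *
 * and, from the board's init code:
 *
 *	platform_device_register(&board_sata_device);
 *
 * The "sata_mv" name must match DRV_NAME so that the platform core binds
 * the device to mv_platform_driver (see MODULE_ALIAS above).
 */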