/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc. All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/*
 * sata_mv TODO list:
 *
 * --> Errata workaround for NCQ device errors.
 *
 * --> More errata workarounds for PCI-X.
 *
 * --> Complete a full errata audit for all chipsets to identify others.
 *
 * --> ATAPI support (Marvell claims the 60xx/70xx chips can do it).
 *
 * --> Investigate problems with PCI Message Signalled Interrupts (MSI).
 *
 * --> Cache frequently-accessed registers in mv_port_priv to reduce overhead.
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> [Experiment, low priority] Investigate interrupt coalescing.
 *     Quite often, especially with PCI Message Signalled Interrupts (MSI),
 *     the overhead reduced by interrupt mitigation is not worth the
 *     latency cost.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *     mode to cross-connect two Linux boxes with Marvell cards? If so,
 *     creating LibATA target mode support would be very interesting.
 *
 *     Target mode, for those without docs, is the ability to directly
 *     connect two SATA ports.
53 */ 54 55 #include <linux/kernel.h> 56 #include <linux/module.h> 57 #include <linux/pci.h> 58 #include <linux/init.h> 59 #include <linux/blkdev.h> 60 #include <linux/delay.h> 61 #include <linux/interrupt.h> 62 #include <linux/dmapool.h> 63 #include <linux/dma-mapping.h> 64 #include <linux/device.h> 65 #include <linux/platform_device.h> 66 #include <linux/ata_platform.h> 67 #include <linux/mbus.h> 68 #include <linux/bitops.h> 69 #include <scsi/scsi_host.h> 70 #include <scsi/scsi_cmnd.h> 71 #include <scsi/scsi_device.h> 72 #include <linux/libata.h> 73 74 #define DRV_NAME "sata_mv" 75 #define DRV_VERSION "1.24" 76 77 enum { 78 /* BAR's are enumerated in terms of pci_resource_start() terms */ 79 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */ 80 MV_IO_BAR = 2, /* offset 0x18: IO space */ 81 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */ 82 83 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */ 84 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */ 85 86 MV_PCI_REG_BASE = 0, 87 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */ 88 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08), 89 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88), 90 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c), 91 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc), 92 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0), 93 94 MV_SATAHC0_REG_BASE = 0x20000, 95 MV_FLASH_CTL_OFS = 0x1046c, 96 MV_GPIO_PORT_CTL_OFS = 0x104f0, 97 MV_RESET_CFG_OFS = 0x180d8, 98 99 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ, 100 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ, 101 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */ 102 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ, 103 104 MV_MAX_Q_DEPTH = 32, 105 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1, 106 107 /* CRQB needs alignment on a 1KB boundary. Size == 1KB 108 * CRPB needs alignment on a 256B boundary. Size == 256B 109 * ePRD (SG) entries need alignment on a 16B boundary. 
Size == 16B 110 */ 111 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH), 112 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH), 113 MV_MAX_SG_CT = 256, 114 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT), 115 116 /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */ 117 MV_PORT_HC_SHIFT = 2, 118 MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */ 119 /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */ 120 MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */ 121 122 /* Host Flags */ 123 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ 124 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ 125 126 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 127 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | 128 ATA_FLAG_PIO_POLLING, 129 130 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE, 131 132 MV_GENIIE_FLAGS = MV_COMMON_FLAGS | MV_6XXX_FLAGS | 133 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA | 134 ATA_FLAG_NCQ | ATA_FLAG_AN, 135 136 CRQB_FLAG_READ = (1 << 0), 137 CRQB_TAG_SHIFT = 1, 138 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */ 139 CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */ 140 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */ 141 CRQB_CMD_ADDR_SHIFT = 8, 142 CRQB_CMD_CS = (0x2 << 11), 143 CRQB_CMD_LAST = (1 << 15), 144 145 CRPB_FLAG_STATUS_SHIFT = 8, 146 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */ 147 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */ 148 149 EPRD_FLAG_END_OF_TBL = (1 << 31), 150 151 /* PCI interface registers */ 152 153 PCI_COMMAND_OFS = 0xc00, 154 PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */ 155 156 PCI_MAIN_CMD_STS_OFS = 0xd30, 157 STOP_PCI_MASTER = (1 << 2), 158 PCI_MASTER_EMPTY = (1 << 3), 159 GLOB_SFT_RST = (1 << 4), 160 161 MV_PCI_MODE_OFS = 0xd00, 162 MV_PCI_MODE_MASK = 0x30, 163 164 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c, 165 MV_PCI_DISC_TIMER = 0xd04, 166 MV_PCI_MSI_TRIGGER = 0xc38, 167 MV_PCI_SERR_MASK = 0xc28, 168 MV_PCI_XBAR_TMOUT_OFS = 0x1d04, 169 MV_PCI_ERR_LOW_ADDRESS = 0x1d40, 170 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44, 171 MV_PCI_ERR_ATTRIBUTE = 0x1d48, 172 MV_PCI_ERR_COMMAND = 0x1d50, 173 174 PCI_IRQ_CAUSE_OFS = 0x1d58, 175 PCI_IRQ_MASK_OFS = 0x1d5c, 176 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */ 177 178 PCIE_IRQ_CAUSE_OFS = 0x1900, 179 PCIE_IRQ_MASK_OFS = 0x1910, 180 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */ 181 182 /* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */ 183 PCI_HC_MAIN_IRQ_CAUSE_OFS = 0x1d60, 184 PCI_HC_MAIN_IRQ_MASK_OFS = 0x1d64, 185 SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020, 186 SOC_HC_MAIN_IRQ_MASK_OFS = 0x20024, 187 ERR_IRQ = (1 << 0), /* shift by port # */ 188 DONE_IRQ = (1 << 1), /* shift by port # */ 189 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */ 190 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */ 191 PCI_ERR = (1 << 18), 192 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */ 193 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */ 194 PORTS_0_3_COAL_DONE = (1 << 8), 195 PORTS_4_7_COAL_DONE = (1 << 17), 196 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */ 197 GPIO_INT = (1 << 22), 198 SELF_INT = (1 << 23), 199 TWSI_INT = (1 << 24), 200 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */ 201 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */ 202 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */ 203 204 /* SATAHC registers */ 205 HC_CFG_OFS = 0, 206 207 HC_IRQ_CAUSE_OFS = 0x14, 208 DMA_IRQ = (1 << 0), /* shift by port # */ 209 HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */ 210 DEV_IRQ = (1 << 8), /* shift by port # */ 211 212 /* Shadow block registers */ 213 SHD_BLK_OFS = 
0x100, 214 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */ 215 216 /* SATA registers */ 217 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */ 218 SATA_ACTIVE_OFS = 0x350, 219 SATA_FIS_IRQ_CAUSE_OFS = 0x364, 220 SATA_FIS_IRQ_AN = (1 << 9), /* async notification */ 221 222 LTMODE_OFS = 0x30c, 223 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */ 224 225 PHY_MODE3 = 0x310, 226 PHY_MODE4 = 0x314, 227 PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */ 228 PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */ 229 PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */ 230 PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */ 231 232 PHY_MODE2 = 0x330, 233 SATA_IFCTL_OFS = 0x344, 234 SATA_TESTCTL_OFS = 0x348, 235 SATA_IFSTAT_OFS = 0x34c, 236 VENDOR_UNIQUE_FIS_OFS = 0x35c, 237 238 FISCFG_OFS = 0x360, 239 FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */ 240 FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */ 241 242 MV5_PHY_MODE = 0x74, 243 MV5_LTMODE_OFS = 0x30, 244 MV5_PHY_CTL_OFS = 0x0C, 245 SATA_INTERFACE_CFG_OFS = 0x050, 246 247 MV_M2_PREAMP_MASK = 0x7e0, 248 249 /* Port registers */ 250 EDMA_CFG_OFS = 0, 251 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */ 252 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */ 253 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ 254 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */ 255 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ 256 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */ 257 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */ 258 259 EDMA_ERR_IRQ_CAUSE_OFS = 0x8, 260 EDMA_ERR_IRQ_MASK_OFS = 0xc, 261 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */ 262 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */ 263 EDMA_ERR_DEV = (1 << 2), /* device error */ 264 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */ 265 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */ 266 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */ 267 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */ 268 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */ 269 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */ 270 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */ 271 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */ 272 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */ 273 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */ 274 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */ 275 276 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */ 277 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */ 278 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */ 279 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */ 280 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */ 281 282 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */ 283 284 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */ 285 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */ 286 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */ 287 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */ 288 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */ 289 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */ 290 291 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */ 292 293 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */ 294 EDMA_ERR_OVERRUN_5 = (1 << 5), 295 EDMA_ERR_UNDERRUN_5 = (1 << 
6), 296 297 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 | 298 EDMA_ERR_LNK_CTRL_RX_1 | 299 EDMA_ERR_LNK_CTRL_RX_3 | 300 EDMA_ERR_LNK_CTRL_TX, 301 302 EDMA_EH_FREEZE = EDMA_ERR_D_PAR | 303 EDMA_ERR_PRD_PAR | 304 EDMA_ERR_DEV_DCON | 305 EDMA_ERR_DEV_CON | 306 EDMA_ERR_SERR | 307 EDMA_ERR_SELF_DIS | 308 EDMA_ERR_CRQB_PAR | 309 EDMA_ERR_CRPB_PAR | 310 EDMA_ERR_INTRL_PAR | 311 EDMA_ERR_IORDY | 312 EDMA_ERR_LNK_CTRL_RX_2 | 313 EDMA_ERR_LNK_DATA_RX | 314 EDMA_ERR_LNK_DATA_TX | 315 EDMA_ERR_TRANS_PROTO, 316 317 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR | 318 EDMA_ERR_PRD_PAR | 319 EDMA_ERR_DEV_DCON | 320 EDMA_ERR_DEV_CON | 321 EDMA_ERR_OVERRUN_5 | 322 EDMA_ERR_UNDERRUN_5 | 323 EDMA_ERR_SELF_DIS_5 | 324 EDMA_ERR_CRQB_PAR | 325 EDMA_ERR_CRPB_PAR | 326 EDMA_ERR_INTRL_PAR | 327 EDMA_ERR_IORDY, 328 329 EDMA_REQ_Q_BASE_HI_OFS = 0x10, 330 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */ 331 332 EDMA_REQ_Q_OUT_PTR_OFS = 0x18, 333 EDMA_REQ_Q_PTR_SHIFT = 5, 334 335 EDMA_RSP_Q_BASE_HI_OFS = 0x1c, 336 EDMA_RSP_Q_IN_PTR_OFS = 0x20, 337 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */ 338 EDMA_RSP_Q_PTR_SHIFT = 3, 339 340 EDMA_CMD_OFS = 0x28, /* EDMA command register */ 341 EDMA_EN = (1 << 0), /* enable EDMA */ 342 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */ 343 EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */ 344 345 EDMA_STATUS_OFS = 0x30, /* EDMA engine status */ 346 EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */ 347 EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */ 348 349 EDMA_IORDY_TMOUT_OFS = 0x34, 350 EDMA_ARB_CFG_OFS = 0x38, 351 352 EDMA_HALTCOND_OFS = 0x60, /* GenIIe halt conditions */ 353 354 GEN_II_NCQ_MAX_SECTORS = 256, /* max sects/io on Gen2 w/NCQ */ 355 356 /* Host private flags (hp_flags) */ 357 MV_HP_FLAG_MSI = (1 << 0), 358 MV_HP_ERRATA_50XXB0 = (1 << 1), 359 MV_HP_ERRATA_50XXB2 = (1 << 2), 360 MV_HP_ERRATA_60X1B2 = (1 << 3), 361 MV_HP_ERRATA_60X1C0 = (1 << 4), 362 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */ 363 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ 364 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ 365 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */ 366 MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */ 367 MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */ 368 369 /* Port private flags (pp_flags) */ 370 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ 371 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ 372 MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */ 373 MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */ 374 }; 375 376 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) 377 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II) 378 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) 379 #define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE) 380 #define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC) 381 382 #define WINDOW_CTRL(i) (0x20030 + ((i) << 4)) 383 #define WINDOW_BASE(i) (0x20034 + ((i) << 4)) 384 385 enum { 386 /* DMA boundary 0xffff is required by the s/g splitting 387 * we need on /length/ in mv_fill-sg(). 
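	 *
	 * Illustrative example (not from the original source): given the
	 * splitting loop in mv_fill_sg(), a single DMA segment of 0x3000
	 * bytes at bus address 0x1234F000 becomes two ePRD entries:
	 * 0x1000 bytes at 0x1234F000 (stopping at the next 64KB address
	 * boundary, since offset 0xF000 + 0x3000 > 0x10000), then 0x2000
	 * bytes at 0x12350000.  This worst-case doubling is why the
	 * scsi_host_templates below use .sg_tablesize = MV_MAX_SG_CT / 2.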
388 */ 389 MV_DMA_BOUNDARY = 0xffffU, 390 391 /* mask of register bits containing lower 32 bits 392 * of EDMA request queue DMA address 393 */ 394 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U, 395 396 /* ditto, for response queue */ 397 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U, 398 }; 399 400 enum chip_type { 401 chip_504x, 402 chip_508x, 403 chip_5080, 404 chip_604x, 405 chip_608x, 406 chip_6042, 407 chip_7042, 408 chip_soc, 409 }; 410 411 /* Command ReQuest Block: 32B */ 412 struct mv_crqb { 413 __le32 sg_addr; 414 __le32 sg_addr_hi; 415 __le16 ctrl_flags; 416 __le16 ata_cmd[11]; 417 }; 418 419 struct mv_crqb_iie { 420 __le32 addr; 421 __le32 addr_hi; 422 __le32 flags; 423 __le32 len; 424 __le32 ata_cmd[4]; 425 }; 426 427 /* Command ResPonse Block: 8B */ 428 struct mv_crpb { 429 __le16 id; 430 __le16 flags; 431 __le32 tmstmp; 432 }; 433 434 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */ 435 struct mv_sg { 436 __le32 addr; 437 __le32 flags_size; 438 __le32 addr_hi; 439 __le32 reserved; 440 }; 441 442 struct mv_port_priv { 443 struct mv_crqb *crqb; 444 dma_addr_t crqb_dma; 445 struct mv_crpb *crpb; 446 dma_addr_t crpb_dma; 447 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH]; 448 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH]; 449 450 unsigned int req_idx; 451 unsigned int resp_idx; 452 453 u32 pp_flags; 454 unsigned int delayed_eh_pmp_map; 455 }; 456 457 struct mv_port_signal { 458 u32 amps; 459 u32 pre; 460 }; 461 462 struct mv_host_priv { 463 u32 hp_flags; 464 u32 main_irq_mask; 465 struct mv_port_signal signal[8]; 466 const struct mv_hw_ops *ops; 467 int n_ports; 468 void __iomem *base; 469 void __iomem *main_irq_cause_addr; 470 void __iomem *main_irq_mask_addr; 471 u32 irq_cause_ofs; 472 u32 irq_mask_ofs; 473 u32 unmask_all_irqs; 474 /* 475 * These consistent DMA memory pools give us guaranteed 476 * alignment for hardware-accessed data structures, 477 * and less memory waste in accomplishing the alignment. 
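	 *
	 * Illustrative sketch (assumed; the pool setup itself is not shown
	 * in this excerpt): each pool is created with its element size
	 * also used as the alignment, e.g.
	 *
	 *	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev,
	 *					    MV_CRQB_Q_SZ,
	 *					    MV_CRQB_Q_SZ, 0);
	 *
	 * so every allocation from crqb_pool comes back 1KB-aligned,
	 * matching the CRQB alignment requirement noted near the top of
	 * this file.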
478 */ 479 struct dma_pool *crqb_pool; 480 struct dma_pool *crpb_pool; 481 struct dma_pool *sg_tbl_pool; 482 }; 483 484 struct mv_hw_ops { 485 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio, 486 unsigned int port); 487 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio); 488 void (*read_preamp)(struct mv_host_priv *hpriv, int idx, 489 void __iomem *mmio); 490 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio, 491 unsigned int n_hc); 492 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio); 493 void (*reset_bus)(struct ata_host *host, void __iomem *mmio); 494 }; 495 496 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val); 497 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); 498 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val); 499 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); 500 static int mv_port_start(struct ata_port *ap); 501 static void mv_port_stop(struct ata_port *ap); 502 static int mv_qc_defer(struct ata_queued_cmd *qc); 503 static void mv_qc_prep(struct ata_queued_cmd *qc); 504 static void mv_qc_prep_iie(struct ata_queued_cmd *qc); 505 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); 506 static int mv_hardreset(struct ata_link *link, unsigned int *class, 507 unsigned long deadline); 508 static void mv_eh_freeze(struct ata_port *ap); 509 static void mv_eh_thaw(struct ata_port *ap); 510 static void mv6_dev_config(struct ata_device *dev); 511 512 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 513 unsigned int port); 514 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio); 515 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, 516 void __iomem *mmio); 517 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 518 unsigned int n_hc); 519 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); 520 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio); 521 522 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 523 unsigned int port); 524 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio); 525 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, 526 void __iomem *mmio); 527 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 528 unsigned int n_hc); 529 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); 530 static void mv_soc_enable_leds(struct mv_host_priv *hpriv, 531 void __iomem *mmio); 532 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx, 533 void __iomem *mmio); 534 static int mv_soc_reset_hc(struct mv_host_priv *hpriv, 535 void __iomem *mmio, unsigned int n_hc); 536 static void mv_soc_reset_flash(struct mv_host_priv *hpriv, 537 void __iomem *mmio); 538 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio); 539 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio); 540 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, 541 unsigned int port_no); 542 static int mv_stop_edma(struct ata_port *ap); 543 static int mv_stop_edma_engine(void __iomem *port_mmio); 544 static void mv_edma_cfg(struct ata_port *ap, int want_ncq); 545 546 static void mv_pmp_select(struct ata_port *ap, int pmp); 547 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, 548 unsigned long deadline); 549 static 
int mv_softreset(struct ata_link *link, unsigned int *class, 550 unsigned long deadline); 551 static void mv_pmp_error_handler(struct ata_port *ap); 552 static void mv_process_crpb_entries(struct ata_port *ap, 553 struct mv_port_priv *pp); 554 555 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below 556 * because we have to allow room for worst case splitting of 557 * PRDs for 64K boundaries in mv_fill_sg(). 558 */ 559 static struct scsi_host_template mv5_sht = { 560 ATA_BASE_SHT(DRV_NAME), 561 .sg_tablesize = MV_MAX_SG_CT / 2, 562 .dma_boundary = MV_DMA_BOUNDARY, 563 }; 564 565 static struct scsi_host_template mv6_sht = { 566 ATA_NCQ_SHT(DRV_NAME), 567 .can_queue = MV_MAX_Q_DEPTH - 1, 568 .sg_tablesize = MV_MAX_SG_CT / 2, 569 .dma_boundary = MV_DMA_BOUNDARY, 570 }; 571 572 static struct ata_port_operations mv5_ops = { 573 .inherits = &ata_sff_port_ops, 574 575 .qc_defer = mv_qc_defer, 576 .qc_prep = mv_qc_prep, 577 .qc_issue = mv_qc_issue, 578 579 .freeze = mv_eh_freeze, 580 .thaw = mv_eh_thaw, 581 .hardreset = mv_hardreset, 582 .error_handler = ata_std_error_handler, /* avoid SFF EH */ 583 .post_internal_cmd = ATA_OP_NULL, 584 585 .scr_read = mv5_scr_read, 586 .scr_write = mv5_scr_write, 587 588 .port_start = mv_port_start, 589 .port_stop = mv_port_stop, 590 }; 591 592 static struct ata_port_operations mv6_ops = { 593 .inherits = &mv5_ops, 594 .dev_config = mv6_dev_config, 595 .scr_read = mv_scr_read, 596 .scr_write = mv_scr_write, 597 598 .pmp_hardreset = mv_pmp_hardreset, 599 .pmp_softreset = mv_softreset, 600 .softreset = mv_softreset, 601 .error_handler = mv_pmp_error_handler, 602 }; 603 604 static struct ata_port_operations mv_iie_ops = { 605 .inherits = &mv6_ops, 606 .dev_config = ATA_OP_NULL, 607 .qc_prep = mv_qc_prep_iie, 608 }; 609 610 static const struct ata_port_info mv_port_info[] = { 611 { /* chip_504x */ 612 .flags = MV_COMMON_FLAGS, 613 .pio_mask = 0x1f, /* pio0-4 */ 614 .udma_mask = ATA_UDMA6, 615 .port_ops = &mv5_ops, 616 }, 617 { /* chip_508x */ 618 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC, 619 .pio_mask = 0x1f, /* pio0-4 */ 620 .udma_mask = ATA_UDMA6, 621 .port_ops = &mv5_ops, 622 }, 623 { /* chip_5080 */ 624 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC, 625 .pio_mask = 0x1f, /* pio0-4 */ 626 .udma_mask = ATA_UDMA6, 627 .port_ops = &mv5_ops, 628 }, 629 { /* chip_604x */ 630 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | 631 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA | 632 ATA_FLAG_NCQ, 633 .pio_mask = 0x1f, /* pio0-4 */ 634 .udma_mask = ATA_UDMA6, 635 .port_ops = &mv6_ops, 636 }, 637 { /* chip_608x */ 638 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | 639 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA | 640 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC, 641 .pio_mask = 0x1f, /* pio0-4 */ 642 .udma_mask = ATA_UDMA6, 643 .port_ops = &mv6_ops, 644 }, 645 { /* chip_6042 */ 646 .flags = MV_GENIIE_FLAGS, 647 .pio_mask = 0x1f, /* pio0-4 */ 648 .udma_mask = ATA_UDMA6, 649 .port_ops = &mv_iie_ops, 650 }, 651 { /* chip_7042 */ 652 .flags = MV_GENIIE_FLAGS, 653 .pio_mask = 0x1f, /* pio0-4 */ 654 .udma_mask = ATA_UDMA6, 655 .port_ops = &mv_iie_ops, 656 }, 657 { /* chip_soc */ 658 .flags = MV_GENIIE_FLAGS, 659 .pio_mask = 0x1f, /* pio0-4 */ 660 .udma_mask = ATA_UDMA6, 661 .port_ops = &mv_iie_ops, 662 }, 663 }; 664 665 static const struct pci_device_id mv_pci_tbl[] = { 666 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x }, 667 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x }, 668 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 }, 669 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x }, 670 /* RocketRAID 1740/174x have different identifiers 
*/ 671 { PCI_VDEVICE(TTI, 0x1740), chip_508x }, 672 { PCI_VDEVICE(TTI, 0x1742), chip_508x }, 673 674 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x }, 675 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x }, 676 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 }, 677 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x }, 678 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x }, 679 680 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x }, 681 682 /* Adaptec 1430SA */ 683 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 }, 684 685 /* Marvell 7042 support */ 686 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 }, 687 688 /* Highpoint RocketRAID PCIe series */ 689 { PCI_VDEVICE(TTI, 0x2300), chip_7042 }, 690 { PCI_VDEVICE(TTI, 0x2310), chip_7042 }, 691 692 { } /* terminate list */ 693 }; 694 695 static const struct mv_hw_ops mv5xxx_ops = { 696 .phy_errata = mv5_phy_errata, 697 .enable_leds = mv5_enable_leds, 698 .read_preamp = mv5_read_preamp, 699 .reset_hc = mv5_reset_hc, 700 .reset_flash = mv5_reset_flash, 701 .reset_bus = mv5_reset_bus, 702 }; 703 704 static const struct mv_hw_ops mv6xxx_ops = { 705 .phy_errata = mv6_phy_errata, 706 .enable_leds = mv6_enable_leds, 707 .read_preamp = mv6_read_preamp, 708 .reset_hc = mv6_reset_hc, 709 .reset_flash = mv6_reset_flash, 710 .reset_bus = mv_reset_pci_bus, 711 }; 712 713 static const struct mv_hw_ops mv_soc_ops = { 714 .phy_errata = mv6_phy_errata, 715 .enable_leds = mv_soc_enable_leds, 716 .read_preamp = mv_soc_read_preamp, 717 .reset_hc = mv_soc_reset_hc, 718 .reset_flash = mv_soc_reset_flash, 719 .reset_bus = mv_soc_reset_bus, 720 }; 721 722 /* 723 * Functions 724 */ 725 726 static inline void writelfl(unsigned long data, void __iomem *addr) 727 { 728 writel(data, addr); 729 (void) readl(addr); /* flush to avoid PCI posted write */ 730 } 731 732 static inline unsigned int mv_hc_from_port(unsigned int port) 733 { 734 return port >> MV_PORT_HC_SHIFT; 735 } 736 737 static inline unsigned int mv_hardport_from_port(unsigned int port) 738 { 739 return port & MV_PORT_MASK; 740 } 741 742 /* 743 * Consolidate some rather tricky bit shift calculations. 744 * This is hot-path stuff, so not a function. 745 * Simple code, with two return values, so macro rather than inline. 746 * 747 * port is the sole input, in range 0..7. 748 * shift is one output, for use with main_irq_cause / main_irq_mask registers. 749 * hardport is the other output, in range 0..3. 750 * 751 * Note that port and hardport may be the same variable in some cases. 
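 *
 * Worked example (illustrative, derived from the constants above): for
 * port 6, mv_hc_from_port(6) == 1 and mv_hardport_from_port(6) == 2,
 * so shift == 1 * HC_SHIFT + 2 * 2 == 13.  ERR_IRQ << shift and
 * DONE_IRQ << shift then land on bits 13 and 14 of the main
 * cause/mask registers, within the "bits 9-17 = HC1's ports" range.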
752 */ 753 #define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \ 754 { \ 755 shift = mv_hc_from_port(port) * HC_SHIFT; \ 756 hardport = mv_hardport_from_port(port); \ 757 shift += hardport * 2; \ 758 } 759 760 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc) 761 { 762 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ)); 763 } 764 765 static inline void __iomem *mv_hc_base_from_port(void __iomem *base, 766 unsigned int port) 767 { 768 return mv_hc_base(base, mv_hc_from_port(port)); 769 } 770 771 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port) 772 { 773 return mv_hc_base_from_port(base, port) + 774 MV_SATAHC_ARBTR_REG_SZ + 775 (mv_hardport_from_port(port) * MV_PORT_REG_SZ); 776 } 777 778 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port) 779 { 780 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port); 781 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL; 782 783 return hc_mmio + ofs; 784 } 785 786 static inline void __iomem *mv_host_base(struct ata_host *host) 787 { 788 struct mv_host_priv *hpriv = host->private_data; 789 return hpriv->base; 790 } 791 792 static inline void __iomem *mv_ap_base(struct ata_port *ap) 793 { 794 return mv_port_base(mv_host_base(ap->host), ap->port_no); 795 } 796 797 static inline int mv_get_hc_count(unsigned long port_flags) 798 { 799 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1); 800 } 801 802 static void mv_set_edma_ptrs(void __iomem *port_mmio, 803 struct mv_host_priv *hpriv, 804 struct mv_port_priv *pp) 805 { 806 u32 index; 807 808 /* 809 * initialize request queue 810 */ 811 pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */ 812 index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT; 813 814 WARN_ON(pp->crqb_dma & 0x3ff); 815 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); 816 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index, 817 port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 818 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); 819 820 /* 821 * initialize response queue 822 */ 823 pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */ 824 index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT; 825 826 WARN_ON(pp->crpb_dma & 0xff); 827 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); 828 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); 829 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index, 830 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 831 } 832 833 static void mv_set_main_irq_mask(struct ata_host *host, 834 u32 disable_bits, u32 enable_bits) 835 { 836 struct mv_host_priv *hpriv = host->private_data; 837 u32 old_mask, new_mask; 838 839 old_mask = hpriv->main_irq_mask; 840 new_mask = (old_mask & ~disable_bits) | enable_bits; 841 if (new_mask != old_mask) { 842 hpriv->main_irq_mask = new_mask; 843 writelfl(new_mask, hpriv->main_irq_mask_addr); 844 } 845 } 846 847 static void mv_enable_port_irqs(struct ata_port *ap, 848 unsigned int port_bits) 849 { 850 unsigned int shift, hardport, port = ap->port_no; 851 u32 disable_bits, enable_bits; 852 853 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); 854 855 disable_bits = (DONE_IRQ | ERR_IRQ) << shift; 856 enable_bits = port_bits << shift; 857 mv_set_main_irq_mask(ap->host, disable_bits, enable_bits); 858 } 859 860 /** 861 * mv_start_dma - Enable eDMA engine 862 * @base: port base address 863 * @pp: port private data 864 * 865 * Verify the local cache of the eDMA state is accurate with a 866 * WARN_ON. 867 * 868 * LOCKING: 869 * Inherited from caller. 
870 */ 871 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio, 872 struct mv_port_priv *pp, u8 protocol) 873 { 874 int want_ncq = (protocol == ATA_PROT_NCQ); 875 876 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { 877 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0); 878 if (want_ncq != using_ncq) 879 mv_stop_edma(ap); 880 } 881 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { 882 struct mv_host_priv *hpriv = ap->host->private_data; 883 int hardport = mv_hardport_from_port(ap->port_no); 884 void __iomem *hc_mmio = mv_hc_base_from_port( 885 mv_host_base(ap->host), hardport); 886 u32 hc_irq_cause, ipending; 887 888 /* clear EDMA event indicators, if any */ 889 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 890 891 /* clear EDMA interrupt indicator, if any */ 892 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); 893 ipending = (DEV_IRQ | DMA_IRQ) << hardport; 894 if (hc_irq_cause & ipending) { 895 writelfl(hc_irq_cause & ~ipending, 896 hc_mmio + HC_IRQ_CAUSE_OFS); 897 } 898 899 mv_edma_cfg(ap, want_ncq); 900 901 /* clear FIS IRQ Cause */ 902 if (IS_GEN_IIE(hpriv)) 903 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS); 904 905 mv_set_edma_ptrs(port_mmio, hpriv, pp); 906 mv_enable_port_irqs(ap, DONE_IRQ|ERR_IRQ); 907 908 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS); 909 pp->pp_flags |= MV_PP_FLAG_EDMA_EN; 910 } 911 } 912 913 static void mv_wait_for_edma_empty_idle(struct ata_port *ap) 914 { 915 void __iomem *port_mmio = mv_ap_base(ap); 916 const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE); 917 const int per_loop = 5, timeout = (15 * 1000 / per_loop); 918 int i; 919 920 /* 921 * Wait for the EDMA engine to finish transactions in progress. 922 * No idea what a good "timeout" value might be, but measurements 923 * indicate that it often requires hundreds of microseconds 924 * with two drives in-use. So we use the 15msec value above 925 * as a rough guess at what even more drives might require. 926 */ 927 for (i = 0; i < timeout; ++i) { 928 u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS); 929 if ((edma_stat & empty_idle) == empty_idle) 930 break; 931 udelay(per_loop); 932 } 933 /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */ 934 } 935 936 /** 937 * mv_stop_edma_engine - Disable eDMA engine 938 * @port_mmio: io base address 939 * 940 * LOCKING: 941 * Inherited from caller. 942 */ 943 static int mv_stop_edma_engine(void __iomem *port_mmio) 944 { 945 int i; 946 947 /* Disable eDMA. The disable bit auto clears. */ 948 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); 949 950 /* Wait for the chip to confirm eDMA is off. 
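	 * (Illustrative arithmetic, not a documented chip limit: the loop
	 * below polls up to 10000 times with udelay(10), i.e. it waits at
	 * most roughly 100 ms before giving up and returning -EIO.)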
*/ 951 for (i = 10000; i > 0; i--) { 952 u32 reg = readl(port_mmio + EDMA_CMD_OFS); 953 if (!(reg & EDMA_EN)) 954 return 0; 955 udelay(10); 956 } 957 return -EIO; 958 } 959 960 static int mv_stop_edma(struct ata_port *ap) 961 { 962 void __iomem *port_mmio = mv_ap_base(ap); 963 struct mv_port_priv *pp = ap->private_data; 964 965 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) 966 return 0; 967 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 968 mv_wait_for_edma_empty_idle(ap); 969 if (mv_stop_edma_engine(port_mmio)) { 970 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n"); 971 return -EIO; 972 } 973 return 0; 974 } 975 976 #ifdef ATA_DEBUG 977 static void mv_dump_mem(void __iomem *start, unsigned bytes) 978 { 979 int b, w; 980 for (b = 0; b < bytes; ) { 981 DPRINTK("%p: ", start + b); 982 for (w = 0; b < bytes && w < 4; w++) { 983 printk("%08x ", readl(start + b)); 984 b += sizeof(u32); 985 } 986 printk("\n"); 987 } 988 } 989 #endif 990 991 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes) 992 { 993 #ifdef ATA_DEBUG 994 int b, w; 995 u32 dw; 996 for (b = 0; b < bytes; ) { 997 DPRINTK("%02x: ", b); 998 for (w = 0; b < bytes && w < 4; w++) { 999 (void) pci_read_config_dword(pdev, b, &dw); 1000 printk("%08x ", dw); 1001 b += sizeof(u32); 1002 } 1003 printk("\n"); 1004 } 1005 #endif 1006 } 1007 static void mv_dump_all_regs(void __iomem *mmio_base, int port, 1008 struct pci_dev *pdev) 1009 { 1010 #ifdef ATA_DEBUG 1011 void __iomem *hc_base = mv_hc_base(mmio_base, 1012 port >> MV_PORT_HC_SHIFT); 1013 void __iomem *port_base; 1014 int start_port, num_ports, p, start_hc, num_hcs, hc; 1015 1016 if (0 > port) { 1017 start_hc = start_port = 0; 1018 num_ports = 8; /* shld be benign for 4 port devs */ 1019 num_hcs = 2; 1020 } else { 1021 start_hc = port >> MV_PORT_HC_SHIFT; 1022 start_port = port; 1023 num_ports = num_hcs = 1; 1024 } 1025 DPRINTK("All registers for port(s) %u-%u:\n", start_port, 1026 num_ports > 1 ? 
num_ports - 1 : start_port); 1027 1028 if (NULL != pdev) { 1029 DPRINTK("PCI config space regs:\n"); 1030 mv_dump_pci_cfg(pdev, 0x68); 1031 } 1032 DPRINTK("PCI regs:\n"); 1033 mv_dump_mem(mmio_base+0xc00, 0x3c); 1034 mv_dump_mem(mmio_base+0xd00, 0x34); 1035 mv_dump_mem(mmio_base+0xf00, 0x4); 1036 mv_dump_mem(mmio_base+0x1d00, 0x6c); 1037 for (hc = start_hc; hc < start_hc + num_hcs; hc++) { 1038 hc_base = mv_hc_base(mmio_base, hc); 1039 DPRINTK("HC regs (HC %i):\n", hc); 1040 mv_dump_mem(hc_base, 0x1c); 1041 } 1042 for (p = start_port; p < start_port + num_ports; p++) { 1043 port_base = mv_port_base(mmio_base, p); 1044 DPRINTK("EDMA regs (port %i):\n", p); 1045 mv_dump_mem(port_base, 0x54); 1046 DPRINTK("SATA regs (port %i):\n", p); 1047 mv_dump_mem(port_base+0x300, 0x60); 1048 } 1049 #endif 1050 } 1051 1052 static unsigned int mv_scr_offset(unsigned int sc_reg_in) 1053 { 1054 unsigned int ofs; 1055 1056 switch (sc_reg_in) { 1057 case SCR_STATUS: 1058 case SCR_CONTROL: 1059 case SCR_ERROR: 1060 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32)); 1061 break; 1062 case SCR_ACTIVE: 1063 ofs = SATA_ACTIVE_OFS; /* active is not with the others */ 1064 break; 1065 default: 1066 ofs = 0xffffffffU; 1067 break; 1068 } 1069 return ofs; 1070 } 1071 1072 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val) 1073 { 1074 unsigned int ofs = mv_scr_offset(sc_reg_in); 1075 1076 if (ofs != 0xffffffffU) { 1077 *val = readl(mv_ap_base(ap) + ofs); 1078 return 0; 1079 } else 1080 return -EINVAL; 1081 } 1082 1083 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) 1084 { 1085 unsigned int ofs = mv_scr_offset(sc_reg_in); 1086 1087 if (ofs != 0xffffffffU) { 1088 writelfl(val, mv_ap_base(ap) + ofs); 1089 return 0; 1090 } else 1091 return -EINVAL; 1092 } 1093 1094 static void mv6_dev_config(struct ata_device *adev) 1095 { 1096 /* 1097 * Deal with Gen-II ("mv6") hardware quirks/restrictions: 1098 * 1099 * Gen-II does not support NCQ over a port multiplier 1100 * (no FIS-based switching). 1101 * 1102 * We don't have hob_nsect when doing NCQ commands on Gen-II. 1103 * See mv_qc_prep() for more info. 1104 */ 1105 if (adev->flags & ATA_DFLAG_NCQ) { 1106 if (sata_pmp_attached(adev->link->ap)) { 1107 adev->flags &= ~ATA_DFLAG_NCQ; 1108 ata_dev_printk(adev, KERN_INFO, 1109 "NCQ disabled for command-based switching\n"); 1110 } else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) { 1111 adev->max_sectors = GEN_II_NCQ_MAX_SECTORS; 1112 ata_dev_printk(adev, KERN_INFO, 1113 "max_sectors limited to %u for NCQ\n", 1114 adev->max_sectors); 1115 } 1116 } 1117 } 1118 1119 static int mv_qc_defer(struct ata_queued_cmd *qc) 1120 { 1121 struct ata_link *link = qc->dev->link; 1122 struct ata_port *ap = link->ap; 1123 struct mv_port_priv *pp = ap->private_data; 1124 1125 /* 1126 * Don't allow new commands if we're in a delayed EH state 1127 * for NCQ and/or FIS-based switching. 1128 */ 1129 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) 1130 return ATA_DEFER_PORT; 1131 /* 1132 * If the port is completely idle, then allow the new qc. 1133 */ 1134 if (ap->nr_active_links == 0) 1135 return 0; 1136 1137 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { 1138 /* 1139 * The port is operating in host queuing mode (EDMA). 1140 * It can accomodate a new qc if the qc protocol 1141 * is compatible with the current host queue mode. 1142 */ 1143 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) { 1144 /* 1145 * The host queue (EDMA) is in NCQ mode. 1146 * If the new qc is also an NCQ command, 1147 * then allow the new qc. 
1148 */ 1149 if (qc->tf.protocol == ATA_PROT_NCQ) 1150 return 0; 1151 } else { 1152 /* 1153 * The host queue (EDMA) is in non-NCQ, DMA mode. 1154 * If the new qc is also a non-NCQ, DMA command, 1155 * then allow the new qc. 1156 */ 1157 if (qc->tf.protocol == ATA_PROT_DMA) 1158 return 0; 1159 } 1160 } 1161 return ATA_DEFER_PORT; 1162 } 1163 1164 static void mv_config_fbs(void __iomem *port_mmio, int want_ncq, int want_fbs) 1165 { 1166 u32 new_fiscfg, old_fiscfg; 1167 u32 new_ltmode, old_ltmode; 1168 u32 new_haltcond, old_haltcond; 1169 1170 old_fiscfg = readl(port_mmio + FISCFG_OFS); 1171 old_ltmode = readl(port_mmio + LTMODE_OFS); 1172 old_haltcond = readl(port_mmio + EDMA_HALTCOND_OFS); 1173 1174 new_fiscfg = old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR); 1175 new_ltmode = old_ltmode & ~LTMODE_BIT8; 1176 new_haltcond = old_haltcond | EDMA_ERR_DEV; 1177 1178 if (want_fbs) { 1179 new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC; 1180 new_ltmode = old_ltmode | LTMODE_BIT8; 1181 if (want_ncq) 1182 new_haltcond &= ~EDMA_ERR_DEV; 1183 else 1184 new_fiscfg |= FISCFG_WAIT_DEV_ERR; 1185 } 1186 1187 if (new_fiscfg != old_fiscfg) 1188 writelfl(new_fiscfg, port_mmio + FISCFG_OFS); 1189 if (new_ltmode != old_ltmode) 1190 writelfl(new_ltmode, port_mmio + LTMODE_OFS); 1191 if (new_haltcond != old_haltcond) 1192 writelfl(new_haltcond, port_mmio + EDMA_HALTCOND_OFS); 1193 } 1194 1195 static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq) 1196 { 1197 struct mv_host_priv *hpriv = ap->host->private_data; 1198 u32 old, new; 1199 1200 /* workaround for 88SX60x1 FEr SATA#25 (part 1) */ 1201 old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS); 1202 if (want_ncq) 1203 new = old | (1 << 22); 1204 else 1205 new = old & ~(1 << 22); 1206 if (new != old) 1207 writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS); 1208 } 1209 1210 static void mv_edma_cfg(struct ata_port *ap, int want_ncq) 1211 { 1212 u32 cfg; 1213 struct mv_port_priv *pp = ap->private_data; 1214 struct mv_host_priv *hpriv = ap->host->private_data; 1215 void __iomem *port_mmio = mv_ap_base(ap); 1216 1217 /* set up non-NCQ EDMA configuration */ 1218 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ 1219 pp->pp_flags &= ~MV_PP_FLAG_FBS_EN; 1220 1221 if (IS_GEN_I(hpriv)) 1222 cfg |= (1 << 8); /* enab config burst size mask */ 1223 1224 else if (IS_GEN_II(hpriv)) { 1225 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN; 1226 mv_60x1_errata_sata25(ap, want_ncq); 1227 1228 } else if (IS_GEN_IIE(hpriv)) { 1229 int want_fbs = sata_pmp_attached(ap); 1230 /* 1231 * Possible future enhancement: 1232 * 1233 * The chip can use FBS with non-NCQ, if we allow it, 1234 * But first we need to have the error handling in place 1235 * for this mode (datasheet section 7.3.15.4.2.3). 1236 * So disallow non-NCQ FBS for now. 
1237 */ 1238 want_fbs &= want_ncq; 1239 1240 mv_config_fbs(port_mmio, want_ncq, want_fbs); 1241 1242 if (want_fbs) { 1243 pp->pp_flags |= MV_PP_FLAG_FBS_EN; 1244 cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */ 1245 } 1246 1247 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ 1248 cfg |= (1 << 22); /* enab 4-entry host queue cache */ 1249 if (!IS_SOC(hpriv)) 1250 cfg |= (1 << 18); /* enab early completion */ 1251 if (hpriv->hp_flags & MV_HP_CUT_THROUGH) 1252 cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */ 1253 } 1254 1255 if (want_ncq) { 1256 cfg |= EDMA_CFG_NCQ; 1257 pp->pp_flags |= MV_PP_FLAG_NCQ_EN; 1258 } else 1259 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN; 1260 1261 writelfl(cfg, port_mmio + EDMA_CFG_OFS); 1262 } 1263 1264 static void mv_port_free_dma_mem(struct ata_port *ap) 1265 { 1266 struct mv_host_priv *hpriv = ap->host->private_data; 1267 struct mv_port_priv *pp = ap->private_data; 1268 int tag; 1269 1270 if (pp->crqb) { 1271 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma); 1272 pp->crqb = NULL; 1273 } 1274 if (pp->crpb) { 1275 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma); 1276 pp->crpb = NULL; 1277 } 1278 /* 1279 * For GEN_I, there's no NCQ, so we have only a single sg_tbl. 1280 * For later hardware, we have one unique sg_tbl per NCQ tag. 1281 */ 1282 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { 1283 if (pp->sg_tbl[tag]) { 1284 if (tag == 0 || !IS_GEN_I(hpriv)) 1285 dma_pool_free(hpriv->sg_tbl_pool, 1286 pp->sg_tbl[tag], 1287 pp->sg_tbl_dma[tag]); 1288 pp->sg_tbl[tag] = NULL; 1289 } 1290 } 1291 } 1292 1293 /** 1294 * mv_port_start - Port specific init/start routine. 1295 * @ap: ATA channel to manipulate 1296 * 1297 * Allocate and point to DMA memory, init port private memory, 1298 * zero indices. 1299 * 1300 * LOCKING: 1301 * Inherited from caller. 1302 */ 1303 static int mv_port_start(struct ata_port *ap) 1304 { 1305 struct device *dev = ap->host->dev; 1306 struct mv_host_priv *hpriv = ap->host->private_data; 1307 struct mv_port_priv *pp; 1308 int tag; 1309 1310 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 1311 if (!pp) 1312 return -ENOMEM; 1313 ap->private_data = pp; 1314 1315 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma); 1316 if (!pp->crqb) 1317 return -ENOMEM; 1318 memset(pp->crqb, 0, MV_CRQB_Q_SZ); 1319 1320 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma); 1321 if (!pp->crpb) 1322 goto out_port_free_dma_mem; 1323 memset(pp->crpb, 0, MV_CRPB_Q_SZ); 1324 1325 /* 1326 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl. 1327 * For later hardware, we need one unique sg_tbl per NCQ tag. 1328 */ 1329 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { 1330 if (tag == 0 || !IS_GEN_I(hpriv)) { 1331 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool, 1332 GFP_KERNEL, &pp->sg_tbl_dma[tag]); 1333 if (!pp->sg_tbl[tag]) 1334 goto out_port_free_dma_mem; 1335 } else { 1336 pp->sg_tbl[tag] = pp->sg_tbl[0]; 1337 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0]; 1338 } 1339 } 1340 return 0; 1341 1342 out_port_free_dma_mem: 1343 mv_port_free_dma_mem(ap); 1344 return -ENOMEM; 1345 } 1346 1347 /** 1348 * mv_port_stop - Port specific cleanup/stop routine. 1349 * @ap: ATA channel to manipulate 1350 * 1351 * Stop DMA, cleanup port memory. 1352 * 1353 * LOCKING: 1354 * This routine uses the host lock to protect the DMA stop. 
1355 */ 1356 static void mv_port_stop(struct ata_port *ap) 1357 { 1358 mv_stop_edma(ap); 1359 mv_enable_port_irqs(ap, 0); 1360 mv_port_free_dma_mem(ap); 1361 } 1362 1363 /** 1364 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries 1365 * @qc: queued command whose SG list to source from 1366 * 1367 * Populate the SG list and mark the last entry. 1368 * 1369 * LOCKING: 1370 * Inherited from caller. 1371 */ 1372 static void mv_fill_sg(struct ata_queued_cmd *qc) 1373 { 1374 struct mv_port_priv *pp = qc->ap->private_data; 1375 struct scatterlist *sg; 1376 struct mv_sg *mv_sg, *last_sg = NULL; 1377 unsigned int si; 1378 1379 mv_sg = pp->sg_tbl[qc->tag]; 1380 for_each_sg(qc->sg, sg, qc->n_elem, si) { 1381 dma_addr_t addr = sg_dma_address(sg); 1382 u32 sg_len = sg_dma_len(sg); 1383 1384 while (sg_len) { 1385 u32 offset = addr & 0xffff; 1386 u32 len = sg_len; 1387 1388 if ((offset + sg_len > 0x10000)) 1389 len = 0x10000 - offset; 1390 1391 mv_sg->addr = cpu_to_le32(addr & 0xffffffff); 1392 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16); 1393 mv_sg->flags_size = cpu_to_le32(len & 0xffff); 1394 1395 sg_len -= len; 1396 addr += len; 1397 1398 last_sg = mv_sg; 1399 mv_sg++; 1400 } 1401 } 1402 1403 if (likely(last_sg)) 1404 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL); 1405 } 1406 1407 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) 1408 { 1409 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | 1410 (last ? CRQB_CMD_LAST : 0); 1411 *cmdw = cpu_to_le16(tmp); 1412 } 1413 1414 /** 1415 * mv_qc_prep - Host specific command preparation. 1416 * @qc: queued command to prepare 1417 * 1418 * This routine simply redirects to the general purpose routine 1419 * if command is not DMA. Else, it handles prep of the CRQB 1420 * (command request block), does some sanity checking, and calls 1421 * the SG load routine. 1422 * 1423 * LOCKING: 1424 * Inherited from caller. 1425 */ 1426 static void mv_qc_prep(struct ata_queued_cmd *qc) 1427 { 1428 struct ata_port *ap = qc->ap; 1429 struct mv_port_priv *pp = ap->private_data; 1430 __le16 *cw; 1431 struct ata_taskfile *tf; 1432 u16 flags = 0; 1433 unsigned in_index; 1434 1435 if ((qc->tf.protocol != ATA_PROT_DMA) && 1436 (qc->tf.protocol != ATA_PROT_NCQ)) 1437 return; 1438 1439 /* Fill in command request block 1440 */ 1441 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) 1442 flags |= CRQB_FLAG_READ; 1443 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); 1444 flags |= qc->tag << CRQB_TAG_SHIFT; 1445 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; 1446 1447 /* get current queue index from software */ 1448 in_index = pp->req_idx; 1449 1450 pp->crqb[in_index].sg_addr = 1451 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); 1452 pp->crqb[in_index].sg_addr_hi = 1453 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); 1454 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags); 1455 1456 cw = &pp->crqb[in_index].ata_cmd[0]; 1457 tf = &qc->tf; 1458 1459 /* Sadly, the CRQB cannot accomodate all registers--there are 1460 * only 11 bytes...so we must pick and choose required 1461 * registers based on the command. So, we drop feature and 1462 * hob_feature for [RW] DMA commands, but they are needed for 1463 * NCQ. NCQ will drop hob_nsect. 
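	 *
	 * Packing example (illustrative, derived from mv_crqb_pack_cmd()
	 * above): the final word written below,
	 * mv_crqb_pack_cmd(cw, tf->command, ATA_REG_CMD, 1), encodes
	 *
	 *	tf->command | (ATA_REG_CMD << CRQB_CMD_ADDR_SHIFT)
	 *		    | CRQB_CMD_CS | CRQB_CMD_LAST
	 *
	 * i.e. the register value in bits 7:0, the shadow-register address
	 * in bits 10:8, and the "last entry" marker in bit 15.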
1464 */ 1465 switch (tf->command) { 1466 case ATA_CMD_READ: 1467 case ATA_CMD_READ_EXT: 1468 case ATA_CMD_WRITE: 1469 case ATA_CMD_WRITE_EXT: 1470 case ATA_CMD_WRITE_FUA_EXT: 1471 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0); 1472 break; 1473 case ATA_CMD_FPDMA_READ: 1474 case ATA_CMD_FPDMA_WRITE: 1475 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0); 1476 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0); 1477 break; 1478 default: 1479 /* The only other commands EDMA supports in non-queued and 1480 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none 1481 * of which are defined/used by Linux. If we get here, this 1482 * driver needs work. 1483 * 1484 * FIXME: modify libata to give qc_prep a return value and 1485 * return error here. 1486 */ 1487 BUG_ON(tf->command); 1488 break; 1489 } 1490 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0); 1491 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0); 1492 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0); 1493 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0); 1494 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0); 1495 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0); 1496 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0); 1497 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0); 1498 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ 1499 1500 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 1501 return; 1502 mv_fill_sg(qc); 1503 } 1504 1505 /** 1506 * mv_qc_prep_iie - Host specific command preparation. 1507 * @qc: queued command to prepare 1508 * 1509 * This routine simply redirects to the general purpose routine 1510 * if command is not DMA. Else, it handles prep of the CRQB 1511 * (command request block), does some sanity checking, and calls 1512 * the SG load routine. 1513 * 1514 * LOCKING: 1515 * Inherited from caller. 
1516 */ 1517 static void mv_qc_prep_iie(struct ata_queued_cmd *qc) 1518 { 1519 struct ata_port *ap = qc->ap; 1520 struct mv_port_priv *pp = ap->private_data; 1521 struct mv_crqb_iie *crqb; 1522 struct ata_taskfile *tf; 1523 unsigned in_index; 1524 u32 flags = 0; 1525 1526 if ((qc->tf.protocol != ATA_PROT_DMA) && 1527 (qc->tf.protocol != ATA_PROT_NCQ)) 1528 return; 1529 1530 /* Fill in Gen IIE command request block */ 1531 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) 1532 flags |= CRQB_FLAG_READ; 1533 1534 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); 1535 flags |= qc->tag << CRQB_TAG_SHIFT; 1536 flags |= qc->tag << CRQB_HOSTQ_SHIFT; 1537 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; 1538 1539 /* get current queue index from software */ 1540 in_index = pp->req_idx; 1541 1542 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; 1543 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); 1544 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); 1545 crqb->flags = cpu_to_le32(flags); 1546 1547 tf = &qc->tf; 1548 crqb->ata_cmd[0] = cpu_to_le32( 1549 (tf->command << 16) | 1550 (tf->feature << 24) 1551 ); 1552 crqb->ata_cmd[1] = cpu_to_le32( 1553 (tf->lbal << 0) | 1554 (tf->lbam << 8) | 1555 (tf->lbah << 16) | 1556 (tf->device << 24) 1557 ); 1558 crqb->ata_cmd[2] = cpu_to_le32( 1559 (tf->hob_lbal << 0) | 1560 (tf->hob_lbam << 8) | 1561 (tf->hob_lbah << 16) | 1562 (tf->hob_feature << 24) 1563 ); 1564 crqb->ata_cmd[3] = cpu_to_le32( 1565 (tf->nsect << 0) | 1566 (tf->hob_nsect << 8) 1567 ); 1568 1569 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 1570 return; 1571 mv_fill_sg(qc); 1572 } 1573 1574 /** 1575 * mv_qc_issue - Initiate a command to the host 1576 * @qc: queued command to start 1577 * 1578 * This routine simply redirects to the general purpose routine 1579 * if command is not DMA. Else, it sanity checks our local 1580 * caches of the request producer/consumer indices then enables 1581 * DMA and bumps the request producer index. 1582 * 1583 * LOCKING: 1584 * Inherited from caller. 1585 */ 1586 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) 1587 { 1588 struct ata_port *ap = qc->ap; 1589 void __iomem *port_mmio = mv_ap_base(ap); 1590 struct mv_port_priv *pp = ap->private_data; 1591 u32 in_index; 1592 1593 if ((qc->tf.protocol != ATA_PROT_DMA) && 1594 (qc->tf.protocol != ATA_PROT_NCQ)) { 1595 /* 1596 * We're about to send a non-EDMA capable command to the 1597 * port. Turn off EDMA so there won't be problems accessing 1598 * shadow block, etc registers. 
1599 */ 1600 mv_stop_edma(ap); 1601 mv_enable_port_irqs(ap, ERR_IRQ); 1602 mv_pmp_select(ap, qc->dev->link->pmp); 1603 return ata_sff_qc_issue(qc); 1604 } 1605 1606 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol); 1607 1608 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK; 1609 in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT; 1610 1611 /* and write the request in pointer to kick the EDMA to life */ 1612 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index, 1613 port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 1614 1615 return 0; 1616 } 1617 1618 static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap) 1619 { 1620 struct mv_port_priv *pp = ap->private_data; 1621 struct ata_queued_cmd *qc; 1622 1623 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) 1624 return NULL; 1625 qc = ata_qc_from_tag(ap, ap->link.active_tag); 1626 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) 1627 qc = NULL; 1628 return qc; 1629 } 1630 1631 static void mv_pmp_error_handler(struct ata_port *ap) 1632 { 1633 unsigned int pmp, pmp_map; 1634 struct mv_port_priv *pp = ap->private_data; 1635 1636 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) { 1637 /* 1638 * Perform NCQ error analysis on failed PMPs 1639 * before we freeze the port entirely. 1640 * 1641 * The failed PMPs are marked earlier by mv_pmp_eh_prep(). 1642 */ 1643 pmp_map = pp->delayed_eh_pmp_map; 1644 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH; 1645 for (pmp = 0; pmp_map != 0; pmp++) { 1646 unsigned int this_pmp = (1 << pmp); 1647 if (pmp_map & this_pmp) { 1648 struct ata_link *link = &ap->pmp_link[pmp]; 1649 pmp_map &= ~this_pmp; 1650 ata_eh_analyze_ncq_error(link); 1651 } 1652 } 1653 ata_port_freeze(ap); 1654 } 1655 sata_pmp_error_handler(ap); 1656 } 1657 1658 static unsigned int mv_get_err_pmp_map(struct ata_port *ap) 1659 { 1660 void __iomem *port_mmio = mv_ap_base(ap); 1661 1662 return readl(port_mmio + SATA_TESTCTL_OFS) >> 16; 1663 } 1664 1665 static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map) 1666 { 1667 struct ata_eh_info *ehi; 1668 unsigned int pmp; 1669 1670 /* 1671 * Initialize EH info for PMPs which saw device errors 1672 */ 1673 ehi = &ap->link.eh_info; 1674 for (pmp = 0; pmp_map != 0; pmp++) { 1675 unsigned int this_pmp = (1 << pmp); 1676 if (pmp_map & this_pmp) { 1677 struct ata_link *link = &ap->pmp_link[pmp]; 1678 1679 pmp_map &= ~this_pmp; 1680 ehi = &link->eh_info; 1681 ata_ehi_clear_desc(ehi); 1682 ata_ehi_push_desc(ehi, "dev err"); 1683 ehi->err_mask |= AC_ERR_DEV; 1684 ehi->action |= ATA_EH_RESET; 1685 ata_link_abort(link); 1686 } 1687 } 1688 } 1689 1690 static int mv_req_q_empty(struct ata_port *ap) 1691 { 1692 void __iomem *port_mmio = mv_ap_base(ap); 1693 u32 in_ptr, out_ptr; 1694 1695 in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS) 1696 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; 1697 out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) 1698 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; 1699 return (in_ptr == out_ptr); /* 1 == queue_is_empty */ 1700 } 1701 1702 static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap) 1703 { 1704 struct mv_port_priv *pp = ap->private_data; 1705 int failed_links; 1706 unsigned int old_map, new_map; 1707 1708 /* 1709 * Device error during FBS+NCQ operation: 1710 * 1711 * Set a port flag to prevent further I/O being enqueued. 1712 * Leave the EDMA running to drain outstanding commands from this port. 1713 * Perform the post-mortem/EH only when all responses are complete. 1714 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2). 
1715 */ 1716 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) { 1717 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH; 1718 pp->delayed_eh_pmp_map = 0; 1719 } 1720 old_map = pp->delayed_eh_pmp_map; 1721 new_map = old_map | mv_get_err_pmp_map(ap); 1722 1723 if (old_map != new_map) { 1724 pp->delayed_eh_pmp_map = new_map; 1725 mv_pmp_eh_prep(ap, new_map & ~old_map); 1726 } 1727 failed_links = hweight16(new_map); 1728 1729 ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x " 1730 "failed_links=%d nr_active_links=%d\n", 1731 __func__, pp->delayed_eh_pmp_map, 1732 ap->qc_active, failed_links, 1733 ap->nr_active_links); 1734 1735 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) { 1736 mv_process_crpb_entries(ap, pp); 1737 mv_stop_edma(ap); 1738 mv_eh_freeze(ap); 1739 ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__); 1740 return 1; /* handled */ 1741 } 1742 ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__); 1743 return 1; /* handled */ 1744 } 1745 1746 static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap) 1747 { 1748 /* 1749 * Possible future enhancement: 1750 * 1751 * FBS+non-NCQ operation is not yet implemented. 1752 * See related notes in mv_edma_cfg(). 1753 * 1754 * Device error during FBS+non-NCQ operation: 1755 * 1756 * We need to snapshot the shadow registers for each failed command. 1757 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3). 1758 */ 1759 return 0; /* not handled */ 1760 } 1761 1762 static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause) 1763 { 1764 struct mv_port_priv *pp = ap->private_data; 1765 1766 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) 1767 return 0; /* EDMA was not active: not handled */ 1768 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN)) 1769 return 0; /* FBS was not active: not handled */ 1770 1771 if (!(edma_err_cause & EDMA_ERR_DEV)) 1772 return 0; /* non DEV error: not handled */ 1773 edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT; 1774 if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS)) 1775 return 0; /* other problems: not handled */ 1776 1777 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) { 1778 /* 1779 * EDMA should NOT have self-disabled for this case. 1780 * If it did, then something is wrong elsewhere, 1781 * and we cannot handle it here. 1782 */ 1783 if (edma_err_cause & EDMA_ERR_SELF_DIS) { 1784 ata_port_printk(ap, KERN_WARNING, 1785 "%s: err_cause=0x%x pp_flags=0x%x\n", 1786 __func__, edma_err_cause, pp->pp_flags); 1787 return 0; /* not handled */ 1788 } 1789 return mv_handle_fbs_ncq_dev_err(ap); 1790 } else { 1791 /* 1792 * EDMA should have self-disabled for this case. 1793 * If it did not, then something is wrong elsewhere, 1794 * and we cannot handle it here. 
1795 */ 1796 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) { 1797 ata_port_printk(ap, KERN_WARNING, 1798 "%s: err_cause=0x%x pp_flags=0x%x\n", 1799 __func__, edma_err_cause, pp->pp_flags); 1800 return 0; /* not handled */ 1801 } 1802 return mv_handle_fbs_non_ncq_dev_err(ap); 1803 } 1804 return 0; /* not handled */ 1805 } 1806 1807 static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) 1808 { 1809 struct ata_eh_info *ehi = &ap->link.eh_info; 1810 char *when = "idle"; 1811 1812 ata_ehi_clear_desc(ehi); 1813 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) { 1814 when = "disabled"; 1815 } else if (edma_was_enabled) { 1816 when = "EDMA enabled"; 1817 } else { 1818 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); 1819 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) 1820 when = "polling"; 1821 } 1822 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when); 1823 ehi->err_mask |= AC_ERR_OTHER; 1824 ehi->action |= ATA_EH_RESET; 1825 ata_port_freeze(ap); 1826 } 1827 1828 /** 1829 * mv_err_intr - Handle error interrupts on the port 1830 * @ap: ATA channel to manipulate 1831 * @qc: affected command (non-NCQ), or NULL 1832 * 1833 * Most cases require a full reset of the chip's state machine, 1834 * which also performs a COMRESET. 1835 * Also, if the port disabled DMA, update our cached copy to match. 1836 * 1837 * LOCKING: 1838 * Inherited from caller. 1839 */ 1840 static void mv_err_intr(struct ata_port *ap) 1841 { 1842 void __iomem *port_mmio = mv_ap_base(ap); 1843 u32 edma_err_cause, eh_freeze_mask, serr = 0; 1844 u32 fis_cause = 0; 1845 struct mv_port_priv *pp = ap->private_data; 1846 struct mv_host_priv *hpriv = ap->host->private_data; 1847 unsigned int action = 0, err_mask = 0; 1848 struct ata_eh_info *ehi = &ap->link.eh_info; 1849 struct ata_queued_cmd *qc; 1850 int abort = 0; 1851 1852 /* 1853 * Read and clear the SError and err_cause bits. 1854 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear 1855 * the FIS_IRQ_CAUSE register before clearing edma_err_cause. 1856 */ 1857 sata_scr_read(&ap->link, SCR_ERROR, &serr); 1858 sata_scr_write_flush(&ap->link, SCR_ERROR, serr); 1859 1860 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1861 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { 1862 fis_cause = readl(port_mmio + SATA_FIS_IRQ_CAUSE_OFS); 1863 writelfl(~fis_cause, port_mmio + SATA_FIS_IRQ_CAUSE_OFS); 1864 } 1865 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1866 1867 if (edma_err_cause & EDMA_ERR_DEV) { 1868 /* 1869 * Device errors during FIS-based switching operation 1870 * require special handling. 
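		 * mv_handle_dev_err() below takes over completely when it
		 * can deal with the error, in which case we are done here.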
1871 */ 1872 if (mv_handle_dev_err(ap, edma_err_cause)) 1873 return; 1874 } 1875 1876 qc = mv_get_active_qc(ap); 1877 ata_ehi_clear_desc(ehi); 1878 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x", 1879 edma_err_cause, pp->pp_flags); 1880 1881 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { 1882 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause); 1883 if (fis_cause & SATA_FIS_IRQ_AN) { 1884 u32 ec = edma_err_cause & 1885 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT); 1886 sata_async_notification(ap); 1887 if (!ec) 1888 return; /* Just an AN; no need for the nukes */ 1889 ata_ehi_push_desc(ehi, "SDB notify"); 1890 } 1891 } 1892 /* 1893 * All generations share these EDMA error cause bits: 1894 */ 1895 if (edma_err_cause & EDMA_ERR_DEV) { 1896 err_mask |= AC_ERR_DEV; 1897 action |= ATA_EH_RESET; 1898 ata_ehi_push_desc(ehi, "dev error"); 1899 } 1900 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | 1901 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | 1902 EDMA_ERR_INTRL_PAR)) { 1903 err_mask |= AC_ERR_ATA_BUS; 1904 action |= ATA_EH_RESET; 1905 ata_ehi_push_desc(ehi, "parity error"); 1906 } 1907 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) { 1908 ata_ehi_hotplugged(ehi); 1909 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? 1910 "dev disconnect" : "dev connect"); 1911 action |= ATA_EH_RESET; 1912 } 1913 1914 /* 1915 * Gen-I has a different SELF_DIS bit, 1916 * different FREEZE bits, and no SERR bit: 1917 */ 1918 if (IS_GEN_I(hpriv)) { 1919 eh_freeze_mask = EDMA_EH_FREEZE_5; 1920 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) { 1921 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 1922 ata_ehi_push_desc(ehi, "EDMA self-disable"); 1923 } 1924 } else { 1925 eh_freeze_mask = EDMA_EH_FREEZE; 1926 if (edma_err_cause & EDMA_ERR_SELF_DIS) { 1927 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 1928 ata_ehi_push_desc(ehi, "EDMA self-disable"); 1929 } 1930 if (edma_err_cause & EDMA_ERR_SERR) { 1931 ata_ehi_push_desc(ehi, "SError=%08x", serr); 1932 err_mask |= AC_ERR_ATA_BUS; 1933 action |= ATA_EH_RESET; 1934 } 1935 } 1936 1937 if (!err_mask) { 1938 err_mask = AC_ERR_OTHER; 1939 action |= ATA_EH_RESET; 1940 } 1941 1942 ehi->serror |= serr; 1943 ehi->action |= action; 1944 1945 if (qc) 1946 qc->err_mask |= err_mask; 1947 else 1948 ehi->err_mask |= err_mask; 1949 1950 if (err_mask == AC_ERR_DEV) { 1951 /* 1952 * Cannot do ata_port_freeze() here, 1953 * because it would kill PIO access, 1954 * which is needed for further diagnosis. 1955 */ 1956 mv_eh_freeze(ap); 1957 abort = 1; 1958 } else if (edma_err_cause & eh_freeze_mask) { 1959 /* 1960 * Note to self: ata_port_freeze() calls ata_port_abort() 1961 */ 1962 ata_port_freeze(ap); 1963 } else { 1964 abort = 1; 1965 } 1966 1967 if (abort) { 1968 if (qc) 1969 ata_link_abort(qc->dev->link); 1970 else 1971 ata_port_abort(ap); 1972 } 1973 } 1974 1975 static void mv_process_crpb_response(struct ata_port *ap, 1976 struct mv_crpb *response, unsigned int tag, int ncq_enabled) 1977 { 1978 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); 1979 1980 if (qc) { 1981 u8 ata_status; 1982 u16 edma_status = le16_to_cpu(response->flags); 1983 /* 1984 * edma_status from a response queue entry: 1985 * LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only). 1986 * MSB is saved ATA status from command completion. 1987 */ 1988 if (!ncq_enabled) { 1989 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV; 1990 if (err_cause) { 1991 /* 1992 * Error will be seen/handled by mv_err_intr(). 1993 * So do nothing at all here. 
				 */
				return;
			}
		}
		ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
		if (!ac_err_mask(ata_status))
			ata_qc_complete(qc);
		/* else: leave it for mv_err_intr() */
	} else {
		ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
				__func__, tag);
	}
}

static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;
	bool work_done = false;
	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);

	/* Get the hardware queue position index */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* Process new responses since the last time we looked */
	while (in_index != pp->resp_idx) {
		unsigned int tag;
		struct mv_crpb *response = &pp->crpb[pp->resp_idx];

		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;

		if (IS_GEN_I(hpriv)) {
			/* 50xx: no NCQ, only one command active at a time */
			tag = ap->link.active_tag;
		} else {
			/* Gen II/IIE: get command tag from CRPB entry */
			tag = le16_to_cpu(response->id) & 0x1f;
		}
		mv_process_crpb_response(ap, response, tag, ncq_enabled);
		work_done = true;
	}

	/* Update the software queue position index in hardware */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

static void mv_port_intr(struct ata_port *ap, u32 port_cause)
{
	struct mv_port_priv *pp;
	int edma_was_enabled;

	if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
		mv_unexpected_intr(ap, 0);
		return;
	}
	/*
	 * Grab a snapshot of the EDMA_EN flag setting,
	 * so that we have a consistent view for this port,
	 * even if one of the routines we call changes it.
	 */
	pp = ap->private_data;
	edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	/*
	 * Process completed CRPB response(s) before other events.
	 */
	if (edma_was_enabled && (port_cause & DONE_IRQ)) {
		mv_process_crpb_entries(ap, pp);
		if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
			mv_handle_fbs_ncq_dev_err(ap);
	}
	/*
	 * Handle chip-reported errors, or continue on to handle PIO.
	 */
	if (unlikely(port_cause & ERR_IRQ)) {
		mv_err_intr(ap);
	} else if (!edma_was_enabled) {
		struct ata_queued_cmd *qc = mv_get_active_qc(ap);
		if (qc)
			ata_sff_host_intr(ap, qc);
		else
			mv_unexpected_intr(ap, edma_was_enabled);
	}
}

/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @main_irq_cause: Main interrupt cause register for the chip.
 *
 * LOCKING:
 * Inherited from caller.
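 *
 * Per-port DONE_IRQ/ERR_IRQ bits are extracted from @main_irq_cause via
 * MV_PORT_TO_SHIFT_AND_HARDPORT(); only the per-hc hc_irq_cause register
 * needs an explicit write here, to ack the ports handled on this pass.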
2090 */ 2091 static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) 2092 { 2093 struct mv_host_priv *hpriv = host->private_data; 2094 void __iomem *mmio = hpriv->base, *hc_mmio; 2095 unsigned int handled = 0, port; 2096 2097 for (port = 0; port < hpriv->n_ports; port++) { 2098 struct ata_port *ap = host->ports[port]; 2099 unsigned int p, shift, hardport, port_cause; 2100 2101 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); 2102 /* 2103 * Each hc within the host has its own hc_irq_cause register, 2104 * where the interrupting ports bits get ack'd. 2105 */ 2106 if (hardport == 0) { /* first port on this hc ? */ 2107 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND; 2108 u32 port_mask, ack_irqs; 2109 /* 2110 * Skip this entire hc if nothing pending for any ports 2111 */ 2112 if (!hc_cause) { 2113 port += MV_PORTS_PER_HC - 1; 2114 continue; 2115 } 2116 /* 2117 * We don't need/want to read the hc_irq_cause register, 2118 * because doing so hurts performance, and 2119 * main_irq_cause already gives us everything we need. 2120 * 2121 * But we do have to *write* to the hc_irq_cause to ack 2122 * the ports that we are handling this time through. 2123 * 2124 * This requires that we create a bitmap for those 2125 * ports which interrupted us, and use that bitmap 2126 * to ack (only) those ports via hc_irq_cause. 2127 */ 2128 ack_irqs = 0; 2129 for (p = 0; p < MV_PORTS_PER_HC; ++p) { 2130 if ((port + p) >= hpriv->n_ports) 2131 break; 2132 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2); 2133 if (hc_cause & port_mask) 2134 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p; 2135 } 2136 hc_mmio = mv_hc_base_from_port(mmio, port); 2137 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS); 2138 handled = 1; 2139 } 2140 /* 2141 * Handle interrupts signalled for this port: 2142 */ 2143 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ); 2144 if (port_cause) 2145 mv_port_intr(ap, port_cause); 2146 } 2147 return handled; 2148 } 2149 2150 static int mv_pci_error(struct ata_host *host, void __iomem *mmio) 2151 { 2152 struct mv_host_priv *hpriv = host->private_data; 2153 struct ata_port *ap; 2154 struct ata_queued_cmd *qc; 2155 struct ata_eh_info *ehi; 2156 unsigned int i, err_mask, printed = 0; 2157 u32 err_cause; 2158 2159 err_cause = readl(mmio + hpriv->irq_cause_ofs); 2160 2161 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", 2162 err_cause); 2163 2164 DPRINTK("All regs @ PCI error\n"); 2165 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); 2166 2167 writelfl(0, mmio + hpriv->irq_cause_ofs); 2168 2169 for (i = 0; i < host->n_ports; i++) { 2170 ap = host->ports[i]; 2171 if (!ata_link_offline(&ap->link)) { 2172 ehi = &ap->link.eh_info; 2173 ata_ehi_clear_desc(ehi); 2174 if (!printed++) 2175 ata_ehi_push_desc(ehi, 2176 "PCI err cause 0x%08x", err_cause); 2177 err_mask = AC_ERR_HOST_BUS; 2178 ehi->action = ATA_EH_RESET; 2179 qc = ata_qc_from_tag(ap, ap->link.active_tag); 2180 if (qc) 2181 qc->err_mask |= err_mask; 2182 else 2183 ehi->err_mask |= err_mask; 2184 2185 ata_port_freeze(ap); 2186 } 2187 } 2188 return 1; /* handled */ 2189 } 2190 2191 /** 2192 * mv_interrupt - Main interrupt event handler 2193 * @irq: unused 2194 * @dev_instance: private data; in this case the host structure 2195 * 2196 * Read the read only register to determine if any host 2197 * controllers have pending interrupts. If so, call lower level 2198 * routine to handle. Also check for PCI errors which are only 2199 * reported here. 
2200 * 2201 * LOCKING: 2202 * This routine holds the host lock while processing pending 2203 * interrupts. 2204 */ 2205 static irqreturn_t mv_interrupt(int irq, void *dev_instance) 2206 { 2207 struct ata_host *host = dev_instance; 2208 struct mv_host_priv *hpriv = host->private_data; 2209 unsigned int handled = 0; 2210 u32 main_irq_cause, pending_irqs; 2211 2212 spin_lock(&host->lock); 2213 main_irq_cause = readl(hpriv->main_irq_cause_addr); 2214 pending_irqs = main_irq_cause & hpriv->main_irq_mask; 2215 /* 2216 * Deal with cases where we either have nothing pending, or have read 2217 * a bogus register value which can indicate HW removal or PCI fault. 2218 */ 2219 if (pending_irqs && main_irq_cause != 0xffffffffU) { 2220 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv))) 2221 handled = mv_pci_error(host, hpriv->base); 2222 else 2223 handled = mv_host_intr(host, pending_irqs); 2224 } 2225 spin_unlock(&host->lock); 2226 return IRQ_RETVAL(handled); 2227 } 2228 2229 static unsigned int mv5_scr_offset(unsigned int sc_reg_in) 2230 { 2231 unsigned int ofs; 2232 2233 switch (sc_reg_in) { 2234 case SCR_STATUS: 2235 case SCR_ERROR: 2236 case SCR_CONTROL: 2237 ofs = sc_reg_in * sizeof(u32); 2238 break; 2239 default: 2240 ofs = 0xffffffffU; 2241 break; 2242 } 2243 return ofs; 2244 } 2245 2246 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val) 2247 { 2248 struct mv_host_priv *hpriv = ap->host->private_data; 2249 void __iomem *mmio = hpriv->base; 2250 void __iomem *addr = mv5_phy_base(mmio, ap->port_no); 2251 unsigned int ofs = mv5_scr_offset(sc_reg_in); 2252 2253 if (ofs != 0xffffffffU) { 2254 *val = readl(addr + ofs); 2255 return 0; 2256 } else 2257 return -EINVAL; 2258 } 2259 2260 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) 2261 { 2262 struct mv_host_priv *hpriv = ap->host->private_data; 2263 void __iomem *mmio = hpriv->base; 2264 void __iomem *addr = mv5_phy_base(mmio, ap->port_no); 2265 unsigned int ofs = mv5_scr_offset(sc_reg_in); 2266 2267 if (ofs != 0xffffffffU) { 2268 writelfl(val, addr + ofs); 2269 return 0; 2270 } else 2271 return -EINVAL; 2272 } 2273 2274 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio) 2275 { 2276 struct pci_dev *pdev = to_pci_dev(host->dev); 2277 int early_5080; 2278 2279 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0); 2280 2281 if (!early_5080) { 2282 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL); 2283 tmp |= (1 << 0); 2284 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); 2285 } 2286 2287 mv_reset_pci_bus(host, mmio); 2288 } 2289 2290 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) 2291 { 2292 writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS); 2293 } 2294 2295 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, 2296 void __iomem *mmio) 2297 { 2298 void __iomem *phy_mmio = mv5_phy_base(mmio, idx); 2299 u32 tmp; 2300 2301 tmp = readl(phy_mmio + MV5_PHY_MODE); 2302 2303 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */ 2304 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */ 2305 } 2306 2307 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) 2308 { 2309 u32 tmp; 2310 2311 writel(0, mmio + MV_GPIO_PORT_CTL_OFS); 2312 2313 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */ 2314 2315 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL); 2316 tmp |= ~(1 << 0); 2317 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); 2318 } 2319 2320 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 2321 unsigned int port) 
2322 { 2323 void __iomem *phy_mmio = mv5_phy_base(mmio, port); 2324 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5); 2325 u32 tmp; 2326 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0); 2327 2328 if (fix_apm_sq) { 2329 tmp = readl(phy_mmio + MV5_LTMODE_OFS); 2330 tmp |= (1 << 19); 2331 writel(tmp, phy_mmio + MV5_LTMODE_OFS); 2332 2333 tmp = readl(phy_mmio + MV5_PHY_CTL_OFS); 2334 tmp &= ~0x3; 2335 tmp |= 0x1; 2336 writel(tmp, phy_mmio + MV5_PHY_CTL_OFS); 2337 } 2338 2339 tmp = readl(phy_mmio + MV5_PHY_MODE); 2340 tmp &= ~mask; 2341 tmp |= hpriv->signal[port].pre; 2342 tmp |= hpriv->signal[port].amps; 2343 writel(tmp, phy_mmio + MV5_PHY_MODE); 2344 } 2345 2346 2347 #undef ZERO 2348 #define ZERO(reg) writel(0, port_mmio + (reg)) 2349 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, 2350 unsigned int port) 2351 { 2352 void __iomem *port_mmio = mv_port_base(mmio, port); 2353 2354 mv_reset_channel(hpriv, mmio, port); 2355 2356 ZERO(0x028); /* command */ 2357 writel(0x11f, port_mmio + EDMA_CFG_OFS); 2358 ZERO(0x004); /* timer */ 2359 ZERO(0x008); /* irq err cause */ 2360 ZERO(0x00c); /* irq err mask */ 2361 ZERO(0x010); /* rq bah */ 2362 ZERO(0x014); /* rq inp */ 2363 ZERO(0x018); /* rq outp */ 2364 ZERO(0x01c); /* respq bah */ 2365 ZERO(0x024); /* respq outp */ 2366 ZERO(0x020); /* respq inp */ 2367 ZERO(0x02c); /* test control */ 2368 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS); 2369 } 2370 #undef ZERO 2371 2372 #define ZERO(reg) writel(0, hc_mmio + (reg)) 2373 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 2374 unsigned int hc) 2375 { 2376 void __iomem *hc_mmio = mv_hc_base(mmio, hc); 2377 u32 tmp; 2378 2379 ZERO(0x00c); 2380 ZERO(0x010); 2381 ZERO(0x014); 2382 ZERO(0x018); 2383 2384 tmp = readl(hc_mmio + 0x20); 2385 tmp &= 0x1c1c1c1c; 2386 tmp |= 0x03030303; 2387 writel(tmp, hc_mmio + 0x20); 2388 } 2389 #undef ZERO 2390 2391 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 2392 unsigned int n_hc) 2393 { 2394 unsigned int hc, port; 2395 2396 for (hc = 0; hc < n_hc; hc++) { 2397 for (port = 0; port < MV_PORTS_PER_HC; port++) 2398 mv5_reset_hc_port(hpriv, mmio, 2399 (hc * MV_PORTS_PER_HC) + port); 2400 2401 mv5_reset_one_hc(hpriv, mmio, hc); 2402 } 2403 2404 return 0; 2405 } 2406 2407 #undef ZERO 2408 #define ZERO(reg) writel(0, mmio + (reg)) 2409 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio) 2410 { 2411 struct mv_host_priv *hpriv = host->private_data; 2412 u32 tmp; 2413 2414 tmp = readl(mmio + MV_PCI_MODE_OFS); 2415 tmp &= 0xff00ffff; 2416 writel(tmp, mmio + MV_PCI_MODE_OFS); 2417 2418 ZERO(MV_PCI_DISC_TIMER); 2419 ZERO(MV_PCI_MSI_TRIGGER); 2420 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS); 2421 ZERO(MV_PCI_SERR_MASK); 2422 ZERO(hpriv->irq_cause_ofs); 2423 ZERO(hpriv->irq_mask_ofs); 2424 ZERO(MV_PCI_ERR_LOW_ADDRESS); 2425 ZERO(MV_PCI_ERR_HIGH_ADDRESS); 2426 ZERO(MV_PCI_ERR_ATTRIBUTE); 2427 ZERO(MV_PCI_ERR_COMMAND); 2428 } 2429 #undef ZERO 2430 2431 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) 2432 { 2433 u32 tmp; 2434 2435 mv5_reset_flash(hpriv, mmio); 2436 2437 tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS); 2438 tmp &= 0x3; 2439 tmp |= (1 << 5) | (1 << 6); 2440 writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS); 2441 } 2442 2443 /** 2444 * mv6_reset_hc - Perform the 6xxx global soft reset 2445 * @mmio: base address of the HBA 2446 * 2447 * This routine only applies to 6xxx parts. 2448 * 2449 * LOCKING: 2450 * Inherited from caller. 
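 *
 * The sequence is: halt the PCI master, wait for PCI_MASTER_EMPTY,
 * assert GLOB_SFT_RST, then de-assert it while re-enabling the
 * PCI master.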
2451 */ 2452 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 2453 unsigned int n_hc) 2454 { 2455 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS; 2456 int i, rc = 0; 2457 u32 t; 2458 2459 /* Following procedure defined in PCI "main command and status 2460 * register" table. 2461 */ 2462 t = readl(reg); 2463 writel(t | STOP_PCI_MASTER, reg); 2464 2465 for (i = 0; i < 1000; i++) { 2466 udelay(1); 2467 t = readl(reg); 2468 if (PCI_MASTER_EMPTY & t) 2469 break; 2470 } 2471 if (!(PCI_MASTER_EMPTY & t)) { 2472 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n"); 2473 rc = 1; 2474 goto done; 2475 } 2476 2477 /* set reset */ 2478 i = 5; 2479 do { 2480 writel(t | GLOB_SFT_RST, reg); 2481 t = readl(reg); 2482 udelay(1); 2483 } while (!(GLOB_SFT_RST & t) && (i-- > 0)); 2484 2485 if (!(GLOB_SFT_RST & t)) { 2486 printk(KERN_ERR DRV_NAME ": can't set global reset\n"); 2487 rc = 1; 2488 goto done; 2489 } 2490 2491 /* clear reset and *reenable the PCI master* (not mentioned in spec) */ 2492 i = 5; 2493 do { 2494 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg); 2495 t = readl(reg); 2496 udelay(1); 2497 } while ((GLOB_SFT_RST & t) && (i-- > 0)); 2498 2499 if (GLOB_SFT_RST & t) { 2500 printk(KERN_ERR DRV_NAME ": can't clear global reset\n"); 2501 rc = 1; 2502 } 2503 done: 2504 return rc; 2505 } 2506 2507 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, 2508 void __iomem *mmio) 2509 { 2510 void __iomem *port_mmio; 2511 u32 tmp; 2512 2513 tmp = readl(mmio + MV_RESET_CFG_OFS); 2514 if ((tmp & (1 << 0)) == 0) { 2515 hpriv->signal[idx].amps = 0x7 << 8; 2516 hpriv->signal[idx].pre = 0x1 << 5; 2517 return; 2518 } 2519 2520 port_mmio = mv_port_base(mmio, idx); 2521 tmp = readl(port_mmio + PHY_MODE2); 2522 2523 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */ 2524 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */ 2525 } 2526 2527 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) 2528 { 2529 writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS); 2530 } 2531 2532 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 2533 unsigned int port) 2534 { 2535 void __iomem *port_mmio = mv_port_base(mmio, port); 2536 2537 u32 hp_flags = hpriv->hp_flags; 2538 int fix_phy_mode2 = 2539 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); 2540 int fix_phy_mode4 = 2541 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); 2542 u32 m2, m3; 2543 2544 if (fix_phy_mode2) { 2545 m2 = readl(port_mmio + PHY_MODE2); 2546 m2 &= ~(1 << 16); 2547 m2 |= (1 << 31); 2548 writel(m2, port_mmio + PHY_MODE2); 2549 2550 udelay(200); 2551 2552 m2 = readl(port_mmio + PHY_MODE2); 2553 m2 &= ~((1 << 16) | (1 << 31)); 2554 writel(m2, port_mmio + PHY_MODE2); 2555 2556 udelay(200); 2557 } 2558 2559 /* 2560 * Gen-II/IIe PHY_MODE3 errata RM#2: 2561 * Achieves better receiver noise performance than the h/w default: 2562 */ 2563 m3 = readl(port_mmio + PHY_MODE3); 2564 m3 = (m3 & 0x1f) | (0x5555601 << 5); 2565 2566 /* Guideline 88F5182 (GL# SATA-S11) */ 2567 if (IS_SOC(hpriv)) 2568 m3 &= ~0x1c; 2569 2570 if (fix_phy_mode4) { 2571 u32 m4 = readl(port_mmio + PHY_MODE4); 2572 /* 2573 * Enforce reserved-bit restrictions on GenIIe devices only. 2574 * For earlier chipsets, force only the internal config field 2575 * (workaround for errata FEr SATA#10 part 1). 
		 */
		if (IS_GEN_IIE(hpriv))
			m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
		else
			m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
		writel(m4, port_mmio + PHY_MODE4);
	}
	/*
	 * Workaround for 60x1-B2 errata SATA#13:
	 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
	 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
	 */
	writel(m3, port_mmio + PHY_MODE3);

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
}

#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);

}

#undef ZERO

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}

static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}

static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS);

	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifcfg |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS);
}

static void mv_reset_channel(struct mv_host_priv
*hpriv, void __iomem *mmio, 2703 unsigned int port_no) 2704 { 2705 void __iomem *port_mmio = mv_port_base(mmio, port_no); 2706 2707 /* 2708 * The datasheet warns against setting EDMA_RESET when EDMA is active 2709 * (but doesn't say what the problem might be). So we first try 2710 * to disable the EDMA engine before doing the EDMA_RESET operation. 2711 */ 2712 mv_stop_edma_engine(port_mmio); 2713 writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS); 2714 2715 if (!IS_GEN_I(hpriv)) { 2716 /* Enable 3.0gb/s link speed: this survives EDMA_RESET */ 2717 mv_setup_ifcfg(port_mmio, 1); 2718 } 2719 /* 2720 * Strobing EDMA_RESET here causes a hard reset of the SATA transport, 2721 * link, and physical layers. It resets all SATA interface registers 2722 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev. 2723 */ 2724 writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS); 2725 udelay(25); /* allow reset propagation */ 2726 writelfl(0, port_mmio + EDMA_CMD_OFS); 2727 2728 hpriv->ops->phy_errata(hpriv, mmio, port_no); 2729 2730 if (IS_GEN_I(hpriv)) 2731 mdelay(1); 2732 } 2733 2734 static void mv_pmp_select(struct ata_port *ap, int pmp) 2735 { 2736 if (sata_pmp_supported(ap)) { 2737 void __iomem *port_mmio = mv_ap_base(ap); 2738 u32 reg = readl(port_mmio + SATA_IFCTL_OFS); 2739 int old = reg & 0xf; 2740 2741 if (old != pmp) { 2742 reg = (reg & ~0xf) | pmp; 2743 writelfl(reg, port_mmio + SATA_IFCTL_OFS); 2744 } 2745 } 2746 } 2747 2748 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, 2749 unsigned long deadline) 2750 { 2751 mv_pmp_select(link->ap, sata_srst_pmp(link)); 2752 return sata_std_hardreset(link, class, deadline); 2753 } 2754 2755 static int mv_softreset(struct ata_link *link, unsigned int *class, 2756 unsigned long deadline) 2757 { 2758 mv_pmp_select(link->ap, sata_srst_pmp(link)); 2759 return ata_sff_softreset(link, class, deadline); 2760 } 2761 2762 static int mv_hardreset(struct ata_link *link, unsigned int *class, 2763 unsigned long deadline) 2764 { 2765 struct ata_port *ap = link->ap; 2766 struct mv_host_priv *hpriv = ap->host->private_data; 2767 struct mv_port_priv *pp = ap->private_data; 2768 void __iomem *mmio = hpriv->base; 2769 int rc, attempts = 0, extra = 0; 2770 u32 sstatus; 2771 bool online; 2772 2773 mv_reset_channel(hpriv, mmio, ap->port_no); 2774 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 2775 2776 /* Workaround for errata FEr SATA#10 (part 2) */ 2777 do { 2778 const unsigned long *timing = 2779 sata_ehc_deb_timing(&link->eh_context); 2780 2781 rc = sata_link_hardreset(link, timing, deadline + extra, 2782 &online, NULL); 2783 rc = online ? 
-EAGAIN : rc; 2784 if (rc) 2785 return rc; 2786 sata_scr_read(link, SCR_STATUS, &sstatus); 2787 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) { 2788 /* Force 1.5gb/s link speed and try again */ 2789 mv_setup_ifcfg(mv_ap_base(ap), 0); 2790 if (time_after(jiffies + HZ, deadline)) 2791 extra = HZ; /* only extend it once, max */ 2792 } 2793 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123); 2794 2795 return rc; 2796 } 2797 2798 static void mv_eh_freeze(struct ata_port *ap) 2799 { 2800 mv_stop_edma(ap); 2801 mv_enable_port_irqs(ap, 0); 2802 } 2803 2804 static void mv_eh_thaw(struct ata_port *ap) 2805 { 2806 struct mv_host_priv *hpriv = ap->host->private_data; 2807 unsigned int port = ap->port_no; 2808 unsigned int hardport = mv_hardport_from_port(port); 2809 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port); 2810 void __iomem *port_mmio = mv_ap_base(ap); 2811 u32 hc_irq_cause; 2812 2813 /* clear EDMA errors on this port */ 2814 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 2815 2816 /* clear pending irq events */ 2817 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); 2818 hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport); 2819 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); 2820 2821 mv_enable_port_irqs(ap, ERR_IRQ); 2822 } 2823 2824 /** 2825 * mv_port_init - Perform some early initialization on a single port. 2826 * @port: libata data structure storing shadow register addresses 2827 * @port_mmio: base address of the port 2828 * 2829 * Initialize shadow register mmio addresses, clear outstanding 2830 * interrupts on the port, and unmask interrupts for the future 2831 * start of the port. 2832 * 2833 * LOCKING: 2834 * Inherited from caller. 2835 */ 2836 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) 2837 { 2838 void __iomem *shd_base = port_mmio + SHD_BLK_OFS; 2839 unsigned serr_ofs; 2840 2841 /* PIO related setup 2842 */ 2843 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA); 2844 port->error_addr = 2845 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR); 2846 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT); 2847 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL); 2848 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM); 2849 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH); 2850 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE); 2851 port->status_addr = 2852 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS); 2853 /* special case: control/altstatus doesn't have ATA_REG_ address */ 2854 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS; 2855 2856 /* unused: */ 2857 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL; 2858 2859 /* Clear any currently outstanding port interrupt conditions */ 2860 serr_ofs = mv_scr_offset(SCR_ERROR); 2861 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs); 2862 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 2863 2864 /* unmask all non-transient EDMA error interrupts */ 2865 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS); 2866 2867 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", 2868 readl(port_mmio + EDMA_CFG_OFS), 2869 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS), 2870 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS)); 2871 } 2872 2873 static unsigned int mv_in_pcix_mode(struct ata_host *host) 2874 { 2875 struct mv_host_priv *hpriv = host->private_data; 2876 void __iomem *mmio = hpriv->base; 2877 u32 reg; 2878 2879 if (IS_SOC(hpriv) || 
!IS_PCIE(hpriv)) 2880 return 0; /* not PCI-X capable */ 2881 reg = readl(mmio + MV_PCI_MODE_OFS); 2882 if ((reg & MV_PCI_MODE_MASK) == 0) 2883 return 0; /* conventional PCI mode */ 2884 return 1; /* chip is in PCI-X mode */ 2885 } 2886 2887 static int mv_pci_cut_through_okay(struct ata_host *host) 2888 { 2889 struct mv_host_priv *hpriv = host->private_data; 2890 void __iomem *mmio = hpriv->base; 2891 u32 reg; 2892 2893 if (!mv_in_pcix_mode(host)) { 2894 reg = readl(mmio + PCI_COMMAND_OFS); 2895 if (reg & PCI_COMMAND_MRDTRIG) 2896 return 0; /* not okay */ 2897 } 2898 return 1; /* okay */ 2899 } 2900 2901 static int mv_chip_id(struct ata_host *host, unsigned int board_idx) 2902 { 2903 struct pci_dev *pdev = to_pci_dev(host->dev); 2904 struct mv_host_priv *hpriv = host->private_data; 2905 u32 hp_flags = hpriv->hp_flags; 2906 2907 switch (board_idx) { 2908 case chip_5080: 2909 hpriv->ops = &mv5xxx_ops; 2910 hp_flags |= MV_HP_GEN_I; 2911 2912 switch (pdev->revision) { 2913 case 0x1: 2914 hp_flags |= MV_HP_ERRATA_50XXB0; 2915 break; 2916 case 0x3: 2917 hp_flags |= MV_HP_ERRATA_50XXB2; 2918 break; 2919 default: 2920 dev_printk(KERN_WARNING, &pdev->dev, 2921 "Applying 50XXB2 workarounds to unknown rev\n"); 2922 hp_flags |= MV_HP_ERRATA_50XXB2; 2923 break; 2924 } 2925 break; 2926 2927 case chip_504x: 2928 case chip_508x: 2929 hpriv->ops = &mv5xxx_ops; 2930 hp_flags |= MV_HP_GEN_I; 2931 2932 switch (pdev->revision) { 2933 case 0x0: 2934 hp_flags |= MV_HP_ERRATA_50XXB0; 2935 break; 2936 case 0x3: 2937 hp_flags |= MV_HP_ERRATA_50XXB2; 2938 break; 2939 default: 2940 dev_printk(KERN_WARNING, &pdev->dev, 2941 "Applying B2 workarounds to unknown rev\n"); 2942 hp_flags |= MV_HP_ERRATA_50XXB2; 2943 break; 2944 } 2945 break; 2946 2947 case chip_604x: 2948 case chip_608x: 2949 hpriv->ops = &mv6xxx_ops; 2950 hp_flags |= MV_HP_GEN_II; 2951 2952 switch (pdev->revision) { 2953 case 0x7: 2954 hp_flags |= MV_HP_ERRATA_60X1B2; 2955 break; 2956 case 0x9: 2957 hp_flags |= MV_HP_ERRATA_60X1C0; 2958 break; 2959 default: 2960 dev_printk(KERN_WARNING, &pdev->dev, 2961 "Applying B2 workarounds to unknown rev\n"); 2962 hp_flags |= MV_HP_ERRATA_60X1B2; 2963 break; 2964 } 2965 break; 2966 2967 case chip_7042: 2968 hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH; 2969 if (pdev->vendor == PCI_VENDOR_ID_TTI && 2970 (pdev->device == 0x2300 || pdev->device == 0x2310)) 2971 { 2972 /* 2973 * Highpoint RocketRAID PCIe 23xx series cards: 2974 * 2975 * Unconfigured drives are treated as "Legacy" 2976 * by the BIOS, and it overwrites sector 8 with 2977 * a "Lgcy" metadata block prior to Linux boot. 2978 * 2979 * Configured drives (RAID or JBOD) leave sector 8 2980 * alone, but instead overwrite a high numbered 2981 * sector for the RAID metadata. This sector can 2982 * be determined exactly, by truncating the physical 2983 * drive capacity to a nice even GB value. 2984 * 2985 * RAID metadata is at: (dev->n_sectors & ~0xfffff) 2986 * 2987 * Warn the user, lest they think we're just buggy. 2988 */ 2989 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID" 2990 " BIOS CORRUPTS DATA on all attached drives," 2991 " regardless of if/how they are configured." 
2992 " BEWARE!\n"); 2993 printk(KERN_WARNING DRV_NAME ": For data safety, do not" 2994 " use sectors 8-9 on \"Legacy\" drives," 2995 " and avoid the final two gigabytes on" 2996 " all RocketRAID BIOS initialized drives.\n"); 2997 } 2998 /* drop through */ 2999 case chip_6042: 3000 hpriv->ops = &mv6xxx_ops; 3001 hp_flags |= MV_HP_GEN_IIE; 3002 if (board_idx == chip_6042 && mv_pci_cut_through_okay(host)) 3003 hp_flags |= MV_HP_CUT_THROUGH; 3004 3005 switch (pdev->revision) { 3006 case 0x2: /* Rev.B0: the first/only public release */ 3007 hp_flags |= MV_HP_ERRATA_60X1C0; 3008 break; 3009 default: 3010 dev_printk(KERN_WARNING, &pdev->dev, 3011 "Applying 60X1C0 workarounds to unknown rev\n"); 3012 hp_flags |= MV_HP_ERRATA_60X1C0; 3013 break; 3014 } 3015 break; 3016 case chip_soc: 3017 hpriv->ops = &mv_soc_ops; 3018 hp_flags |= MV_HP_FLAG_SOC | MV_HP_ERRATA_60X1C0; 3019 break; 3020 3021 default: 3022 dev_printk(KERN_ERR, host->dev, 3023 "BUG: invalid board index %u\n", board_idx); 3024 return 1; 3025 } 3026 3027 hpriv->hp_flags = hp_flags; 3028 if (hp_flags & MV_HP_PCIE) { 3029 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS; 3030 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS; 3031 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS; 3032 } else { 3033 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS; 3034 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS; 3035 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS; 3036 } 3037 3038 return 0; 3039 } 3040 3041 /** 3042 * mv_init_host - Perform some early initialization of the host. 3043 * @host: ATA host to initialize 3044 * @board_idx: controller index 3045 * 3046 * If possible, do an early global reset of the host. Then do 3047 * our port init and clear/unmask all/relevant host interrupts. 3048 * 3049 * LOCKING: 3050 * Inherited from caller. 3051 */ 3052 static int mv_init_host(struct ata_host *host, unsigned int board_idx) 3053 { 3054 int rc = 0, n_hc, port, hc; 3055 struct mv_host_priv *hpriv = host->private_data; 3056 void __iomem *mmio = hpriv->base; 3057 3058 rc = mv_chip_id(host, board_idx); 3059 if (rc) 3060 goto done; 3061 3062 if (IS_SOC(hpriv)) { 3063 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS; 3064 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK_OFS; 3065 } else { 3066 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS; 3067 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK_OFS; 3068 } 3069 3070 /* global interrupt mask: 0 == mask everything */ 3071 mv_set_main_irq_mask(host, ~0, 0); 3072 3073 n_hc = mv_get_hc_count(host->ports[0]->flags); 3074 3075 for (port = 0; port < host->n_ports; port++) 3076 hpriv->ops->read_preamp(hpriv, port, mmio); 3077 3078 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc); 3079 if (rc) 3080 goto done; 3081 3082 hpriv->ops->reset_flash(hpriv, mmio); 3083 hpriv->ops->reset_bus(host, mmio); 3084 hpriv->ops->enable_leds(hpriv, mmio); 3085 3086 for (port = 0; port < host->n_ports; port++) { 3087 struct ata_port *ap = host->ports[port]; 3088 void __iomem *port_mmio = mv_port_base(mmio, port); 3089 3090 mv_port_init(&ap->ioaddr, port_mmio); 3091 3092 #ifdef CONFIG_PCI 3093 if (!IS_SOC(hpriv)) { 3094 unsigned int offset = port_mmio - mmio; 3095 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio"); 3096 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port"); 3097 } 3098 #endif 3099 } 3100 3101 for (hc = 0; hc < n_hc; hc++) { 3102 void __iomem *hc_mmio = mv_hc_base(mmio, hc); 3103 3104 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause " 3105 "(before clear)=0x%08x\n", hc, 3106 readl(hc_mmio + HC_CFG_OFS), 3107 readl(hc_mmio + 
			 HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (!IS_SOC(hpriv)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

		/*
		 * enable only global host interrupts for now.
		 * The per-port interrupts get done later as ports are set up.
		 */
		mv_set_main_irq_mask(host, 0, PCI_ERR);
	}
done:
	return rc;
}

static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					    MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					    MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}

static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 struct mbus_dram_target_info *dram)
{
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
			(cs->mbus_attr << 8) |
			(dram->mbus_dram_target_id << 4) | 1,
			hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}

/**
 * mv_platform_probe - handle a positive probe of an SoC Marvell host
 * @pdev: platform device found
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	static int printed_version;
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/*
	 * Simple resource validation.
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   res->end - res->start + 1);
	hpriv->base -= MV_SATAHC0_REG_BASE;

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
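	 * mv_conf_mbus_windows() above first disables all four windows,
	 * then opens one window per DRAM chip-select described in the
	 * platform data.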
3227 */ 3228 if (mv_platform_data->dram != NULL) 3229 mv_conf_mbus_windows(hpriv, mv_platform_data->dram); 3230 3231 rc = mv_create_dma_pools(hpriv, &pdev->dev); 3232 if (rc) 3233 return rc; 3234 3235 /* initialize adapter */ 3236 rc = mv_init_host(host, chip_soc); 3237 if (rc) 3238 return rc; 3239 3240 dev_printk(KERN_INFO, &pdev->dev, 3241 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH, 3242 host->n_ports); 3243 3244 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt, 3245 IRQF_SHARED, &mv6_sht); 3246 } 3247 3248 /* 3249 * 3250 * mv_platform_remove - unplug a platform interface 3251 * @pdev: platform device 3252 * 3253 * A platform bus SATA device has been unplugged. Perform the needed 3254 * cleanup. Also called on module unload for any active devices. 3255 */ 3256 static int __devexit mv_platform_remove(struct platform_device *pdev) 3257 { 3258 struct device *dev = &pdev->dev; 3259 struct ata_host *host = dev_get_drvdata(dev); 3260 3261 ata_host_detach(host); 3262 return 0; 3263 } 3264 3265 static struct platform_driver mv_platform_driver = { 3266 .probe = mv_platform_probe, 3267 .remove = __devexit_p(mv_platform_remove), 3268 .driver = { 3269 .name = DRV_NAME, 3270 .owner = THIS_MODULE, 3271 }, 3272 }; 3273 3274 3275 #ifdef CONFIG_PCI 3276 static int mv_pci_init_one(struct pci_dev *pdev, 3277 const struct pci_device_id *ent); 3278 3279 3280 static struct pci_driver mv_pci_driver = { 3281 .name = DRV_NAME, 3282 .id_table = mv_pci_tbl, 3283 .probe = mv_pci_init_one, 3284 .remove = ata_pci_remove_one, 3285 }; 3286 3287 /* 3288 * module options 3289 */ 3290 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */ 3291 3292 3293 /* move to PCI layer or libata core? */ 3294 static int pci_go_64(struct pci_dev *pdev) 3295 { 3296 int rc; 3297 3298 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 3299 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 3300 if (rc) { 3301 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 3302 if (rc) { 3303 dev_printk(KERN_ERR, &pdev->dev, 3304 "64-bit DMA enable failed\n"); 3305 return rc; 3306 } 3307 } 3308 } else { 3309 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 3310 if (rc) { 3311 dev_printk(KERN_ERR, &pdev->dev, 3312 "32-bit DMA enable failed\n"); 3313 return rc; 3314 } 3315 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 3316 if (rc) { 3317 dev_printk(KERN_ERR, &pdev->dev, 3318 "32-bit consistent DMA enable failed\n"); 3319 return rc; 3320 } 3321 } 3322 3323 return rc; 3324 } 3325 3326 /** 3327 * mv_print_info - Dump key info to kernel log for perusal. 3328 * @host: ATA host to print info about 3329 * 3330 * FIXME: complete this. 3331 * 3332 * LOCKING: 3333 * Inherited from caller. 
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Use this to determine the HW stepping of the chip so we know
	 * which errata to work around
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ?
&mv5_sht : &mv6_sht); 3436 } 3437 #endif 3438 3439 static int mv_platform_probe(struct platform_device *pdev); 3440 static int __devexit mv_platform_remove(struct platform_device *pdev); 3441 3442 static int __init mv_init(void) 3443 { 3444 int rc = -ENODEV; 3445 #ifdef CONFIG_PCI 3446 rc = pci_register_driver(&mv_pci_driver); 3447 if (rc < 0) 3448 return rc; 3449 #endif 3450 rc = platform_driver_register(&mv_platform_driver); 3451 3452 #ifdef CONFIG_PCI 3453 if (rc < 0) 3454 pci_unregister_driver(&mv_pci_driver); 3455 #endif 3456 return rc; 3457 } 3458 3459 static void __exit mv_exit(void) 3460 { 3461 #ifdef CONFIG_PCI 3462 pci_unregister_driver(&mv_pci_driver); 3463 #endif 3464 platform_driver_unregister(&mv_platform_driver); 3465 } 3466 3467 MODULE_AUTHOR("Brett Russ"); 3468 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers"); 3469 MODULE_LICENSE("GPL"); 3470 MODULE_DEVICE_TABLE(pci, mv_pci_tbl); 3471 MODULE_VERSION(DRV_VERSION); 3472 MODULE_ALIAS("platform:" DRV_NAME); 3473 3474 #ifdef CONFIG_PCI 3475 module_param(msi, int, 0444); 3476 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)"); 3477 #endif 3478 3479 module_init(mv_init); 3480 module_exit(mv_exit); 3481
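
/*
 * Usage example: when sata_mv is built as a module, the "msi" option
 * declared above can be given at load time, e.g.:
 *
 *	modprobe sata_mv msi=1
 *
 * The default (msi=0) leaves the driver on legacy INTx interrupts.
 */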