/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.

  7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE	= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO	= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI	= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),	/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),	/* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC		= (1 << 28),

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II  IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */
	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,

	LTMODE_OFS		= 0x30c,
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	SATA_IFCTL_OFS		= 0x344,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,

	FIS_CFG_OFS		= 0x360,
	FIS_CFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CFG	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT	= EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX |
				  /* temporary, until we fix hotplug: */
				  (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
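/*
 * Layout note (inferred from EPRD_FLAG_END_OF_TBL and mv_fill_sg() below,
 * not from the datasheet): the low 16 bits of flags_size carry the segment
 * byte count, and bit 31 marks the final entry of the table.
 */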
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_cause_reg_addr;
	void __iomem		*main_mask_reg_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.softreset		= mv_softreset,
	.error_handler		= sata_pmp_error_handler,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.qc_defer		= ata_std_qc_defer, /* FIS-based switching */
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
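/*
 * Worked example (illustrative, derived from the two helpers above):
 * on a dual-HC chip, global port 6 yields mv_hc_from_port(6) == 1 and
 * mv_hardport_from_port(6) == 2, i.e. the third port of the second SATAHC.
 */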
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
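/*
 * Note on the "(dma >> 16) >> 16" idiom used above and in a few other
 * places in this file: it extracts the upper 32 bits of a dma_addr_t
 * without a ">> 32" shift, which would be invalid (and trigger compiler
 * warnings) on configurations where dma_addr_t is only 32 bits wide.
 */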
/**
 *      mv_start_dma - Enable eDMA engine
 *      @ap: ATA channel being started
 *      @port_mmio: port base address
 *      @pp: port private data
 *      @protocol: taskfile protocol of the command being started
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
				(CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(ap, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}

/**
 *      mv_stop_edma_engine - Disable eDMA engine
 *      @port_mmio: io base address
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}

static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		return -EIO;
	}
	return 0;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}
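/*
 * Note on mv_scr_offset() above: the simple "SATA_STATUS_OFS + reg * 4"
 * arithmetic works because the chip keeps the status/error/control SCR
 * registers adjacent (as the comment on SATA_STATUS_OFS hints); only
 * SCR_ACTIVE lives apart, at SATA_ACTIVE_OFS.
 */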
static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 *
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap))
			adev->flags &= ~ATA_DFLAG_NCQ;
		else if (adev->max_sectors > ATA_MAX_SECTORS)
			adev->max_sectors = ATA_MAX_SECTORS;
	}
}

static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
{
	u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
	/*
	 * Various bit settings required for operation
	 * in FIS-based switching (fbs) mode on GenIIe:
	 */
	old_fcfg   = readl(port_mmio + FIS_CFG_OFS);
	old_ltmode = readl(port_mmio + LTMODE_OFS);
	if (enable_fbs) {
		new_fcfg   = old_fcfg   |  FIS_CFG_SINGLE_SYNC;
		new_ltmode = old_ltmode |  LTMODE_BIT8;
	} else { /* disable fbs */
		new_fcfg   = old_fcfg   & ~FIS_CFG_SINGLE_SYNC;
		new_ltmode = old_ltmode & ~LTMODE_BIT8;
	}
	if (new_fcfg != old_fcfg)
		writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
	if (new_ltmode != old_ltmode)
		writelfl(new_ltmode, port_mmio + LTMODE_OFS);
}

static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */

		if (want_ncq && sata_pmp_attached(ap)) {
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
			mv_config_fbs(port_mmio, 1);
		} else {
			mv_config_fbs(port_mmio, 0);
		}
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag]     = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}

/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_port_free_dma_mem(ap);
}

/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}

static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
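/*
 * Illustration of the 16-bit command words built by mv_crqb_pack_cmd()
 * (derived from the CRQB_CMD_* constants above): the register value sits
 * in bits 7:0, the device register address in bits 10:8, CRQB_CMD_CS in
 * bits 12:11, and CRQB_CMD_LAST (bit 15) flags the final word of the CRQB.
 */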
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		mv_pmp_select(ap, qc->dev->link->pmp);
		return ata_sff_qc_issue(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @qc: affected command (active qc on this port, may be NULL)
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which also performs a COMRESET.
 *      The SERR case requires a clear of pending errors in the SATA
 *      SERROR register.  Finally, if the port disabled DMA,
 *      update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC)
			shift++;	/* skip bit 8 in the HC Main IRQ reg */

		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}

static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}

/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 irq_stat, irq_mask;

	/* Note to self: &host->lock == &ap->host->lock == ap->lock */
	spin_lock(&host->lock);

	irq_stat = readl(hpriv->main_cause_reg_addr);
	irq_mask = readl(hpriv->main_mask_reg_addr);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}


#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}

/**
 *	mv6_reset_hc - Perform the 6xxx global soft reset
 *	@mmio: base address of the HBA
 *
 *	This routine only applies to 6xxx parts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
	/*
	 * Temporary: wait 3 seconds before port-probing can happen,
	 * so that we don't miss finding sleepy SilXXXX port-multipliers.
	 * This can go away once hotplug is fully/correctly implemented.
	 */
	if (rc == 0)
		msleep(3000);
done:
	return rc;
}

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + PHY_MODE3);

		/* workaround for errata FEr SATA#10 (part 1) */
		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + PHY_MODE3);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}

#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
}

#undef ZERO

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}

static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}

static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);

	ifctl = (ifctl & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifctl |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
}

/*
 * Caller must ensure that EDMA is not active,
 * by first doing mv_stop_edma() where needed.
 */
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	mv_stop_edma_engine(port_mmio);
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed */
		mv_setup_ifctl(port_mmio, 1);
	}
	/*
	 * Strobing ATA_RST here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
	 */
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}

static void mv_pmp_select(struct ata_port *ap, int pmp)
{
	if (sata_pmp_supported(ap)) {
		void __iomem *port_mmio = mv_ap_base(ap);
		u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
		int old = reg & 0xf;

		if (old != pmp) {
			reg = (reg & ~0xf) | pmp;
			writelfl(reg, port_mmio + SATA_IFCTL_OFS);
		}
	}
}

static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}

static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}

static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
			sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifctl(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ;	/* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);

	return rc;
}
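
/*
 * Note (illustrative): the SStatus values tested in mv_hardreset() follow
 * the standard SCR0 layout (bits 3:0 = DET, 7:4 = SPD, 11:8 = IPM):
 *
 *	0x000	no device detected
 *	0x113	device present, phy communication up, 1.5 Gbps
 *	0x123	device present, phy communication up, 3.0 Gbps
 *	0x121	device detected but phy communication not established,
 *		which is the state the FEr SATA#10 workaround retries on
 *		by forcing the interface back to 1.5 Gbps.
 */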

static void mv_eh_freeze(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
}

static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}

/**
 *	mv_port_init - Perform some early initialization on a single port.
 *	@port: libata data structure storing shadow register addresses
 *	@port_mmio: base address of the port
 *
 *	Initialize shadow register mmio addresses, clear outstanding
 *	interrupts on the port, and unmask interrupts for the future
 *	start of the port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
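
/*
 * Note (illustrative): with the ATA_REG_* numbering used above, the shadow
 * taskfile registers land in consecutive 32-bit slots from SHD_BLK_OFS
 * (data at +0x00, error/feature at +0x04, ... status/command at +0x1c),
 * while control/altstatus lives separately at SHD_CTL_AST_OFS.
 */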

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* drop through */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}

/**
 *	mv_init_host - Perform some early initialization of the host.
 *	@host: ATA host to initialize
 *	@board_idx: controller index
 *
 *	If possible, do an early global reset of the host.  Then do
 *	our port init and clear/unmask all/relevant host interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr  = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr  = hpriv->base +
		  HC_SOC_MAIN_IRQ_MASK_OFS;
	}
	/* global interrupt mask */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}

static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					    MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					    MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}
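
/*
 * Usage sketch (illustrative only; mirrors what the port-start path does
 * earlier in this file, error handling omitted).  The pools created by
 * mv_create_dma_pools() are device-managed (dmam_), so they are released
 * automatically when the device goes away:
 *
 *	dma_addr_t dma;
 *	void *crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &dma);
 *	...
 *	dma_pool_free(hpriv->crqb_pool, crqb, dma);
 */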

/**
 *	mv_platform_probe - handle a positive probe of an SoC Marvell host
 *	@pdev: platform device found
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	static int printed_version;
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
		{ &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   res->end - res->start + 1);
	if (!hpriv->base)
		return -ENOMEM;
	hpriv->base -= MV_SATAHC0_REG_BASE;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, chip_soc);
	if (rc)
		return rc;

	dev_printk(KERN_INFO, &pdev->dev,
		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
		   host->n_ports);

	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
				 IRQF_SHARED, &mv6_sht);
}

/*
 * mv_platform_remove - unplug a platform interface
 * @pdev: platform device
 *
 * A platform bus SATA device has been unplugged.  Perform the needed
 * cleanup.  Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
	return 0;
}

static struct platform_driver mv_platform_driver = {
	.probe		= mv_platform_probe,
	.remove		= __devexit_p(mv_platform_remove),
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
	},
};


#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);


static struct pci_driver mv_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= mv_pci_tbl,
	.probe		= mv_pci_init_one,
	.remove		= ata_pci_remove_one,
};

/*
 * module options
 */
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */
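
/*
 * Example (illustrative): MSI is off by default; PCI builds can opt in at
 * module load time, e.g.:
 *
 *	modprobe sata_mv msi=1
 */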

/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/**
 *	mv_print_info - Dump key info to kernel log for perusal.
 *	@host: ATA host to print info about
 *
 *	FIXME: complete this.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 *	mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *	@pdev: PCI device found
 *	@ent: PCI device ID entry for the matched host
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#endif

static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

#ifdef CONFIG_PCI
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);