// SPDX-License-Identifier: GPL-2.0-only
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Originally written by Brett Russ.
 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 */

/*
 * sata_mv TODO list:
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *       creating LibATA target mode support would be very interesting.
 *
 *       Target mode, for those without docs, is the ability to directly
 *       connect two SATA ports.
 */

/*
 * 80x1-B2 errata PCI#11:
 *
 * Users of the 6041/6081 Rev.B2 chips (current is C0)
 * should be careful to insert those cards only onto PCI-X bus #0,
 * and only in device slots 0..7, not higher.  The chips may not
 * work correctly otherwise (note: this is a pretty rare condition).
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.28"

/*
 * module options
 */

#ifdef CONFIG_PCI
static int msi;
module_param(msi, int, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

static int irq_coalescing_io_count;
module_param(irq_coalescing_io_count, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_io_count,
		 "IRQ coalescing I/O count threshold (0..255)");

static int irq_coalescing_usecs;
module_param(irq_coalescing_usecs, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_usecs,
		 "IRQ coalescing time threshold in usecs");

enum {
	/* BARs are enumerated in pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	/* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
	COAL_CLOCKS_PER_USEC	= 150,		/* for calculating COAL_TIMEs */
	MAX_COAL_TIME_THRESHOLD	= ((1 << 24) - 1), /* internal clocks count */
	MAX_COAL_IO_COUNT	= 255,		/* completed I/O count */

	MV_PCI_REG_BASE		= 0,

	/*
	 * Per-chip ("all ports") interrupt coalescing feature.
	 * This is only for GEN_II / GEN_IIE hardware.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
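	/*
	 * Illustrative example (using the module parameters above):
	 * loading with irq_coalescing_usecs=100 and
	 * irq_coalescing_io_count=4 programs a time threshold of
	 * 100 * COAL_CLOCKS_PER_USEC = 15000 internal clocks and an
	 * I/O count threshold of 4, so a single interrupt is raised
	 * after 4 completed commands or ~100us, whichever comes first
	 * (see mv_set_irq_coalescing()).
	 */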
	COAL_REG_BASE		= 0x18000,
	IRQ_COAL_CAUSE		= (COAL_REG_BASE + 0x08),
	ALL_PORTS_COAL_IRQ	= (1 << 4),	/* all ports irq event */

	IRQ_COAL_IO_THRESHOLD	= (COAL_REG_BASE + 0xcc),
	IRQ_COAL_TIME_THRESHOLD	= (COAL_REG_BASE + 0xd0),

	/*
	 * Registers for the (unused here) transaction coalescing feature:
	 */
	TRAN_COAL_CAUSE_LO	= (COAL_REG_BASE + 0x88),
	TRAN_COAL_CAUSE_HI	= (COAL_REG_BASE + 0x8c),

	SATAHC0_REG_BASE	= 0x20000,
	FLASH_CTL		= 0x1046c,
	GPIO_PORT_CTL		= 0x104f0,
	RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1),   /* 3 */

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,

	MV_GEN_I_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,

	MV_GEN_II_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NCQ |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,

	MV_GEN_IIE_FLAGS	= MV_GEN_II_FLAGS | ATA_FLAG_AN,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	MV_PCI_COMMAND		= 0xc00,
	MV_PCI_COMMAND_MWRCOM	= (1 << 4),	/* PCI Master Write Combining */
	MV_PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */

	PCI_MAIN_CMD_STS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_MODE_MASK	= 0x30,

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE		= 0x1d58,
	PCI_IRQ_MASK		= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE		= 0x1900,
	PCIE_IRQ_MASK		= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE	= 0x1d60,
	PCI_HC_MAIN_IRQ_MASK	= 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE	= 0x20020,
	SOC_HC_MAIN_IRQ_MASK	= 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by (2 * port #) */
215 DONE_IRQ = (1 << 1), /* shift by (2 * port #) */ 216 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */ 217 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */ 218 DONE_IRQ_0_3 = 0x000000aa, /* DONE_IRQ ports 0,1,2,3 */ 219 DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT), /* 4,5,6,7 */ 220 PCI_ERR = (1 << 18), 221 TRAN_COAL_LO_DONE = (1 << 19), /* transaction coalescing */ 222 TRAN_COAL_HI_DONE = (1 << 20), /* transaction coalescing */ 223 PORTS_0_3_COAL_DONE = (1 << 8), /* HC0 IRQ coalescing */ 224 PORTS_4_7_COAL_DONE = (1 << 17), /* HC1 IRQ coalescing */ 225 ALL_PORTS_COAL_DONE = (1 << 21), /* GEN_II(E) IRQ coalescing */ 226 GPIO_INT = (1 << 22), 227 SELF_INT = (1 << 23), 228 TWSI_INT = (1 << 24), 229 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */ 230 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */ 231 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */ 232 233 /* SATAHC registers */ 234 HC_CFG = 0x00, 235 236 HC_IRQ_CAUSE = 0x14, 237 DMA_IRQ = (1 << 0), /* shift by port # */ 238 HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */ 239 DEV_IRQ = (1 << 8), /* shift by port # */ 240 241 /* 242 * Per-HC (Host-Controller) interrupt coalescing feature. 243 * This is present on all chip generations. 244 * 245 * Coalescing defers the interrupt until either the IO_THRESHOLD 246 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met. 247 */ 248 HC_IRQ_COAL_IO_THRESHOLD = 0x000c, 249 HC_IRQ_COAL_TIME_THRESHOLD = 0x0010, 250 251 SOC_LED_CTRL = 0x2c, 252 SOC_LED_CTRL_BLINK = (1 << 0), /* Active LED blink */ 253 SOC_LED_CTRL_ACT_PRESENCE = (1 << 2), /* Multiplex dev presence */ 254 /* with dev activity LED */ 255 256 /* Shadow block registers */ 257 SHD_BLK = 0x100, 258 SHD_CTL_AST = 0x20, /* ofs from SHD_BLK */ 259 260 /* SATA registers */ 261 SATA_STATUS = 0x300, /* ctrl, err regs follow status */ 262 SATA_ACTIVE = 0x350, 263 FIS_IRQ_CAUSE = 0x364, 264 FIS_IRQ_CAUSE_AN = (1 << 9), /* async notification */ 265 266 LTMODE = 0x30c, /* requires read-after-write */ 267 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */ 268 269 PHY_MODE2 = 0x330, 270 PHY_MODE3 = 0x310, 271 272 PHY_MODE4 = 0x314, /* requires read-after-write */ 273 PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */ 274 PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */ 275 PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */ 276 PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */ 277 278 SATA_IFCTL = 0x344, 279 SATA_TESTCTL = 0x348, 280 SATA_IFSTAT = 0x34c, 281 VENDOR_UNIQUE_FIS = 0x35c, 282 283 FISCFG = 0x360, 284 FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */ 285 FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */ 286 287 PHY_MODE9_GEN2 = 0x398, 288 PHY_MODE9_GEN1 = 0x39c, 289 PHYCFG_OFS = 0x3a0, /* only in 65n devices */ 290 291 MV5_PHY_MODE = 0x74, 292 MV5_LTMODE = 0x30, 293 MV5_PHY_CTL = 0x0C, 294 SATA_IFCFG = 0x050, 295 LP_PHY_CTL = 0x058, 296 LP_PHY_CTL_PIN_PU_PLL = (1 << 0), 297 LP_PHY_CTL_PIN_PU_RX = (1 << 1), 298 LP_PHY_CTL_PIN_PU_TX = (1 << 2), 299 LP_PHY_CTL_GEN_TX_3G = (1 << 5), 300 LP_PHY_CTL_GEN_RX_3G = (1 << 9), 301 302 MV_M2_PREAMP_MASK = 0x7e0, 303 304 /* Port registers */ 305 EDMA_CFG = 0, 306 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */ 307 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */ 308 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ 309 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */ 310 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ 311 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based 
Switching */ 312 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */ 313 314 EDMA_ERR_IRQ_CAUSE = 0x8, 315 EDMA_ERR_IRQ_MASK = 0xc, 316 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */ 317 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */ 318 EDMA_ERR_DEV = (1 << 2), /* device error */ 319 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */ 320 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */ 321 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */ 322 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */ 323 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */ 324 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */ 325 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */ 326 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */ 327 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */ 328 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */ 329 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */ 330 331 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */ 332 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */ 333 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */ 334 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */ 335 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */ 336 337 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */ 338 339 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */ 340 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */ 341 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */ 342 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */ 343 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */ 344 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */ 345 346 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */ 347 348 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */ 349 EDMA_ERR_OVERRUN_5 = (1 << 5), 350 EDMA_ERR_UNDERRUN_5 = (1 << 6), 351 352 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 | 353 EDMA_ERR_LNK_CTRL_RX_1 | 354 EDMA_ERR_LNK_CTRL_RX_3 | 355 EDMA_ERR_LNK_CTRL_TX, 356 357 EDMA_EH_FREEZE = EDMA_ERR_D_PAR | 358 EDMA_ERR_PRD_PAR | 359 EDMA_ERR_DEV_DCON | 360 EDMA_ERR_DEV_CON | 361 EDMA_ERR_SERR | 362 EDMA_ERR_SELF_DIS | 363 EDMA_ERR_CRQB_PAR | 364 EDMA_ERR_CRPB_PAR | 365 EDMA_ERR_INTRL_PAR | 366 EDMA_ERR_IORDY | 367 EDMA_ERR_LNK_CTRL_RX_2 | 368 EDMA_ERR_LNK_DATA_RX | 369 EDMA_ERR_LNK_DATA_TX | 370 EDMA_ERR_TRANS_PROTO, 371 372 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR | 373 EDMA_ERR_PRD_PAR | 374 EDMA_ERR_DEV_DCON | 375 EDMA_ERR_DEV_CON | 376 EDMA_ERR_OVERRUN_5 | 377 EDMA_ERR_UNDERRUN_5 | 378 EDMA_ERR_SELF_DIS_5 | 379 EDMA_ERR_CRQB_PAR | 380 EDMA_ERR_CRPB_PAR | 381 EDMA_ERR_INTRL_PAR | 382 EDMA_ERR_IORDY, 383 384 EDMA_REQ_Q_BASE_HI = 0x10, 385 EDMA_REQ_Q_IN_PTR = 0x14, /* also contains BASE_LO */ 386 387 EDMA_REQ_Q_OUT_PTR = 0x18, 388 EDMA_REQ_Q_PTR_SHIFT = 5, 389 390 EDMA_RSP_Q_BASE_HI = 0x1c, 391 EDMA_RSP_Q_IN_PTR = 0x20, 392 EDMA_RSP_Q_OUT_PTR = 0x24, /* also contains BASE_LO */ 393 EDMA_RSP_Q_PTR_SHIFT = 3, 394 395 EDMA_CMD = 0x28, /* EDMA command register */ 396 EDMA_EN = (1 << 0), /* enable EDMA */ 397 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */ 398 EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */ 399 400 EDMA_STATUS = 0x30, /* EDMA engine status */ 401 EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */ 402 EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */ 403 404 EDMA_IORDY_TMOUT = 0x34, 405 EDMA_ARB_CFG = 0x38, 406 
	EDMA_HALTCOND		= 0x60,		/* GenIIe halt conditions */
	EDMA_UNKNOWN_RSVD	= 0x6C,		/* GenIIe unknown/reserved */

	BMDMA_CMD		= 0x224,	/* bmdma command register */
	BMDMA_STATUS		= 0x228,	/* bmdma status register */
	BMDMA_PRD_LOW		= 0x22c,	/* bmdma PRD addr 31:0 */
	BMDMA_PRD_HIGH		= 0x230,	/* bmdma PRD addr 63:32 */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */
	MV_HP_QUIRK_LED_BLINK_EN = (1 << 12),	/* is led blinking enabled? */
	MV_HP_FIX_LP_PHY_CTL	= (1 << 13),	/* fix speed in LP_PHY_CTL ? */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
	MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),	/* ignore initial ATA_DRDY */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

/*
 * We keep a local cache of a few frequently accessed port
 * registers here, to avoid having to read them (very slow)
 * when switching between EDMA and non-EDMA modes.
 */
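/*
 * Note: mv_config_fbs() and mv_bmdma_enable_iie() update these values
 * through mv_write_cached_reg(), which skips the MMIO write entirely
 * whenever the cached value already matches the new one.
 */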
struct mv_cached_regs {
	u32			fiscfg;
	u32			ltmode;
	u32			haltcond;
	u32			unknown_rsvd;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
	struct mv_cached_regs	cached;
	unsigned int		delayed_eh_pmp_map;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	unsigned int		board_idx;
	u32			main_irq_mask;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_irq_cause_addr;
	void __iomem		*main_irq_mask_addr;
	u32			irq_cause_offset;
	u32			irq_mask_offset;
	u32			unmask_all_irqs;

	/*
	 * Needed on some devices that require their clocks to be enabled.
	 * These are optional: if the platform device does not have any
	 * clocks, they won't be used.  Also, if the underlying hardware
	 * does not support the common clock framework (CONFIG_HAVE_CLK=n),
	 * all the clock operations become no-ops (see clk.h).
	 */
	struct clk		*clk;
	struct clk		**port_clks;
	/*
	 * Some devices have a SATA PHY which can be enabled/disabled
	 * in order to save power. These are optional: if the platform
	 * device does not have any phy, they won't be used.
	 */
	struct phy		**port_phys;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct ata_host *host, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc);
static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void
mv5_read_preamp(struct mv_host_priv *hpriv, int idx, 608 void __iomem *mmio); 609 static int mv5_reset_hc(struct ata_host *host, void __iomem *mmio, 610 unsigned int n_hc); 611 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); 612 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio); 613 614 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 615 unsigned int port); 616 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio); 617 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, 618 void __iomem *mmio); 619 static int mv6_reset_hc(struct ata_host *host, void __iomem *mmio, 620 unsigned int n_hc); 621 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); 622 static void mv_soc_enable_leds(struct mv_host_priv *hpriv, 623 void __iomem *mmio); 624 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx, 625 void __iomem *mmio); 626 static int mv_soc_reset_hc(struct ata_host *host, 627 void __iomem *mmio, unsigned int n_hc); 628 static void mv_soc_reset_flash(struct mv_host_priv *hpriv, 629 void __iomem *mmio); 630 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio); 631 static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv, 632 void __iomem *mmio, unsigned int port); 633 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio); 634 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, 635 unsigned int port_no); 636 static int mv_stop_edma(struct ata_port *ap); 637 static int mv_stop_edma_engine(void __iomem *port_mmio); 638 static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma); 639 640 static void mv_pmp_select(struct ata_port *ap, int pmp); 641 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, 642 unsigned long deadline); 643 static int mv_softreset(struct ata_link *link, unsigned int *class, 644 unsigned long deadline); 645 static void mv_pmp_error_handler(struct ata_port *ap); 646 static void mv_process_crpb_entries(struct ata_port *ap, 647 struct mv_port_priv *pp); 648 649 static void mv_sff_irq_clear(struct ata_port *ap); 650 static int mv_check_atapi_dma(struct ata_queued_cmd *qc); 651 static void mv_bmdma_setup(struct ata_queued_cmd *qc); 652 static void mv_bmdma_start(struct ata_queued_cmd *qc); 653 static void mv_bmdma_stop(struct ata_queued_cmd *qc); 654 static u8 mv_bmdma_status(struct ata_port *ap); 655 static u8 mv_sff_check_status(struct ata_port *ap); 656 657 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below 658 * because we have to allow room for worst case splitting of 659 * PRDs for 64K boundaries in mv_fill_sg(). 
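 * For example (worst case), a 64KB scatterlist segment whose bus address
 * begins 0x200 bytes past a 64KB boundary is split by mv_fill_sg() into
 * a 0xfe00-byte ePRD followed by a 0x200-byte ePRD, so each segment may
 * need up to two ePRD entries.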
660 */ 661 #ifdef CONFIG_PCI 662 static const struct scsi_host_template mv5_sht = { 663 ATA_BASE_SHT(DRV_NAME), 664 .sg_tablesize = MV_MAX_SG_CT / 2, 665 .dma_boundary = MV_DMA_BOUNDARY, 666 }; 667 #endif 668 static const struct scsi_host_template mv6_sht = { 669 __ATA_BASE_SHT(DRV_NAME), 670 .can_queue = MV_MAX_Q_DEPTH - 1, 671 .sg_tablesize = MV_MAX_SG_CT / 2, 672 .dma_boundary = MV_DMA_BOUNDARY, 673 .sdev_groups = ata_ncq_sdev_groups, 674 .change_queue_depth = ata_scsi_change_queue_depth, 675 .tag_alloc_policy = BLK_TAG_ALLOC_RR, 676 .slave_configure = ata_scsi_slave_config 677 }; 678 679 static struct ata_port_operations mv5_ops = { 680 .inherits = &ata_sff_port_ops, 681 682 .lost_interrupt = ATA_OP_NULL, 683 684 .qc_defer = mv_qc_defer, 685 .qc_prep = mv_qc_prep, 686 .qc_issue = mv_qc_issue, 687 688 .freeze = mv_eh_freeze, 689 .thaw = mv_eh_thaw, 690 .hardreset = mv_hardreset, 691 692 .scr_read = mv5_scr_read, 693 .scr_write = mv5_scr_write, 694 695 .port_start = mv_port_start, 696 .port_stop = mv_port_stop, 697 }; 698 699 static struct ata_port_operations mv6_ops = { 700 .inherits = &ata_bmdma_port_ops, 701 702 .lost_interrupt = ATA_OP_NULL, 703 704 .qc_defer = mv_qc_defer, 705 .qc_prep = mv_qc_prep, 706 .qc_issue = mv_qc_issue, 707 708 .dev_config = mv6_dev_config, 709 710 .freeze = mv_eh_freeze, 711 .thaw = mv_eh_thaw, 712 .hardreset = mv_hardreset, 713 .softreset = mv_softreset, 714 .pmp_hardreset = mv_pmp_hardreset, 715 .pmp_softreset = mv_softreset, 716 .error_handler = mv_pmp_error_handler, 717 718 .scr_read = mv_scr_read, 719 .scr_write = mv_scr_write, 720 721 .sff_check_status = mv_sff_check_status, 722 .sff_irq_clear = mv_sff_irq_clear, 723 .check_atapi_dma = mv_check_atapi_dma, 724 .bmdma_setup = mv_bmdma_setup, 725 .bmdma_start = mv_bmdma_start, 726 .bmdma_stop = mv_bmdma_stop, 727 .bmdma_status = mv_bmdma_status, 728 729 .port_start = mv_port_start, 730 .port_stop = mv_port_stop, 731 }; 732 733 static struct ata_port_operations mv_iie_ops = { 734 .inherits = &mv6_ops, 735 .dev_config = ATA_OP_NULL, 736 .qc_prep = mv_qc_prep_iie, 737 }; 738 739 static const struct ata_port_info mv_port_info[] = { 740 { /* chip_504x */ 741 .flags = MV_GEN_I_FLAGS, 742 .pio_mask = ATA_PIO4, 743 .udma_mask = ATA_UDMA6, 744 .port_ops = &mv5_ops, 745 }, 746 { /* chip_508x */ 747 .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC, 748 .pio_mask = ATA_PIO4, 749 .udma_mask = ATA_UDMA6, 750 .port_ops = &mv5_ops, 751 }, 752 { /* chip_5080 */ 753 .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC, 754 .pio_mask = ATA_PIO4, 755 .udma_mask = ATA_UDMA6, 756 .port_ops = &mv5_ops, 757 }, 758 { /* chip_604x */ 759 .flags = MV_GEN_II_FLAGS, 760 .pio_mask = ATA_PIO4, 761 .udma_mask = ATA_UDMA6, 762 .port_ops = &mv6_ops, 763 }, 764 { /* chip_608x */ 765 .flags = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC, 766 .pio_mask = ATA_PIO4, 767 .udma_mask = ATA_UDMA6, 768 .port_ops = &mv6_ops, 769 }, 770 { /* chip_6042 */ 771 .flags = MV_GEN_IIE_FLAGS, 772 .pio_mask = ATA_PIO4, 773 .udma_mask = ATA_UDMA6, 774 .port_ops = &mv_iie_ops, 775 }, 776 { /* chip_7042 */ 777 .flags = MV_GEN_IIE_FLAGS, 778 .pio_mask = ATA_PIO4, 779 .udma_mask = ATA_UDMA6, 780 .port_ops = &mv_iie_ops, 781 }, 782 { /* chip_soc */ 783 .flags = MV_GEN_IIE_FLAGS, 784 .pio_mask = ATA_PIO4, 785 .udma_mask = ATA_UDMA6, 786 .port_ops = &mv_iie_ops, 787 }, 788 }; 789 790 static const struct mv_hw_ops mv5xxx_ops = { 791 .phy_errata = mv5_phy_errata, 792 .enable_leds = mv5_enable_leds, 793 .read_preamp = mv5_read_preamp, 794 .reset_hc = mv5_reset_hc, 795 .reset_flash = 
mv5_reset_flash, 796 .reset_bus = mv5_reset_bus, 797 }; 798 799 static const struct mv_hw_ops mv6xxx_ops = { 800 .phy_errata = mv6_phy_errata, 801 .enable_leds = mv6_enable_leds, 802 .read_preamp = mv6_read_preamp, 803 .reset_hc = mv6_reset_hc, 804 .reset_flash = mv6_reset_flash, 805 .reset_bus = mv_reset_pci_bus, 806 }; 807 808 static const struct mv_hw_ops mv_soc_ops = { 809 .phy_errata = mv6_phy_errata, 810 .enable_leds = mv_soc_enable_leds, 811 .read_preamp = mv_soc_read_preamp, 812 .reset_hc = mv_soc_reset_hc, 813 .reset_flash = mv_soc_reset_flash, 814 .reset_bus = mv_soc_reset_bus, 815 }; 816 817 static const struct mv_hw_ops mv_soc_65n_ops = { 818 .phy_errata = mv_soc_65n_phy_errata, 819 .enable_leds = mv_soc_enable_leds, 820 .reset_hc = mv_soc_reset_hc, 821 .reset_flash = mv_soc_reset_flash, 822 .reset_bus = mv_soc_reset_bus, 823 }; 824 825 /* 826 * Functions 827 */ 828 829 static inline void writelfl(unsigned long data, void __iomem *addr) 830 { 831 writel(data, addr); 832 (void) readl(addr); /* flush to avoid PCI posted write */ 833 } 834 835 static inline unsigned int mv_hc_from_port(unsigned int port) 836 { 837 return port >> MV_PORT_HC_SHIFT; 838 } 839 840 static inline unsigned int mv_hardport_from_port(unsigned int port) 841 { 842 return port & MV_PORT_MASK; 843 } 844 845 /* 846 * Consolidate some rather tricky bit shift calculations. 847 * This is hot-path stuff, so not a function. 848 * Simple code, with two return values, so macro rather than inline. 849 * 850 * port is the sole input, in range 0..7. 851 * shift is one output, for use with main_irq_cause / main_irq_mask registers. 852 * hardport is the other output, in range 0..3. 853 * 854 * Note that port and hardport may be the same variable in some cases. 855 */ 856 #define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \ 857 { \ 858 shift = mv_hc_from_port(port) * HC_SHIFT; \ 859 hardport = mv_hardport_from_port(port); \ 860 shift += hardport * 2; \ 861 } 862 863 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc) 864 { 865 return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ)); 866 } 867 868 static inline void __iomem *mv_hc_base_from_port(void __iomem *base, 869 unsigned int port) 870 { 871 return mv_hc_base(base, mv_hc_from_port(port)); 872 } 873 874 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port) 875 { 876 return mv_hc_base_from_port(base, port) + 877 MV_SATAHC_ARBTR_REG_SZ + 878 (mv_hardport_from_port(port) * MV_PORT_REG_SZ); 879 } 880 881 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port) 882 { 883 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port); 884 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL; 885 886 return hc_mmio + ofs; 887 } 888 889 static inline void __iomem *mv_host_base(struct ata_host *host) 890 { 891 struct mv_host_priv *hpriv = host->private_data; 892 return hpriv->base; 893 } 894 895 static inline void __iomem *mv_ap_base(struct ata_port *ap) 896 { 897 return mv_port_base(mv_host_base(ap->host), ap->port_no); 898 } 899 900 static inline int mv_get_hc_count(unsigned long port_flags) 901 { 902 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1); 903 } 904 905 /** 906 * mv_save_cached_regs - (re-)initialize cached port registers 907 * @ap: the port whose registers we are caching 908 * 909 * Initialize the local cache of port registers, 910 * so that reading them over and over again can 911 * be avoided on the hotter paths of this driver. 
912 * This saves a few microseconds each time we switch 913 * to/from EDMA mode to perform (eg.) a drive cache flush. 914 */ 915 static void mv_save_cached_regs(struct ata_port *ap) 916 { 917 void __iomem *port_mmio = mv_ap_base(ap); 918 struct mv_port_priv *pp = ap->private_data; 919 920 pp->cached.fiscfg = readl(port_mmio + FISCFG); 921 pp->cached.ltmode = readl(port_mmio + LTMODE); 922 pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND); 923 pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD); 924 } 925 926 /** 927 * mv_write_cached_reg - write to a cached port register 928 * @addr: hardware address of the register 929 * @old: pointer to cached value of the register 930 * @new: new value for the register 931 * 932 * Write a new value to a cached register, 933 * but only if the value is different from before. 934 */ 935 static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new) 936 { 937 if (new != *old) { 938 unsigned long laddr; 939 *old = new; 940 /* 941 * Workaround for 88SX60x1-B2 FEr SATA#13: 942 * Read-after-write is needed to prevent generating 64-bit 943 * write cycles on the PCI bus for SATA interface registers 944 * at offsets ending in 0x4 or 0xc. 945 * 946 * Looks like a lot of fuss, but it avoids an unnecessary 947 * +1 usec read-after-write delay for unaffected registers. 948 */ 949 laddr = (unsigned long)addr & 0xffff; 950 if (laddr >= 0x300 && laddr <= 0x33c) { 951 laddr &= 0x000f; 952 if (laddr == 0x4 || laddr == 0xc) { 953 writelfl(new, addr); /* read after write */ 954 return; 955 } 956 } 957 writel(new, addr); /* unaffected by the errata */ 958 } 959 } 960 961 static void mv_set_edma_ptrs(void __iomem *port_mmio, 962 struct mv_host_priv *hpriv, 963 struct mv_port_priv *pp) 964 { 965 u32 index; 966 967 /* 968 * initialize request queue 969 */ 970 pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */ 971 index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT; 972 973 WARN_ON(pp->crqb_dma & 0x3ff); 974 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI); 975 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index, 976 port_mmio + EDMA_REQ_Q_IN_PTR); 977 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR); 978 979 /* 980 * initialize response queue 981 */ 982 pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */ 983 index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT; 984 985 WARN_ON(pp->crpb_dma & 0xff); 986 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI); 987 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR); 988 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index, 989 port_mmio + EDMA_RSP_Q_OUT_PTR); 990 } 991 992 static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv) 993 { 994 /* 995 * When writing to the main_irq_mask in hardware, 996 * we must ensure exclusivity between the interrupt coalescing bits 997 * and the corresponding individual port DONE_IRQ bits. 998 * 999 * Note that this register is really an "IRQ enable" register, 1000 * not an "IRQ mask" register as Marvell's naming might suggest. 
1001 */ 1002 if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE)) 1003 mask &= ~DONE_IRQ_0_3; 1004 if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE)) 1005 mask &= ~DONE_IRQ_4_7; 1006 writelfl(mask, hpriv->main_irq_mask_addr); 1007 } 1008 1009 static void mv_set_main_irq_mask(struct ata_host *host, 1010 u32 disable_bits, u32 enable_bits) 1011 { 1012 struct mv_host_priv *hpriv = host->private_data; 1013 u32 old_mask, new_mask; 1014 1015 old_mask = hpriv->main_irq_mask; 1016 new_mask = (old_mask & ~disable_bits) | enable_bits; 1017 if (new_mask != old_mask) { 1018 hpriv->main_irq_mask = new_mask; 1019 mv_write_main_irq_mask(new_mask, hpriv); 1020 } 1021 } 1022 1023 static void mv_enable_port_irqs(struct ata_port *ap, 1024 unsigned int port_bits) 1025 { 1026 unsigned int shift, hardport, port = ap->port_no; 1027 u32 disable_bits, enable_bits; 1028 1029 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); 1030 1031 disable_bits = (DONE_IRQ | ERR_IRQ) << shift; 1032 enable_bits = port_bits << shift; 1033 mv_set_main_irq_mask(ap->host, disable_bits, enable_bits); 1034 } 1035 1036 static void mv_clear_and_enable_port_irqs(struct ata_port *ap, 1037 void __iomem *port_mmio, 1038 unsigned int port_irqs) 1039 { 1040 struct mv_host_priv *hpriv = ap->host->private_data; 1041 int hardport = mv_hardport_from_port(ap->port_no); 1042 void __iomem *hc_mmio = mv_hc_base_from_port( 1043 mv_host_base(ap->host), ap->port_no); 1044 u32 hc_irq_cause; 1045 1046 /* clear EDMA event indicators, if any */ 1047 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE); 1048 1049 /* clear pending irq events */ 1050 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport); 1051 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE); 1052 1053 /* clear FIS IRQ Cause */ 1054 if (IS_GEN_IIE(hpriv)) 1055 writelfl(0, port_mmio + FIS_IRQ_CAUSE); 1056 1057 mv_enable_port_irqs(ap, port_irqs); 1058 } 1059 1060 static void mv_set_irq_coalescing(struct ata_host *host, 1061 unsigned int count, unsigned int usecs) 1062 { 1063 struct mv_host_priv *hpriv = host->private_data; 1064 void __iomem *mmio = hpriv->base, *hc_mmio; 1065 u32 coal_enable = 0; 1066 unsigned long flags; 1067 unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC; 1068 const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE | 1069 ALL_PORTS_COAL_DONE; 1070 1071 /* Disable IRQ coalescing if either threshold is zero */ 1072 if (!usecs || !count) { 1073 clks = count = 0; 1074 } else { 1075 /* Respect maximum limits of the hardware */ 1076 clks = usecs * COAL_CLOCKS_PER_USEC; 1077 if (clks > MAX_COAL_TIME_THRESHOLD) 1078 clks = MAX_COAL_TIME_THRESHOLD; 1079 if (count > MAX_COAL_IO_COUNT) 1080 count = MAX_COAL_IO_COUNT; 1081 } 1082 1083 spin_lock_irqsave(&host->lock, flags); 1084 mv_set_main_irq_mask(host, coal_disable, 0); 1085 1086 if (is_dual_hc && !IS_GEN_I(hpriv)) { 1087 /* 1088 * GEN_II/GEN_IIE with dual host controllers: 1089 * one set of global thresholds for the entire chip. 1090 */ 1091 writel(clks, mmio + IRQ_COAL_TIME_THRESHOLD); 1092 writel(count, mmio + IRQ_COAL_IO_THRESHOLD); 1093 /* clear leftover coal IRQ bit */ 1094 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE); 1095 if (count) 1096 coal_enable = ALL_PORTS_COAL_DONE; 1097 clks = count = 0; /* force clearing of regular regs below */ 1098 } 1099 1100 /* 1101 * All chips: independent thresholds for each HC on the chip. 
1102 */ 1103 hc_mmio = mv_hc_base_from_port(mmio, 0); 1104 writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD); 1105 writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD); 1106 writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE); 1107 if (count) 1108 coal_enable |= PORTS_0_3_COAL_DONE; 1109 if (is_dual_hc) { 1110 hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC); 1111 writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD); 1112 writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD); 1113 writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE); 1114 if (count) 1115 coal_enable |= PORTS_4_7_COAL_DONE; 1116 } 1117 1118 mv_set_main_irq_mask(host, 0, coal_enable); 1119 spin_unlock_irqrestore(&host->lock, flags); 1120 } 1121 1122 /* 1123 * mv_start_edma - Enable eDMA engine 1124 * @pp: port private data 1125 * 1126 * Verify the local cache of the eDMA state is accurate with a 1127 * WARN_ON. 1128 * 1129 * LOCKING: 1130 * Inherited from caller. 1131 */ 1132 static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio, 1133 struct mv_port_priv *pp, u8 protocol) 1134 { 1135 int want_ncq = (protocol == ATA_PROT_NCQ); 1136 1137 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { 1138 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0); 1139 if (want_ncq != using_ncq) 1140 mv_stop_edma(ap); 1141 } 1142 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { 1143 struct mv_host_priv *hpriv = ap->host->private_data; 1144 1145 mv_edma_cfg(ap, want_ncq, 1); 1146 1147 mv_set_edma_ptrs(port_mmio, hpriv, pp); 1148 mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ); 1149 1150 writelfl(EDMA_EN, port_mmio + EDMA_CMD); 1151 pp->pp_flags |= MV_PP_FLAG_EDMA_EN; 1152 } 1153 } 1154 1155 static void mv_wait_for_edma_empty_idle(struct ata_port *ap) 1156 { 1157 void __iomem *port_mmio = mv_ap_base(ap); 1158 const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE); 1159 const int per_loop = 5, timeout = (15 * 1000 / per_loop); 1160 int i; 1161 1162 /* 1163 * Wait for the EDMA engine to finish transactions in progress. 1164 * No idea what a good "timeout" value might be, but measurements 1165 * indicate that it often requires hundreds of microseconds 1166 * with two drives in-use. So we use the 15msec value above 1167 * as a rough guess at what even more drives might require. 1168 */ 1169 for (i = 0; i < timeout; ++i) { 1170 u32 edma_stat = readl(port_mmio + EDMA_STATUS); 1171 if ((edma_stat & empty_idle) == empty_idle) 1172 break; 1173 udelay(per_loop); 1174 } 1175 /* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */ 1176 } 1177 1178 /** 1179 * mv_stop_edma_engine - Disable eDMA engine 1180 * @port_mmio: io base address 1181 * 1182 * LOCKING: 1183 * Inherited from caller. 1184 */ 1185 static int mv_stop_edma_engine(void __iomem *port_mmio) 1186 { 1187 int i; 1188 1189 /* Disable eDMA. The disable bit auto clears. */ 1190 writelfl(EDMA_DS, port_mmio + EDMA_CMD); 1191 1192 /* Wait for the chip to confirm eDMA is off. 
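	 * (EDMA_EN clears only once the engine has actually stopped;
	 *  the loop below polls it for up to 100ms: 10000 x 10us.)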
*/ 1193 for (i = 10000; i > 0; i--) { 1194 u32 reg = readl(port_mmio + EDMA_CMD); 1195 if (!(reg & EDMA_EN)) 1196 return 0; 1197 udelay(10); 1198 } 1199 return -EIO; 1200 } 1201 1202 static int mv_stop_edma(struct ata_port *ap) 1203 { 1204 void __iomem *port_mmio = mv_ap_base(ap); 1205 struct mv_port_priv *pp = ap->private_data; 1206 int err = 0; 1207 1208 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) 1209 return 0; 1210 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 1211 mv_wait_for_edma_empty_idle(ap); 1212 if (mv_stop_edma_engine(port_mmio)) { 1213 ata_port_err(ap, "Unable to stop eDMA\n"); 1214 err = -EIO; 1215 } 1216 mv_edma_cfg(ap, 0, 0); 1217 return err; 1218 } 1219 1220 static void mv_dump_mem(struct device *dev, void __iomem *start, unsigned bytes) 1221 { 1222 int b, w, o; 1223 unsigned char linebuf[38]; 1224 1225 for (b = 0; b < bytes; ) { 1226 for (w = 0, o = 0; b < bytes && w < 4; w++) { 1227 o += scnprintf(linebuf + o, sizeof(linebuf) - o, 1228 "%08x ", readl(start + b)); 1229 b += sizeof(u32); 1230 } 1231 dev_dbg(dev, "%s: %p: %s\n", 1232 __func__, start + b, linebuf); 1233 } 1234 } 1235 1236 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes) 1237 { 1238 int b, w, o; 1239 u32 dw = 0; 1240 unsigned char linebuf[38]; 1241 1242 for (b = 0; b < bytes; ) { 1243 for (w = 0, o = 0; b < bytes && w < 4; w++) { 1244 (void) pci_read_config_dword(pdev, b, &dw); 1245 o += snprintf(linebuf + o, sizeof(linebuf) - o, 1246 "%08x ", dw); 1247 b += sizeof(u32); 1248 } 1249 dev_dbg(&pdev->dev, "%s: %02x: %s\n", 1250 __func__, b, linebuf); 1251 } 1252 } 1253 1254 static void mv_dump_all_regs(void __iomem *mmio_base, 1255 struct pci_dev *pdev) 1256 { 1257 void __iomem *hc_base; 1258 void __iomem *port_base; 1259 int start_port, num_ports, p, start_hc, num_hcs, hc; 1260 1261 start_hc = start_port = 0; 1262 num_ports = 8; /* should be benign for 4 port devs */ 1263 num_hcs = 2; 1264 dev_dbg(&pdev->dev, 1265 "%s: All registers for port(s) %u-%u:\n", __func__, 1266 start_port, num_ports > 1 ? 
		num_ports - 1 : start_port);

	dev_dbg(&pdev->dev, "%s: PCI config space regs:\n", __func__);
	mv_dump_pci_cfg(pdev, 0x68);

	dev_dbg(&pdev->dev, "%s: PCI regs:\n", __func__);
	mv_dump_mem(&pdev->dev, mmio_base+0xc00, 0x3c);
	mv_dump_mem(&pdev->dev, mmio_base+0xd00, 0x34);
	mv_dump_mem(&pdev->dev, mmio_base+0xf00, 0x4);
	mv_dump_mem(&pdev->dev, mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		dev_dbg(&pdev->dev, "%s: HC regs (HC %i):\n", __func__, hc);
		mv_dump_mem(&pdev->dev, hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		dev_dbg(&pdev->dev, "%s: EDMA regs (port %i):\n", __func__, p);
		mv_dump_mem(&pdev->dev, port_base, 0x54);
		dev_dbg(&pdev->dev, "%s: SATA regs (port %i):\n", __func__, p);
		mv_dump_mem(&pdev->dev, port_base+0x300, 0x60);
	}
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		void __iomem *addr = mv_ap_base(link->ap) + ofs;
		struct mv_host_priv *hpriv = link->ap->host->private_data;
		if (sc_reg_in == SCR_CONTROL) {
			/*
			 * Workaround for 88SX60x1 FEr SATA#26:
			 *
			 * COMRESETs have to take care not to accidentally
			 * put the drive to sleep when writing SCR_CONTROL.
			 * Setting bits 12..15 prevents this problem.
			 *
			 * So if we see an outbound COMRESET, set those bits.
			 * Ditto for the followup write that clears the reset.
			 *
			 * The proprietary driver does this for
			 * all chip versions, and so do we.
			 */
			if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
				val |= 0xf000;

			if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) {
				void __iomem *lp_phy_addr =
					mv_ap_base(link->ap) + LP_PHY_CTL;
				/*
				 * Set PHY speed according to SControl speed.
				 */
				u32 lp_phy_val =
					LP_PHY_CTL_PIN_PU_PLL |
					LP_PHY_CTL_PIN_PU_RX  |
					LP_PHY_CTL_PIN_PU_TX;

				if ((val & 0xf0) != 0x10)
					lp_phy_val |=
						LP_PHY_CTL_GEN_TX_3G |
						LP_PHY_CTL_GEN_RX_3G;

				writelfl(lp_phy_val, lp_phy_addr);
			}
		}
		writelfl(val, addr);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
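	 *
	 * So when a port multiplier is attached, NCQ is simply disabled
	 * for the device here and it falls back to plain command-based
	 * switching (see the message below).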
1377 */ 1378 if (adev->flags & ATA_DFLAG_NCQ) { 1379 if (sata_pmp_attached(adev->link->ap)) { 1380 adev->flags &= ~ATA_DFLAG_NCQ; 1381 ata_dev_info(adev, 1382 "NCQ disabled for command-based switching\n"); 1383 } 1384 } 1385 } 1386 1387 static int mv_qc_defer(struct ata_queued_cmd *qc) 1388 { 1389 struct ata_link *link = qc->dev->link; 1390 struct ata_port *ap = link->ap; 1391 struct mv_port_priv *pp = ap->private_data; 1392 1393 /* 1394 * Don't allow new commands if we're in a delayed EH state 1395 * for NCQ and/or FIS-based switching. 1396 */ 1397 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) 1398 return ATA_DEFER_PORT; 1399 1400 /* PIO commands need exclusive link: no other commands [DMA or PIO] 1401 * can run concurrently. 1402 * set excl_link when we want to send a PIO command in DMA mode 1403 * or a non-NCQ command in NCQ mode. 1404 * When we receive a command from that link, and there are no 1405 * outstanding commands, mark a flag to clear excl_link and let 1406 * the command go through. 1407 */ 1408 if (unlikely(ap->excl_link)) { 1409 if (link == ap->excl_link) { 1410 if (ap->nr_active_links) 1411 return ATA_DEFER_PORT; 1412 qc->flags |= ATA_QCFLAG_CLEAR_EXCL; 1413 return 0; 1414 } else 1415 return ATA_DEFER_PORT; 1416 } 1417 1418 /* 1419 * If the port is completely idle, then allow the new qc. 1420 */ 1421 if (ap->nr_active_links == 0) 1422 return 0; 1423 1424 /* 1425 * The port is operating in host queuing mode (EDMA) with NCQ 1426 * enabled, allow multiple NCQ commands. EDMA also allows 1427 * queueing multiple DMA commands but libata core currently 1428 * doesn't allow it. 1429 */ 1430 if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) && 1431 (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) { 1432 if (ata_is_ncq(qc->tf.protocol)) 1433 return 0; 1434 else { 1435 ap->excl_link = link; 1436 return ATA_DEFER_PORT; 1437 } 1438 } 1439 1440 return ATA_DEFER_PORT; 1441 } 1442 1443 static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs) 1444 { 1445 struct mv_port_priv *pp = ap->private_data; 1446 void __iomem *port_mmio; 1447 1448 u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg; 1449 u32 ltmode, *old_ltmode = &pp->cached.ltmode; 1450 u32 haltcond, *old_haltcond = &pp->cached.haltcond; 1451 1452 ltmode = *old_ltmode & ~LTMODE_BIT8; 1453 haltcond = *old_haltcond | EDMA_ERR_DEV; 1454 1455 if (want_fbs) { 1456 fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC; 1457 ltmode = *old_ltmode | LTMODE_BIT8; 1458 if (want_ncq) 1459 haltcond &= ~EDMA_ERR_DEV; 1460 else 1461 fiscfg |= FISCFG_WAIT_DEV_ERR; 1462 } else { 1463 fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR); 1464 } 1465 1466 port_mmio = mv_ap_base(ap); 1467 mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg); 1468 mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode); 1469 mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond); 1470 } 1471 1472 static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq) 1473 { 1474 struct mv_host_priv *hpriv = ap->host->private_data; 1475 u32 old, new; 1476 1477 /* workaround for 88SX60x1 FEr SATA#25 (part 1) */ 1478 old = readl(hpriv->base + GPIO_PORT_CTL); 1479 if (want_ncq) 1480 new = old | (1 << 22); 1481 else 1482 new = old & ~(1 << 22); 1483 if (new != old) 1484 writel(new, hpriv->base + GPIO_PORT_CTL); 1485 } 1486 1487 /* 1488 * mv_bmdma_enable - set a magic bit on GEN_IIE to allow bmdma 1489 * @ap: Port being initialized 1490 * 1491 * There are two DMA modes on these chips: basic DMA, and EDMA. 
1492 * 1493 * Bit-0 of the "EDMA RESERVED" register enables/disables use 1494 * of basic DMA on the GEN_IIE versions of the chips. 1495 * 1496 * This bit survives EDMA resets, and must be set for basic DMA 1497 * to function, and should be cleared when EDMA is active. 1498 */ 1499 static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma) 1500 { 1501 struct mv_port_priv *pp = ap->private_data; 1502 u32 new, *old = &pp->cached.unknown_rsvd; 1503 1504 if (enable_bmdma) 1505 new = *old | 1; 1506 else 1507 new = *old & ~1; 1508 mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new); 1509 } 1510 1511 /* 1512 * SOC chips have an issue whereby the HDD LEDs don't always blink 1513 * during I/O when NCQ is enabled. Enabling a special "LED blink" mode 1514 * of the SOC takes care of it, generating a steady blink rate when 1515 * any drive on the chip is active. 1516 * 1517 * Unfortunately, the blink mode is a global hardware setting for the SOC, 1518 * so we must use it whenever at least one port on the SOC has NCQ enabled. 1519 * 1520 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal 1521 * LED operation works then, and provides better (more accurate) feedback. 1522 * 1523 * Note that this code assumes that an SOC never has more than one HC onboard. 1524 */ 1525 static void mv_soc_led_blink_enable(struct ata_port *ap) 1526 { 1527 struct ata_host *host = ap->host; 1528 struct mv_host_priv *hpriv = host->private_data; 1529 void __iomem *hc_mmio; 1530 u32 led_ctrl; 1531 1532 if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN) 1533 return; 1534 hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN; 1535 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no); 1536 led_ctrl = readl(hc_mmio + SOC_LED_CTRL); 1537 writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL); 1538 } 1539 1540 static void mv_soc_led_blink_disable(struct ata_port *ap) 1541 { 1542 struct ata_host *host = ap->host; 1543 struct mv_host_priv *hpriv = host->private_data; 1544 void __iomem *hc_mmio; 1545 u32 led_ctrl; 1546 unsigned int port; 1547 1548 if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)) 1549 return; 1550 1551 /* disable led-blink only if no ports are using NCQ */ 1552 for (port = 0; port < hpriv->n_ports; port++) { 1553 struct ata_port *this_ap = host->ports[port]; 1554 struct mv_port_priv *pp = this_ap->private_data; 1555 1556 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) 1557 return; 1558 } 1559 1560 hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN; 1561 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no); 1562 led_ctrl = readl(hc_mmio + SOC_LED_CTRL); 1563 writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL); 1564 } 1565 1566 static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma) 1567 { 1568 u32 cfg; 1569 struct mv_port_priv *pp = ap->private_data; 1570 struct mv_host_priv *hpriv = ap->host->private_data; 1571 void __iomem *port_mmio = mv_ap_base(ap); 1572 1573 /* set up non-NCQ EDMA configuration */ 1574 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ 1575 pp->pp_flags &= 1576 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY); 1577 1578 if (IS_GEN_I(hpriv)) 1579 cfg |= (1 << 8); /* enab config burst size mask */ 1580 1581 else if (IS_GEN_II(hpriv)) { 1582 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN; 1583 mv_60x1_errata_sata25(ap, want_ncq); 1584 1585 } else if (IS_GEN_IIE(hpriv)) { 1586 int want_fbs = sata_pmp_attached(ap); 1587 /* 1588 * Possible future enhancement: 1589 * 1590 * The chip can use FBS 
with non-NCQ, if we allow it, 1591 * But first we need to have the error handling in place 1592 * for this mode (datasheet section 7.3.15.4.2.3). 1593 * So disallow non-NCQ FBS for now. 1594 */ 1595 want_fbs &= want_ncq; 1596 1597 mv_config_fbs(ap, want_ncq, want_fbs); 1598 1599 if (want_fbs) { 1600 pp->pp_flags |= MV_PP_FLAG_FBS_EN; 1601 cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */ 1602 } 1603 1604 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ 1605 if (want_edma) { 1606 cfg |= (1 << 22); /* enab 4-entry host queue cache */ 1607 if (!IS_SOC(hpriv)) 1608 cfg |= (1 << 18); /* enab early completion */ 1609 } 1610 if (hpriv->hp_flags & MV_HP_CUT_THROUGH) 1611 cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */ 1612 mv_bmdma_enable_iie(ap, !want_edma); 1613 1614 if (IS_SOC(hpriv)) { 1615 if (want_ncq) 1616 mv_soc_led_blink_enable(ap); 1617 else 1618 mv_soc_led_blink_disable(ap); 1619 } 1620 } 1621 1622 if (want_ncq) { 1623 cfg |= EDMA_CFG_NCQ; 1624 pp->pp_flags |= MV_PP_FLAG_NCQ_EN; 1625 } 1626 1627 writelfl(cfg, port_mmio + EDMA_CFG); 1628 } 1629 1630 static void mv_port_free_dma_mem(struct ata_port *ap) 1631 { 1632 struct mv_host_priv *hpriv = ap->host->private_data; 1633 struct mv_port_priv *pp = ap->private_data; 1634 int tag; 1635 1636 if (pp->crqb) { 1637 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma); 1638 pp->crqb = NULL; 1639 } 1640 if (pp->crpb) { 1641 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma); 1642 pp->crpb = NULL; 1643 } 1644 /* 1645 * For GEN_I, there's no NCQ, so we have only a single sg_tbl. 1646 * For later hardware, we have one unique sg_tbl per NCQ tag. 1647 */ 1648 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { 1649 if (pp->sg_tbl[tag]) { 1650 if (tag == 0 || !IS_GEN_I(hpriv)) 1651 dma_pool_free(hpriv->sg_tbl_pool, 1652 pp->sg_tbl[tag], 1653 pp->sg_tbl_dma[tag]); 1654 pp->sg_tbl[tag] = NULL; 1655 } 1656 } 1657 } 1658 1659 /** 1660 * mv_port_start - Port specific init/start routine. 1661 * @ap: ATA channel to manipulate 1662 * 1663 * Allocate and point to DMA memory, init port private memory, 1664 * zero indices. 1665 * 1666 * LOCKING: 1667 * Inherited from caller. 1668 */ 1669 static int mv_port_start(struct ata_port *ap) 1670 { 1671 struct device *dev = ap->host->dev; 1672 struct mv_host_priv *hpriv = ap->host->private_data; 1673 struct mv_port_priv *pp; 1674 unsigned long flags; 1675 int tag; 1676 1677 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 1678 if (!pp) 1679 return -ENOMEM; 1680 ap->private_data = pp; 1681 1682 pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma); 1683 if (!pp->crqb) 1684 return -ENOMEM; 1685 1686 pp->crpb = dma_pool_zalloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma); 1687 if (!pp->crpb) 1688 goto out_port_free_dma_mem; 1689 1690 /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */ 1691 if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0) 1692 ap->flags |= ATA_FLAG_AN; 1693 /* 1694 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl. 1695 * For later hardware, we need one unique sg_tbl per NCQ tag. 
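	 * (For GEN_I, the loop below simply makes every tag alias the
	 *  single sg_tbl[0] allocation made for tag 0.)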
1696 */ 1697 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { 1698 if (tag == 0 || !IS_GEN_I(hpriv)) { 1699 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool, 1700 GFP_KERNEL, &pp->sg_tbl_dma[tag]); 1701 if (!pp->sg_tbl[tag]) 1702 goto out_port_free_dma_mem; 1703 } else { 1704 pp->sg_tbl[tag] = pp->sg_tbl[0]; 1705 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0]; 1706 } 1707 } 1708 1709 spin_lock_irqsave(ap->lock, flags); 1710 mv_save_cached_regs(ap); 1711 mv_edma_cfg(ap, 0, 0); 1712 spin_unlock_irqrestore(ap->lock, flags); 1713 1714 return 0; 1715 1716 out_port_free_dma_mem: 1717 mv_port_free_dma_mem(ap); 1718 return -ENOMEM; 1719 } 1720 1721 /** 1722 * mv_port_stop - Port specific cleanup/stop routine. 1723 * @ap: ATA channel to manipulate 1724 * 1725 * Stop DMA, cleanup port memory. 1726 * 1727 * LOCKING: 1728 * This routine uses the host lock to protect the DMA stop. 1729 */ 1730 static void mv_port_stop(struct ata_port *ap) 1731 { 1732 unsigned long flags; 1733 1734 spin_lock_irqsave(ap->lock, flags); 1735 mv_stop_edma(ap); 1736 mv_enable_port_irqs(ap, 0); 1737 spin_unlock_irqrestore(ap->lock, flags); 1738 mv_port_free_dma_mem(ap); 1739 } 1740 1741 /** 1742 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries 1743 * @qc: queued command whose SG list to source from 1744 * 1745 * Populate the SG list and mark the last entry. 1746 * 1747 * LOCKING: 1748 * Inherited from caller. 1749 */ 1750 static void mv_fill_sg(struct ata_queued_cmd *qc) 1751 { 1752 struct mv_port_priv *pp = qc->ap->private_data; 1753 struct scatterlist *sg; 1754 struct mv_sg *mv_sg, *last_sg = NULL; 1755 unsigned int si; 1756 1757 mv_sg = pp->sg_tbl[qc->hw_tag]; 1758 for_each_sg(qc->sg, sg, qc->n_elem, si) { 1759 dma_addr_t addr = sg_dma_address(sg); 1760 u32 sg_len = sg_dma_len(sg); 1761 1762 while (sg_len) { 1763 u32 offset = addr & 0xffff; 1764 u32 len = sg_len; 1765 1766 if (offset + len > 0x10000) 1767 len = 0x10000 - offset; 1768 1769 mv_sg->addr = cpu_to_le32(addr & 0xffffffff); 1770 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16); 1771 mv_sg->flags_size = cpu_to_le32(len & 0xffff); 1772 mv_sg->reserved = 0; 1773 1774 sg_len -= len; 1775 addr += len; 1776 1777 last_sg = mv_sg; 1778 mv_sg++; 1779 } 1780 } 1781 1782 if (likely(last_sg)) 1783 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL); 1784 mb(); /* ensure data structure is visible to the chipset */ 1785 } 1786 1787 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) 1788 { 1789 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | 1790 (last ? CRQB_CMD_LAST : 0); 1791 *cmdw = cpu_to_le16(tmp); 1792 } 1793 1794 /** 1795 * mv_sff_irq_clear - Clear hardware interrupt after DMA. 1796 * @ap: Port associated with this ATA transaction. 1797 * 1798 * We need this only for ATAPI bmdma transactions, 1799 * as otherwise we experience spurious interrupts 1800 * after libata-sff handles the bmdma interrupts. 1801 */ 1802 static void mv_sff_irq_clear(struct ata_port *ap) 1803 { 1804 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ); 1805 } 1806 1807 /** 1808 * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA. 1809 * @qc: queued command to check for chipset/DMA compatibility. 1810 * 1811 * The bmdma engines cannot handle speculative data sizes 1812 * (bytecount under/over flow). So only allow DMA for 1813 * data transfer commands with known data sizes. 1814 * 1815 * LOCKING: 1816 * Inherited from caller. 
1817 */ 1818 static int mv_check_atapi_dma(struct ata_queued_cmd *qc) 1819 { 1820 struct scsi_cmnd *scmd = qc->scsicmd; 1821 1822 if (scmd) { 1823 switch (scmd->cmnd[0]) { 1824 case READ_6: 1825 case READ_10: 1826 case READ_12: 1827 case WRITE_6: 1828 case WRITE_10: 1829 case WRITE_12: 1830 case GPCMD_READ_CD: 1831 case GPCMD_SEND_DVD_STRUCTURE: 1832 case GPCMD_SEND_CUE_SHEET: 1833 return 0; /* DMA is safe */ 1834 } 1835 } 1836 return -EOPNOTSUPP; /* use PIO instead */ 1837 } 1838 1839 /** 1840 * mv_bmdma_setup - Set up BMDMA transaction 1841 * @qc: queued command to prepare DMA for. 1842 * 1843 * LOCKING: 1844 * Inherited from caller. 1845 */ 1846 static void mv_bmdma_setup(struct ata_queued_cmd *qc) 1847 { 1848 struct ata_port *ap = qc->ap; 1849 void __iomem *port_mmio = mv_ap_base(ap); 1850 struct mv_port_priv *pp = ap->private_data; 1851 1852 mv_fill_sg(qc); 1853 1854 /* clear all DMA cmd bits */ 1855 writel(0, port_mmio + BMDMA_CMD); 1856 1857 /* load PRD table addr. */ 1858 writel((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16, 1859 port_mmio + BMDMA_PRD_HIGH); 1860 writelfl(pp->sg_tbl_dma[qc->hw_tag], 1861 port_mmio + BMDMA_PRD_LOW); 1862 1863 /* issue r/w command */ 1864 ap->ops->sff_exec_command(ap, &qc->tf); 1865 } 1866 1867 /** 1868 * mv_bmdma_start - Start a BMDMA transaction 1869 * @qc: queued command to start DMA on. 1870 * 1871 * LOCKING: 1872 * Inherited from caller. 1873 */ 1874 static void mv_bmdma_start(struct ata_queued_cmd *qc) 1875 { 1876 struct ata_port *ap = qc->ap; 1877 void __iomem *port_mmio = mv_ap_base(ap); 1878 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); 1879 u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START; 1880 1881 /* start host DMA transaction */ 1882 writelfl(cmd, port_mmio + BMDMA_CMD); 1883 } 1884 1885 /** 1886 * mv_bmdma_stop_ap - Stop BMDMA transfer 1887 * @ap: port to stop 1888 * 1889 * Clears the ATA_DMA_START flag in the bmdma control register 1890 * 1891 * LOCKING: 1892 * Inherited from caller. 1893 */ 1894 static void mv_bmdma_stop_ap(struct ata_port *ap) 1895 { 1896 void __iomem *port_mmio = mv_ap_base(ap); 1897 u32 cmd; 1898 1899 /* clear start/stop bit */ 1900 cmd = readl(port_mmio + BMDMA_CMD); 1901 if (cmd & ATA_DMA_START) { 1902 cmd &= ~ATA_DMA_START; 1903 writelfl(cmd, port_mmio + BMDMA_CMD); 1904 1905 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ 1906 ata_sff_dma_pause(ap); 1907 } 1908 } 1909 1910 static void mv_bmdma_stop(struct ata_queued_cmd *qc) 1911 { 1912 mv_bmdma_stop_ap(qc->ap); 1913 } 1914 1915 /** 1916 * mv_bmdma_status - Read BMDMA status 1917 * @ap: port for which to retrieve DMA status. 1918 * 1919 * Read and return equivalent of the sff BMDMA status register. 1920 * 1921 * LOCKING: 1922 * Inherited from caller. 1923 */ 1924 static u8 mv_bmdma_status(struct ata_port *ap) 1925 { 1926 void __iomem *port_mmio = mv_ap_base(ap); 1927 u32 reg, status; 1928 1929 /* 1930 * Other bits are valid only if ATA_DMA_ACTIVE==0, 1931 * and the ATA_DMA_INTR bit doesn't exist. 1932 */ 1933 reg = readl(port_mmio + BMDMA_STATUS); 1934 if (reg & ATA_DMA_ACTIVE) 1935 status = ATA_DMA_ACTIVE; 1936 else if (reg & ATA_DMA_ERR) 1937 status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR; 1938 else { 1939 /* 1940 * Just because DMA_ACTIVE is 0 (DMA completed), 1941 * this does _not_ mean the device is "done". 1942 * So we should not yet be signalling ATA_DMA_INTR 1943 * in some cases. Eg. DSM/TRIM, and perhaps others. 
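 * Instead, stop the DMA engine and let the device's BSY bit
 * (read via altstatus below) decide whether ATA_DMA_INTR is reported.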
1944 */ 1945 mv_bmdma_stop_ap(ap); 1946 if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY) 1947 status = 0; 1948 else 1949 status = ATA_DMA_INTR; 1950 } 1951 return status; 1952 } 1953 1954 static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc) 1955 { 1956 struct ata_taskfile *tf = &qc->tf; 1957 /* 1958 * Workaround for 88SX60x1 FEr SATA#24. 1959 * 1960 * Chip may corrupt WRITEs if multi_count >= 4kB. 1961 * Note that READs are unaffected. 1962 * 1963 * It's not clear if this errata really means "4K bytes", 1964 * or if it always happens for multi_count > 7 1965 * regardless of device sector_size. 1966 * 1967 * So, for safety, any write with multi_count > 7 1968 * gets converted here into a regular PIO write instead: 1969 */ 1970 if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) { 1971 if (qc->dev->multi_count > 7) { 1972 switch (tf->command) { 1973 case ATA_CMD_WRITE_MULTI: 1974 tf->command = ATA_CMD_PIO_WRITE; 1975 break; 1976 case ATA_CMD_WRITE_MULTI_FUA_EXT: 1977 tf->flags &= ~ATA_TFLAG_FUA; /* ugh */ 1978 fallthrough; 1979 case ATA_CMD_WRITE_MULTI_EXT: 1980 tf->command = ATA_CMD_PIO_WRITE_EXT; 1981 break; 1982 } 1983 } 1984 } 1985 } 1986 1987 /** 1988 * mv_qc_prep - Host specific command preparation. 1989 * @qc: queued command to prepare 1990 * 1991 * This routine simply redirects to the general purpose routine 1992 * if command is not DMA. Else, it handles prep of the CRQB 1993 * (command request block), does some sanity checking, and calls 1994 * the SG load routine. 1995 * 1996 * LOCKING: 1997 * Inherited from caller. 1998 */ 1999 static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc) 2000 { 2001 struct ata_port *ap = qc->ap; 2002 struct mv_port_priv *pp = ap->private_data; 2003 __le16 *cw; 2004 struct ata_taskfile *tf = &qc->tf; 2005 u16 flags = 0; 2006 unsigned in_index; 2007 2008 switch (tf->protocol) { 2009 case ATA_PROT_DMA: 2010 if (tf->command == ATA_CMD_DSM) 2011 return AC_ERR_OK; 2012 fallthrough; 2013 case ATA_PROT_NCQ: 2014 break; /* continue below */ 2015 case ATA_PROT_PIO: 2016 mv_rw_multi_errata_sata24(qc); 2017 return AC_ERR_OK; 2018 default: 2019 return AC_ERR_OK; 2020 } 2021 2022 /* Fill in command request block 2023 */ 2024 if (!(tf->flags & ATA_TFLAG_WRITE)) 2025 flags |= CRQB_FLAG_READ; 2026 WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag); 2027 flags |= qc->hw_tag << CRQB_TAG_SHIFT; 2028 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; 2029 2030 /* get current queue index from software */ 2031 in_index = pp->req_idx; 2032 2033 pp->crqb[in_index].sg_addr = 2034 cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff); 2035 pp->crqb[in_index].sg_addr_hi = 2036 cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16); 2037 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags); 2038 2039 cw = &pp->crqb[in_index].ata_cmd[0]; 2040 2041 /* Sadly, the CRQB cannot accommodate all registers--there are 2042 * only 11 bytes...so we must pick and choose required 2043 * registers based on the command. So, we drop feature and 2044 * hob_feature for [RW] DMA commands, but they are needed for 2045 * NCQ. NCQ will drop hob_nsect, which is not needed there 2046 * (nsect is used only for the tag; feat/hob_feat hold true nsect). 
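 *
 * Each ata_cmd[] halfword below is packed by mv_crqb_pack_cmd(), which
 * combines the register value and register address with the chip's
 * control bits, and flags the final (command) entry as "last".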
2047 */ 2048 switch (tf->command) { 2049 case ATA_CMD_READ: 2050 case ATA_CMD_READ_EXT: 2051 case ATA_CMD_WRITE: 2052 case ATA_CMD_WRITE_EXT: 2053 case ATA_CMD_WRITE_FUA_EXT: 2054 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0); 2055 break; 2056 case ATA_CMD_FPDMA_READ: 2057 case ATA_CMD_FPDMA_WRITE: 2058 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0); 2059 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0); 2060 break; 2061 default: 2062 /* The only other commands EDMA supports in non-queued and 2063 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none 2064 * of which are defined/used by Linux. If we get here, this 2065 * driver needs work. 2066 */ 2067 ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__, 2068 tf->command); 2069 return AC_ERR_INVALID; 2070 } 2071 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0); 2072 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0); 2073 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0); 2074 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0); 2075 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0); 2076 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0); 2077 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0); 2078 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0); 2079 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ 2080 2081 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 2082 return AC_ERR_OK; 2083 mv_fill_sg(qc); 2084 2085 return AC_ERR_OK; 2086 } 2087 2088 /** 2089 * mv_qc_prep_iie - Host specific command preparation. 2090 * @qc: queued command to prepare 2091 * 2092 * This routine simply redirects to the general purpose routine 2093 * if command is not DMA. Else, it handles prep of the CRQB 2094 * (command request block), does some sanity checking, and calls 2095 * the SG load routine. 2096 * 2097 * LOCKING: 2098 * Inherited from caller. 
2099 */ 2100 static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc) 2101 { 2102 struct ata_port *ap = qc->ap; 2103 struct mv_port_priv *pp = ap->private_data; 2104 struct mv_crqb_iie *crqb; 2105 struct ata_taskfile *tf = &qc->tf; 2106 unsigned in_index; 2107 u32 flags = 0; 2108 2109 if ((tf->protocol != ATA_PROT_DMA) && 2110 (tf->protocol != ATA_PROT_NCQ)) 2111 return AC_ERR_OK; 2112 if (tf->command == ATA_CMD_DSM) 2113 return AC_ERR_OK; /* use bmdma for this */ 2114 2115 /* Fill in Gen IIE command request block */ 2116 if (!(tf->flags & ATA_TFLAG_WRITE)) 2117 flags |= CRQB_FLAG_READ; 2118 2119 WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag); 2120 flags |= qc->hw_tag << CRQB_TAG_SHIFT; 2121 flags |= qc->hw_tag << CRQB_HOSTQ_SHIFT; 2122 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; 2123 2124 /* get current queue index from software */ 2125 in_index = pp->req_idx; 2126 2127 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; 2128 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff); 2129 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16); 2130 crqb->flags = cpu_to_le32(flags); 2131 2132 crqb->ata_cmd[0] = cpu_to_le32( 2133 (tf->command << 16) | 2134 (tf->feature << 24) 2135 ); 2136 crqb->ata_cmd[1] = cpu_to_le32( 2137 (tf->lbal << 0) | 2138 (tf->lbam << 8) | 2139 (tf->lbah << 16) | 2140 (tf->device << 24) 2141 ); 2142 crqb->ata_cmd[2] = cpu_to_le32( 2143 (tf->hob_lbal << 0) | 2144 (tf->hob_lbam << 8) | 2145 (tf->hob_lbah << 16) | 2146 (tf->hob_feature << 24) 2147 ); 2148 crqb->ata_cmd[3] = cpu_to_le32( 2149 (tf->nsect << 0) | 2150 (tf->hob_nsect << 8) 2151 ); 2152 2153 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 2154 return AC_ERR_OK; 2155 mv_fill_sg(qc); 2156 2157 return AC_ERR_OK; 2158 } 2159 2160 /** 2161 * mv_sff_check_status - fetch device status, if valid 2162 * @ap: ATA port to fetch status from 2163 * 2164 * When using command issue via mv_qc_issue_fis(), 2165 * the initial ATA_BUSY state does not show up in the 2166 * ATA status (shadow) register. This can confuse libata! 2167 * 2168 * So we have a hook here to fake ATA_BUSY for that situation, 2169 * until the first time a BUSY, DRQ, or ERR bit is seen. 2170 * 2171 * The rest of the time, it simply returns the ATA status register. 
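 *
 * (MV_PP_FLAG_FAKE_ATA_BUSY is set by mv_qc_issue_fis() and cleared
 * below as soon as a real BUSY, DRQ, or ERR bit shows up.)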
2172 */ 2173 static u8 mv_sff_check_status(struct ata_port *ap) 2174 { 2175 u8 stat = ioread8(ap->ioaddr.status_addr); 2176 struct mv_port_priv *pp = ap->private_data; 2177 2178 if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) { 2179 if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR)) 2180 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; 2181 else 2182 stat = ATA_BUSY; 2183 } 2184 return stat; 2185 } 2186 2187 /** 2188 * mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register 2189 * @ap: ATA port to send a FIS 2190 * @fis: fis to be sent 2191 * @nwords: number of 32-bit words in the fis 2192 */ 2193 static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords) 2194 { 2195 void __iomem *port_mmio = mv_ap_base(ap); 2196 u32 ifctl, old_ifctl, ifstat; 2197 int i, timeout = 200, final_word = nwords - 1; 2198 2199 /* Initiate FIS transmission mode */ 2200 old_ifctl = readl(port_mmio + SATA_IFCTL); 2201 ifctl = 0x100 | (old_ifctl & 0xf); 2202 writelfl(ifctl, port_mmio + SATA_IFCTL); 2203 2204 /* Send all words of the FIS except for the final word */ 2205 for (i = 0; i < final_word; ++i) 2206 writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS); 2207 2208 /* Flag end-of-transmission, and then send the final word */ 2209 writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL); 2210 writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS); 2211 2212 /* 2213 * Wait for FIS transmission to complete. 2214 * This typically takes just a single iteration. 2215 */ 2216 do { 2217 ifstat = readl(port_mmio + SATA_IFSTAT); 2218 } while (!(ifstat & 0x1000) && --timeout); 2219 2220 /* Restore original port configuration */ 2221 writelfl(old_ifctl, port_mmio + SATA_IFCTL); 2222 2223 /* See if it worked */ 2224 if ((ifstat & 0x3000) != 0x1000) { 2225 ata_port_warn(ap, "%s transmission error, ifstat=%08x\n", 2226 __func__, ifstat); 2227 return AC_ERR_OTHER; 2228 } 2229 return 0; 2230 } 2231 2232 /** 2233 * mv_qc_issue_fis - Issue a command directly as a FIS 2234 * @qc: queued command to start 2235 * 2236 * Note that the ATA shadow registers are not updated 2237 * after command issue, so the device will appear "READY" 2238 * if polled, even while it is BUSY processing the command. 2239 * 2240 * So we use a status hook to fake ATA_BUSY until the drive changes state. 2241 * 2242 * Note: we don't get updated shadow regs on *completion* 2243 * of non-data commands. So avoid sending them via this function, 2244 * as they will appear to have completed immediately. 2245 * 2246 * GEN_IIE has special registers that we could get the result tf from, 2247 * but earlier chipsets do not. For now, we ignore those registers. 
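 *
 * (A host-to-device Register FIS is five 32-bit words, which is why
 * the fis[5] buffer below is exactly that size.)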
2248 */ 2249 static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc) 2250 { 2251 struct ata_port *ap = qc->ap; 2252 struct mv_port_priv *pp = ap->private_data; 2253 struct ata_link *link = qc->dev->link; 2254 u32 fis[5]; 2255 int err = 0; 2256 2257 ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis); 2258 err = mv_send_fis(ap, fis, ARRAY_SIZE(fis)); 2259 if (err) 2260 return err; 2261 2262 switch (qc->tf.protocol) { 2263 case ATAPI_PROT_PIO: 2264 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; 2265 fallthrough; 2266 case ATAPI_PROT_NODATA: 2267 ap->hsm_task_state = HSM_ST_FIRST; 2268 break; 2269 case ATA_PROT_PIO: 2270 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; 2271 if (qc->tf.flags & ATA_TFLAG_WRITE) 2272 ap->hsm_task_state = HSM_ST_FIRST; 2273 else 2274 ap->hsm_task_state = HSM_ST; 2275 break; 2276 default: 2277 ap->hsm_task_state = HSM_ST_LAST; 2278 break; 2279 } 2280 2281 if (qc->tf.flags & ATA_TFLAG_POLLING) 2282 ata_sff_queue_pio_task(link, 0); 2283 return 0; 2284 } 2285 2286 /** 2287 * mv_qc_issue - Initiate a command to the host 2288 * @qc: queued command to start 2289 * 2290 * This routine simply redirects to the general purpose routine 2291 * if command is not DMA. Else, it sanity checks our local 2292 * caches of the request producer/consumer indices then enables 2293 * DMA and bumps the request producer index. 2294 * 2295 * LOCKING: 2296 * Inherited from caller. 2297 */ 2298 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) 2299 { 2300 static int limit_warnings = 10; 2301 struct ata_port *ap = qc->ap; 2302 void __iomem *port_mmio = mv_ap_base(ap); 2303 struct mv_port_priv *pp = ap->private_data; 2304 u32 in_index; 2305 unsigned int port_irqs; 2306 2307 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */ 2308 2309 switch (qc->tf.protocol) { 2310 case ATA_PROT_DMA: 2311 if (qc->tf.command == ATA_CMD_DSM) { 2312 if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */ 2313 return AC_ERR_OTHER; 2314 break; /* use bmdma for this */ 2315 } 2316 fallthrough; 2317 case ATA_PROT_NCQ: 2318 mv_start_edma(ap, port_mmio, pp, qc->tf.protocol); 2319 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK; 2320 in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT; 2321 2322 /* Write the request in pointer to kick the EDMA to life */ 2323 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index, 2324 port_mmio + EDMA_REQ_Q_IN_PTR); 2325 return 0; 2326 2327 case ATA_PROT_PIO: 2328 /* 2329 * Errata SATA#16, SATA#24: warn if multiple DRQs expected. 2330 * 2331 * Someday, we might implement special polling workarounds 2332 * for these, but it all seems rather unnecessary since we 2333 * normally use only DMA for commands which transfer more 2334 * than a single block of data. 2335 * 2336 * Much of the time, this could just work regardless. 2337 * So for now, just log the incident, and allow the attempt. 
2338 */ 2339 if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) { 2340 --limit_warnings; 2341 ata_link_warn(qc->dev->link, DRV_NAME 2342 ": attempting PIO w/multiple DRQ: " 2343 "this may fail due to h/w errata\n"); 2344 } 2345 fallthrough; 2346 case ATA_PROT_NODATA: 2347 case ATAPI_PROT_PIO: 2348 case ATAPI_PROT_NODATA: 2349 if (ap->flags & ATA_FLAG_PIO_POLLING) 2350 qc->tf.flags |= ATA_TFLAG_POLLING; 2351 break; 2352 } 2353 2354 if (qc->tf.flags & ATA_TFLAG_POLLING) 2355 port_irqs = ERR_IRQ; /* mask device interrupt when polling */ 2356 else 2357 port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */ 2358 2359 /* 2360 * We're about to send a non-EDMA capable command to the 2361 * port. Turn off EDMA so there won't be problems accessing 2362 * shadow block, etc registers. 2363 */ 2364 mv_stop_edma(ap); 2365 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs); 2366 mv_pmp_select(ap, qc->dev->link->pmp); 2367 2368 if (qc->tf.command == ATA_CMD_READ_LOG_EXT) { 2369 struct mv_host_priv *hpriv = ap->host->private_data; 2370 /* 2371 * Workaround for 88SX60x1 FEr SATA#25 (part 2). 2372 * 2373 * After any NCQ error, the READ_LOG_EXT command 2374 * from libata-eh *must* use mv_qc_issue_fis(). 2375 * Otherwise it might fail, due to chip errata. 2376 * 2377 * Rather than special-case it, we'll just *always* 2378 * use this method here for READ_LOG_EXT, making for 2379 * easier testing. 2380 */ 2381 if (IS_GEN_II(hpriv)) 2382 return mv_qc_issue_fis(qc); 2383 } 2384 return ata_bmdma_qc_issue(qc); 2385 } 2386 2387 static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap) 2388 { 2389 struct mv_port_priv *pp = ap->private_data; 2390 struct ata_queued_cmd *qc; 2391 2392 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) 2393 return NULL; 2394 qc = ata_qc_from_tag(ap, ap->link.active_tag); 2395 if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) 2396 return qc; 2397 return NULL; 2398 } 2399 2400 static void mv_pmp_error_handler(struct ata_port *ap) 2401 { 2402 unsigned int pmp, pmp_map; 2403 struct mv_port_priv *pp = ap->private_data; 2404 2405 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) { 2406 /* 2407 * Perform NCQ error analysis on failed PMPs 2408 * before we freeze the port entirely. 2409 * 2410 * The failed PMPs are marked earlier by mv_pmp_eh_prep(). 
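 * (mv_pmp_eh_prep() marks each failed link with AC_ERR_DEV plus
 * ATA_EH_RESET and aborts it; here we run ata_eh_analyze_ncq_error()
 * on each of those links before freezing the port.)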
2411 */ 2412 pmp_map = pp->delayed_eh_pmp_map; 2413 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH; 2414 for (pmp = 0; pmp_map != 0; pmp++) { 2415 unsigned int this_pmp = (1 << pmp); 2416 if (pmp_map & this_pmp) { 2417 struct ata_link *link = &ap->pmp_link[pmp]; 2418 pmp_map &= ~this_pmp; 2419 ata_eh_analyze_ncq_error(link); 2420 } 2421 } 2422 ata_port_freeze(ap); 2423 } 2424 sata_pmp_error_handler(ap); 2425 } 2426 2427 static unsigned int mv_get_err_pmp_map(struct ata_port *ap) 2428 { 2429 void __iomem *port_mmio = mv_ap_base(ap); 2430 2431 return readl(port_mmio + SATA_TESTCTL) >> 16; 2432 } 2433 2434 static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map) 2435 { 2436 unsigned int pmp; 2437 2438 /* 2439 * Initialize EH info for PMPs which saw device errors 2440 */ 2441 for (pmp = 0; pmp_map != 0; pmp++) { 2442 unsigned int this_pmp = (1 << pmp); 2443 if (pmp_map & this_pmp) { 2444 struct ata_link *link = &ap->pmp_link[pmp]; 2445 struct ata_eh_info *ehi = &link->eh_info; 2446 2447 pmp_map &= ~this_pmp; 2448 ata_ehi_clear_desc(ehi); 2449 ata_ehi_push_desc(ehi, "dev err"); 2450 ehi->err_mask |= AC_ERR_DEV; 2451 ehi->action |= ATA_EH_RESET; 2452 ata_link_abort(link); 2453 } 2454 } 2455 } 2456 2457 static int mv_req_q_empty(struct ata_port *ap) 2458 { 2459 void __iomem *port_mmio = mv_ap_base(ap); 2460 u32 in_ptr, out_ptr; 2461 2462 in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR) 2463 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; 2464 out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR) 2465 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; 2466 return (in_ptr == out_ptr); /* 1 == queue_is_empty */ 2467 } 2468 2469 static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap) 2470 { 2471 struct mv_port_priv *pp = ap->private_data; 2472 int failed_links; 2473 unsigned int old_map, new_map; 2474 2475 /* 2476 * Device error during FBS+NCQ operation: 2477 * 2478 * Set a port flag to prevent further I/O being enqueued. 2479 * Leave the EDMA running to drain outstanding commands from this port. 2480 * Perform the post-mortem/EH only when all responses are complete. 2481 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2). 2482 */ 2483 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) { 2484 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH; 2485 pp->delayed_eh_pmp_map = 0; 2486 } 2487 old_map = pp->delayed_eh_pmp_map; 2488 new_map = old_map | mv_get_err_pmp_map(ap); 2489 2490 if (old_map != new_map) { 2491 pp->delayed_eh_pmp_map = new_map; 2492 mv_pmp_eh_prep(ap, new_map & ~old_map); 2493 } 2494 failed_links = hweight16(new_map); 2495 2496 ata_port_info(ap, 2497 "%s: pmp_map=%04x qc_map=%04llx failed_links=%d nr_active_links=%d\n", 2498 __func__, pp->delayed_eh_pmp_map, 2499 ap->qc_active, failed_links, 2500 ap->nr_active_links); 2501 2502 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) { 2503 mv_process_crpb_entries(ap, pp); 2504 mv_stop_edma(ap); 2505 mv_eh_freeze(ap); 2506 ata_port_info(ap, "%s: done\n", __func__); 2507 return 1; /* handled */ 2508 } 2509 ata_port_info(ap, "%s: waiting\n", __func__); 2510 return 1; /* handled */ 2511 } 2512 2513 static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap) 2514 { 2515 /* 2516 * Possible future enhancement: 2517 * 2518 * FBS+non-NCQ operation is not yet implemented. 2519 * See related notes in mv_edma_cfg(). 2520 * 2521 * Device error during FBS+non-NCQ operation: 2522 * 2523 * We need to snapshot the shadow registers for each failed command. 2524 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3). 
2525 */ 2526 return 0; /* not handled */ 2527 } 2528 2529 static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause) 2530 { 2531 struct mv_port_priv *pp = ap->private_data; 2532 2533 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) 2534 return 0; /* EDMA was not active: not handled */ 2535 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN)) 2536 return 0; /* FBS was not active: not handled */ 2537 2538 if (!(edma_err_cause & EDMA_ERR_DEV)) 2539 return 0; /* non DEV error: not handled */ 2540 edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT; 2541 if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS)) 2542 return 0; /* other problems: not handled */ 2543 2544 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) { 2545 /* 2546 * EDMA should NOT have self-disabled for this case. 2547 * If it did, then something is wrong elsewhere, 2548 * and we cannot handle it here. 2549 */ 2550 if (edma_err_cause & EDMA_ERR_SELF_DIS) { 2551 ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n", 2552 __func__, edma_err_cause, pp->pp_flags); 2553 return 0; /* not handled */ 2554 } 2555 return mv_handle_fbs_ncq_dev_err(ap); 2556 } else { 2557 /* 2558 * EDMA should have self-disabled for this case. 2559 * If it did not, then something is wrong elsewhere, 2560 * and we cannot handle it here. 2561 */ 2562 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) { 2563 ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n", 2564 __func__, edma_err_cause, pp->pp_flags); 2565 return 0; /* not handled */ 2566 } 2567 return mv_handle_fbs_non_ncq_dev_err(ap); 2568 } 2569 return 0; /* not handled */ 2570 } 2571 2572 static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) 2573 { 2574 struct ata_eh_info *ehi = &ap->link.eh_info; 2575 char *when = "idle"; 2576 2577 ata_ehi_clear_desc(ehi); 2578 if (edma_was_enabled) { 2579 when = "EDMA enabled"; 2580 } else { 2581 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); 2582 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) 2583 when = "polling"; 2584 } 2585 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when); 2586 ehi->err_mask |= AC_ERR_OTHER; 2587 ehi->action |= ATA_EH_RESET; 2588 ata_port_freeze(ap); 2589 } 2590 2591 /** 2592 * mv_err_intr - Handle error interrupts on the port 2593 * @ap: ATA channel to manipulate 2594 * 2595 * Most cases require a full reset of the chip's state machine, 2596 * which also performs a COMRESET. 2597 * Also, if the port disabled DMA, update our cached copy to match. 2598 * 2599 * LOCKING: 2600 * Inherited from caller. 2601 */ 2602 static void mv_err_intr(struct ata_port *ap) 2603 { 2604 void __iomem *port_mmio = mv_ap_base(ap); 2605 u32 edma_err_cause, eh_freeze_mask, serr = 0; 2606 u32 fis_cause = 0; 2607 struct mv_port_priv *pp = ap->private_data; 2608 struct mv_host_priv *hpriv = ap->host->private_data; 2609 unsigned int action = 0, err_mask = 0; 2610 struct ata_eh_info *ehi = &ap->link.eh_info; 2611 struct ata_queued_cmd *qc; 2612 int abort = 0; 2613 2614 /* 2615 * Read and clear the SError and err_cause bits. 2616 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear 2617 * the FIS_IRQ_CAUSE register before clearing edma_err_cause. 
2618 */ 2619 sata_scr_read(&ap->link, SCR_ERROR, &serr); 2620 sata_scr_write_flush(&ap->link, SCR_ERROR, serr); 2621 2622 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE); 2623 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { 2624 fis_cause = readl(port_mmio + FIS_IRQ_CAUSE); 2625 writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE); 2626 } 2627 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE); 2628 2629 if (edma_err_cause & EDMA_ERR_DEV) { 2630 /* 2631 * Device errors during FIS-based switching operation 2632 * require special handling. 2633 */ 2634 if (mv_handle_dev_err(ap, edma_err_cause)) 2635 return; 2636 } 2637 2638 qc = mv_get_active_qc(ap); 2639 ata_ehi_clear_desc(ehi); 2640 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x", 2641 edma_err_cause, pp->pp_flags); 2642 2643 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { 2644 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause); 2645 if (fis_cause & FIS_IRQ_CAUSE_AN) { 2646 u32 ec = edma_err_cause & 2647 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT); 2648 sata_async_notification(ap); 2649 if (!ec) 2650 return; /* Just an AN; no need for the nukes */ 2651 ata_ehi_push_desc(ehi, "SDB notify"); 2652 } 2653 } 2654 /* 2655 * All generations share these EDMA error cause bits: 2656 */ 2657 if (edma_err_cause & EDMA_ERR_DEV) { 2658 err_mask |= AC_ERR_DEV; 2659 action |= ATA_EH_RESET; 2660 ata_ehi_push_desc(ehi, "dev error"); 2661 } 2662 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | 2663 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | 2664 EDMA_ERR_INTRL_PAR)) { 2665 err_mask |= AC_ERR_ATA_BUS; 2666 action |= ATA_EH_RESET; 2667 ata_ehi_push_desc(ehi, "parity error"); 2668 } 2669 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) { 2670 ata_ehi_hotplugged(ehi); 2671 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? 2672 "dev disconnect" : "dev connect"); 2673 action |= ATA_EH_RESET; 2674 } 2675 2676 /* 2677 * Gen-I has a different SELF_DIS bit, 2678 * different FREEZE bits, and no SERR bit: 2679 */ 2680 if (IS_GEN_I(hpriv)) { 2681 eh_freeze_mask = EDMA_EH_FREEZE_5; 2682 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) { 2683 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 2684 ata_ehi_push_desc(ehi, "EDMA self-disable"); 2685 } 2686 } else { 2687 eh_freeze_mask = EDMA_EH_FREEZE; 2688 if (edma_err_cause & EDMA_ERR_SELF_DIS) { 2689 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 2690 ata_ehi_push_desc(ehi, "EDMA self-disable"); 2691 } 2692 if (edma_err_cause & EDMA_ERR_SERR) { 2693 ata_ehi_push_desc(ehi, "SError=%08x", serr); 2694 err_mask |= AC_ERR_ATA_BUS; 2695 action |= ATA_EH_RESET; 2696 } 2697 } 2698 2699 if (!err_mask) { 2700 err_mask = AC_ERR_OTHER; 2701 action |= ATA_EH_RESET; 2702 } 2703 2704 ehi->serror |= serr; 2705 ehi->action |= action; 2706 2707 if (qc) 2708 qc->err_mask |= err_mask; 2709 else 2710 ehi->err_mask |= err_mask; 2711 2712 if (err_mask == AC_ERR_DEV) { 2713 /* 2714 * Cannot do ata_port_freeze() here, 2715 * because it would kill PIO access, 2716 * which is needed for further diagnosis. 
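 * Instead, quiesce the port with mv_eh_freeze() (stop EDMA, mask
 * port IRQs) and abort the link, leaving PIO access usable for EH.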
2717 */ 2718 mv_eh_freeze(ap); 2719 abort = 1; 2720 } else if (edma_err_cause & eh_freeze_mask) { 2721 /* 2722 * Note to self: ata_port_freeze() calls ata_port_abort() 2723 */ 2724 ata_port_freeze(ap); 2725 } else { 2726 abort = 1; 2727 } 2728 2729 if (abort) { 2730 if (qc) 2731 ata_link_abort(qc->dev->link); 2732 else 2733 ata_port_abort(ap); 2734 } 2735 } 2736 2737 static bool mv_process_crpb_response(struct ata_port *ap, 2738 struct mv_crpb *response, unsigned int tag, int ncq_enabled) 2739 { 2740 u8 ata_status; 2741 u16 edma_status = le16_to_cpu(response->flags); 2742 2743 /* 2744 * edma_status from a response queue entry: 2745 * LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only). 2746 * MSB is saved ATA status from command completion. 2747 */ 2748 if (!ncq_enabled) { 2749 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV; 2750 if (err_cause) { 2751 /* 2752 * Error will be seen/handled by 2753 * mv_err_intr(). So do nothing at all here. 2754 */ 2755 return false; 2756 } 2757 } 2758 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT; 2759 if (!ac_err_mask(ata_status)) 2760 return true; 2761 /* else: leave it for mv_err_intr() */ 2762 return false; 2763 } 2764 2765 static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp) 2766 { 2767 void __iomem *port_mmio = mv_ap_base(ap); 2768 struct mv_host_priv *hpriv = ap->host->private_data; 2769 u32 in_index; 2770 bool work_done = false; 2771 u32 done_mask = 0; 2772 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN); 2773 2774 /* Get the hardware queue position index */ 2775 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR) 2776 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; 2777 2778 /* Process new responses from since the last time we looked */ 2779 while (in_index != pp->resp_idx) { 2780 unsigned int tag; 2781 struct mv_crpb *response = &pp->crpb[pp->resp_idx]; 2782 2783 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK; 2784 2785 if (IS_GEN_I(hpriv)) { 2786 /* 50xx: no NCQ, only one command active at a time */ 2787 tag = ap->link.active_tag; 2788 } else { 2789 /* Gen II/IIE: get command tag from CRPB entry */ 2790 tag = le16_to_cpu(response->id) & 0x1f; 2791 } 2792 if (mv_process_crpb_response(ap, response, tag, ncq_enabled)) 2793 done_mask |= 1 << tag; 2794 work_done = true; 2795 } 2796 2797 if (work_done) { 2798 ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask); 2799 2800 /* Update the software queue position index in hardware */ 2801 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | 2802 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT), 2803 port_mmio + EDMA_RSP_Q_OUT_PTR); 2804 } 2805 } 2806 2807 static void mv_port_intr(struct ata_port *ap, u32 port_cause) 2808 { 2809 struct mv_port_priv *pp; 2810 int edma_was_enabled; 2811 2812 /* 2813 * Grab a snapshot of the EDMA_EN flag setting, 2814 * so that we have a consistent view for this port, 2815 * even if something we call of our routines changes it. 2816 */ 2817 pp = ap->private_data; 2818 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN); 2819 /* 2820 * Process completed CRPB response(s) before other events. 2821 */ 2822 if (edma_was_enabled && (port_cause & DONE_IRQ)) { 2823 mv_process_crpb_entries(ap, pp); 2824 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) 2825 mv_handle_fbs_ncq_dev_err(ap); 2826 } 2827 /* 2828 * Handle chip-reported errors, or continue on to handle PIO. 
2829 */ 2830 if (unlikely(port_cause & ERR_IRQ)) { 2831 mv_err_intr(ap); 2832 } else if (!edma_was_enabled) { 2833 struct ata_queued_cmd *qc = mv_get_active_qc(ap); 2834 if (qc) 2835 ata_bmdma_port_intr(ap, qc); 2836 else 2837 mv_unexpected_intr(ap, edma_was_enabled); 2838 } 2839 } 2840 2841 /** 2842 * mv_host_intr - Handle all interrupts on the given host controller 2843 * @host: host specific structure 2844 * @main_irq_cause: Main interrupt cause register for the chip. 2845 * 2846 * LOCKING: 2847 * Inherited from caller. 2848 */ 2849 static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) 2850 { 2851 struct mv_host_priv *hpriv = host->private_data; 2852 void __iomem *mmio = hpriv->base, *hc_mmio; 2853 unsigned int handled = 0, port; 2854 2855 /* If asserted, clear the "all ports" IRQ coalescing bit */ 2856 if (main_irq_cause & ALL_PORTS_COAL_DONE) 2857 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE); 2858 2859 for (port = 0; port < hpriv->n_ports; port++) { 2860 struct ata_port *ap = host->ports[port]; 2861 unsigned int p, shift, hardport, port_cause; 2862 2863 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); 2864 /* 2865 * Each hc within the host has its own hc_irq_cause register, 2866 * where the interrupting ports bits get ack'd. 2867 */ 2868 if (hardport == 0) { /* first port on this hc ? */ 2869 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND; 2870 u32 port_mask, ack_irqs; 2871 /* 2872 * Skip this entire hc if nothing pending for any ports 2873 */ 2874 if (!hc_cause) { 2875 port += MV_PORTS_PER_HC - 1; 2876 continue; 2877 } 2878 /* 2879 * We don't need/want to read the hc_irq_cause register, 2880 * because doing so hurts performance, and 2881 * main_irq_cause already gives us everything we need. 2882 * 2883 * But we do have to *write* to the hc_irq_cause to ack 2884 * the ports that we are handling this time through. 2885 * 2886 * This requires that we create a bitmap for those 2887 * ports which interrupted us, and use that bitmap 2888 * to ack (only) those ports via hc_irq_cause. 
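 *
 * Each interrupting hardport contributes its DMA_IRQ and DEV_IRQ
 * bits, shifted by the hardport number, to the ack_irqs mask below.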
2889 */ 2890 ack_irqs = 0; 2891 if (hc_cause & PORTS_0_3_COAL_DONE) 2892 ack_irqs = HC_COAL_IRQ; 2893 for (p = 0; p < MV_PORTS_PER_HC; ++p) { 2894 if ((port + p) >= hpriv->n_ports) 2895 break; 2896 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2); 2897 if (hc_cause & port_mask) 2898 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p; 2899 } 2900 hc_mmio = mv_hc_base_from_port(mmio, port); 2901 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE); 2902 handled = 1; 2903 } 2904 /* 2905 * Handle interrupts signalled for this port: 2906 */ 2907 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ); 2908 if (port_cause) 2909 mv_port_intr(ap, port_cause); 2910 } 2911 return handled; 2912 } 2913 2914 static int mv_pci_error(struct ata_host *host, void __iomem *mmio) 2915 { 2916 struct mv_host_priv *hpriv = host->private_data; 2917 struct ata_port *ap; 2918 struct ata_queued_cmd *qc; 2919 struct ata_eh_info *ehi; 2920 unsigned int i, err_mask, printed = 0; 2921 u32 err_cause; 2922 2923 err_cause = readl(mmio + hpriv->irq_cause_offset); 2924 2925 dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause); 2926 2927 dev_dbg(host->dev, "%s: All regs @ PCI error\n", __func__); 2928 mv_dump_all_regs(mmio, to_pci_dev(host->dev)); 2929 2930 writelfl(0, mmio + hpriv->irq_cause_offset); 2931 2932 for (i = 0; i < host->n_ports; i++) { 2933 ap = host->ports[i]; 2934 if (!ata_link_offline(&ap->link)) { 2935 ehi = &ap->link.eh_info; 2936 ata_ehi_clear_desc(ehi); 2937 if (!printed++) 2938 ata_ehi_push_desc(ehi, 2939 "PCI err cause 0x%08x", err_cause); 2940 err_mask = AC_ERR_HOST_BUS; 2941 ehi->action = ATA_EH_RESET; 2942 qc = ata_qc_from_tag(ap, ap->link.active_tag); 2943 if (qc) 2944 qc->err_mask |= err_mask; 2945 else 2946 ehi->err_mask |= err_mask; 2947 2948 ata_port_freeze(ap); 2949 } 2950 } 2951 return 1; /* handled */ 2952 } 2953 2954 /** 2955 * mv_interrupt - Main interrupt event handler 2956 * @irq: unused 2957 * @dev_instance: private data; in this case the host structure 2958 * 2959 * Read the read only register to determine if any host 2960 * controllers have pending interrupts. If so, call lower level 2961 * routine to handle. Also check for PCI errors which are only 2962 * reported here. 2963 * 2964 * LOCKING: 2965 * This routine holds the host lock while processing pending 2966 * interrupts. 2967 */ 2968 static irqreturn_t mv_interrupt(int irq, void *dev_instance) 2969 { 2970 struct ata_host *host = dev_instance; 2971 struct mv_host_priv *hpriv = host->private_data; 2972 unsigned int handled = 0; 2973 int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI; 2974 u32 main_irq_cause, pending_irqs; 2975 2976 spin_lock(&host->lock); 2977 2978 /* for MSI: block new interrupts while in here */ 2979 if (using_msi) 2980 mv_write_main_irq_mask(0, hpriv); 2981 2982 main_irq_cause = readl(hpriv->main_irq_cause_addr); 2983 pending_irqs = main_irq_cause & hpriv->main_irq_mask; 2984 /* 2985 * Deal with cases where we either have nothing pending, or have read 2986 * a bogus register value which can indicate HW removal or PCI fault. 
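 * (An all-ones readback is the usual signature of a removed or
 * faulted device, so that value is treated as "nothing to do".)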
2987 */ 2988 if (pending_irqs && main_irq_cause != 0xffffffffU) { 2989 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv))) 2990 handled = mv_pci_error(host, hpriv->base); 2991 else 2992 handled = mv_host_intr(host, pending_irqs); 2993 } 2994 2995 /* for MSI: unmask; interrupt cause bits will retrigger now */ 2996 if (using_msi) 2997 mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv); 2998 2999 spin_unlock(&host->lock); 3000 3001 return IRQ_RETVAL(handled); 3002 } 3003 3004 static unsigned int mv5_scr_offset(unsigned int sc_reg_in) 3005 { 3006 unsigned int ofs; 3007 3008 switch (sc_reg_in) { 3009 case SCR_STATUS: 3010 case SCR_ERROR: 3011 case SCR_CONTROL: 3012 ofs = sc_reg_in * sizeof(u32); 3013 break; 3014 default: 3015 ofs = 0xffffffffU; 3016 break; 3017 } 3018 return ofs; 3019 } 3020 3021 static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val) 3022 { 3023 struct mv_host_priv *hpriv = link->ap->host->private_data; 3024 void __iomem *mmio = hpriv->base; 3025 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no); 3026 unsigned int ofs = mv5_scr_offset(sc_reg_in); 3027 3028 if (ofs != 0xffffffffU) { 3029 *val = readl(addr + ofs); 3030 return 0; 3031 } else 3032 return -EINVAL; 3033 } 3034 3035 static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) 3036 { 3037 struct mv_host_priv *hpriv = link->ap->host->private_data; 3038 void __iomem *mmio = hpriv->base; 3039 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no); 3040 unsigned int ofs = mv5_scr_offset(sc_reg_in); 3041 3042 if (ofs != 0xffffffffU) { 3043 writelfl(val, addr + ofs); 3044 return 0; 3045 } else 3046 return -EINVAL; 3047 } 3048 3049 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio) 3050 { 3051 struct pci_dev *pdev = to_pci_dev(host->dev); 3052 int early_5080; 3053 3054 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0); 3055 3056 if (!early_5080) { 3057 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL); 3058 tmp |= (1 << 0); 3059 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); 3060 } 3061 3062 mv_reset_pci_bus(host, mmio); 3063 } 3064 3065 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) 3066 { 3067 writel(0x0fcfffff, mmio + FLASH_CTL); 3068 } 3069 3070 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, 3071 void __iomem *mmio) 3072 { 3073 void __iomem *phy_mmio = mv5_phy_base(mmio, idx); 3074 u32 tmp; 3075 3076 tmp = readl(phy_mmio + MV5_PHY_MODE); 3077 3078 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */ 3079 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */ 3080 } 3081 3082 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) 3083 { 3084 u32 tmp; 3085 3086 writel(0, mmio + GPIO_PORT_CTL); 3087 3088 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */ 3089 3090 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL); 3091 tmp |= ~(1 << 0); 3092 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); 3093 } 3094 3095 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 3096 unsigned int port) 3097 { 3098 void __iomem *phy_mmio = mv5_phy_base(mmio, port); 3099 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5); 3100 u32 tmp; 3101 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0); 3102 3103 if (fix_apm_sq) { 3104 tmp = readl(phy_mmio + MV5_LTMODE); 3105 tmp |= (1 << 19); 3106 writel(tmp, phy_mmio + MV5_LTMODE); 3107 3108 tmp = readl(phy_mmio + MV5_PHY_CTL); 3109 tmp &= ~0x3; 3110 tmp |= 0x1; 3111 writel(tmp, phy_mmio + MV5_PHY_CTL); 3112 } 3113 3114 tmp = 
readl(phy_mmio + MV5_PHY_MODE); 3115 tmp &= ~mask; 3116 tmp |= hpriv->signal[port].pre; 3117 tmp |= hpriv->signal[port].amps; 3118 writel(tmp, phy_mmio + MV5_PHY_MODE); 3119 } 3120 3121 3122 #undef ZERO 3123 #define ZERO(reg) writel(0, port_mmio + (reg)) 3124 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, 3125 unsigned int port) 3126 { 3127 void __iomem *port_mmio = mv_port_base(mmio, port); 3128 3129 mv_reset_channel(hpriv, mmio, port); 3130 3131 ZERO(0x028); /* command */ 3132 writel(0x11f, port_mmio + EDMA_CFG); 3133 ZERO(0x004); /* timer */ 3134 ZERO(0x008); /* irq err cause */ 3135 ZERO(0x00c); /* irq err mask */ 3136 ZERO(0x010); /* rq bah */ 3137 ZERO(0x014); /* rq inp */ 3138 ZERO(0x018); /* rq outp */ 3139 ZERO(0x01c); /* respq bah */ 3140 ZERO(0x024); /* respq outp */ 3141 ZERO(0x020); /* respq inp */ 3142 ZERO(0x02c); /* test control */ 3143 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT); 3144 } 3145 #undef ZERO 3146 3147 #define ZERO(reg) writel(0, hc_mmio + (reg)) 3148 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 3149 unsigned int hc) 3150 { 3151 void __iomem *hc_mmio = mv_hc_base(mmio, hc); 3152 u32 tmp; 3153 3154 ZERO(0x00c); 3155 ZERO(0x010); 3156 ZERO(0x014); 3157 ZERO(0x018); 3158 3159 tmp = readl(hc_mmio + 0x20); 3160 tmp &= 0x1c1c1c1c; 3161 tmp |= 0x03030303; 3162 writel(tmp, hc_mmio + 0x20); 3163 } 3164 #undef ZERO 3165 3166 static int mv5_reset_hc(struct ata_host *host, void __iomem *mmio, 3167 unsigned int n_hc) 3168 { 3169 struct mv_host_priv *hpriv = host->private_data; 3170 unsigned int hc, port; 3171 3172 for (hc = 0; hc < n_hc; hc++) { 3173 for (port = 0; port < MV_PORTS_PER_HC; port++) 3174 mv5_reset_hc_port(hpriv, mmio, 3175 (hc * MV_PORTS_PER_HC) + port); 3176 3177 mv5_reset_one_hc(hpriv, mmio, hc); 3178 } 3179 3180 return 0; 3181 } 3182 3183 #undef ZERO 3184 #define ZERO(reg) writel(0, mmio + (reg)) 3185 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio) 3186 { 3187 struct mv_host_priv *hpriv = host->private_data; 3188 u32 tmp; 3189 3190 tmp = readl(mmio + MV_PCI_MODE); 3191 tmp &= 0xff00ffff; 3192 writel(tmp, mmio + MV_PCI_MODE); 3193 3194 ZERO(MV_PCI_DISC_TIMER); 3195 ZERO(MV_PCI_MSI_TRIGGER); 3196 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT); 3197 ZERO(MV_PCI_SERR_MASK); 3198 ZERO(hpriv->irq_cause_offset); 3199 ZERO(hpriv->irq_mask_offset); 3200 ZERO(MV_PCI_ERR_LOW_ADDRESS); 3201 ZERO(MV_PCI_ERR_HIGH_ADDRESS); 3202 ZERO(MV_PCI_ERR_ATTRIBUTE); 3203 ZERO(MV_PCI_ERR_COMMAND); 3204 } 3205 #undef ZERO 3206 3207 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) 3208 { 3209 u32 tmp; 3210 3211 mv5_reset_flash(hpriv, mmio); 3212 3213 tmp = readl(mmio + GPIO_PORT_CTL); 3214 tmp &= 0x3; 3215 tmp |= (1 << 5) | (1 << 6); 3216 writel(tmp, mmio + GPIO_PORT_CTL); 3217 } 3218 3219 /* 3220 * mv6_reset_hc - Perform the 6xxx global soft reset 3221 * @mmio: base address of the HBA 3222 * 3223 * This routine only applies to 6xxx parts. 3224 * 3225 * LOCKING: 3226 * Inherited from caller. 3227 */ 3228 static int mv6_reset_hc(struct ata_host *host, void __iomem *mmio, 3229 unsigned int n_hc) 3230 { 3231 void __iomem *reg = mmio + PCI_MAIN_CMD_STS; 3232 int i, rc = 0; 3233 u32 t; 3234 3235 /* Following procedure defined in PCI "main command and status 3236 * register" table. 
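 * In short: stop the PCI master and wait for it to drain, assert
 * GLOB_SFT_RST, then clear the reset and re-enable the PCI master.
 * Each step is polled with a bounded number of 1 usec delays.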
3237 */ 3238 t = readl(reg); 3239 writel(t | STOP_PCI_MASTER, reg); 3240 3241 for (i = 0; i < 1000; i++) { 3242 udelay(1); 3243 t = readl(reg); 3244 if (PCI_MASTER_EMPTY & t) 3245 break; 3246 } 3247 if (!(PCI_MASTER_EMPTY & t)) { 3248 dev_err(host->dev, "PCI master won't flush\n"); 3249 rc = 1; 3250 goto done; 3251 } 3252 3253 /* set reset */ 3254 i = 5; 3255 do { 3256 writel(t | GLOB_SFT_RST, reg); 3257 t = readl(reg); 3258 udelay(1); 3259 } while (!(GLOB_SFT_RST & t) && (i-- > 0)); 3260 3261 if (!(GLOB_SFT_RST & t)) { 3262 dev_err(host->dev, "can't set global reset\n"); 3263 rc = 1; 3264 goto done; 3265 } 3266 3267 /* clear reset and *reenable the PCI master* (not mentioned in spec) */ 3268 i = 5; 3269 do { 3270 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg); 3271 t = readl(reg); 3272 udelay(1); 3273 } while ((GLOB_SFT_RST & t) && (i-- > 0)); 3274 3275 if (GLOB_SFT_RST & t) { 3276 dev_err(host->dev, "can't clear global reset\n"); 3277 rc = 1; 3278 } 3279 done: 3280 return rc; 3281 } 3282 3283 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, 3284 void __iomem *mmio) 3285 { 3286 void __iomem *port_mmio; 3287 u32 tmp; 3288 3289 tmp = readl(mmio + RESET_CFG); 3290 if ((tmp & (1 << 0)) == 0) { 3291 hpriv->signal[idx].amps = 0x7 << 8; 3292 hpriv->signal[idx].pre = 0x1 << 5; 3293 return; 3294 } 3295 3296 port_mmio = mv_port_base(mmio, idx); 3297 tmp = readl(port_mmio + PHY_MODE2); 3298 3299 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */ 3300 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */ 3301 } 3302 3303 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) 3304 { 3305 writel(0x00000060, mmio + GPIO_PORT_CTL); 3306 } 3307 3308 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 3309 unsigned int port) 3310 { 3311 void __iomem *port_mmio = mv_port_base(mmio, port); 3312 3313 u32 hp_flags = hpriv->hp_flags; 3314 int fix_phy_mode2 = 3315 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); 3316 int fix_phy_mode4 = 3317 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); 3318 u32 m2, m3; 3319 3320 if (fix_phy_mode2) { 3321 m2 = readl(port_mmio + PHY_MODE2); 3322 m2 &= ~(1 << 16); 3323 m2 |= (1 << 31); 3324 writel(m2, port_mmio + PHY_MODE2); 3325 3326 udelay(200); 3327 3328 m2 = readl(port_mmio + PHY_MODE2); 3329 m2 &= ~((1 << 16) | (1 << 31)); 3330 writel(m2, port_mmio + PHY_MODE2); 3331 3332 udelay(200); 3333 } 3334 3335 /* 3336 * Gen-II/IIe PHY_MODE3 errata RM#2: 3337 * Achieves better receiver noise performance than the h/w default: 3338 */ 3339 m3 = readl(port_mmio + PHY_MODE3); 3340 m3 = (m3 & 0x1f) | (0x5555601 << 5); 3341 3342 /* Guideline 88F5182 (GL# SATA-S11) */ 3343 if (IS_SOC(hpriv)) 3344 m3 &= ~0x1c; 3345 3346 if (fix_phy_mode4) { 3347 u32 m4 = readl(port_mmio + PHY_MODE4); 3348 /* 3349 * Enforce reserved-bit restrictions on GenIIe devices only. 3350 * For earlier chipsets, force only the internal config field 3351 * (workaround for errata FEr SATA#10 part 1). 3352 */ 3353 if (IS_GEN_IIE(hpriv)) 3354 m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES; 3355 else 3356 m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE; 3357 writel(m4, port_mmio + PHY_MODE4); 3358 } 3359 /* 3360 * Workaround for 60x1-B2 errata SATA#13: 3361 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3, 3362 * so we must always rewrite PHY_MODE3 after PHY_MODE4. 3363 * Or ensure we use writelfl() when writing PHY_MODE4. 
3364 */ 3365 writel(m3, port_mmio + PHY_MODE3); 3366 3367 /* Revert values of pre-emphasis and signal amps to the saved ones */ 3368 m2 = readl(port_mmio + PHY_MODE2); 3369 3370 m2 &= ~MV_M2_PREAMP_MASK; 3371 m2 |= hpriv->signal[port].amps; 3372 m2 |= hpriv->signal[port].pre; 3373 m2 &= ~(1 << 16); 3374 3375 /* according to mvSata 3.6.1, some IIE values are fixed */ 3376 if (IS_GEN_IIE(hpriv)) { 3377 m2 &= ~0xC30FF01F; 3378 m2 |= 0x0000900F; 3379 } 3380 3381 writel(m2, port_mmio + PHY_MODE2); 3382 } 3383 3384 /* TODO: use the generic LED interface to configure the SATA Presence */ 3385 /* & Activity LEDs on the board */ 3386 static void mv_soc_enable_leds(struct mv_host_priv *hpriv, 3387 void __iomem *mmio) 3388 { 3389 return; 3390 } 3391 3392 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx, 3393 void __iomem *mmio) 3394 { 3395 void __iomem *port_mmio; 3396 u32 tmp; 3397 3398 port_mmio = mv_port_base(mmio, idx); 3399 tmp = readl(port_mmio + PHY_MODE2); 3400 3401 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */ 3402 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */ 3403 } 3404 3405 #undef ZERO 3406 #define ZERO(reg) writel(0, port_mmio + (reg)) 3407 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv, 3408 void __iomem *mmio, unsigned int port) 3409 { 3410 void __iomem *port_mmio = mv_port_base(mmio, port); 3411 3412 mv_reset_channel(hpriv, mmio, port); 3413 3414 ZERO(0x028); /* command */ 3415 writel(0x101f, port_mmio + EDMA_CFG); 3416 ZERO(0x004); /* timer */ 3417 ZERO(0x008); /* irq err cause */ 3418 ZERO(0x00c); /* irq err mask */ 3419 ZERO(0x010); /* rq bah */ 3420 ZERO(0x014); /* rq inp */ 3421 ZERO(0x018); /* rq outp */ 3422 ZERO(0x01c); /* respq bah */ 3423 ZERO(0x024); /* respq outp */ 3424 ZERO(0x020); /* respq inp */ 3425 ZERO(0x02c); /* test control */ 3426 writel(0x800, port_mmio + EDMA_IORDY_TMOUT); 3427 } 3428 3429 #undef ZERO 3430 3431 #define ZERO(reg) writel(0, hc_mmio + (reg)) 3432 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv, 3433 void __iomem *mmio) 3434 { 3435 void __iomem *hc_mmio = mv_hc_base(mmio, 0); 3436 3437 ZERO(0x00c); 3438 ZERO(0x010); 3439 ZERO(0x014); 3440 3441 } 3442 3443 #undef ZERO 3444 3445 static int mv_soc_reset_hc(struct ata_host *host, 3446 void __iomem *mmio, unsigned int n_hc) 3447 { 3448 struct mv_host_priv *hpriv = host->private_data; 3449 unsigned int port; 3450 3451 for (port = 0; port < hpriv->n_ports; port++) 3452 mv_soc_reset_hc_port(hpriv, mmio, port); 3453 3454 mv_soc_reset_one_hc(hpriv, mmio); 3455 3456 return 0; 3457 } 3458 3459 static void mv_soc_reset_flash(struct mv_host_priv *hpriv, 3460 void __iomem *mmio) 3461 { 3462 return; 3463 } 3464 3465 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio) 3466 { 3467 return; 3468 } 3469 3470 static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv, 3471 void __iomem *mmio, unsigned int port) 3472 { 3473 void __iomem *port_mmio = mv_port_base(mmio, port); 3474 u32 reg; 3475 3476 reg = readl(port_mmio + PHY_MODE3); 3477 reg &= ~(0x3 << 27); /* SELMUPF (bits 28:27) to 1 */ 3478 reg |= (0x1 << 27); 3479 reg &= ~(0x3 << 29); /* SELMUPI (bits 30:29) to 1 */ 3480 reg |= (0x1 << 29); 3481 writel(reg, port_mmio + PHY_MODE3); 3482 3483 reg = readl(port_mmio + PHY_MODE4); 3484 reg &= ~0x1; /* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */ 3485 reg |= (0x1 << 16); 3486 writel(reg, port_mmio + PHY_MODE4); 3487 3488 reg = readl(port_mmio + PHY_MODE9_GEN2); 3489 reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */ 3490 reg |= 0x8; 3491 reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */ 3492 writel(reg, port_mmio + PHY_MODE9_GEN2); 3493 3494 reg = readl(port_mmio + PHY_MODE9_GEN1); 3495 reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */ 3496 reg |= 0x8; 3497 reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */ 3498 writel(reg, port_mmio + PHY_MODE9_GEN1); 3499 } 3500 3501 /* 3502 * soc_is_65n - check if the SoC is a 65 nm device 3503 * 3504 * Detect the type of the SoC by reading the PHYCFG_OFS register. 3505 * This register exists only in the 65 nm devices and holds a non-zero 3506 * value there; reading it on older devices returns 0. 3507 */ 3508 static bool soc_is_65n(struct mv_host_priv *hpriv) 3509 { 3510 void __iomem *port0_mmio = mv_port_base(hpriv->base, 0); 3511 3512 if (readl(port0_mmio + PHYCFG_OFS)) 3513 return true; 3514 return false; 3515 } 3516 3517 static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i) 3518 { 3519 u32 ifcfg = readl(port_mmio + SATA_IFCFG); 3520 3521 ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */ 3522 if (want_gen2i) 3523 ifcfg |= (1 << 7); /* enable gen2i speed */ 3524 writelfl(ifcfg, port_mmio + SATA_IFCFG); 3525 } 3526 3527 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, 3528 unsigned int port_no) 3529 { 3530 void __iomem *port_mmio = mv_port_base(mmio, port_no); 3531 3532 /* 3533 * The datasheet warns against setting EDMA_RESET when EDMA is active 3534 * (but doesn't say what the problem might be). So we first try 3535 * to disable the EDMA engine before doing the EDMA_RESET operation. 3536 */ 3537 mv_stop_edma_engine(port_mmio); 3538 writelfl(EDMA_RESET, port_mmio + EDMA_CMD); 3539 3540 if (!IS_GEN_I(hpriv)) { 3541 /* Enable 3.0gb/s link speed: this survives EDMA_RESET */ 3542 mv_setup_ifcfg(port_mmio, 1); 3543 } 3544 /* 3545 * Strobing EDMA_RESET here causes a hard reset of the SATA transport, 3546 * link, and physical layers. It resets all SATA interface registers 3547 * (except for SATA_IFCFG), and issues a COMRESET to the dev.
3548 */ 3549 writelfl(EDMA_RESET, port_mmio + EDMA_CMD); 3550 udelay(25); /* allow reset propagation */ 3551 writelfl(0, port_mmio + EDMA_CMD); 3552 3553 hpriv->ops->phy_errata(hpriv, mmio, port_no); 3554 3555 if (IS_GEN_I(hpriv)) 3556 usleep_range(500, 1000); 3557 } 3558 3559 static void mv_pmp_select(struct ata_port *ap, int pmp) 3560 { 3561 if (sata_pmp_supported(ap)) { 3562 void __iomem *port_mmio = mv_ap_base(ap); 3563 u32 reg = readl(port_mmio + SATA_IFCTL); 3564 int old = reg & 0xf; 3565 3566 if (old != pmp) { 3567 reg = (reg & ~0xf) | pmp; 3568 writelfl(reg, port_mmio + SATA_IFCTL); 3569 } 3570 } 3571 } 3572 3573 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, 3574 unsigned long deadline) 3575 { 3576 mv_pmp_select(link->ap, sata_srst_pmp(link)); 3577 return sata_std_hardreset(link, class, deadline); 3578 } 3579 3580 static int mv_softreset(struct ata_link *link, unsigned int *class, 3581 unsigned long deadline) 3582 { 3583 mv_pmp_select(link->ap, sata_srst_pmp(link)); 3584 return ata_sff_softreset(link, class, deadline); 3585 } 3586 3587 static int mv_hardreset(struct ata_link *link, unsigned int *class, 3588 unsigned long deadline) 3589 { 3590 struct ata_port *ap = link->ap; 3591 struct mv_host_priv *hpriv = ap->host->private_data; 3592 struct mv_port_priv *pp = ap->private_data; 3593 void __iomem *mmio = hpriv->base; 3594 int rc, attempts = 0, extra = 0; 3595 u32 sstatus; 3596 bool online; 3597 3598 mv_reset_channel(hpriv, mmio, ap->port_no); 3599 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 3600 pp->pp_flags &= 3601 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY); 3602 3603 /* Workaround for errata FEr SATA#10 (part 2) */ 3604 do { 3605 const unsigned int *timing = 3606 sata_ehc_deb_timing(&link->eh_context); 3607 3608 rc = sata_link_hardreset(link, timing, deadline + extra, 3609 &online, NULL); 3610 rc = online ? -EAGAIN : rc; 3611 if (rc) 3612 return rc; 3613 sata_scr_read(link, SCR_STATUS, &sstatus); 3614 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) { 3615 /* Force 1.5gb/s link speed and try again */ 3616 mv_setup_ifcfg(mv_ap_base(ap), 0); 3617 if (time_after(jiffies + HZ, deadline)) 3618 extra = HZ; /* only extend it once, max */ 3619 } 3620 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123); 3621 mv_save_cached_regs(ap); 3622 mv_edma_cfg(ap, 0, 0); 3623 3624 return rc; 3625 } 3626 3627 static void mv_eh_freeze(struct ata_port *ap) 3628 { 3629 mv_stop_edma(ap); 3630 mv_enable_port_irqs(ap, 0); 3631 } 3632 3633 static void mv_eh_thaw(struct ata_port *ap) 3634 { 3635 struct mv_host_priv *hpriv = ap->host->private_data; 3636 unsigned int port = ap->port_no; 3637 unsigned int hardport = mv_hardport_from_port(port); 3638 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port); 3639 void __iomem *port_mmio = mv_ap_base(ap); 3640 u32 hc_irq_cause; 3641 3642 /* clear EDMA errors on this port */ 3643 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE); 3644 3645 /* clear pending irq events */ 3646 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport); 3647 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE); 3648 3649 mv_enable_port_irqs(ap, ERR_IRQ); 3650 } 3651 3652 /** 3653 * mv_port_init - Perform some early initialization on a single port. 3654 * @port: libata data structure storing shadow register addresses 3655 * @port_mmio: base address of the port 3656 * 3657 * Initialize shadow register mmio addresses, clear outstanding 3658 * interrupts on the port, and unmask interrupts for the future 3659 * start of the port. 
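 *
 * (The shadow taskfile registers sit at 32-bit strides from SHD_BLK,
 * which is why the ATA_REG_* offsets are scaled by sizeof(u32) below.)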
3660 * 3661 * LOCKING: 3662 * Inherited from caller. 3663 */ 3664 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) 3665 { 3666 void __iomem *serr, *shd_base = port_mmio + SHD_BLK; 3667 3668 /* PIO related setup 3669 */ 3670 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA); 3671 port->error_addr = 3672 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR); 3673 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT); 3674 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL); 3675 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM); 3676 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH); 3677 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE); 3678 port->status_addr = 3679 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS); 3680 /* special case: control/altstatus doesn't have ATA_REG_ address */ 3681 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST; 3682 3683 /* Clear any currently outstanding port interrupt conditions */ 3684 serr = port_mmio + mv_scr_offset(SCR_ERROR); 3685 writelfl(readl(serr), serr); 3686 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE); 3687 3688 /* unmask all non-transient EDMA error interrupts */ 3689 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK); 3690 } 3691 3692 static unsigned int mv_in_pcix_mode(struct ata_host *host) 3693 { 3694 struct mv_host_priv *hpriv = host->private_data; 3695 void __iomem *mmio = hpriv->base; 3696 u32 reg; 3697 3698 if (IS_SOC(hpriv) || !IS_PCIE(hpriv)) 3699 return 0; /* not PCI-X capable */ 3700 reg = readl(mmio + MV_PCI_MODE); 3701 if ((reg & MV_PCI_MODE_MASK) == 0) 3702 return 0; /* conventional PCI mode */ 3703 return 1; /* chip is in PCI-X mode */ 3704 } 3705 3706 static int mv_pci_cut_through_okay(struct ata_host *host) 3707 { 3708 struct mv_host_priv *hpriv = host->private_data; 3709 void __iomem *mmio = hpriv->base; 3710 u32 reg; 3711 3712 if (!mv_in_pcix_mode(host)) { 3713 reg = readl(mmio + MV_PCI_COMMAND); 3714 if (reg & MV_PCI_COMMAND_MRDTRIG) 3715 return 0; /* not okay */ 3716 } 3717 return 1; /* okay */ 3718 } 3719 3720 static void mv_60x1b2_errata_pci7(struct ata_host *host) 3721 { 3722 struct mv_host_priv *hpriv = host->private_data; 3723 void __iomem *mmio = hpriv->base; 3724 3725 /* workaround for 60x1-B2 errata PCI#7 */ 3726 if (mv_in_pcix_mode(host)) { 3727 u32 reg = readl(mmio + MV_PCI_COMMAND); 3728 writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND); 3729 } 3730 } 3731 3732 static int mv_chip_id(struct ata_host *host, unsigned int board_idx) 3733 { 3734 struct pci_dev *pdev = to_pci_dev(host->dev); 3735 struct mv_host_priv *hpriv = host->private_data; 3736 u32 hp_flags = hpriv->hp_flags; 3737 3738 switch (board_idx) { 3739 case chip_5080: 3740 hpriv->ops = &mv5xxx_ops; 3741 hp_flags |= MV_HP_GEN_I; 3742 3743 switch (pdev->revision) { 3744 case 0x1: 3745 hp_flags |= MV_HP_ERRATA_50XXB0; 3746 break; 3747 case 0x3: 3748 hp_flags |= MV_HP_ERRATA_50XXB2; 3749 break; 3750 default: 3751 dev_warn(&pdev->dev, 3752 "Applying 50XXB2 workarounds to unknown rev\n"); 3753 hp_flags |= MV_HP_ERRATA_50XXB2; 3754 break; 3755 } 3756 break; 3757 3758 case chip_504x: 3759 case chip_508x: 3760 hpriv->ops = &mv5xxx_ops; 3761 hp_flags |= MV_HP_GEN_I; 3762 3763 switch (pdev->revision) { 3764 case 0x0: 3765 hp_flags |= MV_HP_ERRATA_50XXB0; 3766 break; 3767 case 0x3: 3768 hp_flags |= MV_HP_ERRATA_50XXB2; 3769 break; 3770 default: 3771 dev_warn(&pdev->dev, 3772 "Applying B2 workarounds to unknown rev\n"); 3773 hp_flags |= 
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			mv_60x1b2_errata_pci7(host);
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			dev_warn(&pdev->dev, "Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			dev_warn(&pdev->dev, "For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		fallthrough;
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;
		if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
			hp_flags |= MV_HP_CUT_THROUGH;

		switch (pdev->revision) {
		case 0x2: /* Rev.B0: the first/only public release */
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		if (soc_is_65n(hpriv))
			hpriv->ops = &mv_soc_65n_ops;
		else
			hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
			MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_alert(host->dev, "BUG: invalid board index %u\n", board_idx);
		return -EINVAL;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_offset	= PCIE_IRQ_CAUSE;
		hpriv->irq_mask_offset	= PCIE_IRQ_MASK;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_offset	= PCI_IRQ_CAUSE;
		hpriv->irq_mask_offset	= PCI_IRQ_MASK;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}

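/*
 * Worked example (hypothetical drive, not from the original code): for the
 * RocketRAID behaviour described in mv_chip_id() above, the BIOS places its
 * RAID metadata at the capacity rounded down to a 0x100000-sector (512 MiB)
 * boundary.  A disk reporting 1953525168 sectors (a nominal 1 TB drive)
 * gives 1953525168 & ~0xfffff = 1953497088, i.e. roughly 14 MiB short of the
 * end of the disk -- hence the advice above to keep clear of the last couple
 * of gigabytes.
 */
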
/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 *	Inherited from caller.
 */
static int mv_init_host(struct ata_host *host)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, hpriv->board_idx);
	if (rc)
		goto done;

	if (IS_SOC(hpriv)) {
		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
		hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK;
	} else {
		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK;
	}

	/* initialize shadow irq mask with register's value */
	hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);

	/* global interrupt mask: 0 == mask everything */
	mv_set_main_irq_mask(host, ~0, 0);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		if (hpriv->ops->read_preamp)
			hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(host, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		dev_dbg(host->dev, "HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG),
			readl(hc_mmio + HC_IRQ_CAUSE));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE);
	}

	if (!IS_SOC(hpriv)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_offset);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
	}

	/*
	 * enable only global host interrupts for now.
	 * The per-port interrupts get done later as ports are set up.
	 */
	mv_set_main_irq_mask(host, 0, PCI_ERR);
	mv_set_irq_coalescing(host, irq_coalescing_io_count,
			      irq_coalescing_usecs);
done:
	return rc;
}

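/*
 * Orientation note (summary of mv_init_host() above, not additional code):
 * the bring-up order is: identify the chip, mask all main interrupts, run
 * the per-generation hooks (read_preamp, reset_hc, reset_flash, reset_bus,
 * enable_leds), initialize each port's shadow registers, clear stale HC and
 * host interrupt causes, and only then unmask PCI_ERR plus whatever IRQ
 * coalescing the module parameters requested.  Per-port interrupts stay
 * masked until the ports are brought up later.
 */
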
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					    MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					    MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}

static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 const struct mbus_dram_target_info *dram)
{
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
			(cs->mbus_attr << 8) |
			(dram->mbus_dram_target_id << 4) | 1,
			hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}

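/*
 * Illustration (hypothetical values, not from the original code): each
 * WINDOW_CTRL word written above packs
 *	((size - 1) & 0xffff0000) | (attr << 8) | (target << 4) | 1
 * so a 256 MB chip-select with mbus_attr 0x0e on DRAM target 0 would be
 * programmed as 0x0fff0e01, with its base address going into the matching
 * WINDOW_BASE register.
 */
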
/**
 * mv_platform_probe - handle a positive probe of an SoC Marvell host
 * @pdev: platform device found
 *
 * LOCKING:
 *	Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	const struct mv_sata_platform_data *mv_platform_data;
	const struct mbus_dram_target_info *dram;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports = 0, irq = 0;
	int rc;
	int port;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/*
	 * Simple resource validation.
	 */
	if (unlikely(pdev->num_resources != 1)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	if (pdev->dev.of_node) {
		rc = of_property_read_u32(pdev->dev.of_node, "nr-ports",
					  &n_ports);
		if (rc) {
			dev_err(&pdev->dev,
				"error parsing nr-ports property: %d\n", rc);
			return rc;
		}

		if (n_ports <= 0) {
			dev_err(&pdev->dev, "nr-ports must be positive: %d\n",
				n_ports);
			return -EINVAL;
		}

		irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	} else {
		mv_platform_data = dev_get_platdata(&pdev->dev);
		n_ports = mv_platform_data->n_ports;
		irq = platform_get_irq(pdev, 0);
	}
	if (irq < 0)
		return irq;
	if (!irq)
		return -EINVAL;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	hpriv->port_clks = devm_kcalloc(&pdev->dev,
					n_ports, sizeof(struct clk *),
					GFP_KERNEL);
	if (!hpriv->port_clks)
		return -ENOMEM;
	hpriv->port_phys = devm_kcalloc(&pdev->dev,
					n_ports, sizeof(struct phy *),
					GFP_KERNEL);
	if (!hpriv->port_phys)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->board_idx = chip_soc;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   resource_size(res));
	if (!hpriv->base)
		return -ENOMEM;

	hpriv->base -= SATAHC0_REG_BASE;

	hpriv->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(hpriv->clk))
		dev_notice(&pdev->dev, "cannot get optional clkdev\n");
	else
		clk_prepare_enable(hpriv->clk);

	for (port = 0; port < n_ports; port++) {
		char port_number[16];
		sprintf(port_number, "%d", port);
		hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
		if (!IS_ERR(hpriv->port_clks[port]))
			clk_prepare_enable(hpriv->port_clks[port]);

		sprintf(port_number, "port%d", port);
		hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
							       port_number);
		if (IS_ERR(hpriv->port_phys[port])) {
			rc = PTR_ERR(hpriv->port_phys[port]);
			hpriv->port_phys[port] = NULL;
			if (rc != -EPROBE_DEFER)
				dev_warn(&pdev->dev, "error getting phy %d", rc);

			/* Cleanup only the initialized ports */
			hpriv->n_ports = port;
			goto err;
		} else
			phy_power_on(hpriv->port_phys[port]);
	}

	/* All the ports have been initialized */
	hpriv->n_ports = n_ports;

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_conf_mbus_windows(hpriv, dram);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		goto err;

	/*
	 * To allow disk hotplug on Armada 370/XP SoCs, the PHY speed must be
	 * updated in the LP_PHY_CTL register.
	 */
	if (pdev->dev.of_node &&
		of_device_is_compatible(pdev->dev.of_node,
					"marvell,armada-370-sata"))
		hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL;

	/* initialize adapter */
	rc = mv_init_host(host);
	if (rc)
		goto err;

	dev_info(&pdev->dev, "slots %u ports %d\n",
		 (unsigned)MV_MAX_Q_DEPTH, host->n_ports);

	rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht);
	if (!rc)
		return 0;

err:
	if (!IS_ERR(hpriv->clk)) {
		clk_disable_unprepare(hpriv->clk);
		clk_put(hpriv->clk);
	}
	for (port = 0; port < hpriv->n_ports; port++) {
		if (!IS_ERR(hpriv->port_clks[port])) {
			clk_disable_unprepare(hpriv->port_clks[port]);
			clk_put(hpriv->port_clks[port]);
		}
		phy_power_off(hpriv->port_phys[port]);
	}

	return rc;
}

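/*
 * Note on the probe code above (descriptive only): the optional per-port
 * resources follow a simple naming convention -- clk_get() is called with
 * connection ids "0", "1", ... and devm_phy_optional_get() with names
 * "port0", "port1", ...  A board that provides neither simply falls through,
 * since a missing clock or PHY is tolerated; only a real PHY error (e.g.
 * -EPROBE_DEFER) aborts the probe.
 */
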
/*
 *
 * mv_platform_remove - unplug a platform interface
 * @pdev: platform device
 *
 * A platform bus SATA device has been unplugged.  Perform the needed
 * cleanup.  Also called on module unload for any active devices.
 */
static void mv_platform_remove(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);
	struct mv_host_priv *hpriv = host->private_data;
	int port;
	ata_host_detach(host);

	if (!IS_ERR(hpriv->clk)) {
		clk_disable_unprepare(hpriv->clk);
		clk_put(hpriv->clk);
	}
	for (port = 0; port < host->n_ports; port++) {
		if (!IS_ERR(hpriv->port_clks[port])) {
			clk_disable_unprepare(hpriv->port_clks[port]);
			clk_put(hpriv->port_clks[port]);
		}
		phy_power_off(hpriv->port_phys[port]);
	}
}

#ifdef CONFIG_PM_SLEEP
static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct ata_host *host = platform_get_drvdata(pdev);

	if (host)
		ata_host_suspend(host, state);
	return 0;
}

static int mv_platform_resume(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);
	const struct mbus_dram_target_info *dram;
	int ret;

	if (host) {
		struct mv_host_priv *hpriv = host->private_data;

		/*
		 * (Re-)program MBUS remapping windows if we are asked to.
		 */
		dram = mv_mbus_dram_info();
		if (dram)
			mv_conf_mbus_windows(hpriv, dram);

		/* initialize adapter */
		ret = mv_init_host(host);
		if (ret) {
			dev_err(&pdev->dev, "Error during HW init\n");
			return ret;
		}
		ata_host_resume(host);
	}

	return 0;
}
#else
#define mv_platform_suspend NULL
#define mv_platform_resume NULL
#endif

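/*
 * Note (assumption made explicit, not from the original code): the resume
 * path above re-runs the full mv_init_host() sequence and re-programs the
 * MBUS windows instead of restoring saved register state, on the assumption
 * that the controller loses its configuration across suspend.
 */
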
#ifdef CONFIG_OF
static const struct of_device_id mv_sata_dt_ids[] = {
	{ .compatible = "marvell,armada-370-sata", },
	{ .compatible = "marvell,orion-sata", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
#endif

static struct platform_driver mv_platform_driver = {
	.probe		= mv_platform_probe,
	.remove_new	= mv_platform_remove,
	.suspend	= mv_platform_suspend,
	.resume		= mv_platform_resume,
	.driver		= {
		.name = DRV_NAME,
		.of_match_table = of_match_ptr(mv_sata_dt_ids),
	},
};


#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int mv_pci_device_resume(struct pci_dev *pdev);
#endif

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= mv_pci_device_resume,
#endif

};
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);

/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * LOCKING:
 *	Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the PCI device class byte, so we can report whether the
	 * chip presents itself as a SCSI or a RAID class controller.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 *	Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, port, rc;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;
	hpriv->board_idx = board_idx;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_err(&pdev->dev, "DMA enable failed\n");
		return rc;
	}

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(hpriv->base, port);
		unsigned int offset = port_mmio - hpriv->base;

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	/* initialize adapter */
	rc = mv_init_host(host);
	if (rc)
		return rc;

	/* Enable message-signaled interrupts (MSI), if requested */
	if (msi && pci_enable_msi(pdev) == 0)
		hpriv->hp_flags |= MV_HP_FLAG_MSI;

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}

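/*
 * Orientation note (summary of mv_pci_init_one() above, not additional
 * code): the PCI probe sizes the host from the port_info flags, maps only
 * MV_PRIMARY_BAR, requests a 64-bit DMA mask, creates the CRQB/CRPB/SG DMA
 * pools, runs the common mv_init_host() bring-up, optionally switches to MSI
 * when the "msi" module parameter is set, and finally activates the host
 * with the Gen-I or Gen-II scsi_host_template as appropriate.
 */
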
#ifdef CONFIG_PM_SLEEP
static int mv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host);
	if (rc)
		return rc;

	ata_host_resume(host);

	return 0;
}
#endif
#endif

static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

module_init(mv_init);
module_exit(mv_exit);