// SPDX-License-Identifier: GPL-2.0+
/*
 * Tegra30 External Memory Controller driver
 *
 * Based on downstream driver from NVIDIA and tegra124-emc.c
 * Copyright (C) 2011-2014 NVIDIA Corporation
 *
 * Author: Dmitry Osipenko <digetx@gmail.com>
 * Copyright (C) 2019 GRATE-DRIVER project
 */

#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sort.h>
#include <linux/types.h>

#include <soc/tegra/fuse.h>

#include "mc.h"

#define EMC_INTSTATUS				0x000
#define EMC_INTMASK				0x004
#define EMC_DBG					0x008
#define EMC_CFG					0x00c
#define EMC_REFCTRL				0x020
#define EMC_TIMING_CONTROL			0x028
#define EMC_RC					0x02c
#define EMC_RFC					0x030
#define EMC_RAS					0x034
#define EMC_RP					0x038
#define EMC_R2W					0x03c
#define EMC_W2R					0x040
#define EMC_R2P					0x044
#define EMC_W2P					0x048
#define EMC_RD_RCD				0x04c
#define EMC_WR_RCD				0x050
#define EMC_RRD					0x054
#define EMC_REXT				0x058
#define EMC_WDV					0x05c
#define EMC_QUSE				0x060
#define EMC_QRST				0x064
#define EMC_QSAFE				0x068
#define EMC_RDV					0x06c
#define EMC_REFRESH				0x070
#define EMC_BURST_REFRESH_NUM			0x074
#define EMC_PDEX2WR				0x078
#define EMC_PDEX2RD				0x07c
#define EMC_PCHG2PDEN				0x080
#define EMC_ACT2PDEN				0x084
#define EMC_AR2PDEN				0x088
#define EMC_RW2PDEN				0x08c
#define EMC_TXSR				0x090
#define EMC_TCKE				0x094
#define EMC_TFAW				0x098
#define EMC_TRPAB				0x09c
#define EMC_TCLKSTABLE				0x0a0
#define EMC_TCLKSTOP				0x0a4
#define EMC_TREFBW				0x0a8
#define EMC_QUSE_EXTRA				0x0ac
#define EMC_ODT_WRITE				0x0b0
#define EMC_ODT_READ				0x0b4
#define EMC_WEXT				0x0b8
#define EMC_CTT					0x0bc
#define EMC_MRS_WAIT_CNT			0x0c8
#define EMC_MRS					0x0cc
#define EMC_EMRS				0x0d0
#define EMC_SELF_REF				0x0e0
#define EMC_MRW					0x0e8
#define EMC_XM2DQSPADCTRL3			0x0f8
#define EMC_FBIO_SPARE				0x100
#define EMC_FBIO_CFG5				0x104
#define EMC_FBIO_CFG6				0x114
#define EMC_CFG_RSV				0x120
#define EMC_AUTO_CAL_CONFIG			0x2a4
#define EMC_AUTO_CAL_INTERVAL			0x2a8
#define EMC_AUTO_CAL_STATUS			0x2ac
#define EMC_STATUS				0x2b4
#define EMC_CFG_2				0x2b8
#define EMC_CFG_DIG_DLL				0x2bc
#define EMC_CFG_DIG_DLL_PERIOD			0x2c0
#define EMC_CTT_DURATION			0x2d8
#define EMC_CTT_TERM_CTRL			0x2dc
#define EMC_ZCAL_INTERVAL			0x2e0
#define EMC_ZCAL_WAIT_CNT			0x2e4
#define EMC_ZQ_CAL				0x2ec
#define EMC_XM2CMDPADCTRL			0x2f0
#define EMC_XM2DQSPADCTRL2			0x2fc
#define EMC_XM2DQPADCTRL2			0x304
#define EMC_XM2CLKPADCTRL			0x308
#define EMC_XM2COMPPADCTRL			0x30c
#define EMC_XM2VTTGENPADCTRL			0x310
#define EMC_XM2VTTGENPADCTRL2			0x314
#define EMC_XM2QUSEPADCTRL			0x318
#define EMC_DLL_XFORM_DQS0			0x328
#define EMC_DLL_XFORM_DQS1			0x32c
#define EMC_DLL_XFORM_DQS2			0x330
#define EMC_DLL_XFORM_DQS3			0x334
#define EMC_DLL_XFORM_DQS4			0x338
#define EMC_DLL_XFORM_DQS5			0x33c
#define EMC_DLL_XFORM_DQS6			0x340
#define EMC_DLL_XFORM_DQS7			0x344
#define EMC_DLL_XFORM_QUSE0			0x348
#define EMC_DLL_XFORM_QUSE1			0x34c
#define EMC_DLL_XFORM_QUSE2			0x350
#define EMC_DLL_XFORM_QUSE3			0x354
#define EMC_DLL_XFORM_QUSE4			0x358
#define EMC_DLL_XFORM_QUSE5			0x35c
#define EMC_DLL_XFORM_QUSE6			0x360
#define EMC_DLL_XFORM_QUSE7			0x364
#define EMC_DLL_XFORM_DQ0			0x368
#define EMC_DLL_XFORM_DQ1			0x36c
#define EMC_DLL_XFORM_DQ2			0x370
#define EMC_DLL_XFORM_DQ3			0x374
#define EMC_DLI_TRIM_TXDQS0			0x3a8
#define EMC_DLI_TRIM_TXDQS1			0x3ac
#define EMC_DLI_TRIM_TXDQS2			0x3b0
#define EMC_DLI_TRIM_TXDQS3			0x3b4
#define EMC_DLI_TRIM_TXDQS4			0x3b8
#define EMC_DLI_TRIM_TXDQS5			0x3bc
#define EMC_DLI_TRIM_TXDQS6			0x3c0
#define EMC_DLI_TRIM_TXDQS7			0x3c4
#define EMC_STALL_THEN_EXE_BEFORE_CLKCHANGE	0x3c8
#define EMC_STALL_THEN_EXE_AFTER_CLKCHANGE	0x3cc
#define EMC_UNSTALL_RW_AFTER_CLKCHANGE		0x3d0
#define EMC_SEL_DPD_CTRL			0x3d8
#define EMC_PRE_REFRESH_REQ_CNT			0x3dc
#define EMC_DYN_SELF_REF_CONTROL		0x3e0
#define EMC_TXSRDLL				0x3e4

#define EMC_STATUS_TIMING_UPDATE_STALLED	BIT(23)

#define EMC_MODE_SET_DLL_RESET			BIT(8)
#define EMC_MODE_SET_LONG_CNT			BIT(26)

#define EMC_SELF_REF_CMD_ENABLED		BIT(0)

#define DRAM_DEV_SEL_ALL			(0 << 30)
#define DRAM_DEV_SEL_0				(2 << 30)
#define DRAM_DEV_SEL_1				(1 << 30)
#define DRAM_BROADCAST(num) \
	((num) > 1 ? DRAM_DEV_SEL_ALL : DRAM_DEV_SEL_0)

#define EMC_ZQ_CAL_CMD				BIT(0)
#define EMC_ZQ_CAL_LONG				BIT(4)
#define EMC_ZQ_CAL_LONG_CMD_DEV0 \
	(DRAM_DEV_SEL_0 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD)
#define EMC_ZQ_CAL_LONG_CMD_DEV1 \
	(DRAM_DEV_SEL_1 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD)

#define EMC_DBG_READ_MUX_ASSEMBLY		BIT(0)
#define EMC_DBG_WRITE_MUX_ACTIVE		BIT(1)
#define EMC_DBG_FORCE_UPDATE			BIT(2)
#define EMC_DBG_CFG_PRIORITY			BIT(24)

#define EMC_CFG5_QUSE_MODE_SHIFT		13
#define EMC_CFG5_QUSE_MODE_MASK			(7 << EMC_CFG5_QUSE_MODE_SHIFT)

#define EMC_CFG5_QUSE_MODE_INTERNAL_LPBK	2
#define EMC_CFG5_QUSE_MODE_PULSE_INTERN		3

#define EMC_SEL_DPD_CTRL_QUSE_DPD_ENABLE	BIT(9)

#define EMC_XM2COMPPADCTRL_VREF_CAL_ENABLE	BIT(10)

#define EMC_XM2QUSEPADCTRL_IVREF_ENABLE		BIT(4)

#define EMC_XM2DQSPADCTRL2_VREF_ENABLE		BIT(5)
#define EMC_XM2DQSPADCTRL3_VREF_ENABLE		BIT(5)

#define EMC_AUTO_CAL_STATUS_ACTIVE		BIT(31)

#define EMC_FBIO_CFG5_DRAM_TYPE_MASK		0x3

#define EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK	0x3ff
#define EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT	16
#define EMC_MRS_WAIT_CNT_LONG_WAIT_MASK \
	(0x3ff << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT)

#define EMC_REFCTRL_DEV_SEL_MASK		0x3
#define EMC_REFCTRL_ENABLE			BIT(31)
#define EMC_REFCTRL_ENABLE_ALL(num) \
	(((num) > 1 ? 0 : 2) | EMC_REFCTRL_ENABLE)
#define EMC_REFCTRL_DISABLE_ALL(num)		((num) > 1 ? 0 : 2)

#define EMC_CFG_PERIODIC_QRST			BIT(21)
#define EMC_CFG_DYN_SREF_ENABLE			BIT(28)

#define EMC_CLKCHANGE_REQ_ENABLE		BIT(0)
#define EMC_CLKCHANGE_PD_ENABLE			BIT(1)
#define EMC_CLKCHANGE_SR_ENABLE			BIT(2)

#define EMC_TIMING_UPDATE			BIT(0)

#define EMC_REFRESH_OVERFLOW_INT		BIT(3)
#define EMC_CLKCHANGE_COMPLETE_INT		BIT(4)

enum emc_dram_type {
	DRAM_TYPE_DDR3,
	DRAM_TYPE_DDR1,
	DRAM_TYPE_LPDDR2,
	DRAM_TYPE_DDR2,
};

enum emc_dll_change {
	DLL_CHANGE_NONE,
	DLL_CHANGE_ON,
	DLL_CHANGE_OFF
};

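/*
 * List of the shadowed timing registers, in the exact order in which their
 * values appear in the "nvidia,emc-configuration" device-tree property.
 * The indices matter: several places in this driver index emc_timing::data[]
 * directly (e.g. data[73] = EMC_XM2CLKPADCTRL, data[82] = EMC_MRS_WAIT_CNT).
 */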
static const u16 emc_timing_registers[] = {
	[0] = EMC_RC,
	[1] = EMC_RFC,
	[2] = EMC_RAS,
	[3] = EMC_RP,
	[4] = EMC_R2W,
	[5] = EMC_W2R,
	[6] = EMC_R2P,
	[7] = EMC_W2P,
	[8] = EMC_RD_RCD,
	[9] = EMC_WR_RCD,
	[10] = EMC_RRD,
	[11] = EMC_REXT,
	[12] = EMC_WEXT,
	[13] = EMC_WDV,
	[14] = EMC_QUSE,
	[15] = EMC_QRST,
	[16] = EMC_QSAFE,
	[17] = EMC_RDV,
	[18] = EMC_REFRESH,
	[19] = EMC_BURST_REFRESH_NUM,
	[20] = EMC_PRE_REFRESH_REQ_CNT,
	[21] = EMC_PDEX2WR,
	[22] = EMC_PDEX2RD,
	[23] = EMC_PCHG2PDEN,
	[24] = EMC_ACT2PDEN,
	[25] = EMC_AR2PDEN,
	[26] = EMC_RW2PDEN,
	[27] = EMC_TXSR,
	[28] = EMC_TXSRDLL,
	[29] = EMC_TCKE,
	[30] = EMC_TFAW,
	[31] = EMC_TRPAB,
	[32] = EMC_TCLKSTABLE,
	[33] = EMC_TCLKSTOP,
	[34] = EMC_TREFBW,
	[35] = EMC_QUSE_EXTRA,
	[36] = EMC_FBIO_CFG6,
	[37] = EMC_ODT_WRITE,
	[38] = EMC_ODT_READ,
	[39] = EMC_FBIO_CFG5,
	[40] = EMC_CFG_DIG_DLL,
	[41] = EMC_CFG_DIG_DLL_PERIOD,
	[42] = EMC_DLL_XFORM_DQS0,
	[43] = EMC_DLL_XFORM_DQS1,
	[44] = EMC_DLL_XFORM_DQS2,
	[45] = EMC_DLL_XFORM_DQS3,
	[46] = EMC_DLL_XFORM_DQS4,
	[47] = EMC_DLL_XFORM_DQS5,
	[48] = EMC_DLL_XFORM_DQS6,
	[49] = EMC_DLL_XFORM_DQS7,
	[50] = EMC_DLL_XFORM_QUSE0,
	[51] = EMC_DLL_XFORM_QUSE1,
	[52] = EMC_DLL_XFORM_QUSE2,
	[53] = EMC_DLL_XFORM_QUSE3,
	[54] = EMC_DLL_XFORM_QUSE4,
	[55] = EMC_DLL_XFORM_QUSE5,
	[56] = EMC_DLL_XFORM_QUSE6,
	[57] = EMC_DLL_XFORM_QUSE7,
	[58] = EMC_DLI_TRIM_TXDQS0,
	[59] = EMC_DLI_TRIM_TXDQS1,
	[60] = EMC_DLI_TRIM_TXDQS2,
	[61] = EMC_DLI_TRIM_TXDQS3,
	[62] = EMC_DLI_TRIM_TXDQS4,
	[63] = EMC_DLI_TRIM_TXDQS5,
	[64] = EMC_DLI_TRIM_TXDQS6,
	[65] = EMC_DLI_TRIM_TXDQS7,
	[66] = EMC_DLL_XFORM_DQ0,
	[67] = EMC_DLL_XFORM_DQ1,
	[68] = EMC_DLL_XFORM_DQ2,
	[69] = EMC_DLL_XFORM_DQ3,
	[70] = EMC_XM2CMDPADCTRL,
	[71] = EMC_XM2DQSPADCTRL2,
	[72] = EMC_XM2DQPADCTRL2,
	[73] = EMC_XM2CLKPADCTRL,
	[74] = EMC_XM2COMPPADCTRL,
	[75] = EMC_XM2VTTGENPADCTRL,
	[76] = EMC_XM2VTTGENPADCTRL2,
	[77] = EMC_XM2QUSEPADCTRL,
	[78] = EMC_XM2DQSPADCTRL3,
	[79] = EMC_CTT_TERM_CTRL,
	[80] = EMC_ZCAL_INTERVAL,
	[81] = EMC_ZCAL_WAIT_CNT,
	[82] = EMC_MRS_WAIT_CNT,
	[83] = EMC_AUTO_CAL_CONFIG,
	[84] = EMC_CTT,
	[85] = EMC_CTT_DURATION,
	[86] = EMC_DYN_SELF_REF_CONTROL,
	[87] = EMC_FBIO_SPARE,
	[88] = EMC_CFG_RSV,
};

struct emc_timing {
	unsigned long rate;

	u32 data[ARRAY_SIZE(emc_timing_registers)];

	u32 emc_auto_cal_interval;
	u32 emc_mode_1;
	u32 emc_mode_2;
	u32 emc_mode_reset;
	u32 emc_zcal_cnt_long;
	bool emc_cfg_periodic_qrst;
	bool emc_cfg_dyn_self_ref;
};

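/*
 * Driver state: clk_handshake_complete is signalled from the interrupt
 * handler once the EMC-CAR clock-change handshake has finished, "prepared"
 * is set while a rate change is in flight and "bad_state" marks the
 * controller as unusable after a failed or interrupted change.
 */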
struct tegra_emc {
	struct device *dev;
	struct tegra_mc *mc;
	struct completion clk_handshake_complete;
	struct notifier_block clk_nb;
	struct clk *clk;
	void __iomem *regs;
	unsigned int irq;

	struct emc_timing *timings;
	unsigned int num_timings;

	u32 mc_override;
	u32 emc_cfg;

	u32 emc_mode_1;
	u32 emc_mode_2;
	u32 emc_mode_reset;

	bool vref_cal_toggle : 1;
	bool zcal_long : 1;
	bool dll_on : 1;
	bool prepared : 1;
	bool bad_state : 1;
};

static irqreturn_t tegra_emc_isr(int irq, void *data)
{
	struct tegra_emc *emc = data;
	u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
	u32 status;

	status = readl_relaxed(emc->regs + EMC_INTSTATUS) & intmask;
	if (!status)
		return IRQ_NONE;

	/* notify about EMC-CAR handshake completion */
	if (status & EMC_CLKCHANGE_COMPLETE_INT)
		complete(&emc->clk_handshake_complete);

	/* notify about HW problem */
	if (status & EMC_REFRESH_OVERFLOW_INT)
		dev_err_ratelimited(emc->dev,
				    "refresh request overflow timeout\n");

	/* clear interrupts */
	writel_relaxed(status, emc->regs + EMC_INTSTATUS);

	return IRQ_HANDLED;
}

static struct emc_timing *emc_find_timing(struct tegra_emc *emc,
					  unsigned long rate)
{
	struct emc_timing *timing = NULL;
	unsigned int i;

	for (i = 0; i < emc->num_timings; i++) {
		if (emc->timings[i].rate >= rate) {
			timing = &emc->timings[i];
			break;
		}
	}

	if (!timing) {
		dev_err(emc->dev, "no timing for rate %lu\n", rate);
		return NULL;
	}

	return timing;
}

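/*
 * Pre-enable VREF on the DQS/QUSE pads when the new timing requires it, so
 * that the reference voltage can settle before the clock is switched.
 * Returns true if any pad register was modified; *schmitt_to_vref is set
 * when the QUSE pad IVREF had to be enabled here.
 */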
static bool emc_dqs_preset(struct tegra_emc *emc, struct emc_timing *timing,
			   bool *schmitt_to_vref)
{
	bool preset = false;
	u32 val;

	if (timing->data[71] & EMC_XM2DQSPADCTRL2_VREF_ENABLE) {
		val = readl_relaxed(emc->regs + EMC_XM2DQSPADCTRL2);

		if (!(val & EMC_XM2DQSPADCTRL2_VREF_ENABLE)) {
			val |= EMC_XM2DQSPADCTRL2_VREF_ENABLE;
			writel_relaxed(val, emc->regs + EMC_XM2DQSPADCTRL2);

			preset = true;
		}
	}

	if (timing->data[78] & EMC_XM2DQSPADCTRL3_VREF_ENABLE) {
		val = readl_relaxed(emc->regs + EMC_XM2DQSPADCTRL3);

		if (!(val & EMC_XM2DQSPADCTRL3_VREF_ENABLE)) {
			val |= EMC_XM2DQSPADCTRL3_VREF_ENABLE;
			writel_relaxed(val, emc->regs + EMC_XM2DQSPADCTRL3);

			preset = true;
		}
	}

	if (timing->data[77] & EMC_XM2QUSEPADCTRL_IVREF_ENABLE) {
		val = readl_relaxed(emc->regs + EMC_XM2QUSEPADCTRL);

		if (!(val & EMC_XM2QUSEPADCTRL_IVREF_ENABLE)) {
			val |= EMC_XM2QUSEPADCTRL_IVREF_ENABLE;
			writel_relaxed(val, emc->regs + EMC_XM2QUSEPADCTRL);

			*schmitt_to_vref = true;
			preset = true;
		}
	}

	return preset;
}

static int emc_seq_update_timing(struct tegra_emc *emc)
{
	u32 val;
	int err;

	writel_relaxed(EMC_TIMING_UPDATE, emc->regs + EMC_TIMING_CONTROL);

	err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_STATUS, val,
				!(val & EMC_STATUS_TIMING_UPDATE_STALLED),
				1, 200);
	if (err) {
		dev_err(emc->dev, "failed to update timing: %d\n", err);
		return err;
	}

	return 0;
}

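/*
 * The misc0 entry (index 16) of the matching MC timing carries a flag in
 * bit 27 for rates at which the memory controller must run at the same
 * frequency as the EMC; pass it down to the clock driver so that CAR
 * programs the EMC/MC divider accordingly.
 */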
static int emc_prepare_mc_clk_cfg(struct tegra_emc *emc, unsigned long rate)
{
	struct tegra_mc *mc = emc->mc;
	unsigned int misc0_index = 16;
	unsigned int i;
	bool same;

	for (i = 0; i < mc->num_timings; i++) {
		if (mc->timings[i].rate != rate)
			continue;

		if (mc->timings[i].emem_data[misc0_index] & BIT(27))
			same = true;
		else
			same = false;

		return tegra20_clk_prepare_emc_mc_same_freq(emc->clk, same);
	}

	return -EINVAL;
}

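/*
 * Called from the PRE_RATE_CHANGE clock notifier, before CAR switches the
 * EMC clock: program the shadowed timing registers and the MC configuration
 * for the new rate, stall the EMC and put DDR3 into self-refresh, so that
 * the actual switch only needs the EMC-CAR handshake to complete.
 */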
static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
{
	struct emc_timing *timing = emc_find_timing(emc, rate);
	enum emc_dll_change dll_change;
	enum emc_dram_type dram_type;
	bool schmitt_to_vref = false;
	unsigned int pre_wait = 0;
	bool qrst_used = false;
	unsigned int dram_num;
	unsigned int i;
	u32 fbio_cfg5;
	u32 emc_dbg;
	u32 val;
	int err;

	if (!timing || emc->bad_state)
		return -EINVAL;

	dev_dbg(emc->dev, "%s: using timing rate %lu for requested rate %lu\n",
		__func__, timing->rate, rate);

	emc->bad_state = true;

	err = emc_prepare_mc_clk_cfg(emc, rate);
	if (err) {
		dev_err(emc->dev, "mc clock preparation failed: %d\n", err);
		return err;
	}

	emc->vref_cal_toggle = false;
	emc->mc_override = mc_readl(emc->mc, MC_EMEM_ARB_OVERRIDE);
	emc->emc_cfg = readl_relaxed(emc->regs + EMC_CFG);
	emc_dbg = readl_relaxed(emc->regs + EMC_DBG);

	if (emc->dll_on == !!(timing->emc_mode_1 & 0x1))
		dll_change = DLL_CHANGE_NONE;
	else if (timing->emc_mode_1 & 0x1)
		dll_change = DLL_CHANGE_ON;
	else
		dll_change = DLL_CHANGE_OFF;

	emc->dll_on = !!(timing->emc_mode_1 & 0x1);

	if (timing->data[80] && !readl_relaxed(emc->regs + EMC_ZCAL_INTERVAL))
		emc->zcal_long = true;
	else
		emc->zcal_long = false;

	fbio_cfg5 = readl_relaxed(emc->regs + EMC_FBIO_CFG5);
	dram_type = fbio_cfg5 & EMC_FBIO_CFG5_DRAM_TYPE_MASK;

	dram_num = tegra_mc_get_emem_device_count(emc->mc);

	/* disable dynamic self-refresh */
	if (emc->emc_cfg & EMC_CFG_DYN_SREF_ENABLE) {
		emc->emc_cfg &= ~EMC_CFG_DYN_SREF_ENABLE;
		writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);

		pre_wait = 5;
	}

	/* update MC arbiter settings */
	val = mc_readl(emc->mc, MC_EMEM_ARB_OUTSTANDING_REQ);
	if (!(val & MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE) ||
	    ((val & MC_EMEM_ARB_OUTSTANDING_REQ_MAX_MASK) > 0x50)) {

		val = MC_EMEM_ARB_OUTSTANDING_REQ_LIMIT_ENABLE |
		      MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE | 0x50;
		mc_writel(emc->mc, val, MC_EMEM_ARB_OUTSTANDING_REQ);
		mc_writel(emc->mc, MC_TIMING_UPDATE, MC_TIMING_CONTROL);
	}

	if (emc->mc_override & MC_EMEM_ARB_OVERRIDE_EACK_MASK)
		mc_writel(emc->mc,
			  emc->mc_override & ~MC_EMEM_ARB_OVERRIDE_EACK_MASK,
			  MC_EMEM_ARB_OVERRIDE);

	/* check DQ/DQS VREF delay */
	if (emc_dqs_preset(emc, timing, &schmitt_to_vref)) {
		if (pre_wait < 3)
			pre_wait = 3;
	}

	if (pre_wait) {
		err = emc_seq_update_timing(emc);
		if (err)
			return err;

		udelay(pre_wait);
	}

	/* disable auto-calibration if VREF mode is switching */
	if (timing->emc_auto_cal_interval) {
		val = readl_relaxed(emc->regs + EMC_XM2COMPPADCTRL);
		val ^= timing->data[74];

		if (val & EMC_XM2COMPPADCTRL_VREF_CAL_ENABLE) {
			writel_relaxed(0, emc->regs + EMC_AUTO_CAL_INTERVAL);

			err = readl_relaxed_poll_timeout_atomic(
				emc->regs + EMC_AUTO_CAL_STATUS, val,
				!(val & EMC_AUTO_CAL_STATUS_ACTIVE), 1, 300);
			if (err) {
				dev_err(emc->dev,
					"failed to disable auto-cal: %d\n",
					err);
				return err;
			}

			emc->vref_cal_toggle = true;
		}
	}

	/* program shadow registers */
	for (i = 0; i < ARRAY_SIZE(timing->data); i++) {
		/* EMC_XM2CLKPADCTRL should be programmed separately */
		if (i != 73)
			writel_relaxed(timing->data[i],
				       emc->regs + emc_timing_registers[i]);
	}

	err = tegra_mc_write_emem_configuration(emc->mc, timing->rate);
	if (err)
		return err;

	/* DDR3: predict MRS long wait count */
	if (dram_type == DRAM_TYPE_DDR3 && dll_change == DLL_CHANGE_ON) {
		u32 cnt = 512;

		if (emc->zcal_long)
			cnt -= dram_num * 256;

		val = timing->data[82] & EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK;
		if (cnt < val)
			cnt = val;

		val = timing->data[82] & ~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
		val |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) &
			EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;

		writel_relaxed(val, emc->regs + EMC_MRS_WAIT_CNT);
	}

	/* disable interrupt since read access is prohibited after stalling */
	disable_irq(emc->irq);

	/* this read also completes the writes */
	val = readl_relaxed(emc->regs + EMC_SEL_DPD_CTRL);

	if (!(val & EMC_SEL_DPD_CTRL_QUSE_DPD_ENABLE) && schmitt_to_vref) {
		u32 cur_mode, new_mode;

		cur_mode = fbio_cfg5 & EMC_CFG5_QUSE_MODE_MASK;
		cur_mode >>= EMC_CFG5_QUSE_MODE_SHIFT;

		new_mode = timing->data[39] & EMC_CFG5_QUSE_MODE_MASK;
		new_mode >>= EMC_CFG5_QUSE_MODE_SHIFT;

		if ((cur_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN &&
		     cur_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK) ||
		    (new_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN &&
		     new_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK))
			qrst_used = true;
	}

	/* flow control marker 1 */
	writel_relaxed(0x1, emc->regs + EMC_STALL_THEN_EXE_BEFORE_CLKCHANGE);

	/* enable periodic reset */
	if (qrst_used) {
		writel_relaxed(emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE,
			       emc->regs + EMC_DBG);
		writel_relaxed(emc->emc_cfg | EMC_CFG_PERIODIC_QRST,
			       emc->regs + EMC_CFG);
		writel_relaxed(emc_dbg, emc->regs + EMC_DBG);
	}

	/* disable auto-refresh to save time after clock change */
	writel_relaxed(EMC_REFCTRL_DISABLE_ALL(dram_num),
		       emc->regs + EMC_REFCTRL);

	/* turn off DLL and enter self-refresh on DDR3 */
	if (dram_type == DRAM_TYPE_DDR3) {
		if (dll_change == DLL_CHANGE_OFF)
			writel_relaxed(timing->emc_mode_1,
				       emc->regs + EMC_EMRS);

		writel_relaxed(DRAM_BROADCAST(dram_num) |
			       EMC_SELF_REF_CMD_ENABLED,
			       emc->regs + EMC_SELF_REF);
	}

	/* flow control marker 2 */
	writel_relaxed(0x1, emc->regs + EMC_STALL_THEN_EXE_AFTER_CLKCHANGE);

	/* enable write-active MUX, update unshadowed pad control */
	writel_relaxed(emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE, emc->regs + EMC_DBG);
	writel_relaxed(timing->data[73], emc->regs + EMC_XM2CLKPADCTRL);

	/* restore periodic QRST and disable write-active MUX */
	val = !!(emc->emc_cfg & EMC_CFG_PERIODIC_QRST);
	if (qrst_used || timing->emc_cfg_periodic_qrst != val) {
		if (timing->emc_cfg_periodic_qrst)
			emc->emc_cfg |= EMC_CFG_PERIODIC_QRST;
		else
			emc->emc_cfg &= ~EMC_CFG_PERIODIC_QRST;

		writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
	}
	writel_relaxed(emc_dbg, emc->regs + EMC_DBG);

	/* exit self-refresh on DDR3 */
	if (dram_type == DRAM_TYPE_DDR3)
		writel_relaxed(DRAM_BROADCAST(dram_num),
			       emc->regs + EMC_SELF_REF);

	/* set DRAM-mode registers */
	if (dram_type == DRAM_TYPE_DDR3) {
		if (timing->emc_mode_1 != emc->emc_mode_1)
			writel_relaxed(timing->emc_mode_1,
				       emc->regs + EMC_EMRS);

		if (timing->emc_mode_2 != emc->emc_mode_2)
			writel_relaxed(timing->emc_mode_2,
				       emc->regs + EMC_EMRS);

		if (timing->emc_mode_reset != emc->emc_mode_reset ||
		    dll_change == DLL_CHANGE_ON) {
			val = timing->emc_mode_reset;
			if (dll_change == DLL_CHANGE_ON) {
				val |= EMC_MODE_SET_DLL_RESET;
				val |= EMC_MODE_SET_LONG_CNT;
			} else {
				val &= ~EMC_MODE_SET_DLL_RESET;
			}
			writel_relaxed(val, emc->regs + EMC_MRS);
		}
	} else {
		if (timing->emc_mode_2 != emc->emc_mode_2)
			writel_relaxed(timing->emc_mode_2,
				       emc->regs + EMC_MRW);

		if (timing->emc_mode_1 != emc->emc_mode_1)
			writel_relaxed(timing->emc_mode_1,
				       emc->regs + EMC_MRW);
	}

	emc->emc_mode_1 = timing->emc_mode_1;
	emc->emc_mode_2 = timing->emc_mode_2;
	emc->emc_mode_reset = timing->emc_mode_reset;

	/* issue ZCAL command if turning ZCAL on */
	if (emc->zcal_long) {
		writel_relaxed(EMC_ZQ_CAL_LONG_CMD_DEV0,
			       emc->regs + EMC_ZQ_CAL);

		if (dram_num > 1)
			writel_relaxed(EMC_ZQ_CAL_LONG_CMD_DEV1,
				       emc->regs + EMC_ZQ_CAL);
	}

	/* re-enable auto-refresh */
	writel_relaxed(EMC_REFCTRL_ENABLE_ALL(dram_num),
		       emc->regs + EMC_REFCTRL);

	/* flow control marker 3 */
	writel_relaxed(0x1, emc->regs + EMC_UNSTALL_RW_AFTER_CLKCHANGE);

	reinit_completion(&emc->clk_handshake_complete);

	/* interrupt can be re-enabled now */
	enable_irq(emc->irq);

	emc->bad_state = false;
	emc->prepared = true;

	return 0;
}

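/*
 * Called from the POST_RATE_CHANGE clock notifier, after CAR has switched
 * the clock: wait for the EMC-CAR handshake interrupt, then restore the
 * auto-calibration, dynamic self-refresh and ZQ-calibration settings of the
 * new timing and re-apply the saved MC arbiter override.
 */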
static int emc_complete_timing_change(struct tegra_emc *emc,
				      unsigned long rate)
{
	struct emc_timing *timing = emc_find_timing(emc, rate);
	unsigned long timeout;
	int ret;

	timeout = wait_for_completion_timeout(&emc->clk_handshake_complete,
					      msecs_to_jiffies(100));
	if (timeout == 0) {
		dev_err(emc->dev, "emc-car handshake failed\n");
		emc->bad_state = true;
		return -EIO;
	}

	/* restore auto-calibration */
	if (emc->vref_cal_toggle)
		writel_relaxed(timing->emc_auto_cal_interval,
			       emc->regs + EMC_AUTO_CAL_INTERVAL);

	/* restore dynamic self-refresh */
	if (timing->emc_cfg_dyn_self_ref) {
		emc->emc_cfg |= EMC_CFG_DYN_SREF_ENABLE;
		writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
	}

	/* set number of clocks to wait after each ZQ command */
	if (emc->zcal_long)
		writel_relaxed(timing->emc_zcal_cnt_long,
			       emc->regs + EMC_ZCAL_WAIT_CNT);

	udelay(2);
	/* update restored timing */
	ret = emc_seq_update_timing(emc);
	if (ret)
		emc->bad_state = true;

	/* restore early ACK */
	mc_writel(emc->mc, emc->mc_override, MC_EMEM_ARB_OVERRIDE);

	emc->prepared = false;

	return ret;
}

static int emc_unprepare_timing_change(struct tegra_emc *emc,
				       unsigned long rate)
{
	if (emc->prepared && !emc->bad_state) {
		/* shouldn't ever happen in practice */
		dev_err(emc->dev, "timing configuration can't be reverted\n");
		emc->bad_state = true;
	}

	return 0;
}

static int emc_clk_change_notify(struct notifier_block *nb,
				 unsigned long msg, void *data)
{
	struct tegra_emc *emc = container_of(nb, struct tegra_emc, clk_nb);
	struct clk_notifier_data *cnd = data;
	int err;

	switch (msg) {
	case PRE_RATE_CHANGE:
		err = emc_prepare_timing_change(emc, cnd->new_rate);
		break;

	case ABORT_RATE_CHANGE:
		err = emc_unprepare_timing_change(emc, cnd->old_rate);
		break;

	case POST_RATE_CHANGE:
		err = emc_complete_timing_change(emc, cnd->new_rate);
		break;

	default:
		return NOTIFY_DONE;
	}

	return notifier_from_errno(err);
}

static int load_one_timing_from_dt(struct tegra_emc *emc,
				   struct emc_timing *timing,
				   struct device_node *node)
{
	u32 value;
	int err;

	err = of_property_read_u32(node, "clock-frequency", &value);
	if (err) {
		dev_err(emc->dev, "timing %pOF: failed to read rate: %d\n",
			node, err);
		return err;
	}

	timing->rate = value;

	err = of_property_read_u32_array(node, "nvidia,emc-configuration",
					 timing->data,
					 ARRAY_SIZE(emc_timing_registers));
	if (err) {
		dev_err(emc->dev,
			"timing %pOF: failed to read emc timing data: %d\n",
			node, err);
		return err;
	}

#define EMC_READ_BOOL(prop, dtprop) \
	timing->prop = of_property_read_bool(node, dtprop);

#define EMC_READ_U32(prop, dtprop) \
	err = of_property_read_u32(node, dtprop, &timing->prop); \
	if (err) { \
		dev_err(emc->dev, \
			"timing %pOFn: failed to read " #prop ": %d\n", \
			node, err); \
		return err; \
	}

	EMC_READ_U32(emc_auto_cal_interval, "nvidia,emc-auto-cal-interval")
	EMC_READ_U32(emc_mode_1, "nvidia,emc-mode-1")
	EMC_READ_U32(emc_mode_2, "nvidia,emc-mode-2")
	EMC_READ_U32(emc_mode_reset, "nvidia,emc-mode-reset")
	EMC_READ_U32(emc_zcal_cnt_long, "nvidia,emc-zcal-cnt-long")
	EMC_READ_BOOL(emc_cfg_dyn_self_ref, "nvidia,emc-cfg-dyn-self-ref")
	EMC_READ_BOOL(emc_cfg_periodic_qrst, "nvidia,emc-cfg-periodic-qrst")

#undef EMC_READ_U32
#undef EMC_READ_BOOL

	dev_dbg(emc->dev, "%s: %pOF: rate %lu\n", __func__, node, timing->rate);

	return 0;
}

static int cmp_timings(const void *_a, const void *_b)
{
	const struct emc_timing *a = _a;
	const struct emc_timing *b = _b;

	if (a->rate < b->rate)
		return -1;

	if (a->rate > b->rate)
		return 1;

	return 0;
}

static int emc_check_mc_timings(struct tegra_emc *emc)
{
	struct tegra_mc *mc = emc->mc;
	unsigned int i;

	if (emc->num_timings != mc->num_timings) {
		dev_err(emc->dev, "emc/mc timings number mismatch: %u %u\n",
			emc->num_timings, mc->num_timings);
		return -EINVAL;
	}

	for (i = 0; i < mc->num_timings; i++) {
		if (emc->timings[i].rate != mc->timings[i].rate) {
			dev_err(emc->dev,
				"emc/mc timing rate mismatch: %lu %lu\n",
				emc->timings[i].rate, mc->timings[i].rate);
			return -EINVAL;
		}
	}

	return 0;
}

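/*
 * Read all timings of the selected RAM-code node, sort them by rate and
 * verify that they match the timings known to the memory controller driver.
 */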
static int emc_load_timings_from_dt(struct tegra_emc *emc,
				    struct device_node *node)
{
	struct device_node *child;
	struct emc_timing *timing;
	int child_count;
	int err;

	child_count = of_get_child_count(node);
	if (!child_count) {
		dev_err(emc->dev, "no memory timings in: %pOF\n", node);
		return -EINVAL;
	}

	emc->timings = devm_kcalloc(emc->dev, child_count, sizeof(*timing),
				    GFP_KERNEL);
	if (!emc->timings)
		return -ENOMEM;

	emc->num_timings = child_count;
	timing = emc->timings;

	for_each_child_of_node(node, child) {
		err = load_one_timing_from_dt(emc, timing++, child);
		if (err) {
			of_node_put(child);
			return err;
		}
	}

	sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
	     NULL);

	err = emc_check_mc_timings(emc);
	if (err)
		return err;

	dev_info(emc->dev,
		 "got %u timings for RAM code %u (min %luMHz max %luMHz)\n",
		 emc->num_timings,
		 tegra_read_ram_code(),
		 emc->timings[0].rate / 1000000,
		 emc->timings[emc->num_timings - 1].rate / 1000000);

	return 0;
}

static struct device_node *emc_find_node_by_ram_code(struct device *dev)
{
	struct device_node *np;
	u32 value, ram_code;
	int err;

	ram_code = tegra_read_ram_code();

	for_each_child_of_node(dev->of_node, np) {
		err = of_property_read_u32(np, "nvidia,ram-code", &value);
		if (err || value != ram_code)
			continue;

		return np;
	}

	dev_err(dev, "no memory timings for RAM code %u found in device-tree\n",
		ram_code);

	return NULL;
}

static int emc_setup_hw(struct tegra_emc *emc)
{
	u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
	u32 fbio_cfg5, emc_cfg, emc_dbg;
	enum emc_dram_type dram_type;

	fbio_cfg5 = readl_relaxed(emc->regs + EMC_FBIO_CFG5);
	dram_type = fbio_cfg5 & EMC_FBIO_CFG5_DRAM_TYPE_MASK;

	emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);

	/* enable EMC and CAR to handshake on PLL divider/source changes */
	emc_cfg |= EMC_CLKCHANGE_REQ_ENABLE;

	/* configure clock change mode according to DRAM type */
	switch (dram_type) {
	case DRAM_TYPE_LPDDR2:
		emc_cfg |= EMC_CLKCHANGE_PD_ENABLE;
		emc_cfg &= ~EMC_CLKCHANGE_SR_ENABLE;
		break;

	default:
		emc_cfg &= ~EMC_CLKCHANGE_SR_ENABLE;
		emc_cfg &= ~EMC_CLKCHANGE_PD_ENABLE;
		break;
	}

	writel_relaxed(emc_cfg, emc->regs + EMC_CFG_2);

	/* initialize interrupt */
	writel_relaxed(intmask, emc->regs + EMC_INTMASK);
	writel_relaxed(0xffffffff, emc->regs + EMC_INTSTATUS);

	/* ensure that unwanted debug features are disabled */
	emc_dbg = readl_relaxed(emc->regs + EMC_DBG);
	emc_dbg |= EMC_DBG_CFG_PRIORITY;
	emc_dbg &= ~EMC_DBG_READ_MUX_ASSEMBLY;
	emc_dbg &= ~EMC_DBG_WRITE_MUX_ACTIVE;
	emc_dbg &= ~EMC_DBG_FORCE_UPDATE;
	writel_relaxed(emc_dbg, emc->regs + EMC_DBG);

	return 0;
}

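/*
 * Round-rate callback for the EMC clock: pick the slowest timing that is
 * not below the requested rate and fits into the [min_rate, max_rate]
 * constraints.  If every such timing exceeds max_rate, fall back to the
 * fastest timing below the request, as long as it still satisfies min_rate.
 */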
static long emc_round_rate(unsigned long rate,
			   unsigned long min_rate,
			   unsigned long max_rate,
			   void *arg)
{
	struct emc_timing *timing = NULL;
	struct tegra_emc *emc = arg;
	unsigned int i;

	min_rate = min(min_rate, emc->timings[emc->num_timings - 1].rate);

	for (i = 0; i < emc->num_timings; i++) {
		if (emc->timings[i].rate < rate && i != emc->num_timings - 1)
			continue;

		if (emc->timings[i].rate > max_rate) {
			i = max(i, 1u) - 1;

			if (emc->timings[i].rate < min_rate)
				break;
		}

		if (emc->timings[i].rate < min_rate)
			continue;

		timing = &emc->timings[i];
		break;
	}

	if (!timing) {
		dev_err(emc->dev, "no timing for rate %lu min %lu max %lu\n",
			rate, min_rate, max_rate);
		return -EINVAL;
	}

	return timing->rate;
}

static int tegra_emc_probe(struct platform_device *pdev)
{
	struct platform_device *mc;
	struct device_node *np;
	struct tegra_emc *emc;
	int err;

	if (of_get_child_count(pdev->dev.of_node) == 0) {
		dev_info(&pdev->dev,
			 "device-tree node doesn't have memory timings\n");
		return 0;
	}

	np = of_parse_phandle(pdev->dev.of_node, "nvidia,memory-controller", 0);
	if (!np) {
		dev_err(&pdev->dev, "could not get memory controller node\n");
		return -ENOENT;
	}

	mc = of_find_device_by_node(np);
	of_node_put(np);
	if (!mc)
		return -ENOENT;

	np = emc_find_node_by_ram_code(&pdev->dev);
	if (!np)
		return -EINVAL;

	emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
	if (!emc) {
		of_node_put(np);
		return -ENOMEM;
	}

	emc->mc = platform_get_drvdata(mc);
	if (!emc->mc)
		return -EPROBE_DEFER;

	init_completion(&emc->clk_handshake_complete);
	emc->clk_nb.notifier_call = emc_clk_change_notify;
	emc->dev = &pdev->dev;

	err = emc_load_timings_from_dt(emc, np);
	of_node_put(np);
	if (err)
		return err;

	emc->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(emc->regs))
		return PTR_ERR(emc->regs);

	err = emc_setup_hw(emc);
	if (err)
		return err;

	err = platform_get_irq(pdev, 0);
	if (err < 0) {
static int tegra_emc_probe(struct platform_device *pdev)
{
	struct platform_device *mc;
	struct device_node *np;
	struct tegra_emc *emc;
	int err;

	if (of_get_child_count(pdev->dev.of_node) == 0) {
		dev_info(&pdev->dev,
			 "device-tree node doesn't have memory timings\n");
		return 0;
	}

	np = of_parse_phandle(pdev->dev.of_node, "nvidia,memory-controller", 0);
	if (!np) {
		dev_err(&pdev->dev, "could not get memory controller node\n");
		return -ENOENT;
	}

	mc = of_find_device_by_node(np);
	of_node_put(np);
	if (!mc)
		return -ENOENT;

	np = emc_find_node_by_ram_code(&pdev->dev);
	if (!np)
		return -EINVAL;

	emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
	if (!emc) {
		of_node_put(np);
		return -ENOMEM;
	}

	emc->mc = platform_get_drvdata(mc);
	if (!emc->mc)
		return -EPROBE_DEFER;

	init_completion(&emc->clk_handshake_complete);
	emc->clk_nb.notifier_call = emc_clk_change_notify;
	emc->dev = &pdev->dev;

	err = emc_load_timings_from_dt(emc, np);
	of_node_put(np);
	if (err)
		return err;

	emc->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(emc->regs))
		return PTR_ERR(emc->regs);

	err = emc_setup_hw(emc);
	if (err)
		return err;

	err = platform_get_irq(pdev, 0);
	if (err < 0) {
		dev_err(&pdev->dev, "interrupt not specified: %d\n", err);
		return err;
	}
	emc->irq = err;

	err = devm_request_irq(&pdev->dev, emc->irq, tegra_emc_isr, 0,
			       dev_name(&pdev->dev), emc);
	if (err) {
		dev_err(&pdev->dev, "failed to request irq: %d\n", err);
		return err;
	}

	tegra20_clk_set_emc_round_callback(emc_round_rate, emc);

	emc->clk = devm_clk_get(&pdev->dev, "emc");
	if (IS_ERR(emc->clk)) {
		err = PTR_ERR(emc->clk);
		dev_err(&pdev->dev, "failed to get emc clock: %d\n", err);
		goto unset_cb;
	}

	err = clk_notifier_register(emc->clk, &emc->clk_nb);
	if (err) {
		dev_err(&pdev->dev, "failed to register clk notifier: %d\n",
			err);
		goto unset_cb;
	}

	platform_set_drvdata(pdev, emc);

	return 0;

unset_cb:
	tegra20_clk_set_emc_round_callback(NULL, NULL);

	return err;
}
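/*
 * System sleep: suspend refuses to run with a prepared-but-uncompleted rate
 * change, or with the driver already in a bad state, and then marks the
 * state as bad; tegra_emc_resume() reprograms the basic EMC configuration
 * and clears the flag again.
 */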
static int tegra_emc_suspend(struct device *dev)
{
	struct tegra_emc *emc = dev_get_drvdata(dev);

	/*
	 * Suspending in a bad state will hang the machine. The "prepared"
	 * variable should always be false here, unless a kernel bug caused
	 * suspending to happen in the wrong order.
	 */
	if (WARN_ON(emc->prepared) || emc->bad_state)
		return -EINVAL;

	emc->bad_state = true;

	return 0;
}

static int tegra_emc_resume(struct device *dev)
{
	struct tegra_emc *emc = dev_get_drvdata(dev);

	emc_setup_hw(emc);
	emc->bad_state = false;

	return 0;
}

static const struct dev_pm_ops tegra_emc_pm_ops = {
	.suspend = tegra_emc_suspend,
	.resume = tegra_emc_resume,
};

static const struct of_device_id tegra_emc_of_match[] = {
	{ .compatible = "nvidia,tegra30-emc", },
	{},
};

static struct platform_driver tegra_emc_driver = {
	.probe = tegra_emc_probe,
	.driver = {
		.name = "tegra30-emc",
		.of_match_table = tegra_emc_of_match,
		.pm = &tegra_emc_pm_ops,
		.suppress_bind_attrs = true,
	},
};

static int __init tegra_emc_init(void)
{
	return platform_driver_register(&tegra_emc_driver);
}
subsys_initcall(tegra_emc_init);
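/*
 * The driver is registered at subsys_initcall time rather than through
 * module_platform_driver(), presumably so that EMC support comes up early in
 * boot; suppress_bind_attrs additionally prevents the device from being
 * unbound via sysfs.
 */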