// SPDX-License-Identifier: GPL-2.0+
/*
 * Tegra30 External Memory Controller driver
 *
 * Based on downstream driver from NVIDIA and tegra124-emc.c
 * Copyright (C) 2011-2014 NVIDIA Corporation
 *
 * Author: Dmitry Osipenko <digetx@gmail.com>
 * Copyright (C) 2019 GRATE-DRIVER project
 */

#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sort.h>
#include <linux/types.h>

#include <soc/tegra/fuse.h>

#include "mc.h"

#define EMC_INTSTATUS				0x000
#define EMC_INTMASK				0x004
#define EMC_DBG					0x008
#define EMC_CFG					0x00c
#define EMC_REFCTRL				0x020
#define EMC_TIMING_CONTROL			0x028
#define EMC_RC					0x02c
#define EMC_RFC					0x030
#define EMC_RAS					0x034
#define EMC_RP					0x038
#define EMC_R2W					0x03c
#define EMC_W2R					0x040
#define EMC_R2P					0x044
#define EMC_W2P					0x048
#define EMC_RD_RCD				0x04c
#define EMC_WR_RCD				0x050
#define EMC_RRD					0x054
#define EMC_REXT				0x058
#define EMC_WDV					0x05c
#define EMC_QUSE				0x060
#define EMC_QRST				0x064
#define EMC_QSAFE				0x068
#define EMC_RDV					0x06c
#define EMC_REFRESH				0x070
#define EMC_BURST_REFRESH_NUM			0x074
#define EMC_PDEX2WR				0x078
#define EMC_PDEX2RD				0x07c
#define EMC_PCHG2PDEN				0x080
#define EMC_ACT2PDEN				0x084
#define EMC_AR2PDEN				0x088
#define EMC_RW2PDEN				0x08c
#define EMC_TXSR				0x090
#define EMC_TCKE				0x094
#define EMC_TFAW				0x098
#define EMC_TRPAB				0x09c
#define EMC_TCLKSTABLE				0x0a0
#define EMC_TCLKSTOP				0x0a4
#define EMC_TREFBW				0x0a8
#define EMC_QUSE_EXTRA				0x0ac
#define EMC_ODT_WRITE				0x0b0
#define EMC_ODT_READ				0x0b4
#define EMC_WEXT				0x0b8
#define EMC_CTT					0x0bc
#define EMC_MRS_WAIT_CNT			0x0c8
#define EMC_MRS					0x0cc
#define EMC_EMRS				0x0d0
#define EMC_SELF_REF				0x0e0
#define EMC_MRW					0x0e8
#define EMC_XM2DQSPADCTRL3			0x0f8
#define EMC_FBIO_SPARE				0x100
#define EMC_FBIO_CFG5				0x104
#define EMC_FBIO_CFG6				0x114
#define EMC_CFG_RSV				0x120
#define EMC_AUTO_CAL_CONFIG			0x2a4
#define EMC_AUTO_CAL_INTERVAL			0x2a8
#define EMC_AUTO_CAL_STATUS			0x2ac
#define EMC_STATUS				0x2b4
#define EMC_CFG_2				0x2b8
#define EMC_CFG_DIG_DLL				0x2bc
#define EMC_CFG_DIG_DLL_PERIOD			0x2c0
#define EMC_CTT_DURATION			0x2d8
#define EMC_CTT_TERM_CTRL			0x2dc
#define EMC_ZCAL_INTERVAL			0x2e0
#define EMC_ZCAL_WAIT_CNT			0x2e4
#define EMC_ZQ_CAL				0x2ec
#define EMC_XM2CMDPADCTRL			0x2f0
#define EMC_XM2DQSPADCTRL2			0x2fc
#define EMC_XM2DQPADCTRL2			0x304
#define EMC_XM2CLKPADCTRL			0x308
#define EMC_XM2COMPPADCTRL			0x30c
#define EMC_XM2VTTGENPADCTRL			0x310
#define EMC_XM2VTTGENPADCTRL2			0x314
#define EMC_XM2QUSEPADCTRL			0x318
#define EMC_DLL_XFORM_DQS0			0x328
#define EMC_DLL_XFORM_DQS1			0x32c
#define EMC_DLL_XFORM_DQS2			0x330
#define EMC_DLL_XFORM_DQS3			0x334
#define EMC_DLL_XFORM_DQS4			0x338
#define EMC_DLL_XFORM_DQS5			0x33c
#define EMC_DLL_XFORM_DQS6			0x340
#define EMC_DLL_XFORM_DQS7			0x344
#define EMC_DLL_XFORM_QUSE0			0x348
#define EMC_DLL_XFORM_QUSE1			0x34c
#define EMC_DLL_XFORM_QUSE2			0x350
#define EMC_DLL_XFORM_QUSE3			0x354
#define EMC_DLL_XFORM_QUSE4			0x358
#define EMC_DLL_XFORM_QUSE5			0x35c
#define EMC_DLL_XFORM_QUSE6			0x360
#define EMC_DLL_XFORM_QUSE7			0x364
#define EMC_DLL_XFORM_DQ0			0x368
#define EMC_DLL_XFORM_DQ1			0x36c
#define EMC_DLL_XFORM_DQ2			0x370
#define EMC_DLL_XFORM_DQ3			0x374
#define EMC_DLI_TRIM_TXDQS0			0x3a8
#define EMC_DLI_TRIM_TXDQS1			0x3ac
#define EMC_DLI_TRIM_TXDQS2			0x3b0
#define EMC_DLI_TRIM_TXDQS3			0x3b4
#define EMC_DLI_TRIM_TXDQS4			0x3b8
#define EMC_DLI_TRIM_TXDQS5			0x3bc
#define EMC_DLI_TRIM_TXDQS6			0x3c0
#define EMC_DLI_TRIM_TXDQS7			0x3c4
#define EMC_STALL_THEN_EXE_BEFORE_CLKCHANGE	0x3c8
#define EMC_STALL_THEN_EXE_AFTER_CLKCHANGE	0x3cc
#define EMC_UNSTALL_RW_AFTER_CLKCHANGE		0x3d0
#define EMC_SEL_DPD_CTRL			0x3d8
#define EMC_PRE_REFRESH_REQ_CNT			0x3dc
#define EMC_DYN_SELF_REF_CONTROL		0x3e0
#define EMC_TXSRDLL				0x3e4

#define EMC_STATUS_TIMING_UPDATE_STALLED	BIT(23)

#define EMC_MODE_SET_DLL_RESET			BIT(8)
#define EMC_MODE_SET_LONG_CNT			BIT(26)

#define EMC_SELF_REF_CMD_ENABLED		BIT(0)

#define DRAM_DEV_SEL_ALL			(0 << 30)
#define DRAM_DEV_SEL_0				(2 << 30)
#define DRAM_DEV_SEL_1				(1 << 30)
#define DRAM_BROADCAST(num) \
	((num) > 1 ? DRAM_DEV_SEL_ALL : DRAM_DEV_SEL_0)

#define EMC_ZQ_CAL_CMD				BIT(0)
#define EMC_ZQ_CAL_LONG				BIT(4)
#define EMC_ZQ_CAL_LONG_CMD_DEV0 \
	(DRAM_DEV_SEL_0 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD)
#define EMC_ZQ_CAL_LONG_CMD_DEV1 \
	(DRAM_DEV_SEL_1 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD)

#define EMC_DBG_READ_MUX_ASSEMBLY		BIT(0)
#define EMC_DBG_WRITE_MUX_ACTIVE		BIT(1)
#define EMC_DBG_FORCE_UPDATE			BIT(2)
#define EMC_DBG_CFG_PRIORITY			BIT(24)

#define EMC_CFG5_QUSE_MODE_SHIFT		13
#define EMC_CFG5_QUSE_MODE_MASK			(7 << EMC_CFG5_QUSE_MODE_SHIFT)

#define EMC_CFG5_QUSE_MODE_INTERNAL_LPBK	2
#define EMC_CFG5_QUSE_MODE_PULSE_INTERN		3

#define EMC_SEL_DPD_CTRL_QUSE_DPD_ENABLE	BIT(9)

#define EMC_XM2COMPPADCTRL_VREF_CAL_ENABLE	BIT(10)

#define EMC_XM2QUSEPADCTRL_IVREF_ENABLE		BIT(4)

#define EMC_XM2DQSPADCTRL2_VREF_ENABLE		BIT(5)
#define EMC_XM2DQSPADCTRL3_VREF_ENABLE		BIT(5)

#define EMC_AUTO_CAL_STATUS_ACTIVE		BIT(31)

#define EMC_FBIO_CFG5_DRAM_TYPE_MASK		0x3

#define EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK	0x3ff
#define EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT	16
#define EMC_MRS_WAIT_CNT_LONG_WAIT_MASK \
	(0x3ff << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT)

#define EMC_REFCTRL_DEV_SEL_MASK		0x3
#define EMC_REFCTRL_ENABLE			BIT(31)
#define EMC_REFCTRL_ENABLE_ALL(num) \
	(((num) > 1 ? 0 : 2) | EMC_REFCTRL_ENABLE)
#define EMC_REFCTRL_DISABLE_ALL(num)		((num) > 1 ? 0 : 2)

#define EMC_CFG_PERIODIC_QRST			BIT(21)
#define EMC_CFG_DYN_SREF_ENABLE			BIT(28)

#define EMC_CLKCHANGE_REQ_ENABLE		BIT(0)
#define EMC_CLKCHANGE_PD_ENABLE			BIT(1)
#define EMC_CLKCHANGE_SR_ENABLE			BIT(2)

#define EMC_TIMING_UPDATE			BIT(0)

#define EMC_REFRESH_OVERFLOW_INT		BIT(3)
#define EMC_CLKCHANGE_COMPLETE_INT		BIT(4)

enum emc_dram_type {
	DRAM_TYPE_DDR3,
	DRAM_TYPE_DDR1,
	DRAM_TYPE_LPDDR2,
	DRAM_TYPE_DDR2,
};

enum emc_dll_change {
	DLL_CHANGE_NONE,
	DLL_CHANGE_ON,
	DLL_CHANGE_OFF
};

static const u16 emc_timing_registers[] = {
	[0] = EMC_RC,
	[1] = EMC_RFC,
	[2] = EMC_RAS,
	[3] = EMC_RP,
	[4] = EMC_R2W,
	[5] = EMC_W2R,
	[6] = EMC_R2P,
	[7] = EMC_W2P,
	[8] = EMC_RD_RCD,
	[9] = EMC_WR_RCD,
	[10] = EMC_RRD,
	[11] = EMC_REXT,
	[12] = EMC_WEXT,
	[13] = EMC_WDV,
	[14] = EMC_QUSE,
	[15] = EMC_QRST,
	[16] = EMC_QSAFE,
	[17] = EMC_RDV,
	[18] = EMC_REFRESH,
	[19] = EMC_BURST_REFRESH_NUM,
	[20] = EMC_PRE_REFRESH_REQ_CNT,
	[21] = EMC_PDEX2WR,
	[22] = EMC_PDEX2RD,
	[23] = EMC_PCHG2PDEN,
	[24] = EMC_ACT2PDEN,
	[25] = EMC_AR2PDEN,
	[26] = EMC_RW2PDEN,
	[27] = EMC_TXSR,
	[28] = EMC_TXSRDLL,
	[29] = EMC_TCKE,
	[30] = EMC_TFAW,
	[31] = EMC_TRPAB,
	[32] = EMC_TCLKSTABLE,
	[33] = EMC_TCLKSTOP,
	[34] = EMC_TREFBW,
	[35] = EMC_QUSE_EXTRA,
	[36] = EMC_FBIO_CFG6,
	[37] = EMC_ODT_WRITE,
	[38] = EMC_ODT_READ,
	[39] = EMC_FBIO_CFG5,
	[40] = EMC_CFG_DIG_DLL,
	[41] = EMC_CFG_DIG_DLL_PERIOD,
	[42] = EMC_DLL_XFORM_DQS0,
	[43] = EMC_DLL_XFORM_DQS1,
	[44] = EMC_DLL_XFORM_DQS2,
	[45] = EMC_DLL_XFORM_DQS3,
	[46] = EMC_DLL_XFORM_DQS4,
	[47] = EMC_DLL_XFORM_DQS5,
	[48] = EMC_DLL_XFORM_DQS6,
	[49] = EMC_DLL_XFORM_DQS7,
	[50] = EMC_DLL_XFORM_QUSE0,
	[51] = EMC_DLL_XFORM_QUSE1,
	[52] = EMC_DLL_XFORM_QUSE2,
	[53] = EMC_DLL_XFORM_QUSE3,
	[54] = EMC_DLL_XFORM_QUSE4,
	[55] = EMC_DLL_XFORM_QUSE5,
	[56] = EMC_DLL_XFORM_QUSE6,
	[57] = EMC_DLL_XFORM_QUSE7,
	[58] = EMC_DLI_TRIM_TXDQS0,
	[59] = EMC_DLI_TRIM_TXDQS1,
	[60] = EMC_DLI_TRIM_TXDQS2,
	[61] = EMC_DLI_TRIM_TXDQS3,
	[62] = EMC_DLI_TRIM_TXDQS4,
	[63] = EMC_DLI_TRIM_TXDQS5,
	[64] = EMC_DLI_TRIM_TXDQS6,
	[65] = EMC_DLI_TRIM_TXDQS7,
	[66] = EMC_DLL_XFORM_DQ0,
	[67] = EMC_DLL_XFORM_DQ1,
	[68] = EMC_DLL_XFORM_DQ2,
	[69] = EMC_DLL_XFORM_DQ3,
	[70] = EMC_XM2CMDPADCTRL,
	[71] = EMC_XM2DQSPADCTRL2,
	[72] = EMC_XM2DQPADCTRL2,
	[73] = EMC_XM2CLKPADCTRL,
	[74] = EMC_XM2COMPPADCTRL,
	[75] = EMC_XM2VTTGENPADCTRL,
	[76] = EMC_XM2VTTGENPADCTRL2,
	[77] = EMC_XM2QUSEPADCTRL,
	[78] = EMC_XM2DQSPADCTRL3,
	[79] = EMC_CTT_TERM_CTRL,
	[80] = EMC_ZCAL_INTERVAL,
	[81] = EMC_ZCAL_WAIT_CNT,
	[82] = EMC_MRS_WAIT_CNT,
	[83] = EMC_AUTO_CAL_CONFIG,
	[84] = EMC_CTT,
	[85] = EMC_CTT_DURATION,
	[86] = EMC_DYN_SELF_REF_CONTROL,
	[87] = EMC_FBIO_SPARE,
	[88] = EMC_CFG_RSV,
};

struct emc_timing {
	unsigned long rate;

	u32 data[ARRAY_SIZE(emc_timing_registers)];

	u32 emc_auto_cal_interval;
	u32 emc_mode_1;
	u32 emc_mode_2;
	u32 emc_mode_reset;
	u32 emc_zcal_cnt_long;
	bool emc_cfg_periodic_qrst;
	bool emc_cfg_dyn_self_ref;
};

struct tegra_emc {
	struct device *dev;
	struct tegra_mc *mc;
	struct completion clk_handshake_complete;
	struct notifier_block clk_nb;
	struct clk *clk;
	void __iomem *regs;
	unsigned int irq;

	struct emc_timing *timings;
	unsigned int num_timings;

	u32 mc_override;
	u32 emc_cfg;

	u32 emc_mode_1;
	u32 emc_mode_2;
	u32 emc_mode_reset;

	bool vref_cal_toggle : 1;
	bool zcal_long : 1;
	bool dll_on : 1;
	bool prepared : 1;
	bool bad_state : 1;
};

static irqreturn_t tegra_emc_isr(int irq, void *data)
{
	struct tegra_emc *emc = data;
	u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
	u32 status;

	status = readl_relaxed(emc->regs + EMC_INTSTATUS) & intmask;
	if (!status)
		return IRQ_NONE;

	/* notify about EMC-CAR handshake completion */
	if (status & EMC_CLKCHANGE_COMPLETE_INT)
		complete(&emc->clk_handshake_complete);

	/* notify about HW problem */
	if (status & EMC_REFRESH_OVERFLOW_INT)
		dev_err_ratelimited(emc->dev,
				    "refresh request overflow timeout\n");

	/* clear interrupts */
	writel_relaxed(status, emc->regs + EMC_INTSTATUS);

	return IRQ_HANDLED;
}
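
/*
 * The timings table is sorted by rate in ascending order at probe time,
 * hence this returns the first entry that is at least as fast as the
 * requested rate.
 */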
static struct emc_timing *emc_find_timing(struct tegra_emc *emc,
					  unsigned long rate)
{
	struct emc_timing *timing = NULL;
	unsigned int i;

	for (i = 0; i < emc->num_timings; i++) {
		if (emc->timings[i].rate >= rate) {
			timing = &emc->timings[i];
			break;
		}
	}

	if (!timing) {
		dev_err(emc->dev, "no timing for rate %lu\n", rate);
		return NULL;
	}

	return timing;
}

/*
 * Pre-enable VREF on the DQS/QUSE pads if the new timing wants it enabled,
 * returning true if a settling delay is needed before the clock change.
 */
static bool emc_dqs_preset(struct tegra_emc *emc, struct emc_timing *timing,
			   bool *schmitt_to_vref)
{
	bool preset = false;
	u32 val;

	if (timing->data[71] & EMC_XM2DQSPADCTRL2_VREF_ENABLE) {
		val = readl_relaxed(emc->regs + EMC_XM2DQSPADCTRL2);

		if (!(val & EMC_XM2DQSPADCTRL2_VREF_ENABLE)) {
			val |= EMC_XM2DQSPADCTRL2_VREF_ENABLE;
			writel_relaxed(val, emc->regs + EMC_XM2DQSPADCTRL2);

			preset = true;
		}
	}

	if (timing->data[78] & EMC_XM2DQSPADCTRL3_VREF_ENABLE) {
		val = readl_relaxed(emc->regs + EMC_XM2DQSPADCTRL3);

		if (!(val & EMC_XM2DQSPADCTRL3_VREF_ENABLE)) {
			val |= EMC_XM2DQSPADCTRL3_VREF_ENABLE;
			writel_relaxed(val, emc->regs + EMC_XM2DQSPADCTRL3);

			preset = true;
		}
	}

	if (timing->data[77] & EMC_XM2QUSEPADCTRL_IVREF_ENABLE) {
		val = readl_relaxed(emc->regs + EMC_XM2QUSEPADCTRL);

		if (!(val & EMC_XM2QUSEPADCTRL_IVREF_ENABLE)) {
			val |= EMC_XM2QUSEPADCTRL_IVREF_ENABLE;
			writel_relaxed(val, emc->regs + EMC_XM2QUSEPADCTRL);

			*schmitt_to_vref = true;
			preset = true;
		}
	}

	return preset;
}

/*
 * Initiate a timing update (latch the shadow registers into the active
 * register set) and wait for the EMC to complete it.
 */
static int emc_seq_update_timing(struct tegra_emc *emc)
{
	u32 val;
	int err;

	writel_relaxed(EMC_TIMING_UPDATE, emc->regs + EMC_TIMING_CONTROL);

	err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_STATUS, val,
				!(val & EMC_STATUS_TIMING_UPDATE_STALLED),
				1, 200);
	if (err) {
		dev_err(emc->dev, "failed to update timing: %d\n", err);
		return err;
	}

	return 0;
}

/*
 * Tell the clock driver whether the MC shall run at the same frequency as
 * the EMC for the target rate, based on the EMC-same-frequency bit (bit 27)
 * of the matching MC timing's MISC0 value.
 */
static int emc_prepare_mc_clk_cfg(struct tegra_emc *emc, unsigned long rate)
{
	struct tegra_mc *mc = emc->mc;
	unsigned int misc0_index = 16;
	unsigned int i;
	bool same;

	for (i = 0; i < mc->num_timings; i++) {
		if (mc->timings[i].rate != rate)
			continue;

		if (mc->timings[i].emem_data[misc0_index] & BIT(27))
			same = true;
		else
			same = false;

		return tegra20_clk_prepare_emc_mc_same_freq(emc->clk, same);
	}

	return -EINVAL;
}
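
/*
 * Pre-switch part of the clock-change sequence: program the new timing into
 * the shadow registers and walk through the stall/flow-control markers. The
 * CAR performs the actual rate switch once this returns, after which
 * emc_complete_timing_change() finalizes the new configuration.
 */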
static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
{
	struct emc_timing *timing = emc_find_timing(emc, rate);
	enum emc_dll_change dll_change;
	enum emc_dram_type dram_type;
	bool schmitt_to_vref = false;
	unsigned int pre_wait = 0;
	bool qrst_used = false;
	unsigned int dram_num;
	unsigned int i;
	u32 fbio_cfg5;
	u32 emc_dbg;
	u32 val;
	int err;

	if (!timing || emc->bad_state)
		return -EINVAL;

	dev_dbg(emc->dev, "%s: using timing rate %lu for requested rate %lu\n",
		__func__, timing->rate, rate);

	emc->bad_state = true;

	err = emc_prepare_mc_clk_cfg(emc, rate);
	if (err) {
		dev_err(emc->dev, "mc clock preparation failed: %d\n", err);
		return err;
	}

	emc->vref_cal_toggle = false;
	emc->mc_override = mc_readl(emc->mc, MC_EMEM_ARB_OVERRIDE);
	emc->emc_cfg = readl_relaxed(emc->regs + EMC_CFG);
	emc_dbg = readl_relaxed(emc->regs + EMC_DBG);

	if (emc->dll_on == !!(timing->emc_mode_1 & 0x1))
		dll_change = DLL_CHANGE_NONE;
	else if (timing->emc_mode_1 & 0x1)
		dll_change = DLL_CHANGE_ON;
	else
		dll_change = DLL_CHANGE_OFF;

	emc->dll_on = !!(timing->emc_mode_1 & 0x1);

	if (timing->data[80] && !readl_relaxed(emc->regs + EMC_ZCAL_INTERVAL))
		emc->zcal_long = true;
	else
		emc->zcal_long = false;

	fbio_cfg5 = readl_relaxed(emc->regs + EMC_FBIO_CFG5);
	dram_type = fbio_cfg5 & EMC_FBIO_CFG5_DRAM_TYPE_MASK;

	dram_num = tegra_mc_get_emem_device_count(emc->mc);

	/* disable dynamic self-refresh */
	if (emc->emc_cfg & EMC_CFG_DYN_SREF_ENABLE) {
		emc->emc_cfg &= ~EMC_CFG_DYN_SREF_ENABLE;
		writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);

		pre_wait = 5;
	}

	/* update MC arbiter settings */
	val = mc_readl(emc->mc, MC_EMEM_ARB_OUTSTANDING_REQ);
	if (!(val & MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE) ||
	    ((val & MC_EMEM_ARB_OUTSTANDING_REQ_MAX_MASK) > 0x50)) {

		val = MC_EMEM_ARB_OUTSTANDING_REQ_LIMIT_ENABLE |
		      MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE | 0x50;
		mc_writel(emc->mc, val, MC_EMEM_ARB_OUTSTANDING_REQ);
		mc_writel(emc->mc, MC_TIMING_UPDATE, MC_TIMING_CONTROL);
	}

	if (emc->mc_override & MC_EMEM_ARB_OVERRIDE_EACK_MASK)
		mc_writel(emc->mc,
			  emc->mc_override & ~MC_EMEM_ARB_OVERRIDE_EACK_MASK,
			  MC_EMEM_ARB_OVERRIDE);

	/* check DQ/DQS VREF delay */
	if (emc_dqs_preset(emc, timing, &schmitt_to_vref)) {
		if (pre_wait < 3)
			pre_wait = 3;
	}

	if (pre_wait) {
		err = emc_seq_update_timing(emc);
		if (err)
			return err;

		udelay(pre_wait);
	}

	/* disable auto-calibration if VREF mode is switching */
	if (timing->emc_auto_cal_interval) {
		val = readl_relaxed(emc->regs + EMC_XM2COMPPADCTRL);
		val ^= timing->data[74];

		if (val & EMC_XM2COMPPADCTRL_VREF_CAL_ENABLE) {
			writel_relaxed(0, emc->regs + EMC_AUTO_CAL_INTERVAL);

			err = readl_relaxed_poll_timeout_atomic(
				emc->regs + EMC_AUTO_CAL_STATUS, val,
				!(val & EMC_AUTO_CAL_STATUS_ACTIVE), 1, 300);
			if (err) {
				dev_err(emc->dev,
					"failed to disable auto-cal: %d\n",
					err);
				return err;
			}

			emc->vref_cal_toggle = true;
		}
	}

	/* program shadow registers */
	for (i = 0; i < ARRAY_SIZE(timing->data); i++) {
		/* EMC_XM2CLKPADCTRL should be programmed separately */
		if (i != 73)
			writel_relaxed(timing->data[i],
				       emc->regs + emc_timing_registers[i]);
	}
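
	/* program the MC's EMEM configuration for the new rate */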
	err = tegra_mc_write_emem_configuration(emc->mc, timing->rate);
	if (err)
		return err;

	/* DDR3: predict MRS long wait count */
	if (dram_type == DRAM_TYPE_DDR3 && dll_change == DLL_CHANGE_ON) {
		u32 cnt = 512;

		if (emc->zcal_long)
			cnt -= dram_num * 256;

		val = timing->data[82] & EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK;
		if (cnt < val)
			cnt = val;

		val = timing->data[82] & ~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
		val |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) &
		       EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;

		writel_relaxed(val, emc->regs + EMC_MRS_WAIT_CNT);
	}

	/* disable interrupt since read access is prohibited after stalling */
	disable_irq(emc->irq);

	/* this read also completes the writes */
	val = readl_relaxed(emc->regs + EMC_SEL_DPD_CTRL);

	if (!(val & EMC_SEL_DPD_CTRL_QUSE_DPD_ENABLE) && schmitt_to_vref) {
		u32 cur_mode, new_mode;

		cur_mode = fbio_cfg5 & EMC_CFG5_QUSE_MODE_MASK;
		cur_mode >>= EMC_CFG5_QUSE_MODE_SHIFT;

		new_mode = timing->data[39] & EMC_CFG5_QUSE_MODE_MASK;
		new_mode >>= EMC_CFG5_QUSE_MODE_SHIFT;

		if ((cur_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN &&
		     cur_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK) ||
		    (new_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN &&
		     new_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK))
			qrst_used = true;
	}

	/* flow control marker 1 */
	writel_relaxed(0x1, emc->regs + EMC_STALL_THEN_EXE_BEFORE_CLKCHANGE);

	/* enable periodic reset */
	if (qrst_used) {
		writel_relaxed(emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE,
			       emc->regs + EMC_DBG);
		writel_relaxed(emc->emc_cfg | EMC_CFG_PERIODIC_QRST,
			       emc->regs + EMC_CFG);
		writel_relaxed(emc_dbg, emc->regs + EMC_DBG);
	}

	/* disable auto-refresh to save time after clock change */
	writel_relaxed(EMC_REFCTRL_DISABLE_ALL(dram_num),
		       emc->regs + EMC_REFCTRL);

	/* turn off DLL and enter self-refresh on DDR3 */
	if (dram_type == DRAM_TYPE_DDR3) {
		if (dll_change == DLL_CHANGE_OFF)
			writel_relaxed(timing->emc_mode_1,
				       emc->regs + EMC_EMRS);

		writel_relaxed(DRAM_BROADCAST(dram_num) |
			       EMC_SELF_REF_CMD_ENABLED,
			       emc->regs + EMC_SELF_REF);
	}

	/* flow control marker 2 */
	writel_relaxed(0x1, emc->regs + EMC_STALL_THEN_EXE_AFTER_CLKCHANGE);

	/* enable write-active MUX, update unshadowed pad control */
	writel_relaxed(emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE, emc->regs + EMC_DBG);
	writel_relaxed(timing->data[73], emc->regs + EMC_XM2CLKPADCTRL);

	/* restore periodic QRST and disable write-active MUX */
	val = !!(emc->emc_cfg & EMC_CFG_PERIODIC_QRST);
	if (qrst_used || timing->emc_cfg_periodic_qrst != val) {
		if (timing->emc_cfg_periodic_qrst)
			emc->emc_cfg |= EMC_CFG_PERIODIC_QRST;
		else
			emc->emc_cfg &= ~EMC_CFG_PERIODIC_QRST;

		writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
	}
	writel_relaxed(emc_dbg, emc->regs + EMC_DBG);

	/* exit self-refresh on DDR3 */
	if (dram_type == DRAM_TYPE_DDR3)
		writel_relaxed(DRAM_BROADCAST(dram_num),
			       emc->regs + EMC_SELF_REF);

	/* set DRAM-mode registers */
	if (dram_type == DRAM_TYPE_DDR3) {
		if (timing->emc_mode_1 != emc->emc_mode_1)
			writel_relaxed(timing->emc_mode_1,
				       emc->regs + EMC_EMRS);

		if (timing->emc_mode_2 != emc->emc_mode_2)
			writel_relaxed(timing->emc_mode_2,
				       emc->regs + EMC_EMRS);

		if (timing->emc_mode_reset != emc->emc_mode_reset ||
		    dll_change == DLL_CHANGE_ON) {
			val = timing->emc_mode_reset;
			if (dll_change == DLL_CHANGE_ON) {
				val |= EMC_MODE_SET_DLL_RESET;
				val |= EMC_MODE_SET_LONG_CNT;
			} else {
				val &= ~EMC_MODE_SET_DLL_RESET;
			}
			writel_relaxed(val, emc->regs + EMC_MRS);
		}
	} else {
		if (timing->emc_mode_2 != emc->emc_mode_2)
			writel_relaxed(timing->emc_mode_2,
				       emc->regs + EMC_MRW);

		if (timing->emc_mode_1 != emc->emc_mode_1)
			writel_relaxed(timing->emc_mode_1,
				       emc->regs + EMC_MRW);
	}
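
	/* remember the mode-register values that have just been programmed */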
	emc->emc_mode_1 = timing->emc_mode_1;
	emc->emc_mode_2 = timing->emc_mode_2;
	emc->emc_mode_reset = timing->emc_mode_reset;

	/* issue ZCAL command if turning ZCAL on */
	if (emc->zcal_long) {
		writel_relaxed(EMC_ZQ_CAL_LONG_CMD_DEV0,
			       emc->regs + EMC_ZQ_CAL);

		if (dram_num > 1)
			writel_relaxed(EMC_ZQ_CAL_LONG_CMD_DEV1,
				       emc->regs + EMC_ZQ_CAL);
	}

	/* re-enable auto-refresh */
	writel_relaxed(EMC_REFCTRL_ENABLE_ALL(dram_num),
		       emc->regs + EMC_REFCTRL);

	/* flow control marker 3 */
	writel_relaxed(0x1, emc->regs + EMC_UNSTALL_RW_AFTER_CLKCHANGE);

	reinit_completion(&emc->clk_handshake_complete);

	/* interrupt can be re-enabled now */
	enable_irq(emc->irq);

	emc->bad_state = false;
	emc->prepared = true;

	return 0;
}

static int emc_complete_timing_change(struct tegra_emc *emc,
				      unsigned long rate)
{
	struct emc_timing *timing = emc_find_timing(emc, rate);
	unsigned long timeout;
	int ret;

	timeout = wait_for_completion_timeout(&emc->clk_handshake_complete,
					      msecs_to_jiffies(100));
	if (timeout == 0) {
		dev_err(emc->dev, "emc-car handshake failed\n");
		emc->bad_state = true;
		return -EIO;
	}

	/* restore auto-calibration */
	if (emc->vref_cal_toggle)
		writel_relaxed(timing->emc_auto_cal_interval,
			       emc->regs + EMC_AUTO_CAL_INTERVAL);

	/* restore dynamic self-refresh */
	if (timing->emc_cfg_dyn_self_ref) {
		emc->emc_cfg |= EMC_CFG_DYN_SREF_ENABLE;
		writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
	}

	/* set number of clocks to wait after each ZQ command */
	if (emc->zcal_long)
		writel_relaxed(timing->emc_zcal_cnt_long,
			       emc->regs + EMC_ZCAL_WAIT_CNT);

	udelay(2);
	/* update restored timing */
	ret = emc_seq_update_timing(emc);
	if (ret)
		emc->bad_state = true;

	/* restore early ACK */
	mc_writel(emc->mc, emc->mc_override, MC_EMEM_ARB_OVERRIDE);

	emc->prepared = false;

	return ret;
}

static int emc_unprepare_timing_change(struct tegra_emc *emc,
				       unsigned long rate)
{
	if (emc->prepared && !emc->bad_state) {
		/* shouldn't ever happen in practice */
		dev_err(emc->dev, "timing configuration can't be reverted\n");
		emc->bad_state = true;
	}

	return 0;
}
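
/*
 * The EMC rate is changed through the common clock framework; this notifier
 * hooks the EMC-CAR handshake sequence into the rate-change notifications.
 */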
static int emc_clk_change_notify(struct notifier_block *nb,
				 unsigned long msg, void *data)
{
	struct tegra_emc *emc = container_of(nb, struct tegra_emc, clk_nb);
	struct clk_notifier_data *cnd = data;
	int err;

	switch (msg) {
	case PRE_RATE_CHANGE:
		err = emc_prepare_timing_change(emc, cnd->new_rate);
		break;

	case ABORT_RATE_CHANGE:
		err = emc_unprepare_timing_change(emc, cnd->old_rate);
		break;

	case POST_RATE_CHANGE:
		err = emc_complete_timing_change(emc, cnd->new_rate);
		break;

	default:
		return NOTIFY_DONE;
	}

	return notifier_from_errno(err);
}

static int load_one_timing_from_dt(struct tegra_emc *emc,
				   struct emc_timing *timing,
				   struct device_node *node)
{
	u32 value;
	int err;

	err = of_property_read_u32(node, "clock-frequency", &value);
	if (err) {
		dev_err(emc->dev, "timing %pOF: failed to read rate: %d\n",
			node, err);
		return err;
	}

	timing->rate = value;

	err = of_property_read_u32_array(node, "nvidia,emc-configuration",
					 timing->data,
					 ARRAY_SIZE(emc_timing_registers));
	if (err) {
		dev_err(emc->dev,
			"timing %pOF: failed to read emc timing data: %d\n",
			node, err);
		return err;
	}

#define EMC_READ_BOOL(prop, dtprop) \
	timing->prop = of_property_read_bool(node, dtprop);

#define EMC_READ_U32(prop, dtprop) \
	err = of_property_read_u32(node, dtprop, &timing->prop); \
	if (err) { \
		dev_err(emc->dev, \
			"timing %pOFn: failed to read " #prop ": %d\n", \
			node, err); \
		return err; \
	}

	EMC_READ_U32(emc_auto_cal_interval, "nvidia,emc-auto-cal-interval")
	EMC_READ_U32(emc_mode_1, "nvidia,emc-mode-1")
	EMC_READ_U32(emc_mode_2, "nvidia,emc-mode-2")
	EMC_READ_U32(emc_mode_reset, "nvidia,emc-mode-reset")
	EMC_READ_U32(emc_zcal_cnt_long, "nvidia,emc-zcal-cnt-long")
	EMC_READ_BOOL(emc_cfg_dyn_self_ref, "nvidia,emc-cfg-dyn-self-ref")
	EMC_READ_BOOL(emc_cfg_periodic_qrst, "nvidia,emc-cfg-periodic-qrst")

#undef EMC_READ_U32
#undef EMC_READ_BOOL

	dev_dbg(emc->dev, "%s: %pOF: rate %lu\n", __func__, node, timing->rate);

	return 0;
}

static int cmp_timings(const void *_a, const void *_b)
{
	const struct emc_timing *a = _a;
	const struct emc_timing *b = _b;

	if (a->rate < b->rate)
		return -1;

	if (a->rate > b->rate)
		return 1;

	return 0;
}

static int emc_check_mc_timings(struct tegra_emc *emc)
{
	struct tegra_mc *mc = emc->mc;
	unsigned int i;

	if (emc->num_timings != mc->num_timings) {
		dev_err(emc->dev, "emc/mc timings number mismatch: %u %u\n",
			emc->num_timings, mc->num_timings);
		return -EINVAL;
	}

	for (i = 0; i < mc->num_timings; i++) {
		if (emc->timings[i].rate != mc->timings[i].rate) {
			dev_err(emc->dev,
				"emc/mc timing rate mismatch: %lu %lu\n",
				emc->timings[i].rate, mc->timings[i].rate);
			return -EINVAL;
		}
	}

	return 0;
}

static int emc_load_timings_from_dt(struct tegra_emc *emc,
				    struct device_node *node)
{
	struct device_node *child;
	struct emc_timing *timing;
	int child_count;
	int err;

	child_count = of_get_child_count(node);
	if (!child_count) {
		dev_err(emc->dev, "no memory timings in: %pOF\n", node);
		return -EINVAL;
	}

	emc->timings = devm_kcalloc(emc->dev, child_count, sizeof(*timing),
				    GFP_KERNEL);
	if (!emc->timings)
		return -ENOMEM;

	emc->num_timings = child_count;
	timing = emc->timings;

	for_each_child_of_node(node, child) {
		err = load_one_timing_from_dt(emc, timing++, child);
		if (err) {
			of_node_put(child);
			return err;
		}
	}

	sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
	     NULL);

	err = emc_check_mc_timings(emc);
	if (err)
		return err;

	dev_info(emc->dev,
		 "got %u timings for RAM code %u (min %luMHz max %luMHz)\n",
		 emc->num_timings,
		 tegra_read_ram_code(),
		 emc->timings[0].rate / 1000000,
		 emc->timings[emc->num_timings - 1].rate / 1000000);

	return 0;
}
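
/*
 * Timings for different memory configurations are stored in separate
 * device-tree subnodes; select the subnode whose "nvidia,ram-code" matches
 * the RAM code of this board.
 */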
static struct device_node *emc_find_node_by_ram_code(struct device *dev)
{
	struct device_node *np;
	u32 value, ram_code;
	int err;

	ram_code = tegra_read_ram_code();

	for_each_child_of_node(dev->of_node, np) {
		err = of_property_read_u32(np, "nvidia,ram-code", &value);
		if (err || value != ram_code)
			continue;

		return np;
	}

	dev_err(dev, "no memory timings for RAM code %u found in device-tree\n",
		ram_code);

	return NULL;
}

static int emc_setup_hw(struct tegra_emc *emc)
{
	u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
	u32 fbio_cfg5, emc_cfg, emc_dbg;
	enum emc_dram_type dram_type;

	fbio_cfg5 = readl_relaxed(emc->regs + EMC_FBIO_CFG5);
	dram_type = fbio_cfg5 & EMC_FBIO_CFG5_DRAM_TYPE_MASK;

	emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);

	/* enable EMC and CAR to handshake on PLL divider/source changes */
	emc_cfg |= EMC_CLKCHANGE_REQ_ENABLE;

	/* configure clock change mode according to the DRAM type */
	switch (dram_type) {
	case DRAM_TYPE_LPDDR2:
		emc_cfg |= EMC_CLKCHANGE_PD_ENABLE;
		emc_cfg &= ~EMC_CLKCHANGE_SR_ENABLE;
		break;

	default:
		emc_cfg &= ~EMC_CLKCHANGE_SR_ENABLE;
		emc_cfg &= ~EMC_CLKCHANGE_PD_ENABLE;
		break;
	}

	writel_relaxed(emc_cfg, emc->regs + EMC_CFG_2);

	/* initialize interrupt */
	writel_relaxed(intmask, emc->regs + EMC_INTMASK);
	writel_relaxed(0xffffffff, emc->regs + EMC_INTSTATUS);

	/* ensure that unwanted debug features are disabled */
	emc_dbg = readl_relaxed(emc->regs + EMC_DBG);
	emc_dbg |= EMC_DBG_CFG_PRIORITY;
	emc_dbg &= ~EMC_DBG_READ_MUX_ASSEMBLY;
	emc_dbg &= ~EMC_DBG_WRITE_MUX_ACTIVE;
	emc_dbg &= ~EMC_DBG_FORCE_UPDATE;
	writel_relaxed(emc_dbg, emc->regs + EMC_DBG);

	return 0;
}
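
/*
 * Round the requested rate to one of the rates from the timings table:
 * prefer the first rate that is not below the request, stepping back by one
 * entry if it exceeds max_rate, while keeping the result within the given
 * limits.
 */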
static long emc_round_rate(unsigned long rate,
			   unsigned long min_rate,
			   unsigned long max_rate,
			   void *arg)
{
	struct emc_timing *timing = NULL;
	struct tegra_emc *emc = arg;
	unsigned int i;

	min_rate = min(min_rate, emc->timings[emc->num_timings - 1].rate);

	for (i = 0; i < emc->num_timings; i++) {
		if (emc->timings[i].rate < rate && i != emc->num_timings - 1)
			continue;

		if (emc->timings[i].rate > max_rate) {
			i = max(i, 1u) - 1;

			if (emc->timings[i].rate < min_rate)
				break;
		}

		if (emc->timings[i].rate < min_rate)
			continue;

		timing = &emc->timings[i];
		break;
	}

	if (!timing) {
		dev_err(emc->dev, "no timing for rate %lu min %lu max %lu\n",
			rate, min_rate, max_rate);
		return -EINVAL;
	}

	return timing->rate;
}

static int tegra_emc_probe(struct platform_device *pdev)
{
	struct platform_device *mc;
	struct device_node *np;
	struct tegra_emc *emc;
	int err;

	if (of_get_child_count(pdev->dev.of_node) == 0) {
		dev_info(&pdev->dev,
			 "device-tree node doesn't have memory timings\n");
		return -ENODEV;
	}

	np = of_parse_phandle(pdev->dev.of_node, "nvidia,memory-controller", 0);
	if (!np) {
		dev_err(&pdev->dev, "could not get memory controller node\n");
		return -ENOENT;
	}

	mc = of_find_device_by_node(np);
	of_node_put(np);
	if (!mc)
		return -ENOENT;

	np = emc_find_node_by_ram_code(&pdev->dev);
	if (!np)
		return -EINVAL;

	emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
	if (!emc) {
		of_node_put(np);
		return -ENOMEM;
	}

	emc->mc = platform_get_drvdata(mc);
	if (!emc->mc) {
		/* don't leak the timings node on the deferred-probe path */
		of_node_put(np);
		return -EPROBE_DEFER;
	}

	init_completion(&emc->clk_handshake_complete);
	emc->clk_nb.notifier_call = emc_clk_change_notify;
	emc->dev = &pdev->dev;

	err = emc_load_timings_from_dt(emc, np);
	of_node_put(np);
	if (err)
		return err;

	emc->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(emc->regs))
		return PTR_ERR(emc->regs);

	err = emc_setup_hw(emc);
	if (err)
		return err;

	err = platform_get_irq(pdev, 0);
	if (err < 0) {
		dev_err(&pdev->dev, "interrupt not specified: %d\n", err);
		return err;
	}
	emc->irq = err;

	err = devm_request_irq(&pdev->dev, emc->irq, tegra_emc_isr, 0,
			       dev_name(&pdev->dev), emc);
	if (err) {
		dev_err(&pdev->dev, "failed to request irq: %d\n", err);
		return err;
	}

	tegra20_clk_set_emc_round_callback(emc_round_rate, emc);

	emc->clk = devm_clk_get(&pdev->dev, "emc");
	if (IS_ERR(emc->clk)) {
		err = PTR_ERR(emc->clk);
		dev_err(&pdev->dev, "failed to get emc clock: %d\n", err);
		goto unset_cb;
	}

	err = clk_notifier_register(emc->clk, &emc->clk_nb);
	if (err) {
		dev_err(&pdev->dev, "failed to register clk notifier: %d\n",
			err);
		goto unset_cb;
	}

	platform_set_drvdata(pdev, emc);

	return 0;

unset_cb:
	tegra20_clk_set_emc_round_callback(NULL, NULL);

	return err;
}

static int tegra_emc_suspend(struct device *dev)
{
	struct tegra_emc *emc = dev_get_drvdata(dev);

	/*
	 * Suspending in a bad state will hang the machine. The "prepared"
	 * variable shall always be false here, unless a kernel bug caused
	 * suspending in a wrong order.
	 */
	if (WARN_ON(emc->prepared) || emc->bad_state)
		return -EINVAL;

	emc->bad_state = true;

	return 0;
}

static int tegra_emc_resume(struct device *dev)
{
	struct tegra_emc *emc = dev_get_drvdata(dev);

	emc_setup_hw(emc);
	emc->bad_state = false;

	return 0;
}

static const struct dev_pm_ops tegra_emc_pm_ops = {
	.suspend = tegra_emc_suspend,
	.resume = tegra_emc_resume,
};

static const struct of_device_id tegra_emc_of_match[] = {
	{ .compatible = "nvidia,tegra30-emc", },
	{},
};

static struct platform_driver tegra_emc_driver = {
	.probe = tegra_emc_probe,
	.driver = {
		.name = "tegra30-emc",
		.of_match_table = tegra_emc_of_match,
		.pm = &tegra_emc_pm_ops,
		.suppress_bind_attrs = true,
	},
};

static int __init tegra_emc_init(void)
{
	return platform_driver_register(&tegra_emc_driver);
}
subsys_initcall(tegra_emc_init);