/*
 * (C) Copyright 2010-2015
 * NVIDIA Corporation <www.nvidia.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

/* Tegra AP (Application Processor) code */

#include <common.h>
#include <asm/io.h>
#include <asm/arch/gp_padctrl.h>
#include <asm/arch/mc.h>
#include <asm/arch-tegra/ap.h>
#include <asm/arch-tegra/clock.h>
#include <asm/arch-tegra/fuse.h>
#include <asm/arch-tegra/pmc.h>
#include <asm/arch-tegra/scu.h>
#include <asm/arch-tegra/tegra.h>
#include <asm/arch-tegra/warmboot.h>

int tegra_get_chip(void)
{
	int rev;
	struct apb_misc_gp_ctlr *gp =
		(struct apb_misc_gp_ctlr *)NV_PA_APB_MISC_GP_BASE;

	/*
	 * This is undocumented, Chip ID is bits 15:8 of the register
	 * APB_MISC + 0x804, and has value 0x20 for Tegra20, 0x30 for
	 * Tegra30, 0x35 for T114, and 0x40 for Tegra124.
	 */
	rev = (readl(&gp->hidrev) & HIDREV_CHIPID_MASK) >> HIDREV_CHIPID_SHIFT;
	debug("%s: CHIPID is 0x%02X\n", __func__, rev);

	return rev;
}

int tegra_get_sku_info(void)
{
	int sku_id;
	struct fuse_regs *fuse = (struct fuse_regs *)NV_PA_FUSE_BASE;

	sku_id = readl(&fuse->sku_info) & 0xff;
	debug("%s: SKU info byte is 0x%02X\n", __func__, sku_id);

	return sku_id;
}

int tegra_get_chip_sku(void)
{
	uint sku_id, chip_id;

	chip_id = tegra_get_chip();
	sku_id = tegra_get_sku_info();

	switch (chip_id) {
	case CHIPID_TEGRA20:
		switch (sku_id) {
		case SKU_ID_T20_7:
		case SKU_ID_T20:
			return TEGRA_SOC_T20;
		case SKU_ID_T25SE:
		case SKU_ID_AP25:
		case SKU_ID_T25:
		case SKU_ID_AP25E:
		case SKU_ID_T25E:
			return TEGRA_SOC_T25;
		}
		break;
	case CHIPID_TEGRA30:
		switch (sku_id) {
		case SKU_ID_T33:
		case SKU_ID_T30:
		case SKU_ID_TM30MQS_P_A3:
		default:
			return TEGRA_SOC_T30;
		}
		break;
	case CHIPID_TEGRA114:
		switch (sku_id) {
		case SKU_ID_T114_ENG:
		case SKU_ID_T114_1:
		default:
			return TEGRA_SOC_T114;
		}
		break;
	case CHIPID_TEGRA124:
		switch (sku_id) {
		case SKU_ID_T124_ENG:
		default:
			return TEGRA_SOC_T124;
		}
		break;
	case CHIPID_TEGRA210:
		switch (sku_id) {
		case SKU_ID_T210_ENG:
		default:
			return TEGRA_SOC_T210;
		}
		break;
	}

	/* unknown chip/sku id */
	printf("%s: ERROR: UNKNOWN CHIP/SKU ID COMBO (0x%02X/0x%02X)\n",
	       __func__, chip_id, sku_id);
	return TEGRA_SOC_UNKNOWN;
}

#ifndef CONFIG_ARM64
static void enable_scu(void)
{
	struct scu_ctlr *scu = (struct scu_ctlr *)NV_PA_ARM_PERIPHBASE;
	u32 reg;

	/* Only enable the SCU on T20/T25 */
	if (tegra_get_chip() != CHIPID_TEGRA20)
		return;

	/* If SCU already setup/enabled, return */
	if (readl(&scu->scu_ctrl) & SCU_CTRL_ENABLE)
		return;

	/* Invalidate all ways for all processors */
	writel(0xFFFF, &scu->scu_inv_all);

	/* Enable SCU - bit 0 */
	reg = readl(&scu->scu_ctrl);
	reg |= SCU_CTRL_ENABLE;
	writel(reg, &scu->scu_ctrl);
}

static u32 get_odmdata(void)
{
	/*
	 * ODMDATA is stored in the BCT in IRAM by the BootROM.
	 * The BCT start and size are stored in the BIT in IRAM.
	 * Read the data @ bct_start + (bct_size - 12). This works
	 * on BCTs for currently supported SoCs, which are locked down.
	 * If this changes in new chips, we can revisit this algorithm.
	 */
	unsigned long bct_start;
	u32 odmdata;

	bct_start = readl(NV_PA_BASE_SRAM + NVBOOTINFOTABLE_BCTPTR);
	odmdata = readl(bct_start + BCT_ODMDATA_OFFSET);

	return odmdata;
}

static void init_pmc_scratch(void)
{
	struct pmc_ctlr *const pmc = (struct pmc_ctlr *)NV_PA_PMC_BASE;
	u32 odmdata;
	int i;

	/* SCRATCH0 is initialized by the boot ROM and shouldn't be cleared */
	for (i = 0; i < 23; i++)
		writel(0, &pmc->pmc_scratch1 + i);

	/* ODMDATA is for kernel use to determine RAM size, LP config, etc. */
	odmdata = get_odmdata();
	writel(odmdata, &pmc->pmc_scratch20);
}

#ifdef CONFIG_ARMV7_SECURE_RESERVE_SIZE
void protect_secure_section(void)
{
	struct mc_ctlr *mc = (struct mc_ctlr *)NV_PA_MC_BASE;

	/* Must be MB aligned */
	BUILD_BUG_ON(CONFIG_ARMV7_SECURE_BASE & 0xFFFFF);
	BUILD_BUG_ON(CONFIG_ARMV7_SECURE_RESERVE_SIZE & 0xFFFFF);

	writel(CONFIG_ARMV7_SECURE_BASE, &mc->mc_security_cfg0);
	writel(CONFIG_ARMV7_SECURE_RESERVE_SIZE >> 20, &mc->mc_security_cfg1);
}
#endif

#if defined(CONFIG_ARMV7_NONSEC)
static void smmu_flush(struct mc_ctlr *mc)
{
	(void)readl(&mc->mc_smmu_config);
}

static void smmu_enable(void)
{
	struct mc_ctlr *mc = (struct mc_ctlr *)NV_PA_MC_BASE;
	u32 value;

	/*
	 * Enable translation for all clients since access to this register
	 * is restricted to TrustZone-secured requestors. The kernel will use
	 * the per-SWGROUP enable bits to enable or disable translations.
	 */
	writel(0xffffffff, &mc->mc_smmu_translation_enable_0);
	writel(0xffffffff, &mc->mc_smmu_translation_enable_1);
	writel(0xffffffff, &mc->mc_smmu_translation_enable_2);
	writel(0xffffffff, &mc->mc_smmu_translation_enable_3);

	/*
	 * Enable SMMU globally since access to this register is restricted
	 * to TrustZone-secured requestors.
	 */
	value = readl(&mc->mc_smmu_config);
	value |= TEGRA_MC_SMMU_CONFIG_ENABLE;
	writel(value, &mc->mc_smmu_config);

	smmu_flush(mc);
}
#else
static void smmu_enable(void)
{
}
#endif

void s_init(void)
{
	/* Init PMC scratch memory */
	init_pmc_scratch();

	enable_scu();

	/* init the cache */
	config_cache();

	/* enable SMMU */
	smmu_enable();
}
#endif