// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2012-2015 Panasonic Corporation
 * Copyright (C) 2015-2017 Socionext Inc.
 *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
 */

#include <common.h>
#include <fdt_support.h>
#include <fdtdec.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/sizes.h>
#include <asm/global_data.h>

#include "sg-regs.h"
#include "soc-info.h"

DECLARE_GLOBAL_DATA_PTR;

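/*
 * Per-SoC memory interface parameters.  The field meanings below are
 * inferred from how they are used in this file:
 *   sparse_ch1_base - base address of DRAM channel 1 when the sparse
 *                     memory layout is enabled in SG_MEMCONF
 *   have_ch2        - non-zero if the SoC has a third DRAM channel
 */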
struct uniphier_memif_data {
	unsigned int soc_id;
	unsigned long sparse_ch1_base;
	int have_ch2;
};

static const struct uniphier_memif_data uniphier_memif_data[] = {
	{
		.soc_id = UNIPHIER_LD4_ID,
		.sparse_ch1_base = 0xc0000000,
	},
	{
		.soc_id = UNIPHIER_PRO4_ID,
		.sparse_ch1_base = 0xa0000000,
	},
	{
		.soc_id = UNIPHIER_SLD8_ID,
		.sparse_ch1_base = 0xc0000000,
	},
	{
		.soc_id = UNIPHIER_PRO5_ID,
		.sparse_ch1_base = 0xc0000000,
	},
	{
		.soc_id = UNIPHIER_PXS2_ID,
		.sparse_ch1_base = 0xc0000000,
		.have_ch2 = 1,
	},
	{
		.soc_id = UNIPHIER_LD6B_ID,
		.sparse_ch1_base = 0xc0000000,
		.have_ch2 = 1,
	},
	{
		.soc_id = UNIPHIER_LD11_ID,
		.sparse_ch1_base = 0xc0000000,
	},
	{
		.soc_id = UNIPHIER_LD20_ID,
		.sparse_ch1_base = 0xc0000000,
		.have_ch2 = 1,
	},
	{
		.soc_id = UNIPHIER_PXS3_ID,
		.sparse_ch1_base = 0xc0000000,
		.have_ch2 = 1,
	},
};
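/*
 * UNIPHIER_DEFINE_SOCDATA_FUNC (from soc-info.h) is expected to generate
 * uniphier_get_memif_data(), returning the entry above that matches the
 * running SoC, or NULL if the SoC is not listed.
 */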
UNIPHIER_DEFINE_SOCDATA_FUNC(uniphier_get_memif_data, uniphier_memif_data)

struct uniphier_dram_map {
	unsigned long base;
	unsigned long size;
};

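/*
 * Decode the SG_MEMCONF register into a DRAM channel map.
 *
 * Worked example (field values are illustrative): if the ch0 size field
 * reads SG_MEMCONF_CH0_SZ_512M and the ch0 number field reads
 * SG_MEMCONF_CH0_NUM_2, channel 0 spans 2 x 512 MiB = 1 GiB starting at
 * CONFIG_SYS_SDRAM_BASE, and channel 1 follows immediately after it unless
 * the sparse memory layout is enabled.
 */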
static int uniphier_memconf_decode(struct uniphier_dram_map *dram_map)
{
	const struct uniphier_memif_data *data;
	unsigned long size;
	u32 val;

	data = uniphier_get_memif_data();
	if (!data) {
		pr_err("unsupported SoC\n");
		return -EINVAL;
	}

	val = readl(SG_MEMCONF);

	/* set up ch0 */
	dram_map[0].base = CONFIG_SYS_SDRAM_BASE;

	switch (val & SG_MEMCONF_CH0_SZ_MASK) {
	case SG_MEMCONF_CH0_SZ_64M:
		size = SZ_64M;
		break;
	case SG_MEMCONF_CH0_SZ_128M:
		size = SZ_128M;
		break;
	case SG_MEMCONF_CH0_SZ_256M:
		size = SZ_256M;
		break;
	case SG_MEMCONF_CH0_SZ_512M:
		size = SZ_512M;
		break;
	case SG_MEMCONF_CH0_SZ_1G:
		size = SZ_1G;
		break;
	default:
		pr_err("error: invalid value in MEMCONF ch0 size field\n");
		return -EINVAL;
	}

	if ((val & SG_MEMCONF_CH0_NUM_MASK) == SG_MEMCONF_CH0_NUM_2)
		size *= 2;

	dram_map[0].size = size;

	/* set up ch1 */
	dram_map[1].base = dram_map[0].base + size;

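	/*
	 * Example (addresses are hypothetical): with CONFIG_SYS_SDRAM_BASE at
	 * 0x80000000 and a 512 MiB ch0, ch1 would normally start at
	 * 0xa0000000; with sparse memory enabled on an SoC whose
	 * sparse_ch1_base is 0xc0000000, ch1 is moved up to 0xc0000000,
	 * leaving a hole in the physical address space.
	 */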
	if (val & SG_MEMCONF_SPARSEMEM) {
		if (dram_map[1].base > data->sparse_ch1_base) {
			pr_warn("Sparse mem is enabled, but ch0 and ch1 overlap\n");
			pr_warn("Only ch0 is available\n");
			dram_map[1].base = 0;
			return 0;
		}

		dram_map[1].base = data->sparse_ch1_base;
	}

	switch (val & SG_MEMCONF_CH1_SZ_MASK) {
	case SG_MEMCONF_CH1_SZ_64M:
		size = SZ_64M;
		break;
	case SG_MEMCONF_CH1_SZ_128M:
		size = SZ_128M;
		break;
	case SG_MEMCONF_CH1_SZ_256M:
		size = SZ_256M;
		break;
	case SG_MEMCONF_CH1_SZ_512M:
		size = SZ_512M;
		break;
	case SG_MEMCONF_CH1_SZ_1G:
		size = SZ_1G;
		break;
	default:
		pr_err("error: invalid value in MEMCONF ch1 size field\n");
		return -EINVAL;
	}

	if ((val & SG_MEMCONF_CH1_NUM_MASK) == SG_MEMCONF_CH1_NUM_2)
		size *= 2;

	dram_map[1].size = size;

	if (!data->have_ch2 || val & SG_MEMCONF_CH2_DISABLE)
		return 0;

	/* set up ch2 */
	dram_map[2].base = dram_map[1].base + size;

	switch (val & SG_MEMCONF_CH2_SZ_MASK) {
	case SG_MEMCONF_CH2_SZ_64M:
		size = SZ_64M;
		break;
	case SG_MEMCONF_CH2_SZ_128M:
		size = SZ_128M;
		break;
	case SG_MEMCONF_CH2_SZ_256M:
		size = SZ_256M;
		break;
	case SG_MEMCONF_CH2_SZ_512M:
		size = SZ_512M;
		break;
	case SG_MEMCONF_CH2_SZ_1G:
		size = SZ_1G;
		break;
	default:
		pr_err("error: invalid value in MEMCONF ch2 size field\n");
		return -EINVAL;
	}

	if ((val & SG_MEMCONF_CH2_NUM_MASK) == SG_MEMCONF_CH2_NUM_2)
		size *= 2;

	dram_map[2].size = size;

	return 0;
}

int dram_init(void)
{
	struct uniphier_dram_map dram_map[3] = {};
	int ret, i;

	gd->ram_size = 0;

	ret = uniphier_memconf_decode(dram_map);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(dram_map); i++) {
		unsigned long max_size;

		if (!dram_map[i].size)
			break;

		/*
		 * U-Boot relocates itself to the tail of the memory region,
		 * but it does not expect sparse memory.  We use the first
		 * contiguous chunk here.
		 */
		if (i > 0 && dram_map[i - 1].base + dram_map[i - 1].size <
							dram_map[i].base)
			break;

		/*
		 * Do not use memory that exceeds the 32-bit address range.
		 * U-Boot relocates itself to the end of the effectively
		 * available RAM.  This could be a problem for DMA engines
		 * that do not support 64-bit addresses (SDMA of SDHCI,
		 * UniPhier AV-ether, etc.)
		 */
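		/*
		 * Example (sizes are hypothetical): a 2 GiB bank based at
		 * 0xc0000000 crosses the 4 GiB boundary; max_size becomes
		 * 0x40000000, so only the first 1 GiB of the bank is added
		 * to gd->ram_size.
		 */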
		if (dram_map[i].base >= 1ULL << 32)
			break;

		max_size = (1ULL << 32) - dram_map[i].base;

		if (dram_map[i].size > max_size) {
			gd->ram_size += max_size;
			break;
		}

		gd->ram_size += dram_map[i].size;
	}

	/*
	 * LD20 uses the last 64 bytes of each channel for dynamic
	 * DDR PHY training
	 */
	if (uniphier_get_soc_id() == UNIPHIER_LD20_ID)
		gd->ram_size -= 64;

	return 0;
}

int dram_init_banksize(void)
{
	struct uniphier_dram_map dram_map[3] = {};
	int i, ret;

	ret = uniphier_memconf_decode(dram_map);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(dram_map); i++) {
		if (i >= ARRAY_SIZE(gd->bd->bi_dram))
			break;

		gd->bd->bi_dram[i].start = dram_map[i].base;
		gd->bd->bi_dram[i].size = dram_map[i].size;
	}

	return 0;
}

#ifdef CONFIG_OF_BOARD_SETUP
/*
 * The DRAM PHY requires a 64-byte scratch area in each DRAM channel
 * for its dynamic PHY training feature.
 */
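/*
 * Example (bank layout is hypothetical): for a 1 GiB bank starting at
 * 0x80000000, the 64-byte region 0xbfffffc0-0xbfffffff is added to the
 * device tree memory reserve map so the OS does not reuse it.
 */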
int ft_board_setup(void *fdt, bd_t *bd)
{
	unsigned long rsv_addr;
	const unsigned long rsv_size = 64;
	int i, ret;

	if (uniphier_get_soc_id() != UNIPHIER_LD20_ID)
		return 0;

	for (i = 0; i < ARRAY_SIZE(gd->bd->bi_dram); i++) {
		if (!gd->bd->bi_dram[i].size)
			continue;

		rsv_addr = gd->bd->bi_dram[i].start + gd->bd->bi_dram[i].size;
		rsv_addr -= rsv_size;

		ret = fdt_add_mem_rsv(fdt, rsv_addr, rsv_size);
		if (ret)
			return -ENOSPC;

		pr_notice("   Reserved memory region for DRAM PHY training: addr=%lx size=%lx\n",
			  rsv_addr, rsv_size);
	}

	return 0;
}
#endif