/*
 * Copyright (C) 2012-2015 Panasonic Corporation
 * Copyright (C) 2015-2017 Socionext Inc.
 *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <fdt_support.h>
#include <fdtdec.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/sizes.h>
#include <asm/global_data.h>

#include "sg-regs.h"
#include "soc-info.h"

DECLARE_GLOBAL_DATA_PTR;

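/*
 * Per-SoC memory interface parameters:
 * @soc_id:          SoC identifier this entry applies to
 * @sparse_ch1_base: base address of DRAM ch1 when sparse memory mapping
 *                   (SG_MEMCONF_SPARSEMEM) is enabled
 * @have_ch2:        non-zero if the SoC has a third DRAM channel (ch2)
 */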
struct uniphier_memif_data {
	unsigned int soc_id;
	unsigned long sparse_ch1_base;
	int have_ch2;
};

static const struct uniphier_memif_data uniphier_memif_data[] = {
	{
		.soc_id = UNIPHIER_LD4_ID,
		.sparse_ch1_base = 0xc0000000,
	},
	{
		.soc_id = UNIPHIER_PRO4_ID,
		.sparse_ch1_base = 0xa0000000,
	},
	{
		.soc_id = UNIPHIER_SLD8_ID,
		.sparse_ch1_base = 0xc0000000,
	},
	{
		.soc_id = UNIPHIER_PRO5_ID,
		.sparse_ch1_base = 0xc0000000,
	},
	{
		.soc_id = UNIPHIER_PXS2_ID,
		.sparse_ch1_base = 0xc0000000,
		.have_ch2 = 1,
	},
	{
		.soc_id = UNIPHIER_LD6B_ID,
		.sparse_ch1_base = 0xc0000000,
		.have_ch2 = 1,
	},
	{
		.soc_id = UNIPHIER_LD11_ID,
		.sparse_ch1_base = 0xc0000000,
	},
	{
		.soc_id = UNIPHIER_LD20_ID,
		.sparse_ch1_base = 0xc0000000,
		.have_ch2 = 1,
	},
	{
		.soc_id = UNIPHIER_PXS3_ID,
		.sparse_ch1_base = 0xc0000000,
		.have_ch2 = 1,
	},
};
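/* uniphier_get_memif_data() returns the entry for the running SoC, or NULL */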
UNIPHIER_DEFINE_SOCDATA_FUNC(uniphier_get_memif_data, uniphier_memif_data)

struct uniphier_dram_map {
	unsigned long base;
	unsigned long size;
};

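/*
 * Decode the SG_MEMCONF register into the base and size of each DRAM
 * channel.  dram_map[] must have room for three entries (ch0, ch1, ch2).
 * Returns 0 on success, -EINVAL for an unsupported SoC or MEMCONF value.
 */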
static int uniphier_memconf_decode(struct uniphier_dram_map *dram_map)
{
	const struct uniphier_memif_data *data;
	unsigned long size;
	u32 val;

	data = uniphier_get_memif_data();
	if (!data) {
		pr_err("unsupported SoC\n");
		return -EINVAL;
	}

	val = readl(SG_MEMCONF);

	/* set up ch0 */
	dram_map[0].base = CONFIG_SYS_SDRAM_BASE;

	switch (val & SG_MEMCONF_CH0_SZ_MASK) {
	case SG_MEMCONF_CH0_SZ_64M:
		size = SZ_64M;
		break;
	case SG_MEMCONF_CH0_SZ_128M:
		size = SZ_128M;
		break;
	case SG_MEMCONF_CH0_SZ_256M:
		size = SZ_256M;
		break;
	case SG_MEMCONF_CH0_SZ_512M:
		size = SZ_512M;
		break;
	case SG_MEMCONF_CH0_SZ_1G:
		size = SZ_1G;
		break;
	default:
		pr_err("invalid value set for MEMCONF ch0 size\n");
		return -EINVAL;
	}

	if ((val & SG_MEMCONF_CH0_NUM_MASK) == SG_MEMCONF_CH0_NUM_2)
		size *= 2;

	dram_map[0].size = size;

	/* set up ch1 */
	dram_map[1].base = dram_map[0].base + size;

	if (val & SG_MEMCONF_SPARSEMEM) {
		if (dram_map[1].base > data->sparse_ch1_base) {
			pr_warn("Sparse mem is enabled, but ch0 and ch1 overlap\n");
			pr_warn("Only ch0 is available\n");
			dram_map[1].base = 0;
			return 0;
		}

		dram_map[1].base = data->sparse_ch1_base;
	}

	switch (val & SG_MEMCONF_CH1_SZ_MASK) {
	case SG_MEMCONF_CH1_SZ_64M:
		size = SZ_64M;
		break;
	case SG_MEMCONF_CH1_SZ_128M:
		size = SZ_128M;
		break;
	case SG_MEMCONF_CH1_SZ_256M:
		size = SZ_256M;
		break;
	case SG_MEMCONF_CH1_SZ_512M:
		size = SZ_512M;
		break;
	case SG_MEMCONF_CH1_SZ_1G:
		size = SZ_1G;
		break;
	default:
		pr_err("invalid value set for MEMCONF ch1 size\n");
		return -EINVAL;
	}

	if ((val & SG_MEMCONF_CH1_NUM_MASK) == SG_MEMCONF_CH1_NUM_2)
		size *= 2;

	dram_map[1].size = size;

	if (!data->have_ch2 || val & SG_MEMCONF_CH2_DISABLE)
		return 0;

	/* set up ch2 */
	dram_map[2].base = dram_map[1].base + size;

	switch (val & SG_MEMCONF_CH2_SZ_MASK) {
	case SG_MEMCONF_CH2_SZ_64M:
		size = SZ_64M;
		break;
	case SG_MEMCONF_CH2_SZ_128M:
		size = SZ_128M;
		break;
	case SG_MEMCONF_CH2_SZ_256M:
		size = SZ_256M;
		break;
	case SG_MEMCONF_CH2_SZ_512M:
		size = SZ_512M;
		break;
	case SG_MEMCONF_CH2_SZ_1G:
		size = SZ_1G;
		break;
	default:
		pr_err("invalid value set for MEMCONF ch2 size\n");
		return -EINVAL;
	}

	if ((val & SG_MEMCONF_CH2_NUM_MASK) == SG_MEMCONF_CH2_NUM_2)
		size *= 2;

	dram_map[2].size = size;

	return 0;
}

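/*
 * Report the usable RAM size: add up the decoded channels, but stop at the
 * first hole (sparse memory) and never count memory above the 32-bit
 * boundary, for the reasons explained in the comments below.
 */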
int dram_init(void)
{
	struct uniphier_dram_map dram_map[3] = {};
	int ret, i;

	gd->ram_size = 0;

	ret = uniphier_memconf_decode(dram_map);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(dram_map); i++) {
		unsigned long max_size;

		if (!dram_map[i].size)
			break;

		/*
		 * U-Boot relocates itself to the tail of the memory region,
		 * but it does not expect sparse memory.  We use the first
		 * contiguous chunk here.
		 */
		if (i > 0 && dram_map[i - 1].base + dram_map[i - 1].size <
							dram_map[i].base)
			break;

		/*
		 * Do not use memory that exceeds the 32-bit address range.
		 * U-Boot relocates itself to the end of the effectively
		 * available RAM.  This could be a problem for DMA engines
		 * that do not support 64-bit addresses (SDMA of SDHCI,
		 * UniPhier AV-ether, etc.)
		 */
		if (dram_map[i].base >= 1ULL << 32)
			break;

		max_size = (1ULL << 32) - dram_map[i].base;

		if (dram_map[i].size > max_size) {
			gd->ram_size += max_size;
			break;
		}

		gd->ram_size += dram_map[i].size;
	}

	/*
	 * LD20 uses the last 64 bytes of each channel for dynamic
	 * DDR PHY training
	 */
	if (uniphier_get_soc_id() == UNIPHIER_LD20_ID)
		gd->ram_size -= 64;

	return 0;
}

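/*
 * Fill gd->bd->bi_dram[] with the decoded channel layout.  Unlike
 * dram_init(), this records every channel, including sparse ones.
 */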
int dram_init_banksize(void)
{
	struct uniphier_dram_map dram_map[3] = {};
	int ret, i;

	ret = uniphier_memconf_decode(dram_map);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(dram_map); i++) {
		if (i >= ARRAY_SIZE(gd->bd->bi_dram))
			break;

		gd->bd->bi_dram[i].start = dram_map[i].base;
		gd->bd->bi_dram[i].size = dram_map[i].size;
	}

	return 0;
}

#ifdef CONFIG_OF_BOARD_SETUP
/*
 * The DRAM PHY requires a 64-byte scratch area in each DRAM channel
 * for its dynamic PHY training feature.
 */
int ft_board_setup(void *fdt, bd_t *bd)
{
	unsigned long rsv_addr;
	const unsigned long rsv_size = 64;
	int i, ret;

	if (uniphier_get_soc_id() != UNIPHIER_LD20_ID)
		return 0;

	for (i = 0; i < ARRAY_SIZE(gd->bd->bi_dram); i++) {
		if (!gd->bd->bi_dram[i].size)
			continue;

		rsv_addr = gd->bd->bi_dram[i].start + gd->bd->bi_dram[i].size;
		rsv_addr -= rsv_size;

		ret = fdt_add_mem_rsv(fdt, rsv_addr, rsv_size);
		if (ret)
			return -ENOSPC;

		pr_notice("   Reserved memory region for DRAM PHY training: addr=%lx size=%lx\n",
			  rsv_addr, rsv_size);
	}

	return 0;
}
#endif