/*
 * Allwinner R40 SDRAM Controller emulation
 *
 * Copyright (C) 2023 qianfan Zhao <qianfanguijin@163.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "exec/address-spaces.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/bitops.h"
#include "hw/misc/allwinner-r40-dramc.h"
#include "trace.h"

#define REG_INDEX(offset) (offset / sizeof(uint32_t))

/* DRAMCOM register offsets */
enum {
    REG_DRAMCOM_CR = 0x0000, /* Control Register */
};

/* DRAMCOM register flags */
enum {
    REG_DRAMCOM_CR_DUAL_RANK = (1 << 0),
};

/* DRAMCTL register offsets */
enum {
    REG_DRAMCTL_PIR = 0x0000, /* PHY Initialization Register */
    REG_DRAMCTL_PGSR = 0x0010, /* PHY General Status Register */
    REG_DRAMCTL_STATR = 0x0018, /* Status Register */
    REG_DRAMCTL_PGCR = 0x0100, /* PHY general configuration registers */
};

/* DRAMCTL register flags */
enum {
    REG_DRAMCTL_PGSR_INITDONE = (1 << 0),
    REG_DRAMCTL_PGSR_READ_TIMEOUT = (1 << 13),
    REG_DRAMCTL_PGCR_ENABLE_READ_TIMEOUT = (1 << 25),
};

enum {
    REG_DRAMCTL_STATR_ACTIVE = (1 << 0),
};

#define DRAM_MAX_ROW_BITS 16
#define DRAM_MAX_COL_BITS 13 /* 8192 */
#define DRAM_MAX_BANK 3

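/*
 * Backing store for the address-line autodetect window.  Each cell is
 * indexed by the most-significant row, bank and column address bit that a
 * guest access selects (see address_to_autodetect_cells() below), so
 * offsets that would alias on a smaller chip land on the same cell.
 */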
static uint64_t dram_autodetect_cells[DRAM_MAX_ROW_BITS]
                                     [DRAM_MAX_BANK]
                                     [DRAM_MAX_COL_BITS];
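
/* Geometry of one supported virtual DDR chip; ram_size is in MiB. */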
struct VirtualDDRChip {
    uint32_t ram_size;
    uint8_t bank_bits;
    uint8_t row_bits;
    uint8_t col_bits;
};

/*
 * Only power-of-two RAM sizes from 256 MiB up to 1 GiB are supported;
 * 2 GiB is not supported because it would require the dual-rank feature.
 */
static const struct VirtualDDRChip dummy_ddr_chips[] = {
    {
        .ram_size = 256,
        .bank_bits = 3,
        .row_bits = 12,
        .col_bits = 13,
    }, {
        .ram_size = 512,
        .bank_bits = 3,
        .row_bits = 13,
        .col_bits = 13,
    }, {
        .ram_size = 1024,
        .bank_bits = 3,
        .row_bits = 14,
        .col_bits = 13,
    }, {
        0
    }
};

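/* Return the virtual DDR chip matching ram_size (in MiB), or NULL if none. */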
static const struct VirtualDDRChip *get_match_ddr(uint32_t ram_size)
{
    const struct VirtualDDRChip *ddr;

    for (ddr = &dummy_ddr_chips[0]; ddr->ram_size; ddr++) {
        if (ddr->ram_size == ram_size) {
            return ddr;
        }
    }

    return NULL;
}

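/*
 * Map a guest DRAM offset to its autodetect cell.  The offset is split into
 * row, bank and column address fields according to the geometry the guest
 * programmed (set_row_bits / set_bank_bits / set_col_bits), and the cell is
 * picked by the most-significant set bit of each field, so any two offsets
 * whose fields share the same most-significant set bits map to the same cell.
 */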
static uint64_t *address_to_autodetect_cells(AwR40DramCtlState *s,
                                             const struct VirtualDDRChip *ddr,
                                             uint32_t offset)
{
    int row_index = 0, bank_index = 0, col_index = 0;
    uint32_t row_addr, bank_addr, col_addr;

    row_addr = extract32(offset, s->set_col_bits + s->set_bank_bits,
                         s->set_row_bits);
    bank_addr = extract32(offset, s->set_col_bits, s->set_bank_bits);
    col_addr = extract32(offset, 0, s->set_col_bits);

    for (int i = 0; i < ddr->row_bits; i++) {
        if (row_addr & BIT(i)) {
            row_index = i;
        }
    }

    for (int i = 0; i < ddr->bank_bits; i++) {
        if (bank_addr & BIT(i)) {
            bank_index = i;
        }
    }

    for (int i = 0; i < ddr->col_bits; i++) {
        if (col_addr & BIT(i)) {
            col_index = i;
        }
    }

    trace_allwinner_r40_dramc_offset_to_cell(offset, row_index, bank_index,
                                             col_index);
    return &dram_autodetect_cells[row_index][bank_index][col_index];
}

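/*
 * Record the row/bank/column widths the guest programmed into DRAMCOM CR
 * and enable the autodetect overlay only when they differ from the geometry
 * of the emulated chip, so the guest's size-probing code can observe the
 * aliasing it expects.
 */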
static void allwinner_r40_dramc_map_rows(AwR40DramCtlState *s, uint8_t row_bits,
                                         uint8_t bank_bits, uint8_t col_bits)
{
    const struct VirtualDDRChip *ddr = get_match_ddr(s->ram_size);
    bool enable_detect_cells;

    trace_allwinner_r40_dramc_map_rows(row_bits, bank_bits, col_bits);

    if (!ddr) {
        return;
    }

    s->set_row_bits = row_bits;
    s->set_bank_bits = bank_bits;
    s->set_col_bits = col_bits;

    enable_detect_cells = ddr->bank_bits != bank_bits
                       || ddr->row_bits != row_bits
                       || ddr->col_bits != col_bits;

    if (enable_detect_cells) {
        trace_allwinner_r40_dramc_detect_cells_enable();
    } else {
        trace_allwinner_r40_dramc_detect_cells_disable();
    }

    memory_region_set_enabled(&s->detect_cells, enable_detect_cells);
}

static uint64_t allwinner_r40_dramcom_read(void *opaque, hwaddr offset,
                                           unsigned size)
{
    const AwR40DramCtlState *s = AW_R40_DRAMC(opaque);
    const uint32_t idx = REG_INDEX(offset);

    if (idx >= AW_R40_DRAMCOM_REGS_NUM) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n",
                      __func__, (uint32_t)offset);
        return 0;
    }

    trace_allwinner_r40_dramcom_read(offset, s->dramcom[idx], size);
    return s->dramcom[idx];
}

static void allwinner_r40_dramcom_write(void *opaque, hwaddr offset,
                                        uint64_t val, unsigned size)
{
    AwR40DramCtlState *s = AW_R40_DRAMC(opaque);
    const uint32_t idx = REG_INDEX(offset);

    trace_allwinner_r40_dramcom_write(offset, val, size);

    if (idx >= AW_R40_DRAMCOM_REGS_NUM) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n",
                      __func__, (uint32_t)offset);
        return;
    }

    switch (offset) {
    case REG_DRAMCOM_CR: /* Control Register */
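        /*
         * CR encodes the DRAM geometry: row bits in CR[7:4] (value + 1),
         * bank bits in CR[2] (value + 2) and column bits in CR[11:8]
         * (value + 3).  The autodetect mapping is only updated for
         * single-rank configurations; dual rank is not modelled.
         */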
        if (!(val & REG_DRAMCOM_CR_DUAL_RANK)) {
            allwinner_r40_dramc_map_rows(s, ((val >> 4) & 0xf) + 1,
                                         ((val >> 2) & 0x1) + 2,
                                         (((val >> 8) & 0xf) + 3));
        }
        break;
    }

    s->dramcom[idx] = (uint32_t) val;
}

static uint64_t allwinner_r40_dramctl_read(void *opaque, hwaddr offset,
                                           unsigned size)
{
    const AwR40DramCtlState *s = AW_R40_DRAMC(opaque);
    const uint32_t idx = REG_INDEX(offset);

    if (idx >= AW_R40_DRAMCTL_REGS_NUM) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n",
                      __func__, (uint32_t)offset);
        return 0;
    }

    trace_allwinner_r40_dramctl_read(offset, s->dramctl[idx], size);
    return s->dramctl[idx];
}

static void allwinner_r40_dramctl_write(void *opaque, hwaddr offset,
                                        uint64_t val, unsigned size)
{
    AwR40DramCtlState *s = AW_R40_DRAMC(opaque);
    const uint32_t idx = REG_INDEX(offset);

    trace_allwinner_r40_dramctl_write(offset, val, size);

    if (idx >= AW_R40_DRAMCTL_REGS_NUM) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n",
                      __func__, (uint32_t)offset);
        return;
    }

    switch (offset) {
    case REG_DRAMCTL_PIR: /* PHY Initialization Register */
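        /*
         * PHY initialisation completes instantly in this model: report
         * INITDONE and flag the controller as active right away.
         */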
        s->dramctl[REG_INDEX(REG_DRAMCTL_PGSR)] |= REG_DRAMCTL_PGSR_INITDONE;
        s->dramctl[REG_INDEX(REG_DRAMCTL_STATR)] |= REG_DRAMCTL_STATR_ACTIVE;
        break;
    }

    s->dramctl[idx] = (uint32_t) val;
}

static uint64_t allwinner_r40_dramphy_read(void *opaque, hwaddr offset,
                                           unsigned size)
{
    const AwR40DramCtlState *s = AW_R40_DRAMC(opaque);
    const uint32_t idx = REG_INDEX(offset);

    if (idx >= AW_R40_DRAMPHY_REGS_NUM) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n",
                      __func__, (uint32_t)offset);
        return 0;
    }

    trace_allwinner_r40_dramphy_read(offset, s->dramphy[idx], size);
    return s->dramphy[idx];
}

static void allwinner_r40_dramphy_write(void *opaque, hwaddr offset,
                                        uint64_t val, unsigned size)
{
    AwR40DramCtlState *s = AW_R40_DRAMC(opaque);
    const uint32_t idx = REG_INDEX(offset);

    trace_allwinner_r40_dramphy_write(offset, val, size);

    if (idx >= AW_R40_DRAMPHY_REGS_NUM) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n",
                      __func__, (uint32_t)offset);
        return;
    }

    s->dramphy[idx] = (uint32_t) val;
}

static const MemoryRegionOps allwinner_r40_dramcom_ops = {
    .read = allwinner_r40_dramcom_read,
    .write = allwinner_r40_dramcom_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .impl.min_access_size = 4,
};

static const MemoryRegionOps allwinner_r40_dramctl_ops = {
    .read = allwinner_r40_dramctl_read,
    .write = allwinner_r40_dramctl_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .impl.min_access_size = 4,
};

static const MemoryRegionOps allwinner_r40_dramphy_ops = {
    .read = allwinner_r40_dramphy_read,
    .write = allwinner_r40_dramphy_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .impl.min_access_size = 4,
};

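/*
 * Accessors for the "DRAMCELLS" overlay: while the guest-programmed geometry
 * does not match the emulated chip, reads and writes are redirected to the
 * shared autodetect cells so that memory-size probing observes aliasing.
 */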
static uint64_t allwinner_r40_detect_read(void *opaque, hwaddr offset,
                                          unsigned size)
{
    AwR40DramCtlState *s = AW_R40_DRAMC(opaque);
    const struct VirtualDDRChip *ddr = get_match_ddr(s->ram_size);
    uint64_t data = 0;

    if (ddr) {
        data = *address_to_autodetect_cells(s, ddr, (uint32_t)offset);
    }

    trace_allwinner_r40_dramc_detect_cell_read(offset, data);
    return data;
}

static void allwinner_r40_detect_write(void *opaque, hwaddr offset,
                                       uint64_t data, unsigned size)
{
    AwR40DramCtlState *s = AW_R40_DRAMC(opaque);
    const struct VirtualDDRChip *ddr = get_match_ddr(s->ram_size);

    if (ddr) {
        uint64_t *cell = address_to_autodetect_cells(s, ddr, (uint32_t)offset);
        trace_allwinner_r40_dramc_detect_cell_write(offset, data);
        *cell = data;
    }
}

static const MemoryRegionOps allwinner_r40_detect_ops = {
    .read = allwinner_r40_detect_read,
    .write = allwinner_r40_detect_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .impl.min_access_size = 4,
};

/*
 * mctl_r40_detect_rank_count() in U-Boot writes to the upper 1 GiB of DDR
 * to detect whether the board has a dual-rank configuration.  When the
 * board's ram_size is at most 1 GiB, map a small I/O page above that limit
 * and set the read-timeout flag in REG_DRAMCTL_PGSR whenever the guest
 * touches this high DRAM range.
 */
static uint64_t allwinner_r40_dualrank_detect_read(void *opaque, hwaddr offset,
                                                   unsigned size)
{
    AwR40DramCtlState *s = AW_R40_DRAMC(opaque);
    uint32_t reg;

    reg = s->dramctl[REG_INDEX(REG_DRAMCTL_PGCR)];
    if (reg & REG_DRAMCTL_PGCR_ENABLE_READ_TIMEOUT) { /* read timeout enabled */
        /*
         * This model only supports a single rank: flag READ_TIMEOUT when the
         * guest tries to read from the second rank.
         */
        s->dramctl[REG_INDEX(REG_DRAMCTL_PGSR)]
            |= REG_DRAMCTL_PGSR_READ_TIMEOUT;
    }

    return 0;
}

static const MemoryRegionOps allwinner_r40_dualrank_detect_ops = {
    .read = allwinner_r40_dualrank_detect_read,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .impl.min_access_size = 4,
};

static void allwinner_r40_dramc_reset(DeviceState *dev)
{
    AwR40DramCtlState *s = AW_R40_DRAMC(dev);

    /* Set default values for registers */
    memset(&s->dramcom, 0, sizeof(s->dramcom));
    memset(&s->dramctl, 0, sizeof(s->dramctl));
    memset(&s->dramphy, 0, sizeof(s->dramphy));
}

static void allwinner_r40_dramc_realize(DeviceState *dev, Error **errp)
{
    AwR40DramCtlState *s = AW_R40_DRAMC(dev);

    if (!get_match_ddr(s->ram_size)) {
        error_report("%s: ram-size %u MiB is not supported",
                     __func__, s->ram_size);
        exit(1);
    }

    /* The R40 supports up to 2 GiB DRAM; only 1 GiB is modelled for now. */
    memory_region_init_io(&s->detect_cells, OBJECT(s),
                          &allwinner_r40_detect_ops, s,
                          "DRAMCELLS", 1 * GiB);
    memory_region_add_subregion_overlap(get_system_memory(), s->ram_addr,
                                        &s->detect_cells, 10);
    memory_region_set_enabled(&s->detect_cells, false);

    /*
     * Since only DRAM sizes up to 1 GiB are supported, place a small
     * high-memory page just above 1 GiB for dual-rank detection.
     */
    memory_region_init_io(&s->dram_high, OBJECT(s),
                          &allwinner_r40_dualrank_detect_ops, s,
                          "DRAMHIGH", KiB);
    memory_region_add_subregion(get_system_memory(), s->ram_addr + GiB,
                                &s->dram_high);
}

static void allwinner_r40_dramc_init(Object *obj)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
    AwR40DramCtlState *s = AW_R40_DRAMC(obj);

    /* DRAMCOM registers, index 0 */
    memory_region_init_io(&s->dramcom_iomem, OBJECT(s),
                          &allwinner_r40_dramcom_ops, s,
                          "DRAMCOM", 4 * KiB);
    sysbus_init_mmio(sbd, &s->dramcom_iomem);

    /* DRAMCTL registers, index 1 */
    memory_region_init_io(&s->dramctl_iomem, OBJECT(s),
                          &allwinner_r40_dramctl_ops, s,
                          "DRAMCTL", 4 * KiB);
    sysbus_init_mmio(sbd, &s->dramctl_iomem);

    /* DRAMPHY registers, index 2 */
    memory_region_init_io(&s->dramphy_iomem, OBJECT(s),
                          &allwinner_r40_dramphy_ops, s,
                          "DRAMPHY", 4 * KiB);
    sysbus_init_mmio(sbd, &s->dramphy_iomem);
}

static Property allwinner_r40_dramc_properties[] = {
    DEFINE_PROP_UINT64("ram-addr", AwR40DramCtlState, ram_addr, 0x0),
    DEFINE_PROP_UINT32("ram-size", AwR40DramCtlState, ram_size, 256), /* MiB */
    DEFINE_PROP_END_OF_LIST()
};

static const VMStateDescription allwinner_r40_dramc_vmstate = {
    .name = "allwinner-r40-dramc",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(dramcom, AwR40DramCtlState,
                             AW_R40_DRAMCOM_REGS_NUM),
        VMSTATE_UINT32_ARRAY(dramctl, AwR40DramCtlState,
                             AW_R40_DRAMCTL_REGS_NUM),
        VMSTATE_UINT32_ARRAY(dramphy, AwR40DramCtlState,
                             AW_R40_DRAMPHY_REGS_NUM),
        VMSTATE_END_OF_LIST()
    }
};

static void allwinner_r40_dramc_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = allwinner_r40_dramc_reset;
    dc->vmsd = &allwinner_r40_dramc_vmstate;
    dc->realize = allwinner_r40_dramc_realize;
    device_class_set_props(dc, allwinner_r40_dramc_properties);
}

static const TypeInfo allwinner_r40_dramc_info = {
    .name = TYPE_AW_R40_DRAMC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = allwinner_r40_dramc_init,
    .instance_size = sizeof(AwR40DramCtlState),
    .class_init = allwinner_r40_dramc_class_init,
};

static void allwinner_r40_dramc_register(void)
{
    type_register_static(&allwinner_r40_dramc_info);
}

type_init(allwinner_r40_dramc_register)