1 /*
2 * QEMU Firmware configuration device emulation
3 *
4 * Copyright (c) 2008 Gleb Natapov
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #include "qemu/osdep.h"
26 #include "qemu/datadir.h"
27 #include "system/system.h"
28 #include "system/dma.h"
29 #include "system/reset.h"
30 #include "exec/address-spaces.h"
31 #include "hw/boards.h"
32 #include "hw/nvram/fw_cfg.h"
33 #include "hw/qdev-properties.h"
34 #include "hw/sysbus.h"
35 #include "migration/qemu-file-types.h"
36 #include "migration/vmstate.h"
37 #include "trace.h"
38 #include "qemu/error-report.h"
39 #include "qemu/option.h"
40 #include "qemu/config-file.h"
41 #include "qemu/cutils.h"
42 #include "qapi/error.h"
43 #include "hw/acpi/aml-build.h"
44 #include "hw/loader.h"
45
46 #define FW_CFG_FILE_SLOTS_DFLT 0x20
47
48 /* FW_CFG_VERSION bits */
49 #define FW_CFG_VERSION 0x01
50 #define FW_CFG_VERSION_DMA 0x02
51
52 /* FW_CFG_DMA_CONTROL bits */
53 #define FW_CFG_DMA_CTL_ERROR 0x01
54 #define FW_CFG_DMA_CTL_READ 0x02
55 #define FW_CFG_DMA_CTL_SKIP 0x04
56 #define FW_CFG_DMA_CTL_SELECT 0x08
57 #define FW_CFG_DMA_CTL_WRITE 0x10
58
59 #define FW_CFG_DMA_SIGNATURE 0x51454d5520434647ULL /* "QEMU CFG" */
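/*
 * DMA interface (served by fw_cfg_dma_transfer() below): the guest stores the
 * guest-physical address of an FWCfgDmaAccess structure into the DMA register,
 * and all fields of that structure are big endian:
 *
 *     struct FWCfgDmaAccess {    // as declared in the fw_cfg header
 *         uint32_t control;      // bits above; when FW_CFG_DMA_CTL_SELECT is
 *                                // set, the selector sits in the top 16 bits
 *         uint32_t length;       // transfer length in bytes
 *         uint64_t address;      // guest buffer address
 *     };
 *
 * On completion the device stores 0 into 'control', or FW_CFG_DMA_CTL_ERROR on
 * failure. Reading the DMA register itself returns FW_CFG_DMA_SIGNATURE.
 */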
60
61 struct FWCfgEntry {
62 uint32_t len;
63 bool allow_write;
64 uint8_t *data;
65 void *callback_opaque;
66 FWCfgCallback select_cb;
67 FWCfgWriteCallback write_cb;
68 };
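/*
 * Entries live in two banks inside FWCfgState: s->entries[0] for generic
 * selectors and s->entries[1] for FW_CFG_ARCH_LOCAL ones. The low bits of the
 * selector (key & FW_CFG_ENTRY_MASK) index into the chosen bank; see
 * fw_cfg_select() and fw_cfg_add_bytes_callback() below.
 */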
69
70 /**
71 * key_name:
72 *
73 * @key: The uint16 selector key.
74 *
75 * Returns: The stringified name if the selector refers to a well-known
76 * numerically defined item, or NULL on key lookup failure.
77 */
78 static const char *key_name(uint16_t key)
79 {
80 static const char *fw_cfg_wellknown_keys[FW_CFG_FILE_FIRST] = {
81 [FW_CFG_SIGNATURE] = "signature",
82 [FW_CFG_ID] = "id",
83 [FW_CFG_UUID] = "uuid",
84 [FW_CFG_RAM_SIZE] = "ram_size",
85 [FW_CFG_NOGRAPHIC] = "nographic",
86 [FW_CFG_NB_CPUS] = "nb_cpus",
87 [FW_CFG_MACHINE_ID] = "machine_id",
88 [FW_CFG_KERNEL_ADDR] = "kernel_addr",
89 [FW_CFG_KERNEL_SIZE] = "kernel_size",
90 [FW_CFG_KERNEL_CMDLINE] = "kernel_cmdline",
91 [FW_CFG_INITRD_ADDR] = "initrd_addr",
92 [FW_CFG_INITRD_SIZE] = "initrd_size",
93 [FW_CFG_BOOT_DEVICE] = "boot_device",
94 [FW_CFG_NUMA] = "numa",
95 [FW_CFG_BOOT_MENU] = "boot_menu",
96 [FW_CFG_MAX_CPUS] = "max_cpus",
97 [FW_CFG_KERNEL_ENTRY] = "kernel_entry",
98 [FW_CFG_KERNEL_DATA] = "kernel_data",
99 [FW_CFG_INITRD_DATA] = "initrd_data",
100 [FW_CFG_CMDLINE_ADDR] = "cmdline_addr",
101 [FW_CFG_CMDLINE_SIZE] = "cmdline_size",
102 [FW_CFG_CMDLINE_DATA] = "cmdline_data",
103 [FW_CFG_SETUP_ADDR] = "setup_addr",
104 [FW_CFG_SETUP_SIZE] = "setup_size",
105 [FW_CFG_SETUP_DATA] = "setup_data",
106 [FW_CFG_FILE_DIR] = "file_dir",
107 };
108
109 if (key & FW_CFG_ARCH_LOCAL) {
110 return fw_cfg_arch_key_name(key);
111 }
112 if (key < FW_CFG_FILE_FIRST) {
113 return fw_cfg_wellknown_keys[key];
114 }
115
116 return NULL;
117 }
118
119 static inline const char *trace_key_name(uint16_t key)
120 {
121 const char *name = key_name(key);
122
123 return name ? name : "unknown";
124 }
125
126 #define JPG_FILE 0
127 #define BMP_FILE 1
128
129 static char *read_splashfile(char *filename, gsize *file_sizep,
130 int *file_typep)
131 {
132 GError *err = NULL;
133 gchar *content;
134 int file_type;
135 unsigned int filehead;
136 int bmp_bpp;
137
138 if (!g_file_get_contents(filename, &content, file_sizep, &err)) {
139 error_report("failed to read splash file '%s': %s",
140 filename, err->message);
141 g_error_free(err);
142 return NULL;
143 }
144
145 /* check file size */
146 if (*file_sizep < 30) {
147 goto error;
148 }
149
150 /* check magic ID */
151 filehead = lduw_le_p(content);
152 if (filehead == 0xd8ff) {
153 file_type = JPG_FILE;
154 } else if (filehead == 0x4d42) {
155 file_type = BMP_FILE;
156 } else {
157 goto error;
158 }
159
160 /* check BMP bpp */
161 if (file_type == BMP_FILE) {
162 bmp_bpp = lduw_le_p(&content[28]);
163 if (bmp_bpp != 24) {
164 goto error;
165 }
166 }
167
168 /* return values */
169 *file_typep = file_type;
170
171 return content;
172
173 error:
174 error_report("splash file '%s' format not recognized; must be JPEG "
175 "or 24 bit BMP", filename);
176 g_free(content);
177 return NULL;
178 }
179
180 static void fw_cfg_bootsplash(FWCfgState *s)
181 {
182 char *filename, *file_data;
183 gsize file_size;
184 int file_type;
185
186 /* insert splash time if configured by the user */
187 if (current_machine->boot_config.has_splash_time) {
188 int64_t bst_val = current_machine->boot_config.splash_time;
189 uint16_t bst_le16;
190
191 /* validate the input */
192 if (bst_val < 0 || bst_val > 0xffff) {
193 error_report("splash-time is invalid,"
194 "it should be a value between 0 and 65535");
195 exit(1);
196 }
197 /* use little endian format */
198 bst_le16 = cpu_to_le16(bst_val);
199 fw_cfg_add_file(s, "etc/boot-menu-wait",
200 g_memdup(&bst_le16, sizeof bst_le16), sizeof bst_le16);
201 }
202
203 /* insert splash file if configured by the user */
204 if (current_machine->boot_config.splash) {
205 const char *boot_splash_filename = current_machine->boot_config.splash;
206 filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, boot_splash_filename);
207 if (filename == NULL) {
208 error_report("failed to find file '%s'", boot_splash_filename);
209 return;
210 }
211
212 /* load the file data */
213 file_data = read_splashfile(filename, &file_size, &file_type);
214 if (file_data == NULL) {
215 g_free(filename);
216 return;
217 }
218 g_free(boot_splash_filedata);
219 boot_splash_filedata = (uint8_t *)file_data;
220
221 /* insert data */
222 if (file_type == JPG_FILE) {
223 fw_cfg_add_file(s, "bootsplash.jpg",
224 boot_splash_filedata, file_size);
225 } else {
226 fw_cfg_add_file(s, "bootsplash.bmp",
227 boot_splash_filedata, file_size);
228 }
229 g_free(filename);
230 }
231 }
232
233 static void fw_cfg_reboot(FWCfgState *s)
234 {
235 uint64_t rt_val = -1;
236 uint32_t rt_le32;
237
238 if (current_machine->boot_config.has_reboot_timeout) {
239 rt_val = current_machine->boot_config.reboot_timeout;
240
241 /* validate the input */
242 if (rt_val > 0xffff && rt_val != (uint64_t)-1) {
243 error_report("reboot timeout is invalid,"
244 "it should be a value between -1 and 65535");
245 exit(1);
246 }
247 }
248
249 rt_le32 = cpu_to_le32(rt_val);
250 fw_cfg_add_file(s, "etc/boot-fail-wait", g_memdup(&rt_le32, 4), 4);
251 }
252
253 static void fw_cfg_write(FWCfgState *s, uint8_t value)
254 {
255 /* nothing, write support removed in QEMU v2.4+ */
256 }
257
258 static inline uint16_t fw_cfg_file_slots(const FWCfgState *s)
259 {
260 return s->file_slots;
261 }
262
263 /* Note: this function returns an exclusive limit. */
264 static inline uint32_t fw_cfg_max_entry(const FWCfgState *s)
265 {
266 return FW_CFG_FILE_FIRST + fw_cfg_file_slots(s);
267 }
268
269 static int fw_cfg_select(FWCfgState *s, uint16_t key)
270 {
271 int arch, ret;
272 FWCfgEntry *e;
273
274 s->cur_offset = 0;
275 if ((key & FW_CFG_ENTRY_MASK) >= fw_cfg_max_entry(s)) {
276 s->cur_entry = FW_CFG_INVALID;
277 ret = 0;
278 } else {
279 s->cur_entry = key;
280 ret = 1;
281 /* entry successfully selected, now run callback if present */
282 arch = !!(key & FW_CFG_ARCH_LOCAL);
283 e = &s->entries[arch][key & FW_CFG_ENTRY_MASK];
284 if (e->select_cb) {
285 e->select_cb(e->callback_opaque);
286 }
287 }
288
289 trace_fw_cfg_select(s, key, trace_key_name(key), ret);
290 return ret;
291 }
292
293 static uint64_t fw_cfg_data_read(void *opaque, hwaddr addr, unsigned size)
294 {
295 FWCfgState *s = opaque;
296 int arch = !!(s->cur_entry & FW_CFG_ARCH_LOCAL);
297 FWCfgEntry *e = (s->cur_entry == FW_CFG_INVALID) ? NULL :
298 &s->entries[arch][s->cur_entry & FW_CFG_ENTRY_MASK];
299 uint64_t value = 0;
300
301 assert(size > 0 && size <= sizeof(value));
302 if (s->cur_entry != FW_CFG_INVALID && e->data && s->cur_offset < e->len) {
303 /* The least significant 'size' bytes of the return value are
304 * expected to contain a string-preserving portion of the item
305 * data, padded with zeros on the right in case we run out early.
306 * In technical terms, we're composing the host-endian representation
307 * of the big endian interpretation of the fw_cfg string.
308 */
309 do {
310 value = (value << 8) | e->data[s->cur_offset++];
311 } while (--size && s->cur_offset < e->len);
312 /* If size is still not zero, we *did* run out early, so continue
313 * left-shifting, to add the appropriate number of padding zeros
314 * on the right.
315 */
316 value <<= 8 * size;
317 }
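/*
 * Illustrative example: with the 4-byte "QEMU" signature entry
 * (e->data = { 'Q', 'E', 'M', 'U' }), cur_offset initially 2 and an
 * 8-byte access, the loop above consumes 'M' and 'U' (value becomes
 * 0x4d55) and the final shift pads the remaining six bytes, so the
 * access returns 0x4d55000000000000.
 */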
318
319 trace_fw_cfg_read(s, value);
320 return value;
321 }
322
323 static void fw_cfg_data_mem_write(void *opaque, hwaddr addr,
324 uint64_t value, unsigned size)
325 {
326 FWCfgState *s = opaque;
327 unsigned i = size;
328
329 do {
330 fw_cfg_write(s, value >> (8 * --i));
331 } while (i);
332 }
333
334 static void fw_cfg_dma_transfer(FWCfgState *s)
335 {
336 dma_addr_t len;
337 FWCfgDmaAccess dma;
338 int arch;
339 FWCfgEntry *e;
340 int read = 0, write = 0;
341 dma_addr_t dma_addr;
342
343 /* Reset the address before the next access */
344 dma_addr = s->dma_addr;
345 s->dma_addr = 0;
346
347 if (dma_memory_read(s->dma_as, dma_addr,
348 &dma, sizeof(dma), MEMTXATTRS_UNSPECIFIED)) {
349 stl_be_dma(s->dma_as, dma_addr + offsetof(FWCfgDmaAccess, control),
350 FW_CFG_DMA_CTL_ERROR, MEMTXATTRS_UNSPECIFIED);
351 return;
352 }
353
354 dma.address = be64_to_cpu(dma.address);
355 dma.length = be32_to_cpu(dma.length);
356 dma.control = be32_to_cpu(dma.control);
357
358 if (dma.control & FW_CFG_DMA_CTL_SELECT) {
359 fw_cfg_select(s, dma.control >> 16);
360 }
361
362 arch = !!(s->cur_entry & FW_CFG_ARCH_LOCAL);
363 e = (s->cur_entry == FW_CFG_INVALID) ? NULL :
364 &s->entries[arch][s->cur_entry & FW_CFG_ENTRY_MASK];
365
366 if (dma.control & FW_CFG_DMA_CTL_READ) {
367 read = 1;
368 write = 0;
369 } else if (dma.control & FW_CFG_DMA_CTL_WRITE) {
370 read = 0;
371 write = 1;
372 } else if (dma.control & FW_CFG_DMA_CTL_SKIP) {
373 read = 0;
374 write = 0;
375 } else {
376 dma.length = 0;
377 }
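/*
 * Note the precedence implied above: READ wins over WRITE, which wins over
 * SKIP. A control word with none of those bits set degenerates into a
 * zero-length (no-op) transfer.
 */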
378
379 dma.control = 0;
380
381 while (dma.length > 0 && !(dma.control & FW_CFG_DMA_CTL_ERROR)) {
382 if (s->cur_entry == FW_CFG_INVALID || !e->data ||
383 s->cur_offset >= e->len) {
384 len = dma.length;
385
386 /* If the access is not a read, it is a skip or a write; a write to
387 * an invalid or exhausted entry is flagged as an error below.
388 */
389 if (read) {
390 if (dma_memory_set(s->dma_as, dma.address, 0, len,
391 MEMTXATTRS_UNSPECIFIED)) {
392 dma.control |= FW_CFG_DMA_CTL_ERROR;
393 }
394 }
395 if (write) {
396 dma.control |= FW_CFG_DMA_CTL_ERROR;
397 }
398 } else {
399 if (dma.length <= (e->len - s->cur_offset)) {
400 len = dma.length;
401 } else {
402 len = (e->len - s->cur_offset);
403 }
404
405 /* If the access is not a read, it is either a write (handled
406 * below) or a skip, which merely advances the offset.
407 */
408 if (read) {
409 if (dma_memory_write(s->dma_as, dma.address,
410 &e->data[s->cur_offset], len,
411 MEMTXATTRS_UNSPECIFIED)) {
412 dma.control |= FW_CFG_DMA_CTL_ERROR;
413 }
414 }
415 if (write) {
416 if (!e->allow_write ||
417 len != dma.length ||
418 dma_memory_read(s->dma_as, dma.address,
419 &e->data[s->cur_offset], len,
420 MEMTXATTRS_UNSPECIFIED)) {
421 dma.control |= FW_CFG_DMA_CTL_ERROR;
422 } else if (e->write_cb) {
423 e->write_cb(e->callback_opaque, s->cur_offset, len);
424 }
425 }
426
427 s->cur_offset += len;
428 }
429
430 dma.address += len;
431 dma.length -= len;
432
433 }
434
435 stl_be_dma(s->dma_as, dma_addr + offsetof(FWCfgDmaAccess, control),
436 dma.control, MEMTXATTRS_UNSPECIFIED);
437
438 trace_fw_cfg_read(s, 0);
439 }
440
441 static uint64_t fw_cfg_dma_mem_read(void *opaque, hwaddr addr,
442 unsigned size)
443 {
444 /* Return a signature value (and handle various read sizes) */
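/* For example, an aligned 4-byte read at offset 0 returns 0x51454d55
 * ("QEMU") and one at offset 4 returns 0x20434647 (" CFG"). */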
445 return extract64(FW_CFG_DMA_SIGNATURE, (8 - addr - size) * 8, size * 8);
446 }
447
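/*
 * The DMA address register is programmed either with a single 64-bit write,
 * or as two 32-bit writes: high half at offset 0 first, then low half at
 * offset 4. Writing the low half (or the full 64-bit value) is what kicks
 * off the transfer, so a guest doing split writes must write offset 0 first.
 */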
448 static void fw_cfg_dma_mem_write(void *opaque, hwaddr addr,
449 uint64_t value, unsigned size)
450 {
451 FWCfgState *s = opaque;
452
453 if (size == 4) {
454 if (addr == 0) {
455 /* FWCfgDmaAccess high address */
456 s->dma_addr = value << 32;
457 } else if (addr == 4) {
458 /* FWCfgDmaAccess low address */
459 s->dma_addr |= value;
460 fw_cfg_dma_transfer(s);
461 }
462 } else if (size == 8 && addr == 0) {
463 s->dma_addr = value;
464 fw_cfg_dma_transfer(s);
465 }
466 }
467
468 static bool fw_cfg_dma_mem_valid(void *opaque, hwaddr addr,
469 unsigned size, bool is_write,
470 MemTxAttrs attrs)
471 {
472 return !is_write || ((size == 4 && (addr == 0 || addr == 4)) ||
473 (size == 8 && addr == 0));
474 }
475
476 static bool fw_cfg_data_mem_valid(void *opaque, hwaddr addr,
477 unsigned size, bool is_write,
478 MemTxAttrs attrs)
479 {
480 return addr == 0;
481 }
482
483 static uint64_t fw_cfg_ctl_mem_read(void *opaque, hwaddr addr, unsigned size)
484 {
485 return 0;
486 }
487
488 static void fw_cfg_ctl_mem_write(void *opaque, hwaddr addr,
489 uint64_t value, unsigned size)
490 {
491 fw_cfg_select(opaque, (uint16_t)value);
492 }
493
494 static bool fw_cfg_ctl_mem_valid(void *opaque, hwaddr addr,
495 unsigned size, bool is_write,
496 MemTxAttrs attrs)
497 {
498 return is_write && size == 2;
499 }
500
501 static void fw_cfg_comb_write(void *opaque, hwaddr addr,
502 uint64_t value, unsigned size)
503 {
504 switch (size) {
505 case 1:
506 fw_cfg_write(opaque, (uint8_t)value);
507 break;
508 case 2:
509 fw_cfg_select(opaque, (uint16_t)value);
510 break;
511 }
512 }
513
514 static bool fw_cfg_comb_valid(void *opaque, hwaddr addr,
515 unsigned size, bool is_write,
516 MemTxAttrs attrs)
517 {
518 return (size == 1) || (is_write && size == 2);
519 }
520
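/*
 * Four access flavours are wired up below: an effectively write-only 16-bit
 * selector region (ctl), a byte-wise big-endian data region (data), a combined
 * little-endian port-I/O region where the 8-bit data port overlaps the 16-bit
 * selector port (comb), and the 64-bit DMA address register (dma).
 */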
521 static const MemoryRegionOps fw_cfg_ctl_mem_ops = {
522 .read = fw_cfg_ctl_mem_read,
523 .write = fw_cfg_ctl_mem_write,
524 .endianness = DEVICE_BIG_ENDIAN,
525 .valid.accepts = fw_cfg_ctl_mem_valid,
526 };
527
528 static const MemoryRegionOps fw_cfg_data_mem_ops = {
529 .read = fw_cfg_data_read,
530 .write = fw_cfg_data_mem_write,
531 .endianness = DEVICE_BIG_ENDIAN,
532 .valid = {
533 .min_access_size = 1,
534 .max_access_size = 1,
535 .accepts = fw_cfg_data_mem_valid,
536 },
537 };
538
539 static const MemoryRegionOps fw_cfg_comb_mem_ops = {
540 .read = fw_cfg_data_read,
541 .write = fw_cfg_comb_write,
542 .endianness = DEVICE_LITTLE_ENDIAN,
543 .valid.accepts = fw_cfg_comb_valid,
544 };
545
546 static const MemoryRegionOps fw_cfg_dma_mem_ops = {
547 .read = fw_cfg_dma_mem_read,
548 .write = fw_cfg_dma_mem_write,
549 .endianness = DEVICE_BIG_ENDIAN,
550 .valid.accepts = fw_cfg_dma_mem_valid,
551 .valid.max_access_size = 8,
552 .impl.max_access_size = 8,
553 };
554
555 static void fw_cfg_reset(DeviceState *d)
556 {
557 FWCfgState *s = FW_CFG(d);
558
559 /* we never register a read callback for FW_CFG_SIGNATURE */
560 fw_cfg_select(s, FW_CFG_SIGNATURE);
561 }
562
563 /* Save/restore a 32-bit int as a uint16_t.
564 This is a big hack, but it is how the old state did it: either we
565 keep this quirk, or we break compatibility in the migration state.
566 */
567
568 static int get_uint32_as_uint16(QEMUFile *f, void *pv, size_t size,
569 const VMStateField *field)
570 {
571 uint32_t *v = pv;
572 *v = qemu_get_be16(f);
573 return 0;
574 }
575
576 static int put_unused(QEMUFile *f, void *pv, size_t size,
577 const VMStateField *field, JSONWriter *vmdesc)
578 {
579 fprintf(stderr, "uint32_as_uint16 is only used for backward compatibility.\n");
580 fprintf(stderr, "This functions shouldn't be called.\n");
581
582 return 0;
583 }
584
585 static const VMStateInfo vmstate_hack_uint32_as_uint16 = {
586 .name = "int32_as_uint16",
587 .get = get_uint32_as_uint16,
588 .put = put_unused,
589 };
590
591 #define VMSTATE_UINT16_HACK(_f, _s, _t) \
592 VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_hack_uint32_as_uint16, uint32_t)
593
594
595 static bool is_version_1(void *opaque, int version_id)
596 {
597 return version_id == 1;
598 }
599
600 bool fw_cfg_dma_enabled(void *opaque)
601 {
602 FWCfgState *s = opaque;
603
604 return s->dma_enabled;
605 }
606
607 static bool fw_cfg_acpi_mr_restore(void *opaque)
608 {
609 FWCfgState *s = opaque;
610 bool mr_aligned;
611
612 mr_aligned = QEMU_IS_ALIGNED(s->table_mr_size, qemu_real_host_page_size()) &&
613 QEMU_IS_ALIGNED(s->linker_mr_size, qemu_real_host_page_size()) &&
614 QEMU_IS_ALIGNED(s->rsdp_mr_size, qemu_real_host_page_size());
615 return s->acpi_mr_restore && !mr_aligned;
616 }
617
618 static void fw_cfg_update_mr(FWCfgState *s, uint16_t key, size_t size)
619 {
620 MemoryRegion *mr;
621 ram_addr_t offset;
622 int arch = !!(key & FW_CFG_ARCH_LOCAL);
623 void *ptr;
624
625 key &= FW_CFG_ENTRY_MASK;
626 assert(key < fw_cfg_max_entry(s));
627
628 ptr = s->entries[arch][key].data;
629 mr = memory_region_from_host(ptr, &offset);
630
631 memory_region_ram_resize(mr, size, &error_abort);
632 }
633
634 static int fw_cfg_acpi_mr_restore_post_load(void *opaque, int version_id)
635 {
636 FWCfgState *s = opaque;
637 int i, index;
638
639 assert(s->files);
640
641 index = be32_to_cpu(s->files->count);
642
643 for (i = 0; i < index; i++) {
644 if (!strcmp(s->files->f[i].name, ACPI_BUILD_TABLE_FILE)) {
645 fw_cfg_update_mr(s, FW_CFG_FILE_FIRST + i, s->table_mr_size);
646 } else if (!strcmp(s->files->f[i].name, ACPI_BUILD_LOADER_FILE)) {
647 fw_cfg_update_mr(s, FW_CFG_FILE_FIRST + i, s->linker_mr_size);
648 } else if (!strcmp(s->files->f[i].name, ACPI_BUILD_RSDP_FILE)) {
649 fw_cfg_update_mr(s, FW_CFG_FILE_FIRST + i, s->rsdp_mr_size);
650 }
651 }
652
653 return 0;
654 }
655
656 static const VMStateDescription vmstate_fw_cfg_dma = {
657 .name = "fw_cfg/dma",
658 .needed = fw_cfg_dma_enabled,
659 .fields = (const VMStateField[]) {
660 VMSTATE_UINT64(dma_addr, FWCfgState),
661 VMSTATE_END_OF_LIST()
662 },
663 };
664
665 static const VMStateDescription vmstate_fw_cfg_acpi_mr = {
666 .name = "fw_cfg/acpi_mr",
667 .version_id = 1,
668 .minimum_version_id = 1,
669 .needed = fw_cfg_acpi_mr_restore,
670 .post_load = fw_cfg_acpi_mr_restore_post_load,
671 .fields = (const VMStateField[]) {
672 VMSTATE_UINT64(table_mr_size, FWCfgState),
673 VMSTATE_UINT64(linker_mr_size, FWCfgState),
674 VMSTATE_UINT64(rsdp_mr_size, FWCfgState),
675 VMSTATE_END_OF_LIST()
676 },
677 };
678
679 static const VMStateDescription vmstate_fw_cfg = {
680 .name = "fw_cfg",
681 .version_id = 2,
682 .minimum_version_id = 1,
683 .fields = (const VMStateField[]) {
684 VMSTATE_UINT16(cur_entry, FWCfgState),
685 VMSTATE_UINT16_HACK(cur_offset, FWCfgState, is_version_1),
686 VMSTATE_UINT32_V(cur_offset, FWCfgState, 2),
687 VMSTATE_END_OF_LIST()
688 },
689 .subsections = (const VMStateDescription * const []) {
690 &vmstate_fw_cfg_dma,
691 &vmstate_fw_cfg_acpi_mr,
692 NULL,
693 }
694 };
695
696 static void fw_cfg_add_bytes_callback(FWCfgState *s, uint16_t key,
697 FWCfgCallback select_cb,
698 FWCfgWriteCallback write_cb,
699 void *callback_opaque,
700 void *data, size_t len,
701 bool read_only)
702 {
703 int arch = !!(key & FW_CFG_ARCH_LOCAL);
704
705 key &= FW_CFG_ENTRY_MASK;
706
707 assert(key < fw_cfg_max_entry(s) && len < UINT32_MAX);
708 assert(s->entries[arch][key].data == NULL); /* avoid key conflict */
709
710 s->entries[arch][key].data = data;
711 s->entries[arch][key].len = (uint32_t)len;
712 s->entries[arch][key].select_cb = select_cb;
713 s->entries[arch][key].write_cb = write_cb;
714 s->entries[arch][key].callback_opaque = callback_opaque;
715 s->entries[arch][key].allow_write = !read_only;
716 }
717
718 static void *fw_cfg_modify_bytes_read(FWCfgState *s, uint16_t key,
719 void *data, size_t len)
720 {
721 void *ptr;
722 int arch = !!(key & FW_CFG_ARCH_LOCAL);
723
724 key &= FW_CFG_ENTRY_MASK;
725
726 assert(key < fw_cfg_max_entry(s) && len < UINT32_MAX);
727
728 /* return the old data to the function caller, avoid memory leak */
729 ptr = s->entries[arch][key].data;
730 s->entries[arch][key].data = data;
731 s->entries[arch][key].len = len;
732 s->entries[arch][key].allow_write = false;
733
734 return ptr;
735 }
736
737 void fw_cfg_add_bytes(FWCfgState *s, uint16_t key, void *data, size_t len)
738 {
739 trace_fw_cfg_add_bytes(key, trace_key_name(key), len);
740 fw_cfg_add_bytes_callback(s, key, NULL, NULL, NULL, data, len, true);
741 }
742
743 void fw_cfg_add_string(FWCfgState *s, uint16_t key, const char *value)
744 {
745 size_t sz = strlen(value) + 1;
746
747 trace_fw_cfg_add_string(key, trace_key_name(key), value);
748 fw_cfg_add_bytes(s, key, g_memdup(value, sz), sz);
749 }
750
751 void fw_cfg_modify_string(FWCfgState *s, uint16_t key, const char *value)
752 {
753 size_t sz = strlen(value) + 1;
754 char *old;
755
756 old = fw_cfg_modify_bytes_read(s, key, g_memdup(value, sz), sz);
757 g_free(old);
758 }
759
760 void fw_cfg_add_i16(FWCfgState *s, uint16_t key, uint16_t value)
761 {
762 uint16_t *copy;
763
764 copy = g_malloc(sizeof(value));
765 *copy = cpu_to_le16(value);
766 trace_fw_cfg_add_i16(key, trace_key_name(key), value);
767 fw_cfg_add_bytes(s, key, copy, sizeof(value));
768 }
769
770 void fw_cfg_modify_i16(FWCfgState *s, uint16_t key, uint16_t value)
771 {
772 uint16_t *copy, *old;
773
774 copy = g_malloc(sizeof(value));
775 *copy = cpu_to_le16(value);
776 old = fw_cfg_modify_bytes_read(s, key, copy, sizeof(value));
777 g_free(old);
778 }
779
780 void fw_cfg_add_i32(FWCfgState *s, uint16_t key, uint32_t value)
781 {
782 uint32_t *copy;
783
784 copy = g_malloc(sizeof(value));
785 *copy = cpu_to_le32(value);
786 trace_fw_cfg_add_i32(key, trace_key_name(key), value);
787 fw_cfg_add_bytes(s, key, copy, sizeof(value));
788 }
789
790 void fw_cfg_modify_i32(FWCfgState *s, uint16_t key, uint32_t value)
791 {
792 uint32_t *copy, *old;
793
794 copy = g_malloc(sizeof(value));
795 *copy = cpu_to_le32(value);
796 old = fw_cfg_modify_bytes_read(s, key, copy, sizeof(value));
797 g_free(old);
798 }
799
800 void fw_cfg_add_i64(FWCfgState *s, uint16_t key, uint64_t value)
801 {
802 uint64_t *copy;
803
804 copy = g_malloc(sizeof(value));
805 *copy = cpu_to_le64(value);
806 trace_fw_cfg_add_i64(key, trace_key_name(key), value);
807 fw_cfg_add_bytes(s, key, copy, sizeof(value));
808 }
809
810 void fw_cfg_modify_i64(FWCfgState *s, uint16_t key, uint64_t value)
811 {
812 uint64_t *copy, *old;
813
814 copy = g_malloc(sizeof(value));
815 *copy = cpu_to_le64(value);
816 old = fw_cfg_modify_bytes_read(s, key, copy, sizeof(value));
817 g_free(old);
818 }
819
820 void fw_cfg_set_order_override(FWCfgState *s, int order)
821 {
822 assert(s->fw_cfg_order_override == 0);
823 s->fw_cfg_order_override = order;
824 }
825
826 void fw_cfg_reset_order_override(FWCfgState *s)
827 {
828 assert(s->fw_cfg_order_override != 0);
829 s->fw_cfg_order_override = 0;
830 }
831
832 /*
833 * This is the legacy order list. For legacy systems, files are in
834 * the fw_cfg in the order defined below, by the "order" value. Note
835 * that some entries (VGA ROMs, NIC option ROMS, etc.) go into a
836 * specific area, but there may be more than one and they occur in the
837 * order that the user specifies them on the command line. Those are
838 * handled in a special manner, using the order override above.
839 *
840 * For non-legacy, the files are sorted by filename to avoid this kind
841 * of complexity in the future.
842 *
843 * This is only for x86, other arches don't implement versioning so
844 * they won't set legacy mode.
845 */
846 static struct {
847 const char *name;
848 int order;
849 } fw_cfg_order[] = {
850 { "etc/boot-menu-wait", 10 },
851 { "bootsplash.jpg", 11 },
852 { "bootsplash.bmp", 12 },
853 { "etc/boot-fail-wait", 15 },
854 { "etc/smbios/smbios-tables", 20 },
855 { "etc/smbios/smbios-anchor", 30 },
856 { "etc/e820", 40 },
857 { "etc/reserved-memory-end", 50 },
858 { "genroms/kvmvapic.bin", 55 },
859 { "genroms/linuxboot.bin", 60 },
860 { }, /* VGA ROMs from pc_vga_init come here, 70. */
861 { }, /* NIC option ROMs from pc_nic_init come here, 80. */
862 { "etc/system-states", 90 },
863 { }, /* User ROMs come here, 100. */
864 { }, /* Device FW comes here, 110. */
865 { "etc/extra-pci-roots", 120 },
866 { "etc/acpi/tables", 130 },
867 { "etc/table-loader", 140 },
868 { "etc/tpm/log", 150 },
869 { "etc/acpi/rsdp", 160 },
870 { "bootorder", 170 },
871 { "etc/msr_feature_control", 180 },
872
873 #define FW_CFG_ORDER_OVERRIDE_LAST 200
874 };
875
876 /*
877 * Any sub-page-sized update to these table MRs will be lost during
878 * migration, because the ram_load_precopy() -> qemu_ram_resize() path
879 * uses an aligned size. To avoid that inconsistency, save the sizes
880 * separately and restore them in the vmstate post_load() hook.
881 */
882 static void fw_cfg_acpi_mr_save(FWCfgState *s, const char *filename, size_t len)
883 {
884 if (!strcmp(filename, ACPI_BUILD_TABLE_FILE)) {
885 s->table_mr_size = len;
886 } else if (!strcmp(filename, ACPI_BUILD_LOADER_FILE)) {
887 s->linker_mr_size = len;
888 } else if (!strcmp(filename, ACPI_BUILD_RSDP_FILE)) {
889 s->rsdp_mr_size = len;
890 }
891 }
892
893 static int get_fw_cfg_order(FWCfgState *s, const char *name)
894 {
895 int i;
896
897 if (s->fw_cfg_order_override > 0) {
898 return s->fw_cfg_order_override;
899 }
900
901 for (i = 0; i < ARRAY_SIZE(fw_cfg_order); i++) {
902 if (fw_cfg_order[i].name == NULL) {
903 continue;
904 }
905
906 if (strcmp(name, fw_cfg_order[i].name) == 0) {
907 return fw_cfg_order[i].order;
908 }
909 }
910
911 /* Stick unknown stuff at the end. */
912 warn_report("Unknown firmware file in legacy mode: %s", name);
913 return FW_CFG_ORDER_OVERRIDE_LAST;
914 }
915
916 void fw_cfg_add_file_callback(FWCfgState *s, const char *filename,
917 FWCfgCallback select_cb,
918 FWCfgWriteCallback write_cb,
919 void *callback_opaque,
920 void *data, size_t len, bool read_only)
921 {
922 int i, index, count;
923 size_t dsize;
924 MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
925 int order = 0;
926
927 if (!s->files) {
928 dsize = sizeof(uint32_t) + sizeof(FWCfgFile) * fw_cfg_file_slots(s);
929 s->files = g_malloc0(dsize);
930 fw_cfg_add_bytes(s, FW_CFG_FILE_DIR, s->files, dsize);
931 }
932
933 count = be32_to_cpu(s->files->count);
934 assert(count < fw_cfg_file_slots(s));
935
936 /* Find the insertion point. */
937 if (mc->legacy_fw_cfg_order) {
938 /*
939 * Sort by order. For files with the same order, we keep them
940 * in the sequence in which they were added.
941 */
942 order = get_fw_cfg_order(s, filename);
943 for (index = count;
944 index > 0 && order < s->entry_order[index - 1];
945 index--);
946 } else {
947 /* Sort by file name. */
948 for (index = count;
949 index > 0 && strcmp(filename, s->files->f[index - 1].name) < 0;
950 index--);
951 }
952
953 /*
954 * Move all the entries from the index point and after down one
955 * to create a slot for the new entry. Because calculations are
956 * being done with the index, make it so that "i" is the current
957 * index and "i - 1" is the one being copied from, thus the
958 * unusual start and end in the for statement.
959 */
960 for (i = count; i > index; i--) {
961 s->files->f[i] = s->files->f[i - 1];
962 s->files->f[i].select = cpu_to_be16(FW_CFG_FILE_FIRST + i);
963 s->entries[0][FW_CFG_FILE_FIRST + i] =
964 s->entries[0][FW_CFG_FILE_FIRST + i - 1];
965 s->entry_order[i] = s->entry_order[i - 1];
966 }
967
968 memset(&s->files->f[index], 0, sizeof(FWCfgFile));
969 memset(&s->entries[0][FW_CFG_FILE_FIRST + index], 0, sizeof(FWCfgEntry));
970
971 pstrcpy(s->files->f[index].name, sizeof(s->files->f[index].name), filename);
972 for (i = 0; i <= count; i++) {
973 if (i != index &&
974 strcmp(s->files->f[index].name, s->files->f[i].name) == 0) {
975 error_report("duplicate fw_cfg file name: %s",
976 s->files->f[index].name);
977 exit(1);
978 }
979 }
980
981 fw_cfg_add_bytes_callback(s, FW_CFG_FILE_FIRST + index,
982 select_cb, write_cb,
983 callback_opaque, data, len,
984 read_only);
985
986 s->files->f[index].size = cpu_to_be32(len);
987 s->files->f[index].select = cpu_to_be16(FW_CFG_FILE_FIRST + index);
988 s->entry_order[index] = order;
989 trace_fw_cfg_add_file(s, index, s->files->f[index].name, len);
990
991 s->files->count = cpu_to_be32(count+1);
992 fw_cfg_acpi_mr_save(s, filename, len);
993 }
994
995 void fw_cfg_add_file(FWCfgState *s, const char *filename,
996 void *data, size_t len)
997 {
998 fw_cfg_add_file_callback(s, filename, NULL, NULL, NULL, data, len, true);
999 }
1000
1001 void *fw_cfg_modify_file(FWCfgState *s, const char *filename,
1002 void *data, size_t len)
1003 {
1004 int i, index;
1005 void *ptr = NULL;
1006
1007 assert(s->files);
1008
1009 index = be32_to_cpu(s->files->count);
1010
1011 for (i = 0; i < index; i++) {
1012 if (strcmp(filename, s->files->f[i].name) == 0) {
1013 ptr = fw_cfg_modify_bytes_read(s, FW_CFG_FILE_FIRST + i,
1014 data, len);
1015 s->files->f[i].size = cpu_to_be32(len);
1016 fw_cfg_acpi_mr_save(s, filename, len);
1017 return ptr;
1018 }
1019 }
1020
1021 assert(index < fw_cfg_file_slots(s));
1022
1023 /* add new one */
1024 fw_cfg_add_file_callback(s, filename, NULL, NULL, NULL, data, len, true);
1025 return NULL;
1026 }
1027
1028 bool fw_cfg_add_file_from_generator(FWCfgState *s,
1029 Object *parent, const char *part,
1030 const char *filename, Error **errp)
1031 {
1032 ERRP_GUARD();
1033 FWCfgDataGeneratorClass *klass;
1034 GByteArray *array;
1035 Object *obj;
1036 gsize size;
1037
1038 obj = object_resolve_path_component(parent, part);
1039 if (!obj) {
1040 error_setg(errp, "Cannot find object ID '%s'", part);
1041 return false;
1042 }
1043 if (!object_dynamic_cast(obj, TYPE_FW_CFG_DATA_GENERATOR_INTERFACE)) {
1044 error_setg(errp, "Object ID '%s' is not a '%s' subclass",
1045 part, TYPE_FW_CFG_DATA_GENERATOR_INTERFACE);
1046 return false;
1047 }
1048 klass = FW_CFG_DATA_GENERATOR_GET_CLASS(obj);
1049 array = klass->get_data(obj, errp);
1050 if (*errp || !array) {
1051 return false;
1052 }
1053 size = array->len;
1054 fw_cfg_add_file(s, filename, g_byte_array_free(array, FALSE), size);
1055
1056 return true;
1057 }
1058
1059 static void fw_cfg_machine_reset(void *opaque)
1060 {
1061 MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
1062 FWCfgState *s = opaque;
1063 void *ptr;
1064 size_t len;
1065 char *buf;
1066
1067 buf = get_boot_devices_list(&len);
1068 ptr = fw_cfg_modify_file(s, "bootorder", (uint8_t *)buf, len);
1069 g_free(ptr);
1070
1071 if (!mc->legacy_fw_cfg_order) {
1072 buf = get_boot_devices_lchs_list(&len);
1073 ptr = fw_cfg_modify_file(s, "bios-geometry", (uint8_t *)buf, len);
1074 g_free(ptr);
1075 }
1076 }
1077
1078 static void fw_cfg_machine_ready(struct Notifier *n, void *data)
1079 {
1080 FWCfgState *s = container_of(n, FWCfgState, machine_ready);
1081 qemu_register_reset(fw_cfg_machine_reset, s);
1082 }
1083
1084 static const Property fw_cfg_properties[] = {
1085 DEFINE_PROP_BOOL("acpi-mr-restore", FWCfgState, acpi_mr_restore, true),
1086 };
1087
1088 static void fw_cfg_common_realize(DeviceState *dev, Error **errp)
1089 {
1090 FWCfgState *s = FW_CFG(dev);
1091 MachineState *machine = MACHINE(qdev_get_machine());
1092 uint32_t version = FW_CFG_VERSION;
1093
1094 if (!fw_cfg_find()) {
1095 error_setg(errp, "at most one %s device is permitted", TYPE_FW_CFG);
1096 return;
1097 }
1098
1099 fw_cfg_add_bytes(s, FW_CFG_SIGNATURE, (char *)"QEMU", 4);
1100 fw_cfg_add_bytes(s, FW_CFG_UUID, &qemu_uuid, 16);
1101 fw_cfg_add_i16(s, FW_CFG_NOGRAPHIC, (uint16_t)!machine->enable_graphics);
1102 fw_cfg_add_i16(s, FW_CFG_BOOT_MENU, (uint16_t)(machine->boot_config.has_menu && machine->boot_config.menu));
1103 fw_cfg_bootsplash(s);
1104 fw_cfg_reboot(s);
1105
1106 if (s->dma_enabled) {
1107 version |= FW_CFG_VERSION_DMA;
1108 }
1109
1110 fw_cfg_add_i32(s, FW_CFG_ID, version);
1111
1112 s->machine_ready.notify = fw_cfg_machine_ready;
1113 qemu_add_machine_init_done_notifier(&s->machine_ready);
1114 }
1115
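/*
 * Convenience constructor for the port-I/O flavour of the device. As an
 * example (not mandated here), the x86 PC machines map the selector/data
 * pair at I/O port 0x510 and the DMA register at 0x514.
 */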
1116 FWCfgState *fw_cfg_init_io_dma(uint32_t iobase, uint32_t dma_iobase,
1117 AddressSpace *dma_as)
1118 {
1119 DeviceState *dev;
1120 SysBusDevice *sbd;
1121 FWCfgIoState *ios;
1122 FWCfgState *s;
1123 MemoryRegion *iomem = get_system_io();
1124 bool dma_requested = dma_iobase && dma_as;
1125
1126 dev = qdev_new(TYPE_FW_CFG_IO);
1127 if (!dma_requested) {
1128 qdev_prop_set_bit(dev, "dma_enabled", false);
1129 }
1130
1131 object_property_add_child(OBJECT(qdev_get_machine()), TYPE_FW_CFG,
1132 OBJECT(dev));
1133
1134 sbd = SYS_BUS_DEVICE(dev);
1135 sysbus_realize_and_unref(sbd, &error_fatal);
1136 ios = FW_CFG_IO(dev);
1137 memory_region_add_subregion(iomem, iobase, &ios->comb_iomem);
1138
1139 s = FW_CFG(dev);
1140
1141 if (s->dma_enabled) {
1142 /* 64 bits for the address field */
1143 s->dma_as = dma_as;
1144 s->dma_addr = 0;
1145 memory_region_add_subregion(iomem, dma_iobase, &s->dma_iomem);
1146 }
1147
1148 return s;
1149 }
1150
1151 FWCfgState *fw_cfg_init_mem_wide(hwaddr ctl_addr,
1152 hwaddr data_addr, uint32_t data_width,
1153 hwaddr dma_addr, AddressSpace *dma_as)
1154 {
1155 DeviceState *dev;
1156 SysBusDevice *sbd;
1157 FWCfgState *s;
1158 bool dma_requested = dma_addr && dma_as;
1159
1160 dev = qdev_new(TYPE_FW_CFG_MEM);
1161 qdev_prop_set_uint32(dev, "data_width", data_width);
1162 if (!dma_requested) {
1163 qdev_prop_set_bit(dev, "dma_enabled", false);
1164 }
1165
1166 object_property_add_child(OBJECT(qdev_get_machine()), TYPE_FW_CFG,
1167 OBJECT(dev));
1168
1169 sbd = SYS_BUS_DEVICE(dev);
1170 sysbus_realize_and_unref(sbd, &error_fatal);
1171 sysbus_mmio_map(sbd, 0, ctl_addr);
1172 sysbus_mmio_map(sbd, 1, data_addr);
1173
1174 s = FW_CFG(dev);
1175
1176 if (s->dma_enabled) {
1177 s->dma_as = dma_as;
1178 s->dma_addr = 0;
1179 sysbus_mmio_map(sbd, 2, dma_addr);
1180 }
1181
1182 return s;
1183 }
1184
1185 FWCfgState *fw_cfg_init_mem(hwaddr ctl_addr, hwaddr data_addr)
1186 {
1187 return fw_cfg_init_mem_wide(ctl_addr, data_addr,
1188 fw_cfg_data_mem_ops.valid.max_access_size,
1189 0, NULL);
1190 }
1191
1192
1193 FWCfgState *fw_cfg_find(void)
1194 {
1195 /* Returns NULL unless there is exactly one fw_cfg device */
1196 return FW_CFG(object_resolve_path_type("", TYPE_FW_CFG, NULL));
1197 }
1198
1199 void load_image_to_fw_cfg(FWCfgState *fw_cfg, uint16_t size_key,
1200 uint16_t data_key, const char *image_name,
1201 bool try_decompress)
1202 {
1203 size_t size = -1;
1204 uint8_t *data;
1205
1206 if (image_name == NULL) {
1207 return;
1208 }
1209
1210 if (try_decompress) {
1211 size = load_image_gzipped_buffer(image_name,
1212 LOAD_IMAGE_MAX_GUNZIP_BYTES, &data);
1213 }
1214
1215 if (size == (size_t)-1) {
1216 gchar *contents;
1217 gsize length;
1218
1219 if (!g_file_get_contents(image_name, &contents, &length, NULL)) {
1220 error_report("failed to load \"%s\"", image_name);
1221 exit(1);
1222 }
1223 size = length;
1224 data = (uint8_t *)contents;
1225 }
1226
1227 fw_cfg_add_i32(fw_cfg, size_key, size);
1228 fw_cfg_add_bytes(fw_cfg, data_key, data, size);
1229 }
1230
1231 static void fw_cfg_class_init(ObjectClass *klass, void *data)
1232 {
1233 DeviceClass *dc = DEVICE_CLASS(klass);
1234
1235 device_class_set_legacy_reset(dc, fw_cfg_reset);
1236 dc->vmsd = &vmstate_fw_cfg;
1237
1238 device_class_set_props(dc, fw_cfg_properties);
1239 }
1240
1241 static const TypeInfo fw_cfg_info = {
1242 .name = TYPE_FW_CFG,
1243 .parent = TYPE_SYS_BUS_DEVICE,
1244 .abstract = true,
1245 .instance_size = sizeof(FWCfgState),
1246 .class_init = fw_cfg_class_init,
1247 };
1248
1249 static void fw_cfg_file_slots_allocate(FWCfgState *s, Error **errp)
1250 {
1251 uint16_t file_slots_max;
1252
1253 if (fw_cfg_file_slots(s) < FW_CFG_FILE_SLOTS_MIN) {
1254 error_setg(errp, "\"file_slots\" must be at least 0x%x",
1255 FW_CFG_FILE_SLOTS_MIN);
1256 return;
1257 }
1258
1259 /* (UINT16_MAX & FW_CFG_ENTRY_MASK) is the highest inclusive selector value
1260 * that we permit. The actual (exclusive) value coming from the
1261 * configuration is (FW_CFG_FILE_FIRST + fw_cfg_file_slots(s)). */
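/* With the usual definitions in the fw_cfg header (FW_CFG_FILE_FIRST 0x20,
 * FW_CFG_ENTRY_MASK 0x3fff), file_slots_max works out to 0x3fe0. */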
1262 file_slots_max = (UINT16_MAX & FW_CFG_ENTRY_MASK) - FW_CFG_FILE_FIRST + 1;
1263 if (fw_cfg_file_slots(s) > file_slots_max) {
1264 error_setg(errp, "\"file_slots\" must not exceed 0x%" PRIx16,
1265 file_slots_max);
1266 return;
1267 }
1268
1269 s->entries[0] = g_new0(FWCfgEntry, fw_cfg_max_entry(s));
1270 s->entries[1] = g_new0(FWCfgEntry, fw_cfg_max_entry(s));
1271 s->entry_order = g_new0(int, fw_cfg_max_entry(s));
1272 }
1273
1274 static const Property fw_cfg_io_properties[] = {
1275 DEFINE_PROP_BOOL("dma_enabled", FWCfgIoState, parent_obj.dma_enabled,
1276 true),
1277 DEFINE_PROP_UINT16("x-file-slots", FWCfgIoState, parent_obj.file_slots,
1278 FW_CFG_FILE_SLOTS_DFLT),
1279 };
1280
1281 static void fw_cfg_io_realize(DeviceState *dev, Error **errp)
1282 {
1283 ERRP_GUARD();
1284 FWCfgIoState *s = FW_CFG_IO(dev);
1285
1286 fw_cfg_file_slots_allocate(FW_CFG(s), errp);
1287 if (*errp) {
1288 return;
1289 }
1290
1291 /* when using port i/o, the 8-bit data register ALWAYS overlaps
1292 * with half of the 16-bit control register. Hence, the total size
1293 * of the i/o region used is FW_CFG_CTL_SIZE */
1294 memory_region_init_io(&s->comb_iomem, OBJECT(s), &fw_cfg_comb_mem_ops,
1295 FW_CFG(s), "fwcfg", FW_CFG_CTL_SIZE);
1296
1297 if (FW_CFG(s)->dma_enabled) {
1298 memory_region_init_io(&FW_CFG(s)->dma_iomem, OBJECT(s),
1299 &fw_cfg_dma_mem_ops, FW_CFG(s), "fwcfg.dma",
1300 sizeof(dma_addr_t));
1301 }
1302
1303 fw_cfg_common_realize(dev, errp);
1304 }
1305
1306 static void fw_cfg_io_class_init(ObjectClass *klass, void *data)
1307 {
1308 DeviceClass *dc = DEVICE_CLASS(klass);
1309
1310 dc->realize = fw_cfg_io_realize;
1311 device_class_set_props(dc, fw_cfg_io_properties);
1312 }
1313
1314 static const TypeInfo fw_cfg_io_info = {
1315 .name = TYPE_FW_CFG_IO,
1316 .parent = TYPE_FW_CFG,
1317 .instance_size = sizeof(FWCfgIoState),
1318 .class_init = fw_cfg_io_class_init,
1319 };
1320
1321
1322 static const Property fw_cfg_mem_properties[] = {
1323 DEFINE_PROP_UINT32("data_width", FWCfgMemState, data_width, -1),
1324 DEFINE_PROP_BOOL("dma_enabled", FWCfgMemState, parent_obj.dma_enabled,
1325 true),
1326 DEFINE_PROP_UINT16("x-file-slots", FWCfgMemState, parent_obj.file_slots,
1327 FW_CFG_FILE_SLOTS_DFLT),
1328 };
1329
1330 static void fw_cfg_mem_realize(DeviceState *dev, Error **errp)
1331 {
1332 ERRP_GUARD();
1333 FWCfgMemState *s = FW_CFG_MEM(dev);
1334 SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
1335 const MemoryRegionOps *data_ops = &fw_cfg_data_mem_ops;
1336
1337 fw_cfg_file_slots_allocate(FW_CFG(s), errp);
1338 if (*errp) {
1339 return;
1340 }
1341
1342 memory_region_init_io(&s->ctl_iomem, OBJECT(s), &fw_cfg_ctl_mem_ops,
1343 FW_CFG(s), "fwcfg.ctl", FW_CFG_CTL_SIZE);
1344 sysbus_init_mmio(sbd, &s->ctl_iomem);
1345
1346 if (s->data_width > data_ops->valid.max_access_size) {
1347 s->wide_data_ops = *data_ops;
1348
1349 s->wide_data_ops.valid.max_access_size = s->data_width;
1350 s->wide_data_ops.impl.max_access_size = s->data_width;
1351 data_ops = &s->wide_data_ops;
1352 }
1353 memory_region_init_io(&s->data_iomem, OBJECT(s), data_ops, FW_CFG(s),
1354 "fwcfg.data", data_ops->valid.max_access_size);
1355 sysbus_init_mmio(sbd, &s->data_iomem);
1356
1357 if (FW_CFG(s)->dma_enabled) {
1358 memory_region_init_io(&FW_CFG(s)->dma_iomem, OBJECT(s),
1359 &fw_cfg_dma_mem_ops, FW_CFG(s), "fwcfg.dma",
1360 sizeof(dma_addr_t));
1361 sysbus_init_mmio(sbd, &FW_CFG(s)->dma_iomem);
1362 }
1363
1364 fw_cfg_common_realize(dev, errp);
1365 }
1366
1367 static void fw_cfg_mem_class_init(ObjectClass *klass, void *data)
1368 {
1369 DeviceClass *dc = DEVICE_CLASS(klass);
1370
1371 dc->realize = fw_cfg_mem_realize;
1372 device_class_set_props(dc, fw_cfg_mem_properties);
1373 }
1374
1375 static const TypeInfo fw_cfg_mem_info = {
1376 .name = TYPE_FW_CFG_MEM,
1377 .parent = TYPE_FW_CFG,
1378 .instance_size = sizeof(FWCfgMemState),
1379 .class_init = fw_cfg_mem_class_init,
1380 };
1381
1382 static void fw_cfg_register_types(void)
1383 {
1384 type_register_static(&fw_cfg_info);
1385 type_register_static(&fw_cfg_io_info);
1386 type_register_static(&fw_cfg_mem_info);
1387 }
1388
1389 type_init(fw_cfg_register_types)
1390