1 /*
2 * QEMU Firmware configuration device emulation
3 *
4 * Copyright (c) 2008 Gleb Natapov
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #include "qemu/osdep.h"
26 #include "qemu/datadir.h"
27 #include "sysemu/sysemu.h"
28 #include "sysemu/dma.h"
29 #include "sysemu/reset.h"
30 #include "exec/address-spaces.h"
31 #include "hw/boards.h"
32 #include "hw/nvram/fw_cfg.h"
33 #include "hw/qdev-properties.h"
34 #include "hw/sysbus.h"
35 #include "migration/qemu-file-types.h"
36 #include "migration/vmstate.h"
37 #include "trace.h"
38 #include "qemu/error-report.h"
39 #include "qemu/option.h"
40 #include "qemu/config-file.h"
41 #include "qemu/cutils.h"
42 #include "qapi/error.h"
43 #include "hw/acpi/aml-build.h"
44 #include "hw/pci/pci_bus.h"
45 #include "hw/loader.h"
46
47 #define FW_CFG_FILE_SLOTS_DFLT 0x20
48
49 /* FW_CFG_VERSION bits */
50 #define FW_CFG_VERSION 0x01
51 #define FW_CFG_VERSION_DMA 0x02
52
53 /* FW_CFG_DMA_CONTROL bits */
54 #define FW_CFG_DMA_CTL_ERROR 0x01
55 #define FW_CFG_DMA_CTL_READ 0x02
56 #define FW_CFG_DMA_CTL_SKIP 0x04
57 #define FW_CFG_DMA_CTL_SELECT 0x08
58 #define FW_CFG_DMA_CTL_WRITE 0x10
59
60 #define FW_CFG_DMA_SIGNATURE 0x51454d5520434647ULL /* "QEMU CFG" */
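
/*
 * The control bits above are decoded by fw_cfg_dma_transfer() below from a
 * guest-provided FWCfgDmaAccess descriptor (control, length, address; all
 * fields big-endian on the wire).  As a rough guest-side sketch of the
 * protocol -- names such as 'key', 'len' and 'buf_gpa' are illustrative and
 * not part of this file -- selecting an item and reading from it looks like:
 *
 *     FWCfgDmaAccess access = {
 *         .control = cpu_to_be32((key << 16) |
 *                                FW_CFG_DMA_CTL_SELECT |
 *                                FW_CFG_DMA_CTL_READ),
 *         .length  = cpu_to_be32(len),
 *         .address = cpu_to_be64(buf_gpa),
 *     };
 *
 * The guest then writes the guest-physical address of 'access' to the DMA
 * address register (see fw_cfg_dma_mem_write()) and polls 'control' until
 * all bits other than FW_CFG_DMA_CTL_ERROR are clear.
 */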
61
62 struct FWCfgEntry {
63 uint32_t len;
64 bool allow_write;
65 uint8_t *data;
66 void *callback_opaque;
67 FWCfgCallback select_cb;
68 FWCfgWriteCallback write_cb;
69 };
70
71 /**
72 * key_name:
73 *
74 * @key: The uint16 selector key.
75 *
76 * Returns: The stringified name if the selector refers to a well-known
77 * numerically defined item, or NULL on key lookup failure.
78 */
79 static const char *key_name(uint16_t key)
80 {
81 static const char *fw_cfg_wellknown_keys[FW_CFG_FILE_FIRST] = {
82 [FW_CFG_SIGNATURE] = "signature",
83 [FW_CFG_ID] = "id",
84 [FW_CFG_UUID] = "uuid",
85 [FW_CFG_RAM_SIZE] = "ram_size",
86 [FW_CFG_NOGRAPHIC] = "nographic",
87 [FW_CFG_NB_CPUS] = "nb_cpus",
88 [FW_CFG_MACHINE_ID] = "machine_id",
89 [FW_CFG_KERNEL_ADDR] = "kernel_addr",
90 [FW_CFG_KERNEL_SIZE] = "kernel_size",
91 [FW_CFG_KERNEL_CMDLINE] = "kernel_cmdline",
92 [FW_CFG_INITRD_ADDR] = "initrd_addr",
93 [FW_CFG_INITRD_SIZE] = "initrd_size",
94 [FW_CFG_BOOT_DEVICE] = "boot_device",
95 [FW_CFG_NUMA] = "numa",
96 [FW_CFG_BOOT_MENU] = "boot_menu",
97 [FW_CFG_MAX_CPUS] = "max_cpus",
98 [FW_CFG_KERNEL_ENTRY] = "kernel_entry",
99 [FW_CFG_KERNEL_DATA] = "kernel_data",
100 [FW_CFG_INITRD_DATA] = "initrd_data",
101 [FW_CFG_CMDLINE_ADDR] = "cmdline_addr",
102 [FW_CFG_CMDLINE_SIZE] = "cmdline_size",
103 [FW_CFG_CMDLINE_DATA] = "cmdline_data",
104 [FW_CFG_SETUP_ADDR] = "setup_addr",
105 [FW_CFG_SETUP_SIZE] = "setup_size",
106 [FW_CFG_SETUP_DATA] = "setup_data",
107 [FW_CFG_FILE_DIR] = "file_dir",
108 };
109
110 if (key & FW_CFG_ARCH_LOCAL) {
111 return fw_cfg_arch_key_name(key);
112 }
113 if (key < FW_CFG_FILE_FIRST) {
114 return fw_cfg_wellknown_keys[key];
115 }
116
117 return NULL;
118 }
119
120 static inline const char *trace_key_name(uint16_t key)
121 {
122 const char *name = key_name(key);
123
124 return name ? name : "unknown";
125 }
126
127 #define JPG_FILE 0
128 #define BMP_FILE 1
129
130 static char *read_splashfile(char *filename, gsize *file_sizep,
131 int *file_typep)
132 {
133 GError *err = NULL;
134 gchar *content;
135 int file_type;
136 unsigned int filehead;
137 int bmp_bpp;
138
139 if (!g_file_get_contents(filename, &content, file_sizep, &err)) {
140 error_report("failed to read splash file '%s': %s",
141 filename, err->message);
142 g_error_free(err);
143 return NULL;
144 }
145
146 /* check file size */
147 if (*file_sizep < 30) {
148 goto error;
149 }
150
151 /* check magic ID */
152 filehead = lduw_le_p(content);
153 if (filehead == 0xd8ff) {
154 file_type = JPG_FILE;
155 } else if (filehead == 0x4d42) {
156 file_type = BMP_FILE;
157 } else {
158 goto error;
159 }
160
161 /* check BMP bpp */
162 if (file_type == BMP_FILE) {
163 bmp_bpp = lduw_le_p(&content[28]);
164 if (bmp_bpp != 24) {
165 goto error;
166 }
167 }
168
169 /* return values */
170 *file_typep = file_type;
171
172 return content;
173
174 error:
175 error_report("splash file '%s' format not recognized; must be JPEG "
176 "or 24 bit BMP", filename);
177 g_free(content);
178 return NULL;
179 }
180
181 static void fw_cfg_bootsplash(FWCfgState *s)
182 {
183 char *filename, *file_data;
184 gsize file_size;
185 int file_type;
186
187 /* insert splash time if the user configured it */
188 if (current_machine->boot_config.has_splash_time) {
189 int64_t bst_val = current_machine->boot_config.splash_time;
190 uint16_t bst_le16;
191
192 /* validate the input */
193 if (bst_val < 0 || bst_val > 0xffff) {
194 error_report("splash-time is invalid, "
195 "it should be a value between 0 and 65535");
196 exit(1);
197 }
198 /* use little endian format */
199 bst_le16 = cpu_to_le16(bst_val);
200 fw_cfg_add_file(s, "etc/boot-menu-wait",
201 g_memdup(&bst_le16, sizeof bst_le16), sizeof bst_le16);
202 }
203
204 /* insert splash file if the user configured one */
205 if (current_machine->boot_config.splash) {
206 const char *boot_splash_filename = current_machine->boot_config.splash;
207 filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, boot_splash_filename);
208 if (filename == NULL) {
209 error_report("failed to find file '%s'", boot_splash_filename);
210 return;
211 }
212
213 /* loading file data */
214 file_data = read_splashfile(filename, &file_size, &file_type);
215 if (file_data == NULL) {
216 g_free(filename);
217 return;
218 }
219 g_free(boot_splash_filedata);
220 boot_splash_filedata = (uint8_t *)file_data;
221
222 /* insert data */
223 if (file_type == JPG_FILE) {
224 fw_cfg_add_file(s, "bootsplash.jpg",
225 boot_splash_filedata, file_size);
226 } else {
227 fw_cfg_add_file(s, "bootsplash.bmp",
228 boot_splash_filedata, file_size);
229 }
230 g_free(filename);
231 }
232 }
233
234 static void fw_cfg_reboot(FWCfgState *s)
235 {
236 uint64_t rt_val = -1;
237 uint32_t rt_le32;
238
239 if (current_machine->boot_config.has_reboot_timeout) {
240 rt_val = current_machine->boot_config.reboot_timeout;
241
242 /* validate the input */
243 if (rt_val > 0xffff && rt_val != (uint64_t)-1) {
244 error_report("reboot timeout is invalid, "
245 "it should be a value between -1 and 65535");
246 exit(1);
247 }
248 }
249
250 rt_le32 = cpu_to_le32(rt_val);
251 fw_cfg_add_file(s, "etc/boot-fail-wait", g_memdup(&rt_le32, 4), 4);
252 }
253
254 static void fw_cfg_write(FWCfgState *s, uint8_t value)
255 {
256 /* nothing, write support removed in QEMU v2.4+ */
257 }
258
259 static inline uint16_t fw_cfg_file_slots(const FWCfgState *s)
260 {
261 return s->file_slots;
262 }
263
264 /* Note: this function returns an exclusive limit. */
265 static inline uint32_t fw_cfg_max_entry(const FWCfgState *s)
266 {
267 return FW_CFG_FILE_FIRST + fw_cfg_file_slots(s);
268 }
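
/*
 * Worked example, assuming the definitions in fw_cfg.h: with the default
 * FW_CFG_FILE_SLOTS_DFLT of 0x20 and FW_CFG_FILE_FIRST being 0x20, the
 * exclusive limit is 0x20 + 0x20 = 0x40, i.e. selectors 0x20..0x3f address
 * file slots 0..0x1f.
 */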
269
270 static int fw_cfg_select(FWCfgState *s, uint16_t key)
271 {
272 int arch, ret;
273 FWCfgEntry *e;
274
275 s->cur_offset = 0;
276 if ((key & FW_CFG_ENTRY_MASK) >= fw_cfg_max_entry(s)) {
277 s->cur_entry = FW_CFG_INVALID;
278 ret = 0;
279 } else {
280 s->cur_entry = key;
281 ret = 1;
282 /* entry successfully selected, now run callback if present */
283 arch = !!(key & FW_CFG_ARCH_LOCAL);
284 e = &s->entries[arch][key & FW_CFG_ENTRY_MASK];
285 if (e->select_cb) {
286 e->select_cb(e->callback_opaque);
287 }
288 }
289
290 trace_fw_cfg_select(s, key, trace_key_name(key), ret);
291 return ret;
292 }
293
294 static uint64_t fw_cfg_data_read(void *opaque, hwaddr addr, unsigned size)
295 {
296 FWCfgState *s = opaque;
297 int arch = !!(s->cur_entry & FW_CFG_ARCH_LOCAL);
298 FWCfgEntry *e = (s->cur_entry == FW_CFG_INVALID) ? NULL :
299 &s->entries[arch][s->cur_entry & FW_CFG_ENTRY_MASK];
300 uint64_t value = 0;
301
302 assert(size > 0 && size <= sizeof(value));
303 if (s->cur_entry != FW_CFG_INVALID && e->data && s->cur_offset < e->len) {
304 /* The least significant 'size' bytes of the return value are
305 * expected to contain a string preserving portion of the item
306 * data, padded with zeros on the right in case we run out early.
307 * In technical terms, we're composing the host-endian representation
308 * of the big endian interpretation of the fw_cfg string.
309 */
310 do {
311 value = (value << 8) | e->data[s->cur_offset++];
312 } while (--size && s->cur_offset < e->len);
313 /* If size is still not zero, we *did* run out early, so continue
314 * left-shifting, to add the appropriate number of padding zeros
315 * on the right.
316 */
317 value <<= 8 * size;
318 }
319
320 trace_fw_cfg_read(s, value);
321 return value;
322 }
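
/*
 * Worked example of the composition above, assuming the selected item holds
 * the four bytes 'Q' 'E' 'M' 'U' (0x51 0x45 0x4d 0x55), cur_offset is 0 and
 * the guest issues an 8-byte read:
 *
 *     after the loop:  value = 0x51454d55, size = 4 (we ran out of data)
 *     after the shift: value = 0x51454d5500000000
 *
 * Since the data region is declared DEVICE_BIG_ENDIAN, the guest sees the
 * item bytes in order, padded with zeros at the end.
 */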
323
324 static void fw_cfg_data_mem_write(void *opaque, hwaddr addr,
325 uint64_t value, unsigned size)
326 {
327 FWCfgState *s = opaque;
328 unsigned i = size;
329
330 do {
331 fw_cfg_write(s, value >> (8 * --i));
332 } while (i);
333 }
334
335 static void fw_cfg_dma_transfer(FWCfgState *s)
336 {
337 dma_addr_t len;
338 FWCfgDmaAccess dma;
339 int arch;
340 FWCfgEntry *e;
341 int read = 0, write = 0;
342 dma_addr_t dma_addr;
343
344 /* Reset the address before the next access */
345 dma_addr = s->dma_addr;
346 s->dma_addr = 0;
347
348 if (dma_memory_read(s->dma_as, dma_addr,
349 &dma, sizeof(dma), MEMTXATTRS_UNSPECIFIED)) {
350 stl_be_dma(s->dma_as, dma_addr + offsetof(FWCfgDmaAccess, control),
351 FW_CFG_DMA_CTL_ERROR, MEMTXATTRS_UNSPECIFIED);
352 return;
353 }
354
355 dma.address = be64_to_cpu(dma.address);
356 dma.length = be32_to_cpu(dma.length);
357 dma.control = be32_to_cpu(dma.control);
358
359 if (dma.control & FW_CFG_DMA_CTL_SELECT) {
360 fw_cfg_select(s, dma.control >> 16);
361 }
362
363 arch = !!(s->cur_entry & FW_CFG_ARCH_LOCAL);
364 e = (s->cur_entry == FW_CFG_INVALID) ? NULL :
365 &s->entries[arch][s->cur_entry & FW_CFG_ENTRY_MASK];
366
367 if (dma.control & FW_CFG_DMA_CTL_READ) {
368 read = 1;
369 write = 0;
370 } else if (dma.control & FW_CFG_DMA_CTL_WRITE) {
371 read = 0;
372 write = 1;
373 } else if (dma.control & FW_CFG_DMA_CTL_SKIP) {
374 read = 0;
375 write = 0;
376 } else {
377 dma.length = 0;
378 }
379
380 dma.control = 0;
381
382 while (dma.length > 0 && !(dma.control & FW_CFG_DMA_CTL_ERROR)) {
383 if (s->cur_entry == FW_CFG_INVALID || !e->data ||
384 s->cur_offset >= e->len) {
385 len = dma.length;
386
387 /* If the access is not a read access, it is a skip access,
388 * as determined by the control-bit checks above.
389 */
390 if (read) {
391 if (dma_memory_set(s->dma_as, dma.address, 0, len,
392 MEMTXATTRS_UNSPECIFIED)) {
393 dma.control |= FW_CFG_DMA_CTL_ERROR;
394 }
395 }
396 if (write) {
397 dma.control |= FW_CFG_DMA_CTL_ERROR;
398 }
399 } else {
400 if (dma.length <= (e->len - s->cur_offset)) {
401 len = dma.length;
402 } else {
403 len = (e->len - s->cur_offset);
404 }
405
406 /* If the access is not a read access, it is a skip access,
407 * as determined by the control-bit checks above.
408 */
409 if (read) {
410 if (dma_memory_write(s->dma_as, dma.address,
411 &e->data[s->cur_offset], len,
412 MEMTXATTRS_UNSPECIFIED)) {
413 dma.control |= FW_CFG_DMA_CTL_ERROR;
414 }
415 }
416 if (write) {
417 if (!e->allow_write ||
418 len != dma.length ||
419 dma_memory_read(s->dma_as, dma.address,
420 &e->data[s->cur_offset], len,
421 MEMTXATTRS_UNSPECIFIED)) {
422 dma.control |= FW_CFG_DMA_CTL_ERROR;
423 } else if (e->write_cb) {
424 e->write_cb(e->callback_opaque, s->cur_offset, len);
425 }
426 }
427
428 s->cur_offset += len;
429 }
430
431 dma.address += len;
432 dma.length -= len;
433
434 }
435
436 stl_be_dma(s->dma_as, dma_addr + offsetof(FWCfgDmaAccess, control),
437 dma.control, MEMTXATTRS_UNSPECIFIED);
438
439 trace_fw_cfg_read(s, 0);
440 }
441
442 static uint64_t fw_cfg_dma_mem_read(void *opaque, hwaddr addr,
443 unsigned size)
444 {
445 /* Return a signature value (and handle various read sizes) */
446 return extract64(FW_CFG_DMA_SIGNATURE, (8 - addr - size) * 8, size * 8);
447 }
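
/*
 * Worked example of the extraction above: a 4-byte read at addr 0 extracts
 * bits [32, 64) of FW_CFG_DMA_SIGNATURE, i.e. 0x51454d55 ("QEMU"), while a
 * 4-byte read at addr 4 extracts bits [0, 32), i.e. 0x20434647 (" CFG").
 * With the region declared DEVICE_BIG_ENDIAN, the guest therefore reads the
 * signature bytes "QEMU CFG" in order across the 8-byte window.
 */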
448
449 static void fw_cfg_dma_mem_write(void *opaque, hwaddr addr,
450 uint64_t value, unsigned size)
451 {
452 FWCfgState *s = opaque;
453
454 if (size == 4) {
455 if (addr == 0) {
456 /* FWCfgDmaAccess high address */
457 s->dma_addr = value << 32;
458 } else if (addr == 4) {
459 /* FWCfgDmaAccess low address */
460 s->dma_addr |= value;
461 fw_cfg_dma_transfer(s);
462 }
463 } else if (size == 8 && addr == 0) {
464 s->dma_addr = value;
465 fw_cfg_dma_transfer(s);
466 }
467 }
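
/*
 * Guest-side sketch of kicking a transfer (the write helper name below is
 * hypothetical; what matters is the ordering the code above relies on): the
 * descriptor's guest-physical address is written big-endian, either as one
 * 64-bit access at offset 0 or as two 32-bit accesses, high half at offset 0
 * first, then low half at offset 4.  The low-half (or 64-bit) write starts
 * the transfer.
 *
 *     fw_cfg_dma_reg_write32(0, access_gpa >> 32);          high half
 *     fw_cfg_dma_reg_write32(4, access_gpa & 0xffffffff);   low half, kicks
 */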
468
469 static bool fw_cfg_dma_mem_valid(void *opaque, hwaddr addr,
470 unsigned size, bool is_write,
471 MemTxAttrs attrs)
472 {
473 return !is_write || ((size == 4 && (addr == 0 || addr == 4)) ||
474 (size == 8 && addr == 0));
475 }
476
477 static bool fw_cfg_data_mem_valid(void *opaque, hwaddr addr,
478 unsigned size, bool is_write,
479 MemTxAttrs attrs)
480 {
481 return addr == 0;
482 }
483
484 static uint64_t fw_cfg_ctl_mem_read(void *opaque, hwaddr addr, unsigned size)
485 {
486 return 0;
487 }
488
489 static void fw_cfg_ctl_mem_write(void *opaque, hwaddr addr,
490 uint64_t value, unsigned size)
491 {
492 fw_cfg_select(opaque, (uint16_t)value);
493 }
494
495 static bool fw_cfg_ctl_mem_valid(void *opaque, hwaddr addr,
496 unsigned size, bool is_write,
497 MemTxAttrs attrs)
498 {
499 return is_write && size == 2;
500 }
501
502 static void fw_cfg_comb_write(void *opaque, hwaddr addr,
503 uint64_t value, unsigned size)
504 {
505 switch (size) {
506 case 1:
507 fw_cfg_write(opaque, (uint8_t)value);
508 break;
509 case 2:
510 fw_cfg_select(opaque, (uint16_t)value);
511 break;
512 }
513 }
514
515 static bool fw_cfg_comb_valid(void *opaque, hwaddr addr,
516 unsigned size, bool is_write,
517 MemTxAttrs attrs)
518 {
519 return (size == 1) || (is_write && size == 2);
520 }
521
522 static const MemoryRegionOps fw_cfg_ctl_mem_ops = {
523 .read = fw_cfg_ctl_mem_read,
524 .write = fw_cfg_ctl_mem_write,
525 .endianness = DEVICE_BIG_ENDIAN,
526 .valid.accepts = fw_cfg_ctl_mem_valid,
527 };
528
529 static const MemoryRegionOps fw_cfg_data_mem_ops = {
530 .read = fw_cfg_data_read,
531 .write = fw_cfg_data_mem_write,
532 .endianness = DEVICE_BIG_ENDIAN,
533 .valid = {
534 .min_access_size = 1,
535 .max_access_size = 1,
536 .accepts = fw_cfg_data_mem_valid,
537 },
538 };
539
540 static const MemoryRegionOps fw_cfg_comb_mem_ops = {
541 .read = fw_cfg_data_read,
542 .write = fw_cfg_comb_write,
543 .endianness = DEVICE_LITTLE_ENDIAN,
544 .valid.accepts = fw_cfg_comb_valid,
545 };
546
547 static const MemoryRegionOps fw_cfg_dma_mem_ops = {
548 .read = fw_cfg_dma_mem_read,
549 .write = fw_cfg_dma_mem_write,
550 .endianness = DEVICE_BIG_ENDIAN,
551 .valid.accepts = fw_cfg_dma_mem_valid,
552 .valid.max_access_size = 8,
553 .impl.max_access_size = 8,
554 };
555
556 static void fw_cfg_reset(DeviceState *d)
557 {
558 FWCfgState *s = FW_CFG(d);
559
560 /* we never register a read callback for FW_CFG_SIGNATURE */
561 fw_cfg_select(s, FW_CFG_SIGNATURE);
562 }
563
564 /* Save/restore a 32 bit int as a uint16_t.
565 This is a big hack, but it is how the old state did it.
566 Either we break compatibility in the state, or we can't use struct tm.
567 */
568
569 static int get_uint32_as_uint16(QEMUFile *f, void *pv, size_t size,
570 const VMStateField *field)
571 {
572 uint32_t *v = pv;
573 *v = qemu_get_be16(f);
574 return 0;
575 }
576
577 static int put_unused(QEMUFile *f, void *pv, size_t size,
578 const VMStateField *field, JSONWriter *vmdesc)
579 {
580 fprintf(stderr, "uint32_as_uint16 is only used for backward compatibility.\n");
581 fprintf(stderr, "This function shouldn't be called.\n");
582
583 return 0;
584 }
585
586 static const VMStateInfo vmstate_hack_uint32_as_uint16 = {
587 .name = "int32_as_uint16",
588 .get = get_uint32_as_uint16,
589 .put = put_unused,
590 };
591
592 #define VMSTATE_UINT16_HACK(_f, _s, _t) \
593 VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_hack_uint32_as_uint16, uint32_t)
594
595
596 static bool is_version_1(void *opaque, int version_id)
597 {
598 return version_id == 1;
599 }
600
601 bool fw_cfg_dma_enabled(void *opaque)
602 {
603 FWCfgState *s = opaque;
604
605 return s->dma_enabled;
606 }
607
608 static bool fw_cfg_acpi_mr_restore(void *opaque)
609 {
610 FWCfgState *s = opaque;
611 bool mr_aligned;
612
613 mr_aligned = QEMU_IS_ALIGNED(s->table_mr_size, qemu_real_host_page_size()) &&
614 QEMU_IS_ALIGNED(s->linker_mr_size, qemu_real_host_page_size()) &&
615 QEMU_IS_ALIGNED(s->rsdp_mr_size, qemu_real_host_page_size());
616 return s->acpi_mr_restore && !mr_aligned;
617 }
618
619 static void fw_cfg_update_mr(FWCfgState *s, uint16_t key, size_t size)
620 {
621 MemoryRegion *mr;
622 ram_addr_t offset;
623 int arch = !!(key & FW_CFG_ARCH_LOCAL);
624 void *ptr;
625
626 key &= FW_CFG_ENTRY_MASK;
627 assert(key < fw_cfg_max_entry(s));
628
629 ptr = s->entries[arch][key].data;
630 mr = memory_region_from_host(ptr, &offset);
631
632 memory_region_ram_resize(mr, size, &error_abort);
633 }
634
635 static int fw_cfg_acpi_mr_restore_post_load(void *opaque, int version_id)
636 {
637 FWCfgState *s = opaque;
638 int i, index;
639
640 assert(s->files);
641
642 index = be32_to_cpu(s->files->count);
643
644 for (i = 0; i < index; i++) {
645 if (!strcmp(s->files->f[i].name, ACPI_BUILD_TABLE_FILE)) {
646 fw_cfg_update_mr(s, FW_CFG_FILE_FIRST + i, s->table_mr_size);
647 } else if (!strcmp(s->files->f[i].name, ACPI_BUILD_LOADER_FILE)) {
648 fw_cfg_update_mr(s, FW_CFG_FILE_FIRST + i, s->linker_mr_size);
649 } else if (!strcmp(s->files->f[i].name, ACPI_BUILD_RSDP_FILE)) {
650 fw_cfg_update_mr(s, FW_CFG_FILE_FIRST + i, s->rsdp_mr_size);
651 }
652 }
653
654 return 0;
655 }
656
657 static const VMStateDescription vmstate_fw_cfg_dma = {
658 .name = "fw_cfg/dma",
659 .needed = fw_cfg_dma_enabled,
660 .fields = (const VMStateField[]) {
661 VMSTATE_UINT64(dma_addr, FWCfgState),
662 VMSTATE_END_OF_LIST()
663 },
664 };
665
666 static const VMStateDescription vmstate_fw_cfg_acpi_mr = {
667 .name = "fw_cfg/acpi_mr",
668 .version_id = 1,
669 .minimum_version_id = 1,
670 .needed = fw_cfg_acpi_mr_restore,
671 .post_load = fw_cfg_acpi_mr_restore_post_load,
672 .fields = (const VMStateField[]) {
673 VMSTATE_UINT64(table_mr_size, FWCfgState),
674 VMSTATE_UINT64(linker_mr_size, FWCfgState),
675 VMSTATE_UINT64(rsdp_mr_size, FWCfgState),
676 VMSTATE_END_OF_LIST()
677 },
678 };
679
680 static const VMStateDescription vmstate_fw_cfg = {
681 .name = "fw_cfg",
682 .version_id = 2,
683 .minimum_version_id = 1,
684 .fields = (const VMStateField[]) {
685 VMSTATE_UINT16(cur_entry, FWCfgState),
686 VMSTATE_UINT16_HACK(cur_offset, FWCfgState, is_version_1),
687 VMSTATE_UINT32_V(cur_offset, FWCfgState, 2),
688 VMSTATE_END_OF_LIST()
689 },
690 .subsections = (const VMStateDescription * const []) {
691 &vmstate_fw_cfg_dma,
692 &vmstate_fw_cfg_acpi_mr,
693 NULL,
694 }
695 };
696
697 static void fw_cfg_add_bytes_callback(FWCfgState *s, uint16_t key,
698 FWCfgCallback select_cb,
699 FWCfgWriteCallback write_cb,
700 void *callback_opaque,
701 void *data, size_t len,
702 bool read_only)
703 {
704 int arch = !!(key & FW_CFG_ARCH_LOCAL);
705
706 key &= FW_CFG_ENTRY_MASK;
707
708 assert(key < fw_cfg_max_entry(s) && len < UINT32_MAX);
709 assert(s->entries[arch][key].data == NULL); /* avoid key conflict */
710
711 s->entries[arch][key].data = data;
712 s->entries[arch][key].len = (uint32_t)len;
713 s->entries[arch][key].select_cb = select_cb;
714 s->entries[arch][key].write_cb = write_cb;
715 s->entries[arch][key].callback_opaque = callback_opaque;
716 s->entries[arch][key].allow_write = !read_only;
717 }
718
719 static void *fw_cfg_modify_bytes_read(FWCfgState *s, uint16_t key,
720 void *data, size_t len)
721 {
722 void *ptr;
723 int arch = !!(key & FW_CFG_ARCH_LOCAL);
724
725 key &= FW_CFG_ENTRY_MASK;
726
727 assert(key < fw_cfg_max_entry(s) && len < UINT32_MAX);
728
729 /* return the old data to the caller to avoid a memory leak */
730 ptr = s->entries[arch][key].data;
731 s->entries[arch][key].data = data;
732 s->entries[arch][key].len = len;
733 s->entries[arch][key].callback_opaque = NULL;
734 s->entries[arch][key].allow_write = false;
735
736 return ptr;
737 }
738
739 void fw_cfg_add_bytes(FWCfgState *s, uint16_t key, void *data, size_t len)
740 {
741 trace_fw_cfg_add_bytes(key, trace_key_name(key), len);
742 fw_cfg_add_bytes_callback(s, key, NULL, NULL, NULL, data, len, true);
743 }
744
745 void fw_cfg_add_string(FWCfgState *s, uint16_t key, const char *value)
746 {
747 size_t sz = strlen(value) + 1;
748
749 trace_fw_cfg_add_string(key, trace_key_name(key), value);
750 fw_cfg_add_bytes(s, key, g_memdup(value, sz), sz);
751 }
752
753 void fw_cfg_modify_string(FWCfgState *s, uint16_t key, const char *value)
754 {
755 size_t sz = strlen(value) + 1;
756 char *old;
757
758 old = fw_cfg_modify_bytes_read(s, key, g_memdup(value, sz), sz);
759 g_free(old);
760 }
761
762 void fw_cfg_add_i16(FWCfgState *s, uint16_t key, uint16_t value)
763 {
764 uint16_t *copy;
765
766 copy = g_malloc(sizeof(value));
767 *copy = cpu_to_le16(value);
768 trace_fw_cfg_add_i16(key, trace_key_name(key), value);
769 fw_cfg_add_bytes(s, key, copy, sizeof(value));
770 }
771
772 void fw_cfg_modify_i16(FWCfgState *s, uint16_t key, uint16_t value)
773 {
774 uint16_t *copy, *old;
775
776 copy = g_malloc(sizeof(value));
777 *copy = cpu_to_le16(value);
778 old = fw_cfg_modify_bytes_read(s, key, copy, sizeof(value));
779 g_free(old);
780 }
781
782 void fw_cfg_add_i32(FWCfgState *s, uint16_t key, uint32_t value)
783 {
784 uint32_t *copy;
785
786 copy = g_malloc(sizeof(value));
787 *copy = cpu_to_le32(value);
788 trace_fw_cfg_add_i32(key, trace_key_name(key), value);
789 fw_cfg_add_bytes(s, key, copy, sizeof(value));
790 }
791
792 void fw_cfg_modify_i32(FWCfgState *s, uint16_t key, uint32_t value)
793 {
794 uint32_t *copy, *old;
795
796 copy = g_malloc(sizeof(value));
797 *copy = cpu_to_le32(value);
798 old = fw_cfg_modify_bytes_read(s, key, copy, sizeof(value));
799 g_free(old);
800 }
801
802 void fw_cfg_add_i64(FWCfgState *s, uint16_t key, uint64_t value)
803 {
804 uint64_t *copy;
805
806 copy = g_malloc(sizeof(value));
807 *copy = cpu_to_le64(value);
808 trace_fw_cfg_add_i64(key, trace_key_name(key), value);
809 fw_cfg_add_bytes(s, key, copy, sizeof(value));
810 }
811
812 void fw_cfg_modify_i64(FWCfgState *s, uint16_t key, uint64_t value)
813 {
814 uint64_t *copy, *old;
815
816 copy = g_malloc(sizeof(value));
817 *copy = cpu_to_le64(value);
818 old = fw_cfg_modify_bytes_read(s, key, copy, sizeof(value));
819 g_free(old);
820 }
821
822 void fw_cfg_set_order_override(FWCfgState *s, int order)
823 {
824 assert(s->fw_cfg_order_override == 0);
825 s->fw_cfg_order_override = order;
826 }
827
828 void fw_cfg_reset_order_override(FWCfgState *s)
829 {
830 assert(s->fw_cfg_order_override != 0);
831 s->fw_cfg_order_override = 0;
832 }
833
834 /*
835 * This is the legacy order list. For legacy systems, files are in
836 * the fw_cfg in the order defined below, by the "order" value. Note
837 * that some entries (VGA ROMs, NIC option ROMS, etc.) go into a
838 * specific area, but there may be more than one and they occur in the
839 * order that the user specifies them on the command line. Those are
840 * handled in a special manner, using the order override above.
841 *
842 * For non-legacy, the files are sorted by filename to avoid this kind
843 * of complexity in the future.
844 *
845 * This is only for x86; other arches don't implement versioning, so
846 * they never set legacy mode.
847 */
848 static struct {
849 const char *name;
850 int order;
851 } fw_cfg_order[] = {
852 { "etc/boot-menu-wait", 10 },
853 { "bootsplash.jpg", 11 },
854 { "bootsplash.bmp", 12 },
855 { "etc/boot-fail-wait", 15 },
856 { "etc/smbios/smbios-tables", 20 },
857 { "etc/smbios/smbios-anchor", 30 },
858 { "etc/e820", 40 },
859 { "etc/reserved-memory-end", 50 },
860 { "genroms/kvmvapic.bin", 55 },
861 { "genroms/linuxboot.bin", 60 },
862 { }, /* VGA ROMs from pc_vga_init come here, 70. */
863 { }, /* NIC option ROMs from pc_nic_init come here, 80. */
864 { "etc/system-states", 90 },
865 { }, /* User ROMs come here, 100. */
866 { }, /* Device FW comes here, 110. */
867 { "etc/extra-pci-roots", 120 },
868 { "etc/acpi/tables", 130 },
869 { "etc/table-loader", 140 },
870 { "etc/tpm/log", 150 },
871 { "etc/acpi/rsdp", 160 },
872 { "bootorder", 170 },
873 { "etc/msr_feature_control", 180 },
874
875 #define FW_CFG_ORDER_OVERRIDE_LAST 200
876 };
877
878 /*
879 * Any sub-page size update to these table MRs will be lost during migration,
880 * as we use aligned size in ram_load_precopy() -> qemu_ram_resize() path.
881 * In order to avoid this size inconsistency, save the sizes separately,
882 * migrate them, and re-apply them in the vmstate post_load() hook.
883 */
884 static void fw_cfg_acpi_mr_save(FWCfgState *s, const char *filename, size_t len)
885 {
886 if (!strcmp(filename, ACPI_BUILD_TABLE_FILE)) {
887 s->table_mr_size = len;
888 } else if (!strcmp(filename, ACPI_BUILD_LOADER_FILE)) {
889 s->linker_mr_size = len;
890 } else if (!strcmp(filename, ACPI_BUILD_RSDP_FILE)) {
891 s->rsdp_mr_size = len;
892 }
893 }
894
895 static int get_fw_cfg_order(FWCfgState *s, const char *name)
896 {
897 int i;
898
899 if (s->fw_cfg_order_override > 0) {
900 return s->fw_cfg_order_override;
901 }
902
903 for (i = 0; i < ARRAY_SIZE(fw_cfg_order); i++) {
904 if (fw_cfg_order[i].name == NULL) {
905 continue;
906 }
907
908 if (strcmp(name, fw_cfg_order[i].name) == 0) {
909 return fw_cfg_order[i].order;
910 }
911 }
912
913 /* Stick unknown stuff at the end. */
914 warn_report("Unknown firmware file in legacy mode: %s", name);
915 return FW_CFG_ORDER_OVERRIDE_LAST;
916 }
917
918 void fw_cfg_add_file_callback(FWCfgState *s, const char *filename,
919 FWCfgCallback select_cb,
920 FWCfgWriteCallback write_cb,
921 void *callback_opaque,
922 void *data, size_t len, bool read_only)
923 {
924 int i, index, count;
925 size_t dsize;
926 MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
927 int order = 0;
928
929 if (!s->files) {
930 dsize = sizeof(uint32_t) + sizeof(FWCfgFile) * fw_cfg_file_slots(s);
931 s->files = g_malloc0(dsize);
932 fw_cfg_add_bytes(s, FW_CFG_FILE_DIR, s->files, dsize);
933 }
934
935 count = be32_to_cpu(s->files->count);
936 assert(count < fw_cfg_file_slots(s));
937
938 /* Find the insertion point. */
939 if (mc->legacy_fw_cfg_order) {
940 /*
941 * Sort by order. For files with the same order, we keep them
942 * in the sequence in which they were added.
943 */
944 order = get_fw_cfg_order(s, filename);
945 for (index = count;
946 index > 0 && order < s->entry_order[index - 1];
947 index--);
948 } else {
949 /* Sort by file name. */
950 for (index = count;
951 index > 0 && strcmp(filename, s->files->f[index - 1].name) < 0;
952 index--);
953 }
954
955 /*
956 * Move all the entries from the index point and after down one
957 * to create a slot for the new entry. Because calculations are
958 * being done with the index, make it so that "i" is the current
959 * index and "i - 1" is the one being copied from, thus the
960 * unusual start and end in the for statement.
961 */
962 for (i = count; i > index; i--) {
963 s->files->f[i] = s->files->f[i - 1];
964 s->files->f[i].select = cpu_to_be16(FW_CFG_FILE_FIRST + i);
965 s->entries[0][FW_CFG_FILE_FIRST + i] =
966 s->entries[0][FW_CFG_FILE_FIRST + i - 1];
967 s->entry_order[i] = s->entry_order[i - 1];
968 }
969
970 memset(&s->files->f[index], 0, sizeof(FWCfgFile));
971 memset(&s->entries[0][FW_CFG_FILE_FIRST + index], 0, sizeof(FWCfgEntry));
972
973 pstrcpy(s->files->f[index].name, sizeof(s->files->f[index].name), filename);
974 for (i = 0; i <= count; i++) {
975 if (i != index &&
976 strcmp(s->files->f[index].name, s->files->f[i].name) == 0) {
977 error_report("duplicate fw_cfg file name: %s",
978 s->files->f[index].name);
979 exit(1);
980 }
981 }
982
983 fw_cfg_add_bytes_callback(s, FW_CFG_FILE_FIRST + index,
984 select_cb, write_cb,
985 callback_opaque, data, len,
986 read_only);
987
988 s->files->f[index].size = cpu_to_be32(len);
989 s->files->f[index].select = cpu_to_be16(FW_CFG_FILE_FIRST + index);
990 s->entry_order[index] = order;
991 trace_fw_cfg_add_file(s, index, s->files->f[index].name, len);
992
993 s->files->count = cpu_to_be32(count+1);
994 fw_cfg_acpi_mr_save(s, filename, len);
995 }
996
997 void fw_cfg_add_file(FWCfgState *s, const char *filename,
998 void *data, size_t len)
999 {
1000 fw_cfg_add_file_callback(s, filename, NULL, NULL, NULL, data, len, true);
1001 }
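
/*
 * Typical usage sketch: board or device code hands a heap-allocated blob to
 * fw_cfg, which keeps the pointer for the lifetime of the item.  The item
 * name below is only an example; the "opt/" prefix is conventionally used
 * for external/user-provided payloads.
 *
 *     const char payload[] = "hello";
 *     fw_cfg_add_file(fw_cfg, "opt/org.example/greeting",
 *                     g_memdup(payload, sizeof(payload)), sizeof(payload));
 */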
1002
1003 void *fw_cfg_modify_file(FWCfgState *s, const char *filename,
1004 void *data, size_t len)
1005 {
1006 int i, index;
1007 void *ptr = NULL;
1008
1009 assert(s->files);
1010
1011 index = be32_to_cpu(s->files->count);
1012
1013 for (i = 0; i < index; i++) {
1014 if (strcmp(filename, s->files->f[i].name) == 0) {
1015 ptr = fw_cfg_modify_bytes_read(s, FW_CFG_FILE_FIRST + i,
1016 data, len);
1017 s->files->f[i].size = cpu_to_be32(len);
1018 fw_cfg_acpi_mr_save(s, filename, len);
1019 return ptr;
1020 }
1021 }
1022
1023 assert(index < fw_cfg_file_slots(s));
1024
1025 /* add new one */
1026 fw_cfg_add_file_callback(s, filename, NULL, NULL, NULL, data, len, true);
1027 return NULL;
1028 }
1029
1030 bool fw_cfg_add_from_generator(FWCfgState *s, const char *filename,
1031 const char *gen_id, Error **errp)
1032 {
1033 FWCfgDataGeneratorClass *klass;
1034 GByteArray *array;
1035 Object *obj;
1036 gsize size;
1037
1038 obj = object_resolve_path_component(object_get_objects_root(), gen_id);
1039 if (!obj) {
1040 error_setg(errp, "Cannot find object ID '%s'", gen_id);
1041 return false;
1042 }
1043 if (!object_dynamic_cast(obj, TYPE_FW_CFG_DATA_GENERATOR_INTERFACE)) {
1044 error_setg(errp, "Object ID '%s' is not a '%s' subclass",
1045 gen_id, TYPE_FW_CFG_DATA_GENERATOR_INTERFACE);
1046 return false;
1047 }
1048 klass = FW_CFG_DATA_GENERATOR_GET_CLASS(obj);
1049 array = klass->get_data(obj, errp);
1050 if (!array) {
1051 return false;
1052 }
1053 size = array->len;
1054 fw_cfg_add_file(s, filename, g_byte_array_free(array, FALSE), size);
1055
1056 return true;
1057 }
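
/*
 * This is the backend for the 'gen_id' flavour of the -fw_cfg command line
 * option: the item's content is produced by a QOM object implementing
 * TYPE_FW_CFG_DATA_GENERATOR_INTERFACE rather than read from a file.  From
 * memory, a command line along these lines (object id and item name are
 * only examples) wires the tls-cipher-suites generator to an item:
 *
 *     -object tls-cipher-suites,id=mysuite0,priority=@SYSTEM \
 *     -fw_cfg name=etc/edk2/https/ciphers,gen_id=mysuite0
 */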
1058
1059 void fw_cfg_add_extra_pci_roots(PCIBus *bus, FWCfgState *s)
1060 {
1061 int extra_hosts = 0;
1062
1063 if (!bus) {
1064 return;
1065 }
1066
1067 QLIST_FOREACH(bus, &bus->child, sibling) {
1068 /* look for expander root buses */
1069 if (pci_bus_is_root(bus)) {
1070 extra_hosts++;
1071 }
1072 }
1073
1074 if (extra_hosts && s) {
1075 uint64_t *val = g_malloc(sizeof(*val));
1076 *val = cpu_to_le64(extra_hosts);
1077 fw_cfg_add_file(s, "etc/extra-pci-roots", val, sizeof(*val));
1078 }
1079 }
1080
1081 static void fw_cfg_machine_reset(void *opaque)
1082 {
1083 MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
1084 FWCfgState *s = opaque;
1085 void *ptr;
1086 size_t len;
1087 char *buf;
1088
1089 buf = get_boot_devices_list(&len);
1090 ptr = fw_cfg_modify_file(s, "bootorder", (uint8_t *)buf, len);
1091 g_free(ptr);
1092
1093 if (!mc->legacy_fw_cfg_order) {
1094 buf = get_boot_devices_lchs_list(&len);
1095 ptr = fw_cfg_modify_file(s, "bios-geometry", (uint8_t *)buf, len);
1096 g_free(ptr);
1097 }
1098 }
1099
1100 static void fw_cfg_machine_ready(struct Notifier *n, void *data)
1101 {
1102 FWCfgState *s = container_of(n, FWCfgState, machine_ready);
1103 qemu_register_reset(fw_cfg_machine_reset, s);
1104 }
1105
1106 static Property fw_cfg_properties[] = {
1107 DEFINE_PROP_BOOL("acpi-mr-restore", FWCfgState, acpi_mr_restore, true),
1108 DEFINE_PROP_END_OF_LIST(),
1109 };
1110
1111 static void fw_cfg_common_realize(DeviceState *dev, Error **errp)
1112 {
1113 FWCfgState *s = FW_CFG(dev);
1114 MachineState *machine = MACHINE(qdev_get_machine());
1115 uint32_t version = FW_CFG_VERSION;
1116
1117 if (!fw_cfg_find()) {
1118 error_setg(errp, "at most one %s device is permitted", TYPE_FW_CFG);
1119 return;
1120 }
1121
1122 fw_cfg_add_bytes(s, FW_CFG_SIGNATURE, (char *)"QEMU", 4);
1123 fw_cfg_add_bytes(s, FW_CFG_UUID, &qemu_uuid, 16);
1124 fw_cfg_add_i16(s, FW_CFG_NOGRAPHIC, (uint16_t)!machine->enable_graphics);
1125 fw_cfg_add_i16(s, FW_CFG_BOOT_MENU, (uint16_t)(machine->boot_config.has_menu && machine->boot_config.menu));
1126 fw_cfg_bootsplash(s);
1127 fw_cfg_reboot(s);
1128
1129 if (s->dma_enabled) {
1130 version |= FW_CFG_VERSION_DMA;
1131 }
1132
1133 fw_cfg_add_i32(s, FW_CFG_ID, version);
1134
1135 s->machine_ready.notify = fw_cfg_machine_ready;
1136 qemu_add_machine_init_done_notifier(&s->machine_ready);
1137 }
1138
1139 FWCfgState *fw_cfg_init_io_dma(uint32_t iobase, uint32_t dma_iobase,
1140 AddressSpace *dma_as)
1141 {
1142 DeviceState *dev;
1143 SysBusDevice *sbd;
1144 FWCfgIoState *ios;
1145 FWCfgState *s;
1146 MemoryRegion *iomem = get_system_io();
1147 bool dma_requested = dma_iobase && dma_as;
1148
1149 dev = qdev_new(TYPE_FW_CFG_IO);
1150 if (!dma_requested) {
1151 qdev_prop_set_bit(dev, "dma_enabled", false);
1152 }
1153
1154 object_property_add_child(OBJECT(qdev_get_machine()), TYPE_FW_CFG,
1155 OBJECT(dev));
1156
1157 sbd = SYS_BUS_DEVICE(dev);
1158 sysbus_realize_and_unref(sbd, &error_fatal);
1159 ios = FW_CFG_IO(dev);
1160 memory_region_add_subregion(iomem, iobase, &ios->comb_iomem);
1161
1162 s = FW_CFG(dev);
1163
1164 if (s->dma_enabled) {
1165 /* 64 bits for the address field */
1166 s->dma_as = dma_as;
1167 s->dma_addr = 0;
1168 memory_region_add_subregion(iomem, dma_iobase, &s->dma_iomem);
1169 }
1170
1171 return s;
1172 }
1173
1174 FWCfgState *fw_cfg_init_io(uint32_t iobase)
1175 {
1176 return fw_cfg_init_io_dma(iobase, 0, NULL);
1177 }
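
/*
 * Sketch of how a board typically instantiates the port I/O flavour (the
 * constants are the x86 convention: selector/data ports at 0x510/0x511 and
 * the DMA address register at 0x514; other boards pass their own values):
 *
 *     FWCfgState *fw_cfg = fw_cfg_init_io_dma(0x510, 0x510 + 4,
 *                                             &address_space_memory);
 */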
1178
1179 FWCfgState *fw_cfg_init_mem_wide(hwaddr ctl_addr,
1180 hwaddr data_addr, uint32_t data_width,
1181 hwaddr dma_addr, AddressSpace *dma_as)
1182 {
1183 DeviceState *dev;
1184 SysBusDevice *sbd;
1185 FWCfgState *s;
1186 bool dma_requested = dma_addr && dma_as;
1187
1188 dev = qdev_new(TYPE_FW_CFG_MEM);
1189 qdev_prop_set_uint32(dev, "data_width", data_width);
1190 if (!dma_requested) {
1191 qdev_prop_set_bit(dev, "dma_enabled", false);
1192 }
1193
1194 object_property_add_child(OBJECT(qdev_get_machine()), TYPE_FW_CFG,
1195 OBJECT(dev));
1196
1197 sbd = SYS_BUS_DEVICE(dev);
1198 sysbus_realize_and_unref(sbd, &error_fatal);
1199 sysbus_mmio_map(sbd, 0, ctl_addr);
1200 sysbus_mmio_map(sbd, 1, data_addr);
1201
1202 s = FW_CFG(dev);
1203
1204 if (s->dma_enabled) {
1205 s->dma_as = dma_as;
1206 s->dma_addr = 0;
1207 sysbus_mmio_map(sbd, 2, dma_addr);
1208 }
1209
1210 return s;
1211 }
1212
1213 FWCfgState *fw_cfg_init_mem(hwaddr ctl_addr, hwaddr data_addr)
1214 {
1215 return fw_cfg_init_mem_wide(ctl_addr, data_addr,
1216 fw_cfg_data_mem_ops.valid.max_access_size,
1217 0, NULL);
1218 }
1219
1220
1221 FWCfgState *fw_cfg_find(void)
1222 {
1223 /* Returns NULL unless there is exactly one fw_cfg device */
1224 return FW_CFG(object_resolve_path_type("", TYPE_FW_CFG, NULL));
1225 }
1226
1227 void load_image_to_fw_cfg(FWCfgState *fw_cfg, uint16_t size_key,
1228 uint16_t data_key, const char *image_name,
1229 bool try_decompress)
1230 {
1231 size_t size = -1;
1232 uint8_t *data;
1233
1234 if (image_name == NULL) {
1235 return;
1236 }
1237
1238 if (try_decompress) {
1239 size = load_image_gzipped_buffer(image_name,
1240 LOAD_IMAGE_MAX_GUNZIP_BYTES, &data);
1241 }
1242
1243 if (size == (size_t)-1) {
1244 gchar *contents;
1245 gsize length;
1246
1247 if (!g_file_get_contents(image_name, &contents, &length, NULL)) {
1248 error_report("failed to load \"%s\"", image_name);
1249 exit(1);
1250 }
1251 size = length;
1252 data = (uint8_t *)contents;
1253 }
1254
1255 fw_cfg_add_i32(fw_cfg, size_key, size);
1256 fw_cfg_add_bytes(fw_cfg, data_key, data, size);
1257 }
1258
1259 static void fw_cfg_class_init(ObjectClass *klass, void *data)
1260 {
1261 DeviceClass *dc = DEVICE_CLASS(klass);
1262
1263 dc->reset = fw_cfg_reset;
1264 dc->vmsd = &vmstate_fw_cfg;
1265
1266 device_class_set_props(dc, fw_cfg_properties);
1267 }
1268
1269 static const TypeInfo fw_cfg_info = {
1270 .name = TYPE_FW_CFG,
1271 .parent = TYPE_SYS_BUS_DEVICE,
1272 .abstract = true,
1273 .instance_size = sizeof(FWCfgState),
1274 .class_init = fw_cfg_class_init,
1275 };
1276
1277 static void fw_cfg_file_slots_allocate(FWCfgState *s, Error **errp)
1278 {
1279 uint16_t file_slots_max;
1280
1281 if (fw_cfg_file_slots(s) < FW_CFG_FILE_SLOTS_MIN) {
1282 error_setg(errp, "\"file_slots\" must be at least 0x%x",
1283 FW_CFG_FILE_SLOTS_MIN);
1284 return;
1285 }
1286
1287 /* (UINT16_MAX & FW_CFG_ENTRY_MASK) is the highest inclusive selector value
1288 * that we permit. The actual (exclusive) value coming from the
1289 * configuration is (FW_CFG_FILE_FIRST + fw_cfg_file_slots(s)). */
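/*
 * With the definitions in fw_cfg.h (FW_CFG_ENTRY_MASK masking off the
 * FW_CFG_WRITE_CHANNEL and FW_CFG_ARCH_LOCAL bits, FW_CFG_FILE_FIRST
 * being 0x20), this works out to 0x3fff - 0x20 + 1 = 0x3fe0 slots.
 */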
1290 file_slots_max = (UINT16_MAX & FW_CFG_ENTRY_MASK) - FW_CFG_FILE_FIRST + 1;
1291 if (fw_cfg_file_slots(s) > file_slots_max) {
1292 error_setg(errp, "\"file_slots\" must not exceed 0x%" PRIx16,
1293 file_slots_max);
1294 return;
1295 }
1296
1297 s->entries[0] = g_new0(FWCfgEntry, fw_cfg_max_entry(s));
1298 s->entries[1] = g_new0(FWCfgEntry, fw_cfg_max_entry(s));
1299 s->entry_order = g_new0(int, fw_cfg_max_entry(s));
1300 }
1301
1302 static Property fw_cfg_io_properties[] = {
1303 DEFINE_PROP_BOOL("dma_enabled", FWCfgIoState, parent_obj.dma_enabled,
1304 true),
1305 DEFINE_PROP_UINT16("x-file-slots", FWCfgIoState, parent_obj.file_slots,
1306 FW_CFG_FILE_SLOTS_DFLT),
1307 DEFINE_PROP_END_OF_LIST(),
1308 };
1309
1310 static void fw_cfg_io_realize(DeviceState *dev, Error **errp)
1311 {
1312 ERRP_GUARD();
1313 FWCfgIoState *s = FW_CFG_IO(dev);
1314
1315 fw_cfg_file_slots_allocate(FW_CFG(s), errp);
1316 if (*errp) {
1317 return;
1318 }
1319
1320 /* when using port i/o, the 8-bit data register ALWAYS overlaps
1321 * with half of the 16-bit control register. Hence, the total size
1322 * of the i/o region used is FW_CFG_CTL_SIZE */
1323 memory_region_init_io(&s->comb_iomem, OBJECT(s), &fw_cfg_comb_mem_ops,
1324 FW_CFG(s), "fwcfg", FW_CFG_CTL_SIZE);
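
/*
 * For instance, with the traditional x86 base of 0x510 this single two-byte
 * region provides the 16-bit selector at 0x510 and the 8-bit data register
 * at 0x511, matching the access sizes fw_cfg_comb_valid() accepts.
 */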
1325
1326 if (FW_CFG(s)->dma_enabled) {
1327 memory_region_init_io(&FW_CFG(s)->dma_iomem, OBJECT(s),
1328 &fw_cfg_dma_mem_ops, FW_CFG(s), "fwcfg.dma",
1329 sizeof(dma_addr_t));
1330 }
1331
1332 fw_cfg_common_realize(dev, errp);
1333 }
1334
1335 static void fw_cfg_io_class_init(ObjectClass *klass, void *data)
1336 {
1337 DeviceClass *dc = DEVICE_CLASS(klass);
1338
1339 dc->realize = fw_cfg_io_realize;
1340 device_class_set_props(dc, fw_cfg_io_properties);
1341 }
1342
1343 static const TypeInfo fw_cfg_io_info = {
1344 .name = TYPE_FW_CFG_IO,
1345 .parent = TYPE_FW_CFG,
1346 .instance_size = sizeof(FWCfgIoState),
1347 .class_init = fw_cfg_io_class_init,
1348 };
1349
1350
1351 static Property fw_cfg_mem_properties[] = {
1352 DEFINE_PROP_UINT32("data_width", FWCfgMemState, data_width, -1),
1353 DEFINE_PROP_BOOL("dma_enabled", FWCfgMemState, parent_obj.dma_enabled,
1354 true),
1355 DEFINE_PROP_UINT16("x-file-slots", FWCfgMemState, parent_obj.file_slots,
1356 FW_CFG_FILE_SLOTS_DFLT),
1357 DEFINE_PROP_END_OF_LIST(),
1358 };
1359
1360 static void fw_cfg_mem_realize(DeviceState *dev, Error **errp)
1361 {
1362 ERRP_GUARD();
1363 FWCfgMemState *s = FW_CFG_MEM(dev);
1364 SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
1365 const MemoryRegionOps *data_ops = &fw_cfg_data_mem_ops;
1366
1367 fw_cfg_file_slots_allocate(FW_CFG(s), errp);
1368 if (*errp) {
1369 return;
1370 }
1371
1372 memory_region_init_io(&s->ctl_iomem, OBJECT(s), &fw_cfg_ctl_mem_ops,
1373 FW_CFG(s), "fwcfg.ctl", FW_CFG_CTL_SIZE);
1374 sysbus_init_mmio(sbd, &s->ctl_iomem);
1375
1376 if (s->data_width > data_ops->valid.max_access_size) {
1377 s->wide_data_ops = *data_ops;
1378
1379 s->wide_data_ops.valid.max_access_size = s->data_width;
1380 s->wide_data_ops.impl.max_access_size = s->data_width;
1381 data_ops = &s->wide_data_ops;
1382 }
1383 memory_region_init_io(&s->data_iomem, OBJECT(s), data_ops, FW_CFG(s),
1384 "fwcfg.data", data_ops->valid.max_access_size);
1385 sysbus_init_mmio(sbd, &s->data_iomem);
1386
1387 if (FW_CFG(s)->dma_enabled) {
1388 memory_region_init_io(&FW_CFG(s)->dma_iomem, OBJECT(s),
1389 &fw_cfg_dma_mem_ops, FW_CFG(s), "fwcfg.dma",
1390 sizeof(dma_addr_t));
1391 sysbus_init_mmio(sbd, &FW_CFG(s)->dma_iomem);
1392 }
1393
1394 fw_cfg_common_realize(dev, errp);
1395 }
1396
1397 static void fw_cfg_mem_class_init(ObjectClass *klass, void *data)
1398 {
1399 DeviceClass *dc = DEVICE_CLASS(klass);
1400
1401 dc->realize = fw_cfg_mem_realize;
1402 device_class_set_props(dc, fw_cfg_mem_properties);
1403 }
1404
1405 static const TypeInfo fw_cfg_mem_info = {
1406 .name = TYPE_FW_CFG_MEM,
1407 .parent = TYPE_FW_CFG,
1408 .instance_size = sizeof(FWCfgMemState),
1409 .class_init = fw_cfg_mem_class_init,
1410 };
1411
1412 static void fw_cfg_register_types(void)
1413 {
1414 type_register_static(&fw_cfg_info);
1415 type_register_static(&fw_cfg_io_info);
1416 type_register_static(&fw_cfg_mem_info);
1417 }
1418
1419 type_init(fw_cfg_register_types)
1420