/*
 * Generic Virtual-Device Fuzzing Target
 *
 * Copyright Red Hat Inc., 2020
 *
 * Authors:
 *  Alexander Bulekov   <alxndr@bu.edu>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/range.h"

#include <wordexp.h>

#include "hw/core/cpu.h"
#include "tests/qtest/libqtest.h"
#include "tests/qtest/libqos/pci-pc.h"
#include "fuzz.h"
#include "string.h"
#include "exec/memory.h"
#include "exec/ramblock.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_device.h"
#include "hw/boards.h"
#include "generic_fuzz_configs.h"
#include "hw/mem/sparse-mem.h"

static void pci_enum(gpointer pcidev, gpointer bus);

/*
 * SEPARATOR is used to separate "operations" in the fuzz input
 */
#define SEPARATOR "FUZZ"

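/*
 * The first byte of each operation, modulo the number of entries here,
 * selects the op_* handler that runs (see generic_fuzz()).
 */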
enum cmds {
    OP_IN,
    OP_OUT,
    OP_READ,
    OP_WRITE,
    OP_PCI_READ,
    OP_PCI_WRITE,
    OP_DISABLE_PCI,
    OP_ADD_DMA_PATTERN,
    OP_CLEAR_DMA_PATTERNS,
    OP_CLOCK_STEP,
};

#define USEC_IN_SEC 1000000000

#define MAX_DMA_FILL_SIZE 0x10000
#define MAX_TOTAL_DMA_SIZE 0x10000000

#define PCI_HOST_BRIDGE_CFG 0xcf8
#define PCI_HOST_BRIDGE_DATA 0xcfc

typedef struct {
    ram_addr_t addr;
    ram_addr_t size; /* The number of bytes until the end of the I/O region */
} address_range;

static bool qtest_log_enabled;
size_t dma_bytes_written;

MemoryRegion *sparse_mem_mr;

/*
 * A pattern used to populate a DMA region or perform a memwrite. This is
 * useful for e.g. populating tables of unique addresses.
 * Example {.index = 1, .stride = 2, .len = 3, .data = "\x00\x01\x02"}
 * Renders as: 00 01 02   00 03 02   00 05 02   00 07 02 ...
 */
typedef struct {
    uint8_t index;      /* Index of a byte to increment by stride */
    uint8_t stride;     /* Increment each index'th byte by this amount */
    size_t len;
    const uint8_t *data;
} pattern;

/* Avoid filling the same DMA region between MMIO/PIO commands? */
static bool avoid_double_fetches;

static QTestState *qts_global; /* Need a global for the DMA callback */

/*
 * List of memory regions that are children of QOM objects specified by the
 * user for fuzzing.
 */
static GHashTable *fuzzable_memoryregions;
static GPtrArray *fuzzable_pci_devices;

struct get_io_cb_info {
    int index;
    int found;
    address_range result;
};

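/*
 * flatview_for_each_range() callback: count down to the index'th fuzzable
 * MemoryRegion in the FlatView and record its absolute address and size.
 */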
static bool get_io_address_cb(Int128 start, Int128 size,
                              const MemoryRegion *mr,
                              hwaddr offset_in_region,
                              void *opaque)
{
    struct get_io_cb_info *info = opaque;
    if (g_hash_table_lookup(fuzzable_memoryregions, mr)) {
        if (info->index == 0) {
            info->result.addr = (ram_addr_t)start;
            info->result.size = (ram_addr_t)size;
            info->found = 1;
            return true;
        }
        info->index--;
    }
    return false;
}

/*
 * List of dma regions populated since the last fuzzing command. Used to ensure
 * that we only write to each DMA address once, to avoid race conditions when
 * building reproducers.
 */
static GArray *dma_regions;

static GArray *dma_patterns;
static int dma_pattern_index;
static bool pci_disabled;

/*
 * Allocate a block of memory and populate it with a pattern.
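 *
 * e.g. with the example pattern defined above, pattern_alloc(p, 12) yields
 * the bytes 00 01 02 00 03 02 00 05 02 00 07 02.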
 */
static void *pattern_alloc(pattern p, size_t len)
{
    int i;
    uint8_t *buf = g_malloc(len);
    uint8_t sum = 0;

    for (i = 0; i < len; ++i) {
        buf[i] = p.data[i % p.len];
        if ((i % p.len) == p.index) {
            buf[i] += sum;
            sum += p.stride;
        }
    }
    return buf;
}

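/*
 * For example, an 8-byte access at address 0x6 to a region with
 * max_access_size 4 and no support for unaligned accesses is capped to
 * 0x6 & -0x6 == 2 bytes, and then rounded down to a power of two.
 */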
static int fuzz_memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /*
     * Regions are assumed to support 1-4 byte accesses unless
     * otherwise specified.
     */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}

/*
 * Call-back for functions that perform DMA reads from guest memory. Confirm
 * that the region has not already been populated since the last loop in
 * generic_fuzz(), avoiding potential race-conditions, which we don't
 * currently have a good way of reproducing.
 */
void fuzz_dma_read_cb(size_t addr, size_t len, MemoryRegion *mr)
{
    /* Are we in the generic-fuzzer or are we using another fuzz-target? */
    if (!qts_global) {
        return;
    }

    /*
     * Return immediately if:
     * - We have no DMA patterns defined
     * - The length of the DMA read request is zero
     * - The DMA read is hitting an MR other than the machine's main RAM
     * - The DMA request hits past the bounds of our RAM
     */
    if (dma_patterns->len == 0
        || len == 0
        || dma_bytes_written + len > MAX_TOTAL_DMA_SIZE
        || (mr != current_machine->ram && mr != sparse_mem_mr)) {
        return;
    }

    /*
     * If we overlap with any existing dma_regions, split the range and only
     * populate the non-overlapping parts.
     */
    address_range region;
    bool double_fetch = false;
    for (int i = 0;
         i < dma_regions->len && (avoid_double_fetches || qtest_log_enabled);
         ++i) {
        region = g_array_index(dma_regions, address_range, i);
        if (ranges_overlap(addr, len, region.addr, region.size)) {
            double_fetch = true;
            if (addr < region.addr
                && avoid_double_fetches) {
                fuzz_dma_read_cb(addr, region.addr - addr, mr);
            }
            if (addr + len > region.addr + region.size
                && avoid_double_fetches) {
                fuzz_dma_read_cb(region.addr + region.size,
                        addr + len - (region.addr + region.size), mr);
            }
            return;
        }
    }

    /* Cap the length of the DMA access to something reasonable */
    len = MIN(len, MAX_DMA_FILL_SIZE);

    address_range ar = {addr, len};
    g_array_append_val(dma_regions, ar);
    pattern p = g_array_index(dma_patterns, pattern, dma_pattern_index);
    void *buf_base = pattern_alloc(p, ar.size);
    void *buf = buf_base;
    hwaddr l, addr1;
    MemoryRegion *mr1;
    while (len > 0) {
        l = len;
        mr1 = address_space_translate(first_cpu->as,
                                      addr, &addr1, &l, true,
                                      MEMTXATTRS_UNSPECIFIED);

        /*
         *  If mr1 isn't RAM, address_space_translate doesn't update l. Use
         *  fuzz_memory_access_size to identify the number of bytes that it
         *  is safe to write without accidentally writing to another
         *  MemoryRegion.
         */
        if (!memory_region_is_ram(mr1)) {
            l = fuzz_memory_access_size(mr1, l, addr1);
        }
        if (memory_region_is_ram(mr1) ||
            memory_region_is_romd(mr1) ||
            mr1 == sparse_mem_mr) {
            /* ROM/RAM case */
            if (qtest_log_enabled) {
                /*
                 * With QTEST_LOG, use a normal, slow QTest memwrite. Prefix
                 * the log that will be written by qtest.c with a DMA tag, so
                 * we can reorder the resulting QTest trace so the DMA fills
                 * precede the last PIO/MMIO command.
                 */
                fprintf(stderr, "[DMA] ");
                if (double_fetch) {
                    fprintf(stderr, "[DOUBLE-FETCH] ");
                }
                fflush(stderr);
            }
            qtest_memwrite(qts_global, addr, buf, l);
            dma_bytes_written += l;
        }
        len -= l;
        buf += l;
        addr += l;

    }
    g_free(buf_base);

    /* Increment the index of the pattern for the next DMA access */
    dma_pattern_index = (dma_pattern_index + 1) % dma_patterns->len;
}

/*
 * Here we want to convert a fuzzer-provided [io-region-index, offset] to
 * a physical address. To do this, we iterate over all of the matched
 * MemoryRegions, checking whether each region exists within the particular io
 * space. We return the absolute address of the offset within the index'th
 * region that is a subregion of the io_space, along with the distance until
 * the end of that memory region.
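 *
 * For example, index 1 with offset 0x10 resolves to an address 0x10 bytes
 * into the second matched subregion (the offset is wrapped modulo the
 * region's size, and the index wraps around if it exceeds the number of
 * matching regions).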
 */
static bool get_io_address(address_range *result, AddressSpace *as,
                            uint8_t index,
                            uint32_t offset) {
    FlatView *view;
    view = as->current_map;
    g_assert(view);
    struct get_io_cb_info cb_info = {};

    cb_info.index = index;

    /*
     * Loop around the FlatView until we match "index" number of
     * fuzzable_memoryregions, or until we know that there are no matching
     * memory_regions.
     */
    do {
        flatview_for_each_range(view, get_io_address_cb, &cb_info);
    } while (cb_info.index != index && !cb_info.found);

    *result = cb_info.result;
    if (result->size) {
        offset = offset % result->size;
        result->addr += offset;
        result->size -= offset;
    }
    return cb_info.found;
}

static bool get_pio_address(address_range *result,
                            uint8_t index, uint16_t offset)
{
    /*
     * PIO BARs can be set past the maximum port address (0xFFFF). Thus, result
     * can contain an addr that extends past the PIO space. When we pass this
     * address to qtest_in/qtest_out, it is cast to a uint16_t, so we might end
     * up fuzzing a completely different MemoryRegion/Device. Therefore, check
     * that the address here is within the PIO space limits.
     */
    bool found = get_io_address(result, &address_space_io, index, offset);
    return result->addr <= 0xFFFF ? found : false;
}

static bool get_mmio_address(address_range *result,
                             uint8_t index, uint32_t offset)
{
    return get_io_address(result, &address_space_memory, index, offset);
}

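/*
 * The PIO/MMIO/PCI op_* handlers below copy their operands out of the raw
 * fuzz input into a small struct (access size, region or device index,
 * offset, and a value for the write variants) and return early if the input
 * is too short.
 */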
static void op_in(QTestState *s, const unsigned char * data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint16_t offset;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));
    if (get_pio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_inb(s, abs.addr);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_inw(s, abs.addr);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_inl(s, abs.addr);
        }
        break;
    }
}

static void op_out(QTestState *s, const unsigned char * data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint16_t offset;
        uint32_t value;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));

    if (get_pio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_outb(s, abs.addr, a.value & 0xFF);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_outw(s, abs.addr, a.value & 0xFFFF);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_outl(s, abs.addr, a.value);
        }
        break;
    }
}

static void op_read(QTestState *s, const unsigned char * data, size_t len)
{
    enum Sizes {Byte, Word, Long, Quad, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint32_t offset;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));

    if (get_mmio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_readb(s, abs.addr);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_readw(s, abs.addr);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_readl(s, abs.addr);
        }
        break;
    case Quad:
        if (abs.size >= 8) {
            qtest_readq(s, abs.addr);
        }
        break;
    }
}

static void op_write(QTestState *s, const unsigned char * data, size_t len)
{
    enum Sizes {Byte, Word, Long, Quad, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint32_t offset;
        uint64_t value;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));

    if (get_mmio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_writeb(s, abs.addr, a.value & 0xFF);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_writew(s, abs.addr, a.value & 0xFFFF);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_writel(s, abs.addr, a.value & 0xFFFFFFFF);
        }
        break;
    case Quad:
        if (abs.size >= 8) {
            qtest_writeq(s, abs.addr, a.value);
        }
        break;
    }
}

static void op_pci_read(QTestState *s, const unsigned char * data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint8_t offset;
    } a;
    if (len < sizeof(a) || fuzzable_pci_devices->len == 0 || pci_disabled) {
        return;
    }
    memcpy(&a, data, sizeof(a));
    PCIDevice *dev = g_ptr_array_index(fuzzable_pci_devices,
                                  a.base % fuzzable_pci_devices->len);
    int devfn = dev->devfn;
    qtest_outl(s, PCI_HOST_BRIDGE_CFG, (1U << 31) | (devfn << 8) | a.offset);
    switch (a.size %= end_sizes) {
    case Byte:
        qtest_inb(s, PCI_HOST_BRIDGE_DATA);
        break;
    case Word:
        qtest_inw(s, PCI_HOST_BRIDGE_DATA);
        break;
    case Long:
        qtest_inl(s, PCI_HOST_BRIDGE_DATA);
        break;
    }
}

static void op_pci_write(QTestState *s, const unsigned char * data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint8_t offset;
        uint32_t value;
    } a;
    if (len < sizeof(a) || fuzzable_pci_devices->len == 0 || pci_disabled) {
        return;
    }
    memcpy(&a, data, sizeof(a));
    PCIDevice *dev = g_ptr_array_index(fuzzable_pci_devices,
                                  a.base % fuzzable_pci_devices->len);
    int devfn = dev->devfn;
    qtest_outl(s, PCI_HOST_BRIDGE_CFG, (1U << 31) | (devfn << 8) | a.offset);
    switch (a.size %= end_sizes) {
    case Byte:
        qtest_outb(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFF);
        break;
    case Word:
        qtest_outw(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFFFF);
        break;
    case Long:
        qtest_outl(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFFFFFFFF);
        break;
    }
}

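/*
 * For example, the operand bytes 01 02 00 01 02 register the pattern
 * {.index = 1, .stride = 2, .len = 3, .data = "\x00\x01\x02"} described at
 * the top of this file.
 */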
static void op_add_dma_pattern(QTestState *s,
                               const unsigned char *data, size_t len)
{
    struct {
        /*
         * index and stride can be used to increment the index-th byte of the
         * pattern by the value stride, for each loop of the pattern.
         */
        uint8_t index;
        uint8_t stride;
    } a;

    if (len < sizeof(a) + 1) {
        return;
    }
    memcpy(&a, data, sizeof(a));
    pattern p = {a.index, a.stride, len - sizeof(a), data + sizeof(a)};
    p.index = a.index % p.len;
    g_array_append_val(dma_patterns, p);
    return;
}

static void op_clear_dma_patterns(QTestState *s,
                                  const unsigned char *data, size_t len)
{
    g_array_set_size(dma_patterns, 0);
    dma_pattern_index = 0;
}

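/* Step the virtual clock forward to the next pending timer deadline. */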
static void op_clock_step(QTestState *s, const unsigned char *data, size_t len)
{
    qtest_clock_step_next(s);
}

static void op_disable_pci(QTestState *s, const unsigned char *data, size_t len)
{
    pci_disabled = true;
}

/*
 * Here, we interpret random bytes from the fuzzer as a sequence of commands.
 * Some commands can be variable-width, so we use a separator, SEPARATOR, to
 * specify the boundaries between commands. SEPARATOR is used to separate
 * "operations" in the fuzz input. Why use a separator, instead of just using
 * the operations' length to identify operation boundaries?
 *   1. This is a simple way to support variable-length operations
 *   2. This adds "stability" to the input.
 *      For example take the input "AbBcgDefg", where there is no separator and
 *      Opcodes are capitalized.
 *      Simply, by removing the first byte, we end up with a very different
 *      sequence:
 *      BbcGdefg...
 *      By adding a separator, we avoid this problem:
 *      Ab SEP Bcg SEP Defg -> B SEP Bcg SEP Defg
 *      Since B uses two additional bytes as operands, the first "B" will be
 *      ignored. The fuzzer actively tries to reduce inputs, so such unused
 *      bytes are likely to be pruned, eventually.
 *
 * SEPARATOR is trivial for the fuzzer to discover when using ASan. Optionally,
 * SEPARATOR can be manually specified as a dictionary value (see libfuzzer's
 * -dict), though this should not be necessary.
 *
 * As a result, the stream of bytes is converted into a sequence of commands.
 * In a simplified example where SEPARATOR is 0xFF:
 * 00 01 02 FF 03 04 05 06 FF 01 FF ...
 * becomes this sequence of commands:
 * 00 01 02    -> op00 (0102)   -> in (0102, 2)
 * 03 04 05 06 -> op03 (040506) -> write (040506, 3)
 * 01          -> op01 (-,0)    -> out (-,0)
 * ...
 *
 * Note here that it is the job of the individual opcode functions to check
 * that enough data was provided. I.e. in the last command out (-,0), out needs
 * to detect that not enough data was provided to select an address/value for
 * the operation, and simply return without doing anything.
 */
static void generic_fuzz(QTestState *s, const unsigned char *Data, size_t Size)
{
    void (*ops[]) (QTestState *s, const unsigned char* , size_t) = {
        [OP_IN]                 = op_in,
        [OP_OUT]                = op_out,
        [OP_READ]               = op_read,
        [OP_WRITE]              = op_write,
        [OP_PCI_READ]           = op_pci_read,
        [OP_PCI_WRITE]          = op_pci_write,
        [OP_DISABLE_PCI]        = op_disable_pci,
        [OP_ADD_DMA_PATTERN]    = op_add_dma_pattern,
        [OP_CLEAR_DMA_PATTERNS] = op_clear_dma_patterns,
        [OP_CLOCK_STEP]         = op_clock_step,
    };
    const unsigned char *cmd = Data;
    const unsigned char *nextcmd;
    size_t cmd_len;
    uint8_t op;

    op_clear_dma_patterns(s, NULL, 0);
    pci_disabled = false;
    dma_bytes_written = 0;

    QPCIBus *pcibus = qpci_new_pc(s, NULL);
    g_ptr_array_foreach(fuzzable_pci_devices, pci_enum, pcibus);
    qpci_free_pc(pcibus);

    while (cmd && Size) {
        /* Get the length until the next command or end of input */
        nextcmd = memmem(cmd, Size, SEPARATOR, strlen(SEPARATOR));
        cmd_len = nextcmd ? nextcmd - cmd : Size;

        if (cmd_len > 0) {
            /* Interpret the first byte of the command as an opcode */
            op = *cmd % (sizeof(ops) / sizeof((ops)[0]));
            ops[op](s, cmd + 1, cmd_len - 1);

            /* Run the main loop */
            flush_events(s);
        }
        /* Advance to the next command */
        cmd = nextcmd ? nextcmd + sizeof(SEPARATOR) - 1 : nextcmd;
        Size = Size - (cmd_len + sizeof(SEPARATOR) - 1);
        g_array_set_size(dma_regions, 0);
    }
    fuzz_reset(s);
}

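/*
 * Example invocation (one possible configuration; docs/devel/fuzzing.rst and
 * generic_fuzz_configs.h list others):
 *   QEMU_FUZZ_ARGS="-M pc -nodefaults -device virtio-balloon" \
 *   QEMU_FUZZ_OBJECTS="virtio*" \
 *   ./qemu-fuzz-i386 --fuzz-target=generic-fuzz
 */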
static void usage(void)
{
    printf("Please specify the following environment variables:\n");
    printf("QEMU_FUZZ_ARGS= the command line arguments passed to qemu\n");
    printf("QEMU_FUZZ_OBJECTS= "
            "a space separated list of QOM type names for objects to fuzz\n");
    printf("Optionally: QEMU_AVOID_DOUBLE_FETCH= "
            "Try to avoid racy DMA double fetch bugs? %d by default\n",
            avoid_double_fetches);
    exit(0);
}

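/*
 * object_child_foreach_recursive() callback: record any I/O (non-RAM,
 * non-ROM) MemoryRegion found beneath a matched QOM object.
 */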
static int locate_fuzz_memory_regions(Object *child, void *opaque)
{
    MemoryRegion *mr;
    if (object_dynamic_cast(child, TYPE_MEMORY_REGION)) {
        mr = MEMORY_REGION(child);
        if ((memory_region_is_ram(mr) ||
            memory_region_is_ram_device(mr) ||
            memory_region_is_rom(mr)) == false) {
            /*
             * We don't want duplicate pointers to the same MemoryRegion, so
             * store them in a hash table keyed on the MemoryRegion pointer;
             * re-inserting a region simply replaces the existing entry.
             */
            g_hash_table_insert(fuzzable_memoryregions, mr, (gpointer)true);
        }
    }
    return 0;
}

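/*
 * object_child_foreach_recursive() callback: match QOM objects against the
 * user-supplied glob pattern, by type name or (for MemoryRegions) by path
 * component, and record any fuzzable MemoryRegions and PCI devices found.
 */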
static int locate_fuzz_objects(Object *child, void *opaque)
{
    GString *type_name;
    GString *path_name;
    char *pattern = opaque;

    type_name = g_string_new(object_get_typename(child));
    g_string_ascii_down(type_name);
    if (g_pattern_match_simple(pattern, type_name->str)) {
        /* Find and save ptrs to any child MemoryRegions */
        object_child_foreach_recursive(child, locate_fuzz_memory_regions, NULL);

        /*
         * We matched an object. If it's a PCI device, store a pointer to it so
         * we can map BARs and fuzz its config space.
         */
        if (object_dynamic_cast(OBJECT(child), TYPE_PCI_DEVICE)) {
            /*
             * We don't want duplicate pointers to the same PCIDevice, so
             * remove any existing copy of the pointer before adding it.
             */
            g_ptr_array_remove_fast(fuzzable_pci_devices, PCI_DEVICE(child));
            g_ptr_array_add(fuzzable_pci_devices, PCI_DEVICE(child));
        }
    } else if (object_dynamic_cast(OBJECT(child), TYPE_MEMORY_REGION)) {
        path_name = g_string_new(object_get_canonical_path_component(child));
        g_string_ascii_down(path_name);
        if (g_pattern_match_simple(pattern, path_name->str)) {
            MemoryRegion *mr;
            mr = MEMORY_REGION(child);
            if ((memory_region_is_ram(mr) ||
                 memory_region_is_ram_device(mr) ||
                 memory_region_is_rom(mr)) == false) {
                g_hash_table_insert(fuzzable_memoryregions, mr, (gpointer)true);
            }
        }
        g_string_free(path_name, true);
    }
    g_string_free(type_name, true);
    return 0;
}


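/*
 * Map each BAR that a matched PCI device exposes and enable the device, so
 * that its MMIO/PIO regions become accessible to the fuzzer.
 */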
static void pci_enum(gpointer pcidev, gpointer bus)
{
    PCIDevice *dev = pcidev;
    QPCIDevice *qdev;
    int i;

    qdev = qpci_device_find(bus, dev->devfn);
    g_assert(qdev != NULL);
    for (i = 0; i < 6; i++) {
        if (dev->io_regions[i].size) {
            qpci_iomap(qdev, i, NULL);
        }
    }
    qpci_device_enable(qdev);
    g_free(qdev);
}

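/*
 * One-time setup before fuzzing starts: parse QEMU_FUZZ_OBJECTS, collect the
 * matching MemoryRegions and PCI devices, and allocate the DMA pattern and
 * DMA region bookkeeping.
 */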
static void generic_pre_fuzz(QTestState *s)
{
    GHashTableIter iter;
    MemoryRegion *mr;
    char **result;
    GString *name_pattern;

    if (!getenv("QEMU_FUZZ_OBJECTS")) {
        usage();
    }
    if (getenv("QTEST_LOG")) {
        qtest_log_enabled = 1;
    }
    if (getenv("QEMU_AVOID_DOUBLE_FETCH")) {
        avoid_double_fetches = 1;
    }
    qts_global = s;

    /*
     * Create a special device that we can use to back DMA buffers at very
     * high memory addresses
     */
    sparse_mem_mr = sparse_mem_init(0, UINT64_MAX);

    dma_regions = g_array_new(false, false, sizeof(address_range));
    dma_patterns = g_array_new(false, false, sizeof(pattern));

    fuzzable_memoryregions = g_hash_table_new(NULL, NULL);
    fuzzable_pci_devices   = g_ptr_array_new();

    result = g_strsplit(getenv("QEMU_FUZZ_OBJECTS"), " ", -1);
    for (int i = 0; result[i] != NULL; i++) {
        name_pattern = g_string_new(result[i]);
        /*
         * Make the pattern lowercase. We do the same for all the MemoryRegion
         * and Type names so the configs are case-insensitive.
         */
        g_string_ascii_down(name_pattern);
        printf("Matching objects by name %s\n", result[i]);
        object_child_foreach_recursive(qdev_get_machine(),
                                    locate_fuzz_objects,
                                    name_pattern->str);
        g_string_free(name_pattern, true);
    }
    g_strfreev(result);
    printf("This process will try to fuzz the following MemoryRegions:\n");

    g_hash_table_iter_init(&iter, fuzzable_memoryregions);
    while (g_hash_table_iter_next(&iter, (gpointer)&mr, NULL)) {
        printf("  * %s (size 0x%" PRIx64 ")\n",
               object_get_canonical_path_component(&(mr->parent_obj)),
               memory_region_size(mr));
    }

    if (!g_hash_table_size(fuzzable_memoryregions)) {
        printf("No fuzzable memory regions found...\n");
        exit(1);
    }
}

/*
 * When libfuzzer gives us two inputs to combine, return a new input with the
 * following structure:
 *
 * Input 1 (data1)
 * SEPARATOR
 * Clear out the DMA Patterns
 * SEPARATOR
 * Disable the pci_read/write instructions
 * SEPARATOR
 * Input 2 (data2)
 *
 * The idea is to collate the core behaviors of the two inputs.
 * For example:
 * Input 1: maps a device's BARs, sets up three DMA patterns, and triggers
 *          device functionality A
 * Input 2: maps a device's BARs, sets up one DMA pattern, and triggers device
 *          functionality B
 *
 * This function attempts to produce an input that:
 * Output: maps a device's BARs, sets up three DMA patterns, triggers
 *          device functionality A, replaces the DMA patterns with a single
 *          pattern, and triggers device functionality B.
 */
static size_t generic_fuzz_crossover(const uint8_t *data1, size_t size1, const
                                     uint8_t *data2, size_t size2, uint8_t *out,
                                     size_t max_out_size, unsigned int seed)
{
    size_t copy_len = 0, size = 0;

    /* Check that we have enough space for data1 and at least part of data2 */
    if (max_out_size <= size1 + strlen(SEPARATOR) * 3 + 2) {
        return 0;
    }

    /* Copy in the first input */
    copy_len = size1;
    memcpy(out + size, data1, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Append a separator */
    copy_len = strlen(SEPARATOR);
    memcpy(out + size, SEPARATOR, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Clear out the DMA Patterns */
    copy_len = 1;
    if (copy_len) {
        out[size] = OP_CLEAR_DMA_PATTERNS;
    }
    size += copy_len;
    max_out_size -= copy_len;

    /* Append a separator */
    copy_len = strlen(SEPARATOR);
    memcpy(out + size, SEPARATOR, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Disable PCI ops. Assume data1 took care of setting up PCI */
    copy_len = 1;
    if (copy_len) {
        out[size] = OP_DISABLE_PCI;
    }
    size += copy_len;
    max_out_size -= copy_len;

    /* Append a separator */
    copy_len = strlen(SEPARATOR);
    memcpy(out + size, SEPARATOR, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Copy over as much of the second input as fits */
    copy_len = MIN(size2, max_out_size);
    memcpy(out + size, data2, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    return size;
}


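/* Build the QEMU command line from the user-supplied QEMU_FUZZ_ARGS. */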
static GString *generic_fuzz_cmdline(FuzzTarget *t)
{
    GString *cmd_line = g_string_new(TARGET_NAME);
    if (!getenv("QEMU_FUZZ_ARGS")) {
        usage();
    }
    g_string_append_printf(cmd_line, " -display none \
                                      -machine accel=qtest, \
                                      -m 512M %s ", getenv("QEMU_FUZZ_ARGS"));
    return cmd_line;
}

static GString *generic_fuzz_predefined_config_cmdline(FuzzTarget *t)
{
    gchar *args;
    const generic_fuzz_config *config;
    g_assert(t->opaque);

    config = t->opaque;
    g_setenv("QEMU_AVOID_DOUBLE_FETCH", "1", 1);
    if (config->argfunc) {
        args = config->argfunc();
        g_setenv("QEMU_FUZZ_ARGS", args, 1);
        g_free(args);
    } else {
        g_assert_nonnull(config->args);
        g_setenv("QEMU_FUZZ_ARGS", config->args, 1);
    }
    g_setenv("QEMU_FUZZ_OBJECTS", config->objects, 1);
    return generic_fuzz_cmdline(t);
}

static void register_generic_fuzz_targets(void)
{
    fuzz_add_target(&(FuzzTarget){
            .name = "generic-fuzz",
            .description = "Fuzz based on any qemu command-line args. ",
            .get_init_cmdline = generic_fuzz_cmdline,
            .pre_fuzz = generic_pre_fuzz,
            .fuzz = generic_fuzz,
            .crossover = generic_fuzz_crossover
    });

    for (int i = 0; i < ARRAY_SIZE(predefined_configs); i++) {
        const generic_fuzz_config *config = predefined_configs + i;
        fuzz_add_target(&(FuzzTarget){
                .name = g_strconcat("generic-fuzz-", config->name, NULL),
                .description = "Predefined generic-fuzz config.",
                .get_init_cmdline = generic_fuzz_predefined_config_cmdline,
                .pre_fuzz = generic_pre_fuzz,
                .fuzz = generic_fuzz,
                .crossover = generic_fuzz_crossover,
                .opaque = (void *)config
        });
    }
}

fuzz_target_init(register_generic_fuzz_targets);