xref: /openbmc/qemu/tests/qtest/fuzz/generic_fuzz.c (revision 835fde4a)
1 /*
2  * Generic Virtual-Device Fuzzing Target
3  *
4  * Copyright Red Hat Inc., 2020
5  *
6  * Authors:
7  *  Alexander Bulekov   <alxndr@bu.edu>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or later.
10  * See the COPYING file in the top-level directory.
11  */
12 
13 #include "qemu/osdep.h"
14 
15 #include <wordexp.h>
16 
17 #include "hw/core/cpu.h"
18 #include "tests/qtest/libqos/libqtest.h"
19 #include "tests/qtest/libqos/pci-pc.h"
20 #include "fuzz.h"
21 #include "fork_fuzz.h"
22 #include "exec/address-spaces.h"
23 #include "string.h"
24 #include "exec/memory.h"
25 #include "exec/ramblock.h"
26 #include "exec/address-spaces.h"
27 #include "hw/qdev-core.h"
28 #include "hw/pci/pci.h"
29 #include "hw/boards.h"
30 #include "generic_fuzz_configs.h"
31 #include "hw/mem/sparse-mem.h"
32 
33 /*
34  * SEPARATOR is used to separate "operations" in the fuzz input
35  */
36 #define SEPARATOR "FUZZ"
37 
38 enum cmds {
39     OP_IN,
40     OP_OUT,
41     OP_READ,
42     OP_WRITE,
43     OP_PCI_READ,
44     OP_PCI_WRITE,
45     OP_DISABLE_PCI,
46     OP_ADD_DMA_PATTERN,
47     OP_CLEAR_DMA_PATTERNS,
48     OP_CLOCK_STEP,
49 };
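/*
 * Editorial note (illustrative, not part of the original source): in
 * generic_fuzz() the first byte of each command is reduced modulo the number
 * of entries in the ops[] table, so every byte value selects some handler.
 * With the ten commands above, a command starting with byte 0x0c
 * (12 % 10 == 2) dispatches to op_read, and one starting with 0x11
 * (17 % 10 == 7) dispatches to op_add_dma_pattern.
 */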
50 
51 #define DEFAULT_TIMEOUT_US 100000
52 #define USEC_IN_SEC 1000000
53 
54 #define MAX_DMA_FILL_SIZE 0x10000
55 
56 #define PCI_HOST_BRIDGE_CFG 0xcf8
57 #define PCI_HOST_BRIDGE_DATA 0xcfc
58 
59 typedef struct {
60     ram_addr_t addr;
61     ram_addr_t size; /* The number of bytes until the end of the I/O region */
62 } address_range;
63 
64 static useconds_t timeout = DEFAULT_TIMEOUT_US;
65 
66 static bool qtest_log_enabled;
67 
68 MemoryRegion *sparse_mem_mr;
69 
70 /*
71  * A pattern used to populate a DMA region or perform a memwrite. This is
72  * useful for e.g. populating tables of unique addresses.
73  * Example {.index = 1, .stride = 2, .len = 3, .data = "\x00\x01\x02"}
74  * Renders as: 00 01 02   00 03 02   00 05 02   00 07 02 ...
75  */
76 typedef struct {
77     uint8_t index;      /* Index of a byte to increment by stride */
78     uint8_t stride;     /* Increment each index'th byte by this amount */
79     size_t len;
80     const uint8_t *data;
81 } pattern;
82 
83 /* Avoid filling the same DMA region between MMIO/PIO commands? */
84 static bool avoid_double_fetches;
85 
86 static QTestState *qts_global; /* Need a global for the DMA callback */
87 
88 /*
89  * List of memory regions that are children of QOM objects specified by the
90  * user for fuzzing.
91  */
92 static GHashTable *fuzzable_memoryregions;
93 static GPtrArray *fuzzable_pci_devices;
94 
95 struct get_io_cb_info {
96     int index;
97     int found;
98     address_range result;
99 };
100 
101 static int get_io_address_cb(Int128 start, Int128 size,
102                           const MemoryRegion *mr, void *opaque) {
103     struct get_io_cb_info *info = opaque;
104     if (g_hash_table_lookup(fuzzable_memoryregions, mr)) {
105         if (info->index == 0) {
106             info->result.addr = (ram_addr_t)start;
107             info->result.size = (ram_addr_t)size;
108             info->found = 1;
109             return 1;
110         }
111         info->index--;
112     }
113     return 0;
114 }
115 
116 /*
117  * List of dma regions populated since the last fuzzing command. Used to ensure
118  * that we only write to each DMA address once, to avoid race conditions when
119  * building reproducers.
120  */
121 static GArray *dma_regions;
122 
123 static GArray *dma_patterns;
124 static int dma_pattern_index;
125 static bool pci_disabled;
126 
127 /*
128  * Allocate a block of memory and populate it with a pattern.
129  */
130 static void *pattern_alloc(pattern p, size_t len)
131 {
132     int i;
133     uint8_t *buf = g_malloc(len);
134     uint8_t sum = 0;
135 
136     for (i = 0; i < len; ++i) {
137         buf[i] = p.data[i % p.len];
138         if ((i % p.len) == p.index) {
139             buf[i] += sum;
140             sum += p.stride;
141         }
142     }
143     return buf;
144 }
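/*
 * Illustrative usage sketch (editorial addition, mirroring the pattern
 * example documented above; not part of the original source):
 *
 *   pattern p = { .index = 1, .stride = 2, .len = 3,
 *                 .data = (const uint8_t *)"\x00\x01\x02" };
 *   uint8_t *buf = pattern_alloc(p, 12);
 *   // buf now holds: 00 01 02  00 03 02  00 05 02  00 07 02
 *   g_free(buf);
 */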
145 
146 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
147 {
148     unsigned access_size_max = mr->ops->valid.max_access_size;
149 
150     /*
151      * Regions are assumed to support 1-4 byte accesses unless
152      * otherwise specified.
153      */
154     if (access_size_max == 0) {
155         access_size_max = 4;
156     }
157 
158     /* Bound the maximum access by the alignment of the address.  */
159     if (!mr->ops->impl.unaligned) {
160         unsigned align_size_max = addr & -addr;
161         if (align_size_max != 0 && align_size_max < access_size_max) {
162             access_size_max = align_size_max;
163         }
164     }
165 
166     /* Don't attempt accesses larger than the maximum.  */
167     if (l > access_size_max) {
168         l = access_size_max;
169     }
170     l = pow2floor(l);
171 
172     return l;
173 }
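/*
 * Worked example (editorial, illustrative only): for a region whose
 * valid.max_access_size is 0 (treated as 4) and which does not allow
 * unaligned impl accesses, memory_access_size(mr, 8, 0x1006) proceeds as
 * follows: the address alignment (0x1006 & -0x1006 == 2) lowers the bound
 * from 4 to 2, the requested length 8 is clamped to 2, and pow2floor(2)
 * leaves it at 2. fuzz_dma_read_cb() then steps over such a non-RAM chunk
 * two bytes at a time.
 */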
174 
175 /*
176  * Call-back for functions that perform DMA reads from guest memory. Confirm
177  * that the region has not already been populated since the last loop in
178  * generic_fuzz(), avoiding potential race conditions, which we don't have
179  * a good way of reproducing right now.
180  */
181 void fuzz_dma_read_cb(size_t addr, size_t len, MemoryRegion *mr)
182 {
183     /* Are we in the generic-fuzzer or are we using another fuzz-target? */
184     if (!qts_global) {
185         return;
186     }
187 
188     /*
189      * Return immediately if:
190      * - We have no DMA patterns defined
191      * - The length of the DMA read request is zero
192      * - The DMA read is hitting an MR other than the machine's main RAM
193      *   or the sparse-memory region used to back high DMA addresses
194      */
195     if (dma_patterns->len == 0
196         || len == 0
197         || (mr != current_machine->ram && mr != sparse_mem_mr)) {
198         return;
199     }
200 
201     /*
202      * If we overlap with any existing dma_regions, split the range and only
203      * populate the non-overlapping parts.
204      */
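    /*
     * Worked example (editorial, illustrative only): with an existing
     * dma_region of {addr = 0x1000, size = 0x100}, a new read of
     * addr = 0xf80, len = 0x200 overlaps it. When avoid_double_fetches is
     * set, the two recursive calls below populate only [0xf80, 0x1000) and
     * [0x1100, 0x1180), leaving the already-filled range untouched.
     */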
205     address_range region;
206     bool double_fetch = false;
207     for (int i = 0;
208          i < dma_regions->len && (avoid_double_fetches || qtest_log_enabled);
209          ++i) {
210         region = g_array_index(dma_regions, address_range, i);
211         if (addr < region.addr + region.size && addr + len > region.addr) {
212             double_fetch = true;
213             if (addr < region.addr
214                 && avoid_double_fetches) {
215                 fuzz_dma_read_cb(addr, region.addr - addr, mr);
216             }
217             if (addr + len > region.addr + region.size
218                 && avoid_double_fetches) {
219                 fuzz_dma_read_cb(region.addr + region.size,
220                         addr + len - (region.addr + region.size), mr);
221             }
222             return;
223         }
224     }
225 
226     /* Cap the length of the DMA access to something reasonable */
227     len = MIN(len, MAX_DMA_FILL_SIZE);
228 
229     address_range ar = {addr, len};
230     g_array_append_val(dma_regions, ar);
231     pattern p = g_array_index(dma_patterns, pattern, dma_pattern_index);
232     void *buf_base = pattern_alloc(p, ar.size);
233     void *buf = buf_base;
234     hwaddr l, addr1;
235     MemoryRegion *mr1;
236     while (len > 0) {
237         l = len;
238         mr1 = address_space_translate(first_cpu->as,
239                                       addr, &addr1, &l, true,
240                                       MEMTXATTRS_UNSPECIFIED);
241 
242         if (!(memory_region_is_ram(mr1) ||
243               memory_region_is_romd(mr1)) && mr1 != sparse_mem_mr) {
244             l = memory_access_size(mr1, l, addr1);
245         } else {
246             /* ROM/RAM case */
247             if (qtest_log_enabled) {
248                 /*
249                  * With QTEST_LOG, use a normal, slow QTest memwrite. Prefix
250                  * the log that will be written by qtest.c with a DMA tag, so
251                  * we can reorder the resulting QTest trace so the DMA fills
252                  * precede the last PIO/MMIO command.
253                  */
254                 fprintf(stderr, "[DMA] ");
255                 if (double_fetch) {
256                     fprintf(stderr, "[DOUBLE-FETCH] ");
257                 }
258                 fflush(stderr);
259             }
260             qtest_memwrite(qts_global, addr, buf, l);
261         }
262         len -= l;
263         buf += l;
264         addr += l;
265 
266     }
267     g_free(buf_base);
268 
269     /* Increment the index of the pattern for the next DMA access */
270     dma_pattern_index = (dma_pattern_index + 1) % dma_patterns->len;
271 }
272 
273 /*
274  * Here we want to convert a fuzzer-provided [io-region-index, offset] into
275  * a physical address. To do this, we iterate over the FlatView of the given
276  * I/O space and check whether each range belongs to one of the matched
277  * MemoryRegions. We return the absolute address of the offset within the
278  * index'th such region, along with the distance from that address to the
279  * end of the memory region.
280  */
281 static bool get_io_address(address_range *result, AddressSpace *as,
282                             uint8_t index,
283                             uint32_t offset) {
284     FlatView *view;
285     view = as->current_map;
286     g_assert(view);
287     struct get_io_cb_info cb_info = {};
288 
289     cb_info.index = index;
290 
291     /*
292      * Loop around the FlatView until we match "index" number of
293      * fuzzable_memoryregions, or until we know that there are no matching
294      * memory_regions.
295      */
296     do {
297         flatview_for_each_range(view, get_io_address_cb, &cb_info);
298     } while (cb_info.index != index && !cb_info.found);
299 
300     *result = cb_info.result;
301     if (result->size) {
302         offset = offset % result->size;
303         result->addr += offset;
304         result->size -= offset;
305     }
306     return cb_info.found;
307 }
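/*
 * Worked example (editorial, illustrative only): if "index" selects a matched
 * MemoryRegion mapped at 0xfebf0000 with size 0x1000 and the fuzzer supplies
 * offset 0x1234, the offset wraps to 0x234 and *result becomes
 * { .addr = 0xfebf0234, .size = 0x1000 - 0x234 }.
 */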
308 
309 static bool get_pio_address(address_range *result,
310                             uint8_t index, uint16_t offset)
311 {
312     /*
313      * PIO BARs can be set past the maximum port address (0xFFFF). Thus, result
314      * can contain an addr that extends past the PIO space. When we pass this
315      * address to qtest_in/qtest_out, it is cast to a uint16_t, so we might end
316      * up fuzzing a completely different MemoryRegion/Device. Therefore, check
317      * that the address here is within the PIO space limits.
318      */
319     bool found = get_io_address(result, &address_space_io, index, offset);
320     return result->addr <= 0xFFFF ? found : false;
321 }
322 
323 static bool get_mmio_address(address_range *result,
324                              uint8_t index, uint32_t offset)
325 {
326     return get_io_address(result, &address_space_memory, index, offset);
327 }
328 
329 static void op_in(QTestState *s, const unsigned char * data, size_t len)
330 {
331     enum Sizes {Byte, Word, Long, end_sizes};
332     struct {
333         uint8_t size;
334         uint8_t base;
335         uint16_t offset;
336     } a;
337     address_range abs;
338 
339     if (len < sizeof(a)) {
340         return;
341     }
342     memcpy(&a, data, sizeof(a));
343     if (get_pio_address(&abs, a.base, a.offset) == 0) {
344         return;
345     }
346 
347     switch (a.size %= end_sizes) {
348     case Byte:
349         qtest_inb(s, abs.addr);
350         break;
351     case Word:
352         if (abs.size >= 2) {
353             qtest_inw(s, abs.addr);
354         }
355         break;
356     case Long:
357         if (abs.size >= 4) {
358             qtest_inl(s, abs.addr);
359         }
360         break;
361     }
362 }
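/*
 * Editorial example of how op_in decodes its operands (illustrative only;
 * the concrete values assume a little-endian host, since the struct is
 * filled with memcpy): given the four payload bytes 02 00 10 00, a.size is 2
 * (Long), a.base is 0, and a.offset is 0x0010, so a 4-byte inl() is issued at
 * offset 0x10 into the first (index 0) matched port-I/O region, provided at
 * least four bytes remain before the end of that region.
 */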
363 
364 static void op_out(QTestState *s, const unsigned char * data, size_t len)
365 {
366     enum Sizes {Byte, Word, Long, end_sizes};
367     struct {
368         uint8_t size;
369         uint8_t base;
370         uint16_t offset;
371         uint32_t value;
372     } a;
373     address_range abs;
374 
375     if (len < sizeof(a)) {
376         return;
377     }
378     memcpy(&a, data, sizeof(a));
379 
380     if (get_pio_address(&abs, a.base, a.offset) == 0) {
381         return;
382     }
383 
384     switch (a.size %= end_sizes) {
385     case Byte:
386         qtest_outb(s, abs.addr, a.value & 0xFF);
387         break;
388     case Word:
389         if (abs.size >= 2) {
390             qtest_outw(s, abs.addr, a.value & 0xFFFF);
391         }
392         break;
393     case Long:
394         if (abs.size >= 4) {
395             qtest_outl(s, abs.addr, a.value);
396         }
397         break;
398     }
399 }
400 
401 static void op_read(QTestState *s, const unsigned char * data, size_t len)
402 {
403     enum Sizes {Byte, Word, Long, Quad, end_sizes};
404     struct {
405         uint8_t size;
406         uint8_t base;
407         uint32_t offset;
408     } a;
409     address_range abs;
410 
411     if (len < sizeof(a)) {
412         return;
413     }
414     memcpy(&a, data, sizeof(a));
415 
416     if (get_mmio_address(&abs, a.base, a.offset) == 0) {
417         return;
418     }
419 
420     switch (a.size %= end_sizes) {
421     case Byte:
422         qtest_readb(s, abs.addr);
423         break;
424     case Word:
425         if (abs.size >= 2) {
426             qtest_readw(s, abs.addr);
427         }
428         break;
429     case Long:
430         if (abs.size >= 4) {
431             qtest_readl(s, abs.addr);
432         }
433         break;
434     case Quad:
435         if (abs.size >= 8) {
436             qtest_readq(s, abs.addr);
437         }
438         break;
439     }
440 }
441 
442 static void op_write(QTestState *s, const unsigned char * data, size_t len)
443 {
444     enum Sizes {Byte, Word, Long, Quad, end_sizes};
445     struct {
446         uint8_t size;
447         uint8_t base;
448         uint32_t offset;
449         uint64_t value;
450     } a;
451     address_range abs;
452 
453     if (len < sizeof(a)) {
454         return;
455     }
456     memcpy(&a, data, sizeof(a));
457 
458     if (get_mmio_address(&abs, a.base, a.offset) == 0) {
459         return;
460     }
461 
462     switch (a.size %= end_sizes) {
463     case Byte:
464         qtest_writeb(s, abs.addr, a.value & 0xFF);
465         break;
466     case Word:
467         if (abs.size >= 2) {
468             qtest_writew(s, abs.addr, a.value & 0xFFFF);
469         }
470         break;
471     case Long:
472         if (abs.size >= 4) {
473             qtest_writel(s, abs.addr, a.value & 0xFFFFFFFF);
474         }
475         break;
476     case Quad:
477         if (abs.size >= 8) {
478             qtest_writeq(s, abs.addr, a.value);
479         }
480         break;
481     }
482 }
483 
484 static void op_pci_read(QTestState *s, const unsigned char * data, size_t len)
485 {
486     enum Sizes {Byte, Word, Long, end_sizes};
487     struct {
488         uint8_t size;
489         uint8_t base;
490         uint8_t offset;
491     } a;
492     if (len < sizeof(a) || fuzzable_pci_devices->len == 0 || pci_disabled) {
493         return;
494     }
495     memcpy(&a, data, sizeof(a));
496     PCIDevice *dev = g_ptr_array_index(fuzzable_pci_devices,
497                                   a.base % fuzzable_pci_devices->len);
498     int devfn = dev->devfn;
499     qtest_outl(s, PCI_HOST_BRIDGE_CFG, (1U << 31) | (devfn << 8) | a.offset);
500     switch (a.size %= end_sizes) {
501     case Byte:
502         qtest_inb(s, PCI_HOST_BRIDGE_DATA);
503         break;
504     case Word:
505         qtest_inw(s, PCI_HOST_BRIDGE_DATA);
506         break;
507     case Long:
508         qtest_inl(s, PCI_HOST_BRIDGE_DATA);
509         break;
510     }
511 }
512 
513 static void op_pci_write(QTestState *s, const unsigned char * data, size_t len)
514 {
515     enum Sizes {Byte, Word, Long, end_sizes};
516     struct {
517         uint8_t size;
518         uint8_t base;
519         uint8_t offset;
520         uint32_t value;
521     } a;
522     if (len < sizeof(a) || fuzzable_pci_devices->len == 0 || pci_disabled) {
523         return;
524     }
525     memcpy(&a, data, sizeof(a));
526     PCIDevice *dev = g_ptr_array_index(fuzzable_pci_devices,
527                                   a.base % fuzzable_pci_devices->len);
528     int devfn = dev->devfn;
529     qtest_outl(s, PCI_HOST_BRIDGE_CFG, (1U << 31) | (devfn << 8) | a.offset);
530     switch (a.size %= end_sizes) {
531     case Byte:
532         qtest_outb(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFF);
533         break;
534     case Word:
535         qtest_outw(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFFFF);
536         break;
537     case Long:
538         qtest_outl(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFFFFFFFF);
539         break;
540     }
541 }
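/*
 * Editorial note (illustrative only): op_pci_read/op_pci_write drive PCI
 * configuration space through the legacy 0xcf8/0xcfc mechanism on bus 0.
 * For a device at devfn 0x18 (slot 3, function 0) and offset 0x04, the
 * address written to PCI_HOST_BRIDGE_CFG is
 * (1U << 31) | (0x18 << 8) | 0x04 == 0x80001804, and the subsequent access
 * to PCI_HOST_BRIDGE_DATA reads or writes that config register.
 */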
542 
543 static void op_add_dma_pattern(QTestState *s,
544                                const unsigned char *data, size_t len)
545 {
546     struct {
547         /*
548          * index and stride can be used to increment the index-th byte of the
549          * pattern by the value stride, for each loop of the pattern.
550          */
551         uint8_t index;
552         uint8_t stride;
553     } a;
554 
555     if (len < sizeof(a) + 1) {
556         return;
557     }
558     memcpy(&a, data, sizeof(a));
559     pattern p = {a.index, a.stride, len - sizeof(a), data + sizeof(a)};
560     p.index = a.index % p.len;
561     g_array_append_val(dma_patterns, p);
562     return;
563 }
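/*
 * Editorial example (illustrative only): if the payload following the
 * OP_ADD_DMA_PATTERN opcode is the five bytes 01 02 00 01 02, then
 * a.index == 1, a.stride == 2, and the remaining three bytes 00 01 02 become
 * the pattern data, i.e. exactly the example pattern documented at the top
 * of this file.
 */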
564 
565 static void op_clear_dma_patterns(QTestState *s,
566                                   const unsigned char *data, size_t len)
567 {
568     g_array_set_size(dma_patterns, 0);
569     dma_pattern_index = 0;
570 }
571 
572 static void op_clock_step(QTestState *s, const unsigned char *data, size_t len)
573 {
574     qtest_clock_step_next(s);
575 }
576 
577 static void op_disable_pci(QTestState *s, const unsigned char *data, size_t len)
578 {
579     pci_disabled = true;
580 }
581 
582 static void handle_timeout(int sig)
583 {
584     if (qtest_log_enabled) {
585         fprintf(stderr, "[Timeout]\n");
586         fflush(stderr);
587     }
588 
589     /*
590      * If there is a crash, libfuzzer/ASAN forks a child to run an
591      * "llvm-symbolizer" process for printing out a pretty stacktrace. It
592      * communicates with this child using a pipe. If we time out and _Exit()
593      * while libfuzzer is still talking to the llvm-symbolizer child, we will
594      * be left with an orphan llvm-symbolizer process. Sometimes, this appears
595      * to lead to a deadlock in the forkserver. Use waitpid to check whether
596      * there are any waitable children. If so, return from the signal handler
597      * and let libfuzzer finish with the child and exit on its own.
598      */
599     if (waitpid(-1, NULL, WNOHANG) == 0) {
600         return;
601     }
602 
603     _Exit(0);
604 }
605 
606 /*
607  * Here, we interpret random bytes from the fuzzer as a sequence of commands.
608  * Some commands can be variable-width, so we use a separator, SEPARATOR, to
609  * mark the boundaries between commands in the fuzz input. Why use a
610  * separator, instead of just using the operations' lengths to identify
611  * operation boundaries?
612  *   1. This is a simple way to support variable-length operations
613  *   2. This adds "stability" to the input.
614  *      For example, take the input "AbBcgDefg", where there is no separator
615  *      and opcodes are capitalized.
616  *      Simply removing the first byte shifts every boundary, so entirely
617  *      different bytes are now decoded as opcodes:
618  *      BbcGdefg...
619  *      By adding a separator, we avoid this problem:
620  *      Ab SEP Bcg SEP Defg -> B SEP Bcg SEP Defg
621  *      Since B uses two additional bytes as operands, the first "B" will be
622  *      ignored. The fuzzer actively tries to reduce inputs, so such unused
623  *      bytes are likely to be pruned, eventually.
624  *
625  *  SEPARATOR is trivial for the fuzzer to discover when using ASan. Optionally,
626  *  SEPARATOR can be manually specified as a dictionary value (see libfuzzer's
627  *  -dict), though this should not be necessary.
628  *
629  * As a result, the stream of bytes is converted into a sequence of commands.
630  * In a simplified example where SEPARATOR is 0xFF:
631  * 00 01 02 FF 03 04 05 06 FF 01 FF ...
632  * becomes this sequence of commands:
633  * 00 01 02    -> op00 (0102)   -> in (0102, 2)
634  * 03 04 05 06 -> op03 (040506) -> write (040506, 3)
635  * 01          -> op01 (-,0)    -> out (-,0)
636  * ...
637  *
638  * Note here that it is the job of the individual opcode functions to check
639  * that enough data was provided. E.g. for the last command, out (-,0), op_out
640  * needs to notice that there is not enough data to select an address/value
641  * for the operation, and simply return.
642  */
643 static void generic_fuzz(QTestState *s, const unsigned char *Data, size_t Size)
644 {
645     void (*ops[]) (QTestState *s, const unsigned char* , size_t) = {
646         [OP_IN]                 = op_in,
647         [OP_OUT]                = op_out,
648         [OP_READ]               = op_read,
649         [OP_WRITE]              = op_write,
650         [OP_PCI_READ]           = op_pci_read,
651         [OP_PCI_WRITE]          = op_pci_write,
652         [OP_DISABLE_PCI]        = op_disable_pci,
653         [OP_ADD_DMA_PATTERN]    = op_add_dma_pattern,
654         [OP_CLEAR_DMA_PATTERNS] = op_clear_dma_patterns,
655         [OP_CLOCK_STEP]         = op_clock_step,
656     };
657     const unsigned char *cmd = Data;
658     const unsigned char *nextcmd;
659     size_t cmd_len;
660     uint8_t op;
661 
662     if (fork() == 0) {
663         /*
664          * Sometimes the fuzzer will find inputs that take quite a long time to
665          * process. Oftentimes, these inputs do not result in new coverage.
666          * Even if these inputs might be interesting, they can slow down the
667          * fuzzer overall. Set a timeout to avoid hurting performance too much.
668          */
669         if (timeout) {
670             struct sigaction sact;
671             struct itimerval timer;
672 
673             sigemptyset(&sact.sa_mask);
674             sact.sa_flags   = SA_NODEFER;
675             sact.sa_handler = handle_timeout;
676             sigaction(SIGALRM, &sact, NULL);
677 
678             memset(&timer, 0, sizeof(timer));
679             timer.it_value.tv_sec = timeout / USEC_IN_SEC;
680             timer.it_value.tv_usec = timeout % USEC_IN_SEC;
681             setitimer(ITIMER_REAL, &timer, NULL); /* SIGALRM on expiry */
682         }
683 
684         op_clear_dma_patterns(s, NULL, 0);
685         pci_disabled = false;
686 
687         while (cmd && Size) {
688             /* Get the length until the next command or end of input */
689             nextcmd = memmem(cmd, Size, SEPARATOR, strlen(SEPARATOR));
690             cmd_len = nextcmd ? nextcmd - cmd : Size;
691 
692             if (cmd_len > 0) {
693                 /* Interpret the first byte of the command as an opcode */
694                 op = *cmd % (sizeof(ops) / sizeof((ops)[0]));
695                 ops[op](s, cmd + 1, cmd_len - 1);
696 
697                 /* Run the main loop */
698                 flush_events(s);
699             }
700             /* Advance to the next command */
701             cmd = nextcmd ? nextcmd + sizeof(SEPARATOR) - 1 : nextcmd;
702             Size = Size - (cmd_len + sizeof(SEPARATOR) - 1);
703             g_array_set_size(dma_regions, 0);
704         }
705         _Exit(0);
706     } else {
707         flush_events(s);
708         wait(0);
709     }
710 }
711 
712 static void usage(void)
713 {
714     printf("Please specify the following environment variables:\n");
715     printf("QEMU_FUZZ_ARGS= the command line arguments passed to qemu\n");
716     printf("QEMU_FUZZ_OBJECTS= "
717             "a space separated list of QOM type names for objects to fuzz\n");
718     printf("Optionally: QEMU_AVOID_DOUBLE_FETCH= "
719             "Try to avoid racy DMA double fetch bugs? %d by default\n",
720             avoid_double_fetches);
721     printf("Optionally: QEMU_FUZZ_TIMEOUT= Specify a custom timeout (us). "
722             "0 to disable. %u by default\n", timeout);
723     exit(0);
724 }
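/*
 * Editorial example (hypothetical values, roughly following the QEMU fuzzing
 * documentation): a generic-fuzz run might be launched as
 *
 *   QEMU_FUZZ_ARGS='-M pc -nodefaults -netdev user,id=user0 \
 *                   -device virtio-net,netdev=user0' \
 *   QEMU_FUZZ_OBJECTS='virtio*' \
 *   ./qemu-fuzz-i386 --fuzz-target=generic-fuzz
 */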
725 
726 static int locate_fuzz_memory_regions(Object *child, void *opaque)
727 {
728     const char *name;
729     MemoryRegion *mr;
730     if (object_dynamic_cast(child, TYPE_MEMORY_REGION)) {
731         mr = MEMORY_REGION(child);
732         if ((memory_region_is_ram(mr) ||
733             memory_region_is_ram_device(mr) ||
734             memory_region_is_rom(mr)) == false) {
735             name = object_get_canonical_path_component(child);
736             /*
737              * We don't want duplicate pointers to the same MemoryRegion; the
738              * hash table keyed on the MR pointer keeps the set unique.
739              */
740             g_hash_table_insert(fuzzable_memoryregions, mr, (gpointer)true);
741         }
742     }
743     return 0;
744 }
745 
746 static int locate_fuzz_objects(Object *child, void *opaque)
747 {
748     char *pattern = opaque;
749     if (g_pattern_match_simple(pattern, object_get_typename(child))) {
750         /* Find and save ptrs to any child MemoryRegions */
751         object_child_foreach_recursive(child, locate_fuzz_memory_regions, NULL);
752 
753         /*
754          * We matched an object. If it's a PCI device, store a pointer to it so
755          * we can map BARs and fuzz its config space.
756          */
757         if (object_dynamic_cast(OBJECT(child), TYPE_PCI_DEVICE)) {
758             /*
759              * Don't want duplicate pointers to the same PCIDevice, so remove
760              * copies of the pointer, before adding it.
761              */
762             g_ptr_array_remove_fast(fuzzable_pci_devices, PCI_DEVICE(child));
763             g_ptr_array_add(fuzzable_pci_devices, PCI_DEVICE(child));
764         }
765     } else if (object_dynamic_cast(OBJECT(child), TYPE_MEMORY_REGION)) {
766         if (g_pattern_match_simple(pattern,
767             object_get_canonical_path_component(child))) {
768             MemoryRegion *mr;
769             mr = MEMORY_REGION(child);
770             if ((memory_region_is_ram(mr) ||
771                  memory_region_is_ram_device(mr) ||
772                  memory_region_is_rom(mr)) == false) {
773                 g_hash_table_insert(fuzzable_memoryregions, mr, (gpointer)true);
774             }
775         }
776     }
777     return 0;
778 }
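/*
 * Editorial note (illustrative only): QEMU_FUZZ_OBJECTS entries are glob
 * patterns matched with g_pattern_match_simple(). A pattern such as "virtio*"
 * matches any QOM object whose type name starts with "virtio"; the else-if
 * branch above additionally lets a pattern match a MemoryRegion directly by
 * the name component of its canonical path.
 */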
779 
780 
781 static void pci_enum(gpointer pcidev, gpointer bus)
782 {
783     PCIDevice *dev = pcidev;
784     QPCIDevice *qdev;
785     int i;
786 
787     qdev = qpci_device_find(bus, dev->devfn);
788     g_assert(qdev != NULL);
789     for (i = 0; i < 6; i++) {
790         if (dev->io_regions[i].size) {
791             qpci_iomap(qdev, i, NULL);
792         }
793     }
794     qpci_device_enable(qdev);
795     g_free(qdev);
796 }
797 
798 static void generic_pre_fuzz(QTestState *s)
799 {
800     GHashTableIter iter;
801     MemoryRegion *mr;
802     QPCIBus *pcibus;
803     char **result;
804 
805     if (!getenv("QEMU_FUZZ_OBJECTS")) {
806         usage();
807     }
808     if (getenv("QTEST_LOG")) {
809         qtest_log_enabled = 1;
810     }
811     if (getenv("QEMU_AVOID_DOUBLE_FETCH")) {
812         avoid_double_fetches = 1;
813     }
814     if (getenv("QEMU_FUZZ_TIMEOUT")) {
815         timeout = g_ascii_strtoll(getenv("QEMU_FUZZ_TIMEOUT"), NULL, 0);
816     }
817     qts_global = s;
818 
819     /*
820      * Create a special device that we can use to back DMA buffers at very
821      * high memory addresses
822      */
823     sparse_mem_mr = sparse_mem_init(0, UINT64_MAX);
824 
825     dma_regions = g_array_new(false, false, sizeof(address_range));
826     dma_patterns = g_array_new(false, false, sizeof(pattern));
827 
828     fuzzable_memoryregions = g_hash_table_new(NULL, NULL);
829     fuzzable_pci_devices   = g_ptr_array_new();
830 
831     result = g_strsplit(getenv("QEMU_FUZZ_OBJECTS"), " ", -1);
832     for (int i = 0; result[i] != NULL; i++) {
833         printf("Matching objects by name %s\n", result[i]);
834         object_child_foreach_recursive(qdev_get_machine(),
835                                     locate_fuzz_objects,
836                                     result[i]);
837     }
838     g_strfreev(result);
839     printf("This process will try to fuzz the following MemoryRegions:\n");
840 
841     g_hash_table_iter_init(&iter, fuzzable_memoryregions);
842     while (g_hash_table_iter_next(&iter, (gpointer)&mr, NULL)) {
843         printf("  * %s (size 0x%" PRIx64 ")\n",
844                object_get_canonical_path_component(&(mr->parent_obj)),
845                (uint64_t)mr->size);
846     }
847 
848     if (!g_hash_table_size(fuzzable_memoryregions)) {
849         printf("No fuzzable memory regions found...\n");
850         exit(1);
851     }
852 
853     pcibus = qpci_new_pc(s, NULL);
854     g_ptr_array_foreach(fuzzable_pci_devices, pci_enum, pcibus);
855     qpci_free_pc(pcibus);
856 
857     counter_shm_init();
858 }
859 
860 /*
861  * When libfuzzer gives us two inputs to combine, return a new input with the
862  * following structure:
863  *
864  * Input 1 (data1)
865  * SEPARATOR
866  * Clear out the DMA Patterns
867  * SEPARATOR
868  * Disable the pci_read/write instructions
869  * SEPARATOR
870  * Input 2 (data2)
871  *
872  * The idea is to collate the core behaviors of the two inputs.
873  * For example:
874  * Input 1: maps a device's BARs, sets up three DMA patterns, and triggers
875  *          device functionality A
876  * Input 2: maps a device's BARs, sets up one DMA pattern, and triggers device
877  *          functionality B
878  *
879  * This function attempts to produce an input that:
880  * Ouptut: maps a device's BARs, set up three DMA patterns, triggers
881  *          functionality A device, replaces the DMA patterns with a single
882  *          patten, and triggers device functionality B.
883  */
884 static size_t generic_fuzz_crossover(const uint8_t *data1, size_t size1, const
885                                      uint8_t *data2, size_t size2, uint8_t *out,
886                                      size_t max_out_size, unsigned int seed)
887 {
888     size_t copy_len = 0, size = 0;
889 
890     /* Check that we have enough space for data1 and at least part of data2 */
891     if (max_out_size <= size1 + strlen(SEPARATOR) * 3 + 2) {
892         return 0;
893     }
894 
895     /* Copy in the first input */
896     copy_len = size1;
897     memcpy(out + size, data1, copy_len);
898     size += copy_len;
899     max_out_size -= copy_len;
900 
901     /* Append a separator */
902     copy_len = strlen(SEPARATOR);
903     memcpy(out + size, SEPARATOR, copy_len);
904     size += copy_len;
905     max_out_size -= copy_len;
906 
907     /* Clear out the DMA Patterns */
908     copy_len = 1;
909     if (copy_len) {
910         out[size] = OP_CLEAR_DMA_PATTERNS;
911     }
912     size += copy_len;
913     max_out_size -= copy_len;
914 
915     /* Append a separator */
916     copy_len = strlen(SEPARATOR);
917     memcpy(out + size, SEPARATOR, copy_len);
918     size += copy_len;
919     max_out_size -= copy_len;
920 
921     /* Disable PCI ops. Assume data1 took care of setting up PCI */
922     copy_len = 1;
923     if (copy_len) {
924         out[size] = OP_DISABLE_PCI;
925     }
926     size += copy_len;
927     max_out_size -= copy_len;
928 
929     /* Append a separator */
930     copy_len = strlen(SEPARATOR);
931     memcpy(out + size, SEPARATOR, copy_len);
932     size += copy_len;
933     max_out_size -= copy_len;
934 
935     /* Copy over the second input */
936     copy_len = MIN(size2, max_out_size);
937     memcpy(out + size, data2, copy_len);
938     size += copy_len;
939     max_out_size -= copy_len;
940 
941     return size;
942 }
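/*
 * Editorial sketch (illustrative only) of the output layout produced above,
 * using the opcode values from the cmds enum (OP_CLEAR_DMA_PATTERNS == 8,
 * OP_DISABLE_PCI == 6):
 *
 *   <data1> "FUZZ" 08 "FUZZ" 06 "FUZZ" <data2, truncated to fit max_out_size>
 */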
943 
944 
945 static GString *generic_fuzz_cmdline(FuzzTarget *t)
946 {
947     GString *cmd_line = g_string_new(TARGET_NAME);
948     if (!getenv("QEMU_FUZZ_ARGS")) {
949         usage();
950     }
951     g_string_append_printf(cmd_line,
952                            " -display none -machine accel=qtest -m 512M %s ",
953                            getenv("QEMU_FUZZ_ARGS"));
954     return cmd_line;
955 }
956 
957 static GString *generic_fuzz_predefined_config_cmdline(FuzzTarget *t)
958 {
959     gchar *args;
960     const generic_fuzz_config *config;
961     g_assert(t->opaque);
962 
963     config = t->opaque;
964     setenv("QEMU_AVOID_DOUBLE_FETCH", "1", 1);
965     if (config->argfunc) {
966         args = config->argfunc();
967         setenv("QEMU_FUZZ_ARGS", args, 1);
968         g_free(args);
969     } else {
970         g_assert_nonnull(config->args);
971         setenv("QEMU_FUZZ_ARGS", config->args, 1);
972     }
973     setenv("QEMU_FUZZ_OBJECTS", config->objects, 1);
974     return generic_fuzz_cmdline(t);
975 }
976 
977 static void register_generic_fuzz_targets(void)
978 {
979     fuzz_add_target(&(FuzzTarget){
980             .name = "generic-fuzz",
981             .description = "Fuzz based on any qemu command-line args. ",
982             .get_init_cmdline = generic_fuzz_cmdline,
983             .pre_fuzz = generic_pre_fuzz,
984             .fuzz = generic_fuzz,
985             .crossover = generic_fuzz_crossover
986     });
987 
988     GString *name;
989     const generic_fuzz_config *config;
990 
991     for (int i = 0;
992          i < sizeof(predefined_configs) / sizeof(generic_fuzz_config);
993          i++) {
994         config = predefined_configs + i;
995         name = g_string_new("generic-fuzz");
996         g_string_append_printf(name, "-%s", config->name);
997         fuzz_add_target(&(FuzzTarget){
998                 .name = name->str,
999                 .description = "Predefined generic-fuzz config.",
1000                 .get_init_cmdline = generic_fuzz_predefined_config_cmdline,
1001                 .pre_fuzz = generic_pre_fuzz,
1002                 .fuzz = generic_fuzz,
1003                 .crossover = generic_fuzz_crossover,
1004                 .opaque = (void *)config
1005         });
1006     }
1007 }
1008 
1009 fuzz_target_init(register_generic_fuzz_targets);
1010