xref: /openbmc/qemu/tests/qtest/fuzz/generic_fuzz.c (revision 26ed501b)
/*
 * Generic Virtual-Device Fuzzing Target
 *
 * Copyright Red Hat Inc., 2020
 *
 * Authors:
 *  Alexander Bulekov   <alxndr@bu.edu>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include <wordexp.h>

#include "hw/core/cpu.h"
#include "tests/qtest/libqtest.h"
#include "tests/qtest/libqos/pci-pc.h"
#include "fuzz.h"
#include "fork_fuzz.h"
#include "string.h"
#include "exec/memory.h"
#include "exec/ramblock.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "hw/boards.h"
#include "generic_fuzz_configs.h"
#include "hw/mem/sparse-mem.h"

/*
 * SEPARATOR is used to separate "operations" in the fuzz input
 */
#define SEPARATOR "FUZZ"

enum cmds {
    OP_IN,
    OP_OUT,
    OP_READ,
    OP_WRITE,
    OP_PCI_READ,
    OP_PCI_WRITE,
    OP_DISABLE_PCI,
    OP_ADD_DMA_PATTERN,
    OP_CLEAR_DMA_PATTERNS,
    OP_CLOCK_STEP,
};

#define DEFAULT_TIMEOUT_US 100000
#define USEC_IN_SEC 1000000

#define MAX_DMA_FILL_SIZE 0x10000

#define PCI_HOST_BRIDGE_CFG 0xcf8
#define PCI_HOST_BRIDGE_DATA 0xcfc

typedef struct {
    ram_addr_t addr;
    ram_addr_t size; /* The number of bytes until the end of the I/O region */
} address_range;

static useconds_t timeout = DEFAULT_TIMEOUT_US;

static bool qtest_log_enabled;

MemoryRegion *sparse_mem_mr;

/*
 * A pattern used to populate a DMA region or perform a memwrite. This is
 * useful for e.g. populating tables of unique addresses.
 * Example {.index = 1; .stride = 2; .len = 3; .data = "\x00\x01\x02"}
 * Renders as: 00 01 02   00 03 02   00 05 02   00 07 02 ...
 */
typedef struct {
    uint8_t index;      /* Index of a byte to increment by stride */
    uint8_t stride;     /* Increment each index'th byte by this amount */
    size_t len;
    const uint8_t *data;
} pattern;

/* Avoid filling the same DMA region between MMIO/PIO commands? */
static bool avoid_double_fetches;

static QTestState *qts_global; /* Need a global for the DMA callback */

/*
 * List of memory regions that are children of QOM objects specified by the
 * user for fuzzing.
 */
static GHashTable *fuzzable_memoryregions;
static GPtrArray *fuzzable_pci_devices;

struct get_io_cb_info {
    int index;
    int found;
    address_range result;
};

static bool get_io_address_cb(Int128 start, Int128 size,
                              const MemoryRegion *mr,
                              hwaddr offset_in_region,
                              void *opaque)
{
    struct get_io_cb_info *info = opaque;
    if (g_hash_table_lookup(fuzzable_memoryregions, mr)) {
        if (info->index == 0) {
            info->result.addr = (ram_addr_t)start;
            info->result.size = (ram_addr_t)size;
            info->found = 1;
            return true;
        }
        info->index--;
    }
    return false;
}

/*
 * List of dma regions populated since the last fuzzing command. Used to ensure
 * that we only write to each DMA address once, to avoid race conditions when
 * building reproducers.
 */
static GArray *dma_regions;

static GArray *dma_patterns;
static int dma_pattern_index;
static bool pci_disabled;

/*
 * Allocate a block of memory and populate it with a pattern.
 */
static void *pattern_alloc(pattern p, size_t len)
{
    int i;
    uint8_t *buf = g_malloc(len);
    uint8_t sum = 0;

    for (i = 0; i < len; ++i) {
        buf[i] = p.data[i % p.len];
        if ((i % p.len) == p.index) {
            buf[i] += sum;
            sum += p.stride;
        }
    }
    return buf;
}

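/*
 * Illustrative sketch only (not used by the fuzzer; the helper name is ours):
 * expanding the example pattern from the comment above with pattern_alloc().
 * Byte 1 of each repetition of the pattern is bumped by the stride (2) on
 * every loop, yielding 00 01 02  00 03 02  00 05 02.
 */
static void G_GNUC_UNUSED pattern_alloc_example(void)
{
    static const uint8_t data[] = {0x00, 0x01, 0x02};
    pattern p = {.index = 1, .stride = 2, .len = sizeof(data), .data = data};
    uint8_t *buf = pattern_alloc(p, 9);

    g_assert(buf[1] == 0x01 && buf[4] == 0x03 && buf[7] == 0x05);
    g_free(buf);
}
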
static int fuzz_memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /*
     * Regions are assumed to support 1-4 byte accesses unless
     * otherwise specified.
     */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}

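/*
 * Worked example of the clamping above (illustrative only, never called):
 * addr & -addr isolates the lowest set bit of the address, i.e. its natural
 * alignment, so an 8-byte request at an address that is only 2-byte aligned
 * ends up clamped to a 2-byte access.
 */
static void G_GNUC_UNUSED fuzz_access_size_example(void)
{
    hwaddr addr = 0x1006;                   /* aligned only to 2 bytes */
    unsigned align_size_max = addr & -addr;

    g_assert(align_size_max == 2);
    g_assert(pow2floor(MIN(8, align_size_max)) == 2);
}
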
/*
 * Callback for functions that perform DMA reads from guest memory. Confirm
 * that the region has not already been populated since the last loop in
 * generic_fuzz(), avoiding potential race conditions, which we don't have
 * a good way of reproducing right now.
 */
void fuzz_dma_read_cb(size_t addr, size_t len, MemoryRegion *mr)
{
    /* Are we in the generic-fuzzer or are we using another fuzz-target? */
    if (!qts_global) {
        return;
    }

    /*
     * Return immediately if:
     * - We have no DMA patterns defined
     * - The length of the DMA read request is zero
     * - The DMA read is hitting an MR other than the machine's main RAM
     * - The DMA request hits past the bounds of our RAM
     */
    if (dma_patterns->len == 0
        || len == 0
        || (mr != current_machine->ram && mr != sparse_mem_mr)) {
        return;
    }

    /*
     * If we overlap with any existing dma_regions, split the range and only
     * populate the non-overlapping parts.
     */
    address_range region;
    bool double_fetch = false;
    for (int i = 0;
         i < dma_regions->len && (avoid_double_fetches || qtest_log_enabled);
         ++i) {
        region = g_array_index(dma_regions, address_range, i);
        if (addr < region.addr + region.size && addr + len > region.addr) {
            double_fetch = true;
            if (addr < region.addr
                && avoid_double_fetches) {
                fuzz_dma_read_cb(addr, region.addr - addr, mr);
            }
            if (addr + len > region.addr + region.size
                && avoid_double_fetches) {
                fuzz_dma_read_cb(region.addr + region.size,
                        addr + len - (region.addr + region.size), mr);
            }
            return;
        }
    }

    /* Cap the length of the DMA access to something reasonable */
    len = MIN(len, MAX_DMA_FILL_SIZE);

    address_range ar = {addr, len};
    g_array_append_val(dma_regions, ar);
    pattern p = g_array_index(dma_patterns, pattern, dma_pattern_index);
    void *buf_base = pattern_alloc(p, ar.size);
    void *buf = buf_base;
    hwaddr l, addr1;
    MemoryRegion *mr1;
    while (len > 0) {
        l = len;
        mr1 = address_space_translate(first_cpu->as,
                                      addr, &addr1, &l, true,
                                      MEMTXATTRS_UNSPECIFIED);

        /*
         * If mr1 isn't RAM, address_space_translate doesn't update l. Use
         * fuzz_memory_access_size to identify the number of bytes that it
         * is safe to write without accidentally writing to another
         * MemoryRegion.
         */
        if (!memory_region_is_ram(mr1)) {
            l = fuzz_memory_access_size(mr1, l, addr1);
        }
        if (memory_region_is_ram(mr1) ||
            memory_region_is_romd(mr1) ||
            mr1 == sparse_mem_mr) {
            /* ROM/RAM case */
            if (qtest_log_enabled) {
                /*
                 * With QTEST_LOG, use a normal, slow QTest memwrite. Prefix
                 * the log that will be written by qtest.c with a DMA tag, so
                 * we can reorder the resulting QTest trace so the DMA fills
                 * precede the last PIO/MMIO command.
                 */
                fprintf(stderr, "[DMA] ");
                if (double_fetch) {
                    fprintf(stderr, "[DOUBLE-FETCH] ");
                }
                fflush(stderr);
            }
            qtest_memwrite(qts_global, addr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;

    }
    g_free(buf_base);

    /* Increment the index of the pattern for the next DMA access */
    dma_pattern_index = (dma_pattern_index + 1) % dma_patterns->len;
}

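/*
 * Worked example of the overlap splitting above (addresses are made up): with
 * avoid_double_fetches set and dma_regions already containing
 * [0x1000, 0x1100), a later read of [0x0f00, 0x1200) is flagged as a
 * double-fetch and split, so only [0x0f00, 0x1000) and [0x1100, 0x1200) are
 * populated. The overlapping middle keeps its original fill, which keeps
 * reproducers deterministic.
 */
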
/*
 * Here we want to convert a fuzzer-provided [io-region-index, offset] to
 * a physical address. To do this, we iterate over all of the matched
 * MemoryRegions and check whether each region exists within the particular
 * io space. Return the absolute address of the offset within the index'th
 * region that is a subregion of the io_space and the distance until the end
 * of the memory region.
 */
static bool get_io_address(address_range *result, AddressSpace *as,
                            uint8_t index,
                            uint32_t offset) {
    FlatView *view;
    view = as->current_map;
    g_assert(view);
    struct get_io_cb_info cb_info = {};

    cb_info.index = index;

    /*
     * Loop around the FlatView until we match "index" number of
     * fuzzable_memoryregions, or until we know that there are no matching
     * memory_regions.
     */
    do {
        flatview_for_each_range(view, get_io_address_cb, &cb_info);
    } while (cb_info.index != index && !cb_info.found);

    *result = cb_info.result;
    if (result->size) {
        offset = offset % result->size;
        result->addr += offset;
        result->size -= offset;
    }
    return cb_info.found;
}

static bool get_pio_address(address_range *result,
                            uint8_t index, uint16_t offset)
{
    /*
     * PIO BARs can be set past the maximum port address (0xFFFF). Thus, result
     * can contain an addr that extends past the PIO space. When we pass this
     * address to qtest_in/qtest_out, it is cast to a uint16_t, so we might end
     * up fuzzing a completely different MemoryRegion/Device. Therefore, check
     * that the address here is within the PIO space limits.
     */
    bool found = get_io_address(result, &address_space_io, index, offset);
    return result->addr <= 0xFFFF ? found : false;
}

static bool get_mmio_address(address_range *result,
                             uint8_t index, uint32_t offset)
{
    return get_io_address(result, &address_space_memory, index, offset);
}

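/*
 * Worked example of the mapping above (addresses are made up): if the matched
 * regions in address_space_memory are flattened as [0xfebc0000, +0x20000) and
 * [0xfebf0000, +0x1000), then index 1 with offset 0x1800 resolves to
 * addr = 0xfebf0800 (the offset is taken modulo the 0x1000 region size) and
 * size = 0x800, the number of bytes left until the end of that region.
 */
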
static void op_in(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint16_t offset;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));
    if (get_pio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_inb(s, abs.addr);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_inw(s, abs.addr);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_inl(s, abs.addr);
        }
        break;
    }
}

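/*
 * Sketch of how op_in above unpacks its operand bytes (illustrative only;
 * the raw bytes and the helper name are made up): byte 0 selects the access
 * size, byte 1 selects which matched I/O region to hit, and the next two
 * bytes form the offset into that region.
 */
static void G_GNUC_UNUSED op_in_layout_example(void)
{
    const unsigned char raw[] = {0x02, 0x01, 0x34, 0x12};
    struct {
        uint8_t size;
        uint8_t base;
        uint16_t offset;
    } a;

    memcpy(&a, raw, sizeof(a));
    g_assert(a.size == 0x02 && a.base == 0x01);
    /* On a little-endian host, a.offset is 0x1234. */
}
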
static void op_out(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint16_t offset;
        uint32_t value;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));

    if (get_pio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_outb(s, abs.addr, a.value & 0xFF);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_outw(s, abs.addr, a.value & 0xFFFF);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_outl(s, abs.addr, a.value);
        }
        break;
    }
}

static void op_read(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, Quad, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint32_t offset;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));

    if (get_mmio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_readb(s, abs.addr);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_readw(s, abs.addr);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_readl(s, abs.addr);
        }
        break;
    case Quad:
        if (abs.size >= 8) {
            qtest_readq(s, abs.addr);
        }
        break;
    }
}

static void op_write(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, Quad, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint32_t offset;
        uint64_t value;
    } a;
    address_range abs;

    if (len < sizeof(a)) {
        return;
    }
    memcpy(&a, data, sizeof(a));

    if (get_mmio_address(&abs, a.base, a.offset) == 0) {
        return;
    }

    switch (a.size %= end_sizes) {
    case Byte:
        qtest_writeb(s, abs.addr, a.value & 0xFF);
        break;
    case Word:
        if (abs.size >= 2) {
            qtest_writew(s, abs.addr, a.value & 0xFFFF);
        }
        break;
    case Long:
        if (abs.size >= 4) {
            qtest_writel(s, abs.addr, a.value & 0xFFFFFFFF);
        }
        break;
    case Quad:
        if (abs.size >= 8) {
            qtest_writeq(s, abs.addr, a.value);
        }
        break;
    }
}

static void op_pci_read(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint8_t offset;
    } a;
    if (len < sizeof(a) || fuzzable_pci_devices->len == 0 || pci_disabled) {
        return;
    }
    memcpy(&a, data, sizeof(a));
    PCIDevice *dev = g_ptr_array_index(fuzzable_pci_devices,
                                       a.base % fuzzable_pci_devices->len);
    int devfn = dev->devfn;
    qtest_outl(s, PCI_HOST_BRIDGE_CFG, (1U << 31) | (devfn << 8) | a.offset);
    switch (a.size %= end_sizes) {
    case Byte:
        qtest_inb(s, PCI_HOST_BRIDGE_DATA);
        break;
    case Word:
        qtest_inw(s, PCI_HOST_BRIDGE_DATA);
        break;
    case Long:
        qtest_inl(s, PCI_HOST_BRIDGE_DATA);
        break;
    }
}

static void op_pci_write(QTestState *s, const unsigned char *data, size_t len)
{
    enum Sizes {Byte, Word, Long, end_sizes};
    struct {
        uint8_t size;
        uint8_t base;
        uint8_t offset;
        uint32_t value;
    } a;
    if (len < sizeof(a) || fuzzable_pci_devices->len == 0 || pci_disabled) {
        return;
    }
    memcpy(&a, data, sizeof(a));
    PCIDevice *dev = g_ptr_array_index(fuzzable_pci_devices,
                                       a.base % fuzzable_pci_devices->len);
    int devfn = dev->devfn;
    qtest_outl(s, PCI_HOST_BRIDGE_CFG, (1U << 31) | (devfn << 8) | a.offset);
    switch (a.size %= end_sizes) {
    case Byte:
        qtest_outb(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFF);
        break;
    case Word:
        qtest_outw(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFFFF);
        break;
    case Long:
        qtest_outl(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFFFFFFFF);
        break;
    }
}

static void op_add_dma_pattern(QTestState *s,
                               const unsigned char *data, size_t len)
{
    struct {
        /*
         * index and stride can be used to increment the index-th byte of the
         * pattern by the value stride, for each loop of the pattern.
         */
        uint8_t index;
        uint8_t stride;
    } a;

    if (len < sizeof(a) + 1) {
        return;
    }
    memcpy(&a, data, sizeof(a));
    pattern p = {a.index, a.stride, len - sizeof(a), data + sizeof(a)};
    p.index = a.index % p.len;
    g_array_append_val(dma_patterns, p);
}

static void op_clear_dma_patterns(QTestState *s,
                                  const unsigned char *data, size_t len)
{
    g_array_set_size(dma_patterns, 0);
    dma_pattern_index = 0;
}

static void op_clock_step(QTestState *s, const unsigned char *data, size_t len)
{
    qtest_clock_step_next(s);
}

static void op_disable_pci(QTestState *s, const unsigned char *data, size_t len)
{
    pci_disabled = true;
}

static void handle_timeout(int sig)
{
    if (qtest_log_enabled) {
        fprintf(stderr, "[Timeout]\n");
        fflush(stderr);
    }

    /*
     * If there is a crash, libfuzzer/ASAN forks a child to run an
     * "llvm-symbolizer" process for printing out a pretty stacktrace. It
     * communicates with this child using a pipe. If we timeout+Exit while
     * libfuzzer is still communicating with the llvm-symbolizer child, we will
     * be left with an orphan llvm-symbolizer process. Sometimes, this appears
     * to lead to a deadlock in the forkserver. Use waitpid to check if there
     * are any waitable children. If so, exit out of the signal-handler, and
     * let libfuzzer finish communicating with the child and exit on its own.
     */
    if (waitpid(-1, NULL, WNOHANG) == 0) {
        return;
    }

    _Exit(0);
}

/*
 * Here, we interpret random bytes from the fuzzer as a sequence of commands.
 * Some commands can be variable-width, so we use a separator, SEPARATOR, to
 * mark the boundaries between commands in the fuzz input. Why use a
 * separator, instead of just using the operations' length to identify
 * operation boundaries?
 *   1. This is a simple way to support variable-length operations
 *   2. This adds "stability" to the input.
 *      For example, take the input "AbBcgDefg", where there is no separator
 *      and opcodes are capitalized.
 *      Simply by removing the first byte, we end up with a very different
 *      sequence:
 *      bBcgDefg...
 *      Every operation boundary shifts, and the remaining bytes are re-grouped
 *      into completely different commands.
 *      By adding a separator, we avoid this problem:
 *      Ab SEP Bcg SEP Defg -> b SEP Bcg SEP Defg
 *      Since B uses two additional bytes as operands, the lone "b" will be
 *      ignored. The fuzzer actively tries to reduce inputs, so such unused
 *      bytes are likely to be pruned, eventually.
 *
 * SEPARATOR is trivial for the fuzzer to discover when using ASan. Optionally,
 * SEPARATOR can be manually specified as a dictionary value (see libfuzzer's
 * -dict), though this should not be necessary.
 *
 * As a result, the stream of bytes is converted into a sequence of commands.
 * In a simplified example where SEPARATOR is 0xFF:
 * 00 01 02 FF 03 04 05 06 FF 01 FF ...
 * becomes this sequence of commands:
 * 00 01 02    -> op00 (0102)   -> in (0102, 2)
 * 03 04 05 06 -> op03 (040506) -> write (040506, 3)
 * 01          -> op01 (-,0)    -> out (-,0)
 * ...
 *
 * Note here that it is the job of the individual opcode functions to check
 * that enough data was provided. E.g. for the last command, out (-,0), op_out
 * needs to notice that no operand bytes were provided and simply return
 * without selecting an address/value for the operation.
 */
static void generic_fuzz(QTestState *s, const unsigned char *Data, size_t Size)
{
    void (*ops[]) (QTestState *s, const unsigned char *, size_t) = {
        [OP_IN]                 = op_in,
        [OP_OUT]                = op_out,
        [OP_READ]               = op_read,
        [OP_WRITE]              = op_write,
        [OP_PCI_READ]           = op_pci_read,
        [OP_PCI_WRITE]          = op_pci_write,
        [OP_DISABLE_PCI]        = op_disable_pci,
        [OP_ADD_DMA_PATTERN]    = op_add_dma_pattern,
        [OP_CLEAR_DMA_PATTERNS] = op_clear_dma_patterns,
        [OP_CLOCK_STEP]         = op_clock_step,
    };
    const unsigned char *cmd = Data;
    const unsigned char *nextcmd;
    size_t cmd_len;
    uint8_t op;

    if (fork() == 0) {
        struct sigaction sact;
        struct itimerval timer;
        sigset_t set;
        /*
         * Sometimes the fuzzer will find inputs that take quite a long time to
         * process. Oftentimes, these inputs do not result in new coverage.
         * Even if these inputs might be interesting, they can slow down the
         * fuzzer overall. Set a timeout for each command to avoid hurting
         * performance too much.
         */
        if (timeout) {

            sigemptyset(&sact.sa_mask);
            sact.sa_flags   = SA_NODEFER;
            sact.sa_handler = handle_timeout;
            sigaction(SIGALRM, &sact, NULL);

            sigemptyset(&set);
            sigaddset(&set, SIGALRM);
            pthread_sigmask(SIG_UNBLOCK, &set, NULL);

            memset(&timer, 0, sizeof(timer));
            timer.it_value.tv_sec = timeout / USEC_IN_SEC;
            timer.it_value.tv_usec = timeout % USEC_IN_SEC;
        }

        op_clear_dma_patterns(s, NULL, 0);
        pci_disabled = false;

        while (cmd && Size) {
            /* Reset the timeout, each time we run a new command */
            if (timeout) {
                setitimer(ITIMER_REAL, &timer, NULL);
            }

            /* Get the length until the next command or end of input */
            nextcmd = memmem(cmd, Size, SEPARATOR, strlen(SEPARATOR));
            cmd_len = nextcmd ? nextcmd - cmd : Size;

            if (cmd_len > 0) {
                /* Interpret the first byte of the command as an opcode */
                op = *cmd % (sizeof(ops) / sizeof((ops)[0]));
                ops[op](s, cmd + 1, cmd_len - 1);

                /* Run the main loop */
                flush_events(s);
            }
            /* Advance to the next command */
            cmd = nextcmd ? nextcmd + sizeof(SEPARATOR) - 1 : nextcmd;
            Size = Size - (cmd_len + sizeof(SEPARATOR) - 1);
            g_array_set_size(dma_regions, 0);
        }
        _Exit(0);
    } else {
        flush_events(s);
        wait(0);
    }
}

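/*
 * Illustrative sketch of a raw fuzz input (not referenced at runtime; the
 * example bytes are arbitrary): with SEPARATOR being "FUZZ", the buffer below
 * decodes into three commands. The first byte of each command is taken modulo
 * the number of ops, so 0x00 dispatches op_in, 0x08 op_clear_dma_patterns and
 * 0x09 op_clock_step. Commands that carry too few operand bytes for their op
 * are simply ignored by that op.
 */
static const unsigned char example_fuzz_input[] G_GNUC_UNUSED =
    "\x00\x01\x00\x10\x00" "FUZZ"   /* op_in: size, base and 16-bit offset */
    "\x08"                 "FUZZ"   /* op_clear_dma_patterns: no operands  */
    "\x09";                         /* op_clock_step: no operands          */
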
static void usage(void)
{
    printf("Please specify the following environment variables:\n");
    printf("QEMU_FUZZ_ARGS= the command line arguments passed to qemu\n");
    printf("QEMU_FUZZ_OBJECTS= "
            "a space separated list of QOM type names for objects to fuzz\n");
    printf("Optionally: QEMU_AVOID_DOUBLE_FETCH= "
            "Try to avoid racy DMA double fetch bugs? %d by default\n",
            avoid_double_fetches);
    printf("Optionally: QEMU_FUZZ_TIMEOUT= Specify a custom timeout (us). "
            "0 to disable. %d by default\n", timeout);
    exit(0);
}

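/*
 * Example invocation (the device and machine arguments below are only an
 * illustration; any QEMU arguments and matching QOM type names will do):
 *
 *   QEMU_FUZZ_ARGS='-machine q35 -device e1000e' \
 *   QEMU_FUZZ_OBJECTS='e1000e' \
 *   ./qemu-fuzz-i386 --fuzz-target=generic-fuzz
 */
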
static int locate_fuzz_memory_regions(Object *child, void *opaque)
{
    MemoryRegion *mr;
    if (object_dynamic_cast(child, TYPE_MEMORY_REGION)) {
        mr = MEMORY_REGION(child);
        if ((memory_region_is_ram(mr) ||
            memory_region_is_ram_device(mr) ||
            memory_region_is_rom(mr)) == false) {
            /*
             * We don't want duplicate pointers to the same MemoryRegion. The
             * hash table is keyed on the pointer itself, so inserting a
             * MemoryRegion that is already present leaves a single entry.
             */
            g_hash_table_insert(fuzzable_memoryregions, mr, (gpointer)true);
        }
    }
    return 0;
}

static int locate_fuzz_objects(Object *child, void *opaque)
{
    GString *type_name;
    GString *path_name;
    char *pattern = opaque;

    type_name = g_string_new(object_get_typename(child));
    g_string_ascii_down(type_name);
    if (g_pattern_match_simple(pattern, type_name->str)) {
        /* Find and save ptrs to any child MemoryRegions */
        object_child_foreach_recursive(child, locate_fuzz_memory_regions, NULL);

        /*
         * We matched an object. If it's a PCI device, store a pointer to it so
         * we can map BARs and fuzz its config space.
         */
        if (object_dynamic_cast(OBJECT(child), TYPE_PCI_DEVICE)) {
            /*
             * We don't want duplicate pointers to the same PCIDevice, so
             * remove copies of the pointer, before adding it.
             */
            g_ptr_array_remove_fast(fuzzable_pci_devices, PCI_DEVICE(child));
            g_ptr_array_add(fuzzable_pci_devices, PCI_DEVICE(child));
        }
    } else if (object_dynamic_cast(OBJECT(child), TYPE_MEMORY_REGION)) {
        path_name = g_string_new(object_get_canonical_path_component(child));
        g_string_ascii_down(path_name);
        if (g_pattern_match_simple(pattern, path_name->str)) {
            MemoryRegion *mr;
            mr = MEMORY_REGION(child);
            if ((memory_region_is_ram(mr) ||
                 memory_region_is_ram_device(mr) ||
                 memory_region_is_rom(mr)) == false) {
                g_hash_table_insert(fuzzable_memoryregions, mr, (gpointer)true);
            }
        }
        g_string_free(path_name, true);
    }
    g_string_free(type_name, true);
    return 0;
}


static void pci_enum(gpointer pcidev, gpointer bus)
{
    PCIDevice *dev = pcidev;
    QPCIDevice *qdev;
    int i;

    qdev = qpci_device_find(bus, dev->devfn);
    g_assert(qdev != NULL);
    for (i = 0; i < 6; i++) {
        if (dev->io_regions[i].size) {
            qpci_iomap(qdev, i, NULL);
        }
    }
    qpci_device_enable(qdev);
    g_free(qdev);
}

static void generic_pre_fuzz(QTestState *s)
{
    GHashTableIter iter;
    MemoryRegion *mr;
    QPCIBus *pcibus;
    char **result;
    GString *name_pattern;

    if (!getenv("QEMU_FUZZ_OBJECTS")) {
        usage();
    }
    if (getenv("QTEST_LOG")) {
        qtest_log_enabled = 1;
    }
    if (getenv("QEMU_AVOID_DOUBLE_FETCH")) {
        avoid_double_fetches = 1;
    }
    if (getenv("QEMU_FUZZ_TIMEOUT")) {
        timeout = g_ascii_strtoll(getenv("QEMU_FUZZ_TIMEOUT"), NULL, 0);
    }
    qts_global = s;

    /*
     * Create a special device that we can use to back DMA buffers at very
     * high memory addresses
     */
    sparse_mem_mr = sparse_mem_init(0, UINT64_MAX);

    dma_regions = g_array_new(false, false, sizeof(address_range));
    dma_patterns = g_array_new(false, false, sizeof(pattern));

    fuzzable_memoryregions = g_hash_table_new(NULL, NULL);
    fuzzable_pci_devices   = g_ptr_array_new();

    result = g_strsplit(getenv("QEMU_FUZZ_OBJECTS"), " ", -1);
    for (int i = 0; result[i] != NULL; i++) {
        name_pattern = g_string_new(result[i]);
        /*
         * Make the pattern lowercase. We do the same for all the MemoryRegion
         * and Type names so the configs are case-insensitive.
         */
        g_string_ascii_down(name_pattern);
        printf("Matching objects by name %s\n", result[i]);
        object_child_foreach_recursive(qdev_get_machine(),
                                       locate_fuzz_objects,
                                       name_pattern->str);
        g_string_free(name_pattern, true);
    }
    g_strfreev(result);
    printf("This process will try to fuzz the following MemoryRegions:\n");

    g_hash_table_iter_init(&iter, fuzzable_memoryregions);
    while (g_hash_table_iter_next(&iter, (gpointer)&mr, NULL)) {
        printf("  * %s (size 0x%" PRIx64 ")\n",
               object_get_canonical_path_component(&(mr->parent_obj)),
               memory_region_size(mr));
    }

    if (!g_hash_table_size(fuzzable_memoryregions)) {
        printf("No fuzzable memory regions found...\n");
        exit(1);
    }

    pcibus = qpci_new_pc(s, NULL);
    g_ptr_array_foreach(fuzzable_pci_devices, pci_enum, pcibus);
    qpci_free_pc(pcibus);

    counter_shm_init();
}

/*
 * When libfuzzer gives us two inputs to combine, return a new input with the
 * following structure:
 *
 * Input 1 (data1)
 * SEPARATOR
 * Clear out the DMA Patterns
 * SEPARATOR
 * Disable the pci_read/write instructions
 * SEPARATOR
 * Input 2 (data2)
 *
 * The idea is to collate the core behaviors of the two inputs.
 * For example:
 * Input 1: maps a device's BARs, sets up three DMA patterns, and triggers
 *          device functionality A
 * Input 2: maps a device's BARs, sets up one DMA pattern, and triggers device
 *          functionality B
 *
 * This function attempts to produce a combined input that: maps a device's
 * BARs, sets up three DMA patterns, triggers device functionality A, replaces
 * the DMA patterns with a single pattern, and triggers device functionality B.
 */
static size_t generic_fuzz_crossover(const uint8_t *data1, size_t size1, const
                                     uint8_t *data2, size_t size2, uint8_t *out,
                                     size_t max_out_size, unsigned int seed)
{
    size_t copy_len = 0, size = 0;

    /* Check that we have enough space for data1 and at least part of data2 */
    if (max_out_size <= size1 + strlen(SEPARATOR) * 3 + 2) {
        return 0;
    }

    /* Copy in the first input */
    copy_len = size1;
    memcpy(out + size, data1, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Append a separator */
    copy_len = strlen(SEPARATOR);
    memcpy(out + size, SEPARATOR, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Clear out the DMA Patterns */
    copy_len = 1;
    if (copy_len) {
        out[size] = OP_CLEAR_DMA_PATTERNS;
    }
    size += copy_len;
    max_out_size -= copy_len;

    /* Append a separator */
    copy_len = strlen(SEPARATOR);
    memcpy(out + size, SEPARATOR, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Disable PCI ops. Assume data1 took care of setting up PCI */
    copy_len = 1;
    if (copy_len) {
        out[size] = OP_DISABLE_PCI;
    }
    size += copy_len;
    max_out_size -= copy_len;

    /* Append a separator */
    copy_len = strlen(SEPARATOR);
    memcpy(out + size, SEPARATOR, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    /* Copy over the second input */
    copy_len = MIN(size2, max_out_size);
    memcpy(out + size, data2, copy_len);
    size += copy_len;
    max_out_size -= copy_len;

    return size;
}

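/*
 * Illustrative sketch of the crossover layout (not registered with libfuzzer;
 * the inputs "AB"/"CD" and the buffer size are arbitrary): the combined input
 * is data1, a separator, OP_CLEAR_DMA_PATTERNS, a separator, OP_DISABLE_PCI,
 * a separator, and finally as much of data2 as fits.
 */
static void G_GNUC_UNUSED crossover_layout_example(void)
{
    uint8_t out[32];
    size_t n = generic_fuzz_crossover((const uint8_t *)"AB", 2,
                                      (const uint8_t *)"CD", 2,
                                      out, sizeof(out), 0);

    g_assert(n == 2 + 3 * strlen(SEPARATOR) + 2 + 2);
    g_assert(memcmp(out, "AB" SEPARATOR, 2 + strlen(SEPARATOR)) == 0);
    g_assert(out[6] == OP_CLEAR_DMA_PATTERNS);
    g_assert(out[11] == OP_DISABLE_PCI);
    g_assert(memcmp(out + 12, SEPARATOR "CD", strlen(SEPARATOR) + 2) == 0);
}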

static GString *generic_fuzz_cmdline(FuzzTarget *t)
{
    GString *cmd_line = g_string_new(TARGET_NAME);
    if (!getenv("QEMU_FUZZ_ARGS")) {
        usage();
    }
    g_string_append_printf(cmd_line, " -display none \
                                      -machine accel=qtest, \
                                      -m 512M %s ", getenv("QEMU_FUZZ_ARGS"));
    return cmd_line;
}

static GString *generic_fuzz_predefined_config_cmdline(FuzzTarget *t)
{
    gchar *args;
    const generic_fuzz_config *config;
    g_assert(t->opaque);

    config = t->opaque;
    setenv("QEMU_AVOID_DOUBLE_FETCH", "1", 1);
    if (config->argfunc) {
        args = config->argfunc();
        setenv("QEMU_FUZZ_ARGS", args, 1);
        g_free(args);
    } else {
        g_assert_nonnull(config->args);
        setenv("QEMU_FUZZ_ARGS", config->args, 1);
    }
    setenv("QEMU_FUZZ_OBJECTS", config->objects, 1);
    return generic_fuzz_cmdline(t);
}

static void register_generic_fuzz_targets(void)
{
    fuzz_add_target(&(FuzzTarget){
            .name = "generic-fuzz",
            .description = "Fuzz based on any qemu command-line args. ",
            .get_init_cmdline = generic_fuzz_cmdline,
            .pre_fuzz = generic_pre_fuzz,
            .fuzz = generic_fuzz,
            .crossover = generic_fuzz_crossover
    });

    GString *name;
    const generic_fuzz_config *config;

    for (int i = 0;
         i < sizeof(predefined_configs) / sizeof(generic_fuzz_config);
         i++) {
        config = predefined_configs + i;
        name = g_string_new("generic-fuzz");
        g_string_append_printf(name, "-%s", config->name);
        fuzz_add_target(&(FuzzTarget){
                .name = name->str,
                .description = "Predefined generic-fuzz config.",
                .get_init_cmdline = generic_fuzz_predefined_config_cmdline,
                .pre_fuzz = generic_pre_fuzz,
                .fuzz = generic_fuzz,
                .crossover = generic_fuzz_crossover,
                .opaque = (void *)config
        });
    }
}

fuzz_target_init(register_generic_fuzz_targets);