1 /* 2 * QEMU dump 3 * 4 * Copyright Fujitsu, Corp. 2011, 2012 5 * 6 * Authors: 7 * Wen Congyang <wency@cn.fujitsu.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2 or later. 10 * See the COPYING file in the top-level directory. 11 * 12 */ 13 14 #include "qemu/osdep.h" 15 #include "qemu/cutils.h" 16 #include "elf.h" 17 #include "exec/hwaddr.h" 18 #include "monitor/monitor.h" 19 #include "sysemu/kvm.h" 20 #include "sysemu/dump.h" 21 #include "sysemu/memory_mapping.h" 22 #include "sysemu/runstate.h" 23 #include "sysemu/cpus.h" 24 #include "qapi/error.h" 25 #include "qapi/qapi-commands-dump.h" 26 #include "qapi/qapi-events-dump.h" 27 #include "qapi/qmp/qerror.h" 28 #include "qemu/error-report.h" 29 #include "qemu/main-loop.h" 30 #include "hw/misc/vmcoreinfo.h" 31 #include "migration/blocker.h" 32 33 #ifdef TARGET_X86_64 34 #include "win_dump.h" 35 #endif 36 37 #include <zlib.h> 38 #ifdef CONFIG_LZO 39 #include <lzo/lzo1x.h> 40 #endif 41 #ifdef CONFIG_SNAPPY 42 #include <snappy-c.h> 43 #endif 44 #ifndef ELF_MACHINE_UNAME 45 #define ELF_MACHINE_UNAME "Unknown" 46 #endif 47 48 #define MAX_GUEST_NOTE_SIZE (1 << 20) /* 1MB should be enough */ 49 50 static Error *dump_migration_blocker; 51 52 #define ELF_NOTE_SIZE(hdr_size, name_size, desc_size) \ 53 ((DIV_ROUND_UP((hdr_size), 4) + \ 54 DIV_ROUND_UP((name_size), 4) + \ 55 DIV_ROUND_UP((desc_size), 4)) * 4) 56 57 uint16_t cpu_to_dump16(DumpState *s, uint16_t val) 58 { 59 if (s->dump_info.d_endian == ELFDATA2LSB) { 60 val = cpu_to_le16(val); 61 } else { 62 val = cpu_to_be16(val); 63 } 64 65 return val; 66 } 67 68 uint32_t cpu_to_dump32(DumpState *s, uint32_t val) 69 { 70 if (s->dump_info.d_endian == ELFDATA2LSB) { 71 val = cpu_to_le32(val); 72 } else { 73 val = cpu_to_be32(val); 74 } 75 76 return val; 77 } 78 79 uint64_t cpu_to_dump64(DumpState *s, uint64_t val) 80 { 81 if (s->dump_info.d_endian == ELFDATA2LSB) { 82 val = cpu_to_le64(val); 83 } else { 84 val = cpu_to_be64(val); 85 } 86 87 return val; 88 } 89 90 static int dump_cleanup(DumpState *s) 91 { 92 guest_phys_blocks_free(&s->guest_phys_blocks); 93 memory_mapping_list_free(&s->list); 94 close(s->fd); 95 g_free(s->guest_note); 96 s->guest_note = NULL; 97 if (s->resume) { 98 if (s->detached) { 99 qemu_mutex_lock_iothread(); 100 } 101 vm_start(); 102 if (s->detached) { 103 qemu_mutex_unlock_iothread(); 104 } 105 } 106 migrate_del_blocker(dump_migration_blocker); 107 108 return 0; 109 } 110 111 static int fd_write_vmcore(const void *buf, size_t size, void *opaque) 112 { 113 DumpState *s = opaque; 114 size_t written_size; 115 116 written_size = qemu_write_full(s->fd, buf, size); 117 if (written_size != size) { 118 return -errno; 119 } 120 121 return 0; 122 } 123 124 static void write_elf64_header(DumpState *s, Error **errp) 125 { 126 Elf64_Ehdr elf_header; 127 int ret; 128 129 memset(&elf_header, 0, sizeof(Elf64_Ehdr)); 130 memcpy(&elf_header, ELFMAG, SELFMAG); 131 elf_header.e_ident[EI_CLASS] = ELFCLASS64; 132 elf_header.e_ident[EI_DATA] = s->dump_info.d_endian; 133 elf_header.e_ident[EI_VERSION] = EV_CURRENT; 134 elf_header.e_type = cpu_to_dump16(s, ET_CORE); 135 elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine); 136 elf_header.e_version = cpu_to_dump32(s, EV_CURRENT); 137 elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header)); 138 elf_header.e_phoff = cpu_to_dump64(s, sizeof(Elf64_Ehdr)); 139 elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr)); 140 elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num); 141 if (s->have_section) { 142 uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info; 143 144 elf_header.e_shoff = cpu_to_dump64(s, shoff); 145 elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr)); 146 elf_header.e_shnum = cpu_to_dump16(s, 1); 147 } 148 149 ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s); 150 if (ret < 0) { 151 error_setg_errno(errp, -ret, "dump: failed to write elf header"); 152 } 153 } 154 155 static void write_elf32_header(DumpState *s, Error **errp) 156 { 157 Elf32_Ehdr elf_header; 158 int ret; 159 160 memset(&elf_header, 0, sizeof(Elf32_Ehdr)); 161 memcpy(&elf_header, ELFMAG, SELFMAG); 162 elf_header.e_ident[EI_CLASS] = ELFCLASS32; 163 elf_header.e_ident[EI_DATA] = s->dump_info.d_endian; 164 elf_header.e_ident[EI_VERSION] = EV_CURRENT; 165 elf_header.e_type = cpu_to_dump16(s, ET_CORE); 166 elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine); 167 elf_header.e_version = cpu_to_dump32(s, EV_CURRENT); 168 elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header)); 169 elf_header.e_phoff = cpu_to_dump32(s, sizeof(Elf32_Ehdr)); 170 elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr)); 171 elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num); 172 if (s->have_section) { 173 uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info; 174 175 elf_header.e_shoff = cpu_to_dump32(s, shoff); 176 elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr)); 177 elf_header.e_shnum = cpu_to_dump16(s, 1); 178 } 179 180 ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s); 181 if (ret < 0) { 182 error_setg_errno(errp, -ret, "dump: failed to write elf header"); 183 } 184 } 185 186 static void write_elf64_load(DumpState *s, MemoryMapping *memory_mapping, 187 int phdr_index, hwaddr offset, 188 hwaddr filesz, Error **errp) 189 { 190 Elf64_Phdr phdr; 191 int ret; 192 193 memset(&phdr, 0, sizeof(Elf64_Phdr)); 194 phdr.p_type = cpu_to_dump32(s, PT_LOAD); 195 phdr.p_offset = cpu_to_dump64(s, offset); 196 phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr); 197 phdr.p_filesz = cpu_to_dump64(s, filesz); 198 phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length); 199 phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr) ?: phdr.p_paddr; 200 201 assert(memory_mapping->length >= filesz); 202 203 ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s); 204 if (ret < 0) { 205 error_setg_errno(errp, -ret, 206 "dump: failed to write program header table"); 207 } 208 } 209 210 static void write_elf32_load(DumpState *s, MemoryMapping *memory_mapping, 211 int phdr_index, hwaddr offset, 212 hwaddr filesz, Error **errp) 213 { 214 Elf32_Phdr phdr; 215 int ret; 216 217 memset(&phdr, 0, sizeof(Elf32_Phdr)); 218 phdr.p_type = cpu_to_dump32(s, PT_LOAD); 219 phdr.p_offset = cpu_to_dump32(s, offset); 220 phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr); 221 phdr.p_filesz = cpu_to_dump32(s, filesz); 222 phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length); 223 phdr.p_vaddr = 224 cpu_to_dump32(s, memory_mapping->virt_addr) ?: phdr.p_paddr; 225 226 assert(memory_mapping->length >= filesz); 227 228 ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s); 229 if (ret < 0) { 230 error_setg_errno(errp, -ret, 231 "dump: failed to write program header table"); 232 } 233 } 234 235 static void write_elf64_note(DumpState *s, Error **errp) 236 { 237 Elf64_Phdr phdr; 238 hwaddr begin = s->memory_offset - s->note_size; 239 int ret; 240 241 memset(&phdr, 0, sizeof(Elf64_Phdr)); 242 phdr.p_type = cpu_to_dump32(s, PT_NOTE); 243 phdr.p_offset = cpu_to_dump64(s, begin); 244 phdr.p_paddr = 0; 245 phdr.p_filesz = cpu_to_dump64(s, s->note_size); 246 phdr.p_memsz = cpu_to_dump64(s, s->note_size); 247 phdr.p_vaddr = 0; 248 249 ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s); 250 if (ret < 0) { 251 error_setg_errno(errp, -ret, 252 "dump: failed to write program header table"); 253 } 254 } 255 256 static inline int cpu_index(CPUState *cpu) 257 { 258 return cpu->cpu_index + 1; 259 } 260 261 static void write_guest_note(WriteCoreDumpFunction f, DumpState *s, 262 Error **errp) 263 { 264 int ret; 265 266 if (s->guest_note) { 267 ret = f(s->guest_note, s->guest_note_size, s); 268 if (ret < 0) { 269 error_setg(errp, "dump: failed to write guest note"); 270 } 271 } 272 } 273 274 static void write_elf64_notes(WriteCoreDumpFunction f, DumpState *s, 275 Error **errp) 276 { 277 CPUState *cpu; 278 int ret; 279 int id; 280 281 CPU_FOREACH(cpu) { 282 id = cpu_index(cpu); 283 ret = cpu_write_elf64_note(f, cpu, id, s); 284 if (ret < 0) { 285 error_setg(errp, "dump: failed to write elf notes"); 286 return; 287 } 288 } 289 290 CPU_FOREACH(cpu) { 291 ret = cpu_write_elf64_qemunote(f, cpu, s); 292 if (ret < 0) { 293 error_setg(errp, "dump: failed to write CPU status"); 294 return; 295 } 296 } 297 298 write_guest_note(f, s, errp); 299 } 300 301 static void write_elf32_note(DumpState *s, Error **errp) 302 { 303 hwaddr begin = s->memory_offset - s->note_size; 304 Elf32_Phdr phdr; 305 int ret; 306 307 memset(&phdr, 0, sizeof(Elf32_Phdr)); 308 phdr.p_type = cpu_to_dump32(s, PT_NOTE); 309 phdr.p_offset = cpu_to_dump32(s, begin); 310 phdr.p_paddr = 0; 311 phdr.p_filesz = cpu_to_dump32(s, s->note_size); 312 phdr.p_memsz = cpu_to_dump32(s, s->note_size); 313 phdr.p_vaddr = 0; 314 315 ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s); 316 if (ret < 0) { 317 error_setg_errno(errp, -ret, 318 "dump: failed to write program header table"); 319 } 320 } 321 322 static void write_elf32_notes(WriteCoreDumpFunction f, DumpState *s, 323 Error **errp) 324 { 325 CPUState *cpu; 326 int ret; 327 int id; 328 329 CPU_FOREACH(cpu) { 330 id = cpu_index(cpu); 331 ret = cpu_write_elf32_note(f, cpu, id, s); 332 if (ret < 0) { 333 error_setg(errp, "dump: failed to write elf notes"); 334 return; 335 } 336 } 337 338 CPU_FOREACH(cpu) { 339 ret = cpu_write_elf32_qemunote(f, cpu, s); 340 if (ret < 0) { 341 error_setg(errp, "dump: failed to write CPU status"); 342 return; 343 } 344 } 345 346 write_guest_note(f, s, errp); 347 } 348 349 static void write_elf_section(DumpState *s, int type, Error **errp) 350 { 351 Elf32_Shdr shdr32; 352 Elf64_Shdr shdr64; 353 int shdr_size; 354 void *shdr; 355 int ret; 356 357 if (type == 0) { 358 shdr_size = sizeof(Elf32_Shdr); 359 memset(&shdr32, 0, shdr_size); 360 shdr32.sh_info = cpu_to_dump32(s, s->sh_info); 361 shdr = &shdr32; 362 } else { 363 shdr_size = sizeof(Elf64_Shdr); 364 memset(&shdr64, 0, shdr_size); 365 shdr64.sh_info = cpu_to_dump32(s, s->sh_info); 366 shdr = &shdr64; 367 } 368 369 ret = fd_write_vmcore(shdr, shdr_size, s); 370 if (ret < 0) { 371 error_setg_errno(errp, -ret, 372 "dump: failed to write section header table"); 373 } 374 } 375 376 static void write_data(DumpState *s, void *buf, int length, Error **errp) 377 { 378 int ret; 379 380 ret = fd_write_vmcore(buf, length, s); 381 if (ret < 0) { 382 error_setg_errno(errp, -ret, "dump: failed to save memory"); 383 } else { 384 s->written_size += length; 385 } 386 } 387 388 /* write the memory to vmcore. 1 page per I/O. */ 389 static void write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start, 390 int64_t size, Error **errp) 391 { 392 ERRP_GUARD(); 393 int64_t i; 394 395 for (i = 0; i < size / s->dump_info.page_size; i++) { 396 write_data(s, block->host_addr + start + i * s->dump_info.page_size, 397 s->dump_info.page_size, errp); 398 if (*errp) { 399 return; 400 } 401 } 402 403 if ((size % s->dump_info.page_size) != 0) { 404 write_data(s, block->host_addr + start + i * s->dump_info.page_size, 405 size % s->dump_info.page_size, errp); 406 if (*errp) { 407 return; 408 } 409 } 410 } 411 412 /* get the memory's offset and size in the vmcore */ 413 static void get_offset_range(hwaddr phys_addr, 414 ram_addr_t mapping_length, 415 DumpState *s, 416 hwaddr *p_offset, 417 hwaddr *p_filesz) 418 { 419 GuestPhysBlock *block; 420 hwaddr offset = s->memory_offset; 421 int64_t size_in_block, start; 422 423 /* When the memory is not stored into vmcore, offset will be -1 */ 424 *p_offset = -1; 425 *p_filesz = 0; 426 427 if (s->has_filter) { 428 if (phys_addr < s->begin || phys_addr >= s->begin + s->length) { 429 return; 430 } 431 } 432 433 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) { 434 if (s->has_filter) { 435 if (block->target_start >= s->begin + s->length || 436 block->target_end <= s->begin) { 437 /* This block is out of the range */ 438 continue; 439 } 440 441 if (s->begin <= block->target_start) { 442 start = block->target_start; 443 } else { 444 start = s->begin; 445 } 446 447 size_in_block = block->target_end - start; 448 if (s->begin + s->length < block->target_end) { 449 size_in_block -= block->target_end - (s->begin + s->length); 450 } 451 } else { 452 start = block->target_start; 453 size_in_block = block->target_end - block->target_start; 454 } 455 456 if (phys_addr >= start && phys_addr < start + size_in_block) { 457 *p_offset = phys_addr - start + offset; 458 459 /* The offset range mapped from the vmcore file must not spill over 460 * the GuestPhysBlock, clamp it. The rest of the mapping will be 461 * zero-filled in memory at load time; see 462 * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>. 463 */ 464 *p_filesz = phys_addr + mapping_length <= start + size_in_block ? 465 mapping_length : 466 size_in_block - (phys_addr - start); 467 return; 468 } 469 470 offset += size_in_block; 471 } 472 } 473 474 static void write_elf_loads(DumpState *s, Error **errp) 475 { 476 ERRP_GUARD(); 477 hwaddr offset, filesz; 478 MemoryMapping *memory_mapping; 479 uint32_t phdr_index = 1; 480 uint32_t max_index; 481 482 if (s->have_section) { 483 max_index = s->sh_info; 484 } else { 485 max_index = s->phdr_num; 486 } 487 488 QTAILQ_FOREACH(memory_mapping, &s->list.head, next) { 489 get_offset_range(memory_mapping->phys_addr, 490 memory_mapping->length, 491 s, &offset, &filesz); 492 if (s->dump_info.d_class == ELFCLASS64) { 493 write_elf64_load(s, memory_mapping, phdr_index++, offset, 494 filesz, errp); 495 } else { 496 write_elf32_load(s, memory_mapping, phdr_index++, offset, 497 filesz, errp); 498 } 499 500 if (*errp) { 501 return; 502 } 503 504 if (phdr_index >= max_index) { 505 break; 506 } 507 } 508 } 509 510 /* write elf header, PT_NOTE and elf note to vmcore. */ 511 static void dump_begin(DumpState *s, Error **errp) 512 { 513 ERRP_GUARD(); 514 515 /* 516 * the vmcore's format is: 517 * -------------- 518 * | elf header | 519 * -------------- 520 * | PT_NOTE | 521 * -------------- 522 * | PT_LOAD | 523 * -------------- 524 * | ...... | 525 * -------------- 526 * | PT_LOAD | 527 * -------------- 528 * | sec_hdr | 529 * -------------- 530 * | elf note | 531 * -------------- 532 * | memory | 533 * -------------- 534 * 535 * we only know where the memory is saved after we write elf note into 536 * vmcore. 537 */ 538 539 /* write elf header to vmcore */ 540 if (s->dump_info.d_class == ELFCLASS64) { 541 write_elf64_header(s, errp); 542 } else { 543 write_elf32_header(s, errp); 544 } 545 if (*errp) { 546 return; 547 } 548 549 if (s->dump_info.d_class == ELFCLASS64) { 550 /* write PT_NOTE to vmcore */ 551 write_elf64_note(s, errp); 552 if (*errp) { 553 return; 554 } 555 556 /* write all PT_LOAD to vmcore */ 557 write_elf_loads(s, errp); 558 if (*errp) { 559 return; 560 } 561 562 /* write section to vmcore */ 563 if (s->have_section) { 564 write_elf_section(s, 1, errp); 565 if (*errp) { 566 return; 567 } 568 } 569 570 /* write notes to vmcore */ 571 write_elf64_notes(fd_write_vmcore, s, errp); 572 if (*errp) { 573 return; 574 } 575 } else { 576 /* write PT_NOTE to vmcore */ 577 write_elf32_note(s, errp); 578 if (*errp) { 579 return; 580 } 581 582 /* write all PT_LOAD to vmcore */ 583 write_elf_loads(s, errp); 584 if (*errp) { 585 return; 586 } 587 588 /* write section to vmcore */ 589 if (s->have_section) { 590 write_elf_section(s, 0, errp); 591 if (*errp) { 592 return; 593 } 594 } 595 596 /* write notes to vmcore */ 597 write_elf32_notes(fd_write_vmcore, s, errp); 598 if (*errp) { 599 return; 600 } 601 } 602 } 603 604 static int get_next_block(DumpState *s, GuestPhysBlock *block) 605 { 606 while (1) { 607 block = QTAILQ_NEXT(block, next); 608 if (!block) { 609 /* no more block */ 610 return 1; 611 } 612 613 s->start = 0; 614 s->next_block = block; 615 if (s->has_filter) { 616 if (block->target_start >= s->begin + s->length || 617 block->target_end <= s->begin) { 618 /* This block is out of the range */ 619 continue; 620 } 621 622 if (s->begin > block->target_start) { 623 s->start = s->begin - block->target_start; 624 } 625 } 626 627 return 0; 628 } 629 } 630 631 /* write all memory to vmcore */ 632 static void dump_iterate(DumpState *s, Error **errp) 633 { 634 ERRP_GUARD(); 635 GuestPhysBlock *block; 636 int64_t size; 637 638 do { 639 block = s->next_block; 640 641 size = block->target_end - block->target_start; 642 if (s->has_filter) { 643 size -= s->start; 644 if (s->begin + s->length < block->target_end) { 645 size -= block->target_end - (s->begin + s->length); 646 } 647 } 648 write_memory(s, block, s->start, size, errp); 649 if (*errp) { 650 return; 651 } 652 653 } while (!get_next_block(s, block)); 654 } 655 656 static void create_vmcore(DumpState *s, Error **errp) 657 { 658 ERRP_GUARD(); 659 660 dump_begin(s, errp); 661 if (*errp) { 662 return; 663 } 664 665 dump_iterate(s, errp); 666 } 667 668 static int write_start_flat_header(int fd) 669 { 670 MakedumpfileHeader *mh; 671 int ret = 0; 672 673 QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER); 674 mh = g_malloc0(MAX_SIZE_MDF_HEADER); 675 676 memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE, 677 MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE)); 678 679 mh->type = cpu_to_be64(TYPE_FLAT_HEADER); 680 mh->version = cpu_to_be64(VERSION_FLAT_HEADER); 681 682 size_t written_size; 683 written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER); 684 if (written_size != MAX_SIZE_MDF_HEADER) { 685 ret = -1; 686 } 687 688 g_free(mh); 689 return ret; 690 } 691 692 static int write_end_flat_header(int fd) 693 { 694 MakedumpfileDataHeader mdh; 695 696 mdh.offset = END_FLAG_FLAT_HEADER; 697 mdh.buf_size = END_FLAG_FLAT_HEADER; 698 699 size_t written_size; 700 written_size = qemu_write_full(fd, &mdh, sizeof(mdh)); 701 if (written_size != sizeof(mdh)) { 702 return -1; 703 } 704 705 return 0; 706 } 707 708 static int write_buffer(int fd, off_t offset, const void *buf, size_t size) 709 { 710 size_t written_size; 711 MakedumpfileDataHeader mdh; 712 713 mdh.offset = cpu_to_be64(offset); 714 mdh.buf_size = cpu_to_be64(size); 715 716 written_size = qemu_write_full(fd, &mdh, sizeof(mdh)); 717 if (written_size != sizeof(mdh)) { 718 return -1; 719 } 720 721 written_size = qemu_write_full(fd, buf, size); 722 if (written_size != size) { 723 return -1; 724 } 725 726 return 0; 727 } 728 729 static int buf_write_note(const void *buf, size_t size, void *opaque) 730 { 731 DumpState *s = opaque; 732 733 /* note_buf is not enough */ 734 if (s->note_buf_offset + size > s->note_size) { 735 return -1; 736 } 737 738 memcpy(s->note_buf + s->note_buf_offset, buf, size); 739 740 s->note_buf_offset += size; 741 742 return 0; 743 } 744 745 /* 746 * This function retrieves various sizes from an elf header. 747 * 748 * @note has to be a valid ELF note. The return sizes are unmodified 749 * (not padded or rounded up to be multiple of 4). 750 */ 751 static void get_note_sizes(DumpState *s, const void *note, 752 uint64_t *note_head_size, 753 uint64_t *name_size, 754 uint64_t *desc_size) 755 { 756 uint64_t note_head_sz; 757 uint64_t name_sz; 758 uint64_t desc_sz; 759 760 if (s->dump_info.d_class == ELFCLASS64) { 761 const Elf64_Nhdr *hdr = note; 762 note_head_sz = sizeof(Elf64_Nhdr); 763 name_sz = tswap64(hdr->n_namesz); 764 desc_sz = tswap64(hdr->n_descsz); 765 } else { 766 const Elf32_Nhdr *hdr = note; 767 note_head_sz = sizeof(Elf32_Nhdr); 768 name_sz = tswap32(hdr->n_namesz); 769 desc_sz = tswap32(hdr->n_descsz); 770 } 771 772 if (note_head_size) { 773 *note_head_size = note_head_sz; 774 } 775 if (name_size) { 776 *name_size = name_sz; 777 } 778 if (desc_size) { 779 *desc_size = desc_sz; 780 } 781 } 782 783 static bool note_name_equal(DumpState *s, 784 const uint8_t *note, const char *name) 785 { 786 int len = strlen(name) + 1; 787 uint64_t head_size, name_size; 788 789 get_note_sizes(s, note, &head_size, &name_size, NULL); 790 head_size = ROUND_UP(head_size, 4); 791 792 return name_size == len && memcmp(note + head_size, name, len) == 0; 793 } 794 795 /* write common header, sub header and elf note to vmcore */ 796 static void create_header32(DumpState *s, Error **errp) 797 { 798 ERRP_GUARD(); 799 DiskDumpHeader32 *dh = NULL; 800 KdumpSubHeader32 *kh = NULL; 801 size_t size; 802 uint32_t block_size; 803 uint32_t sub_hdr_size; 804 uint32_t bitmap_blocks; 805 uint32_t status = 0; 806 uint64_t offset_note; 807 808 /* write common header, the version of kdump-compressed format is 6th */ 809 size = sizeof(DiskDumpHeader32); 810 dh = g_malloc0(size); 811 812 memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN); 813 dh->header_version = cpu_to_dump32(s, 6); 814 block_size = s->dump_info.page_size; 815 dh->block_size = cpu_to_dump32(s, block_size); 816 sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size; 817 sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size); 818 dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size); 819 /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */ 820 dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX)); 821 dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus); 822 bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2; 823 dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks); 824 strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine)); 825 826 if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) { 827 status |= DUMP_DH_COMPRESSED_ZLIB; 828 } 829 #ifdef CONFIG_LZO 830 if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) { 831 status |= DUMP_DH_COMPRESSED_LZO; 832 } 833 #endif 834 #ifdef CONFIG_SNAPPY 835 if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) { 836 status |= DUMP_DH_COMPRESSED_SNAPPY; 837 } 838 #endif 839 dh->status = cpu_to_dump32(s, status); 840 841 if (write_buffer(s->fd, 0, dh, size) < 0) { 842 error_setg(errp, "dump: failed to write disk dump header"); 843 goto out; 844 } 845 846 /* write sub header */ 847 size = sizeof(KdumpSubHeader32); 848 kh = g_malloc0(size); 849 850 /* 64bit max_mapnr_64 */ 851 kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr); 852 kh->phys_base = cpu_to_dump32(s, s->dump_info.phys_base); 853 kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL); 854 855 offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size; 856 if (s->guest_note && 857 note_name_equal(s, s->guest_note, "VMCOREINFO")) { 858 uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo; 859 860 get_note_sizes(s, s->guest_note, 861 &hsize, &name_size, &size_vmcoreinfo_desc); 862 offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size + 863 (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4; 864 kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo); 865 kh->size_vmcoreinfo = cpu_to_dump32(s, size_vmcoreinfo_desc); 866 } 867 868 kh->offset_note = cpu_to_dump64(s, offset_note); 869 kh->note_size = cpu_to_dump32(s, s->note_size); 870 871 if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS * 872 block_size, kh, size) < 0) { 873 error_setg(errp, "dump: failed to write kdump sub header"); 874 goto out; 875 } 876 877 /* write note */ 878 s->note_buf = g_malloc0(s->note_size); 879 s->note_buf_offset = 0; 880 881 /* use s->note_buf to store notes temporarily */ 882 write_elf32_notes(buf_write_note, s, errp); 883 if (*errp) { 884 goto out; 885 } 886 if (write_buffer(s->fd, offset_note, s->note_buf, 887 s->note_size) < 0) { 888 error_setg(errp, "dump: failed to write notes"); 889 goto out; 890 } 891 892 /* get offset of dump_bitmap */ 893 s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) * 894 block_size; 895 896 /* get offset of page */ 897 s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) * 898 block_size; 899 900 out: 901 g_free(dh); 902 g_free(kh); 903 g_free(s->note_buf); 904 } 905 906 /* write common header, sub header and elf note to vmcore */ 907 static void create_header64(DumpState *s, Error **errp) 908 { 909 ERRP_GUARD(); 910 DiskDumpHeader64 *dh = NULL; 911 KdumpSubHeader64 *kh = NULL; 912 size_t size; 913 uint32_t block_size; 914 uint32_t sub_hdr_size; 915 uint32_t bitmap_blocks; 916 uint32_t status = 0; 917 uint64_t offset_note; 918 919 /* write common header, the version of kdump-compressed format is 6th */ 920 size = sizeof(DiskDumpHeader64); 921 dh = g_malloc0(size); 922 923 memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN); 924 dh->header_version = cpu_to_dump32(s, 6); 925 block_size = s->dump_info.page_size; 926 dh->block_size = cpu_to_dump32(s, block_size); 927 sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size; 928 sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size); 929 dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size); 930 /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */ 931 dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX)); 932 dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus); 933 bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2; 934 dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks); 935 strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine)); 936 937 if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) { 938 status |= DUMP_DH_COMPRESSED_ZLIB; 939 } 940 #ifdef CONFIG_LZO 941 if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) { 942 status |= DUMP_DH_COMPRESSED_LZO; 943 } 944 #endif 945 #ifdef CONFIG_SNAPPY 946 if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) { 947 status |= DUMP_DH_COMPRESSED_SNAPPY; 948 } 949 #endif 950 dh->status = cpu_to_dump32(s, status); 951 952 if (write_buffer(s->fd, 0, dh, size) < 0) { 953 error_setg(errp, "dump: failed to write disk dump header"); 954 goto out; 955 } 956 957 /* write sub header */ 958 size = sizeof(KdumpSubHeader64); 959 kh = g_malloc0(size); 960 961 /* 64bit max_mapnr_64 */ 962 kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr); 963 kh->phys_base = cpu_to_dump64(s, s->dump_info.phys_base); 964 kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL); 965 966 offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size; 967 if (s->guest_note && 968 note_name_equal(s, s->guest_note, "VMCOREINFO")) { 969 uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo; 970 971 get_note_sizes(s, s->guest_note, 972 &hsize, &name_size, &size_vmcoreinfo_desc); 973 offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size + 974 (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4; 975 kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo); 976 kh->size_vmcoreinfo = cpu_to_dump64(s, size_vmcoreinfo_desc); 977 } 978 979 kh->offset_note = cpu_to_dump64(s, offset_note); 980 kh->note_size = cpu_to_dump64(s, s->note_size); 981 982 if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS * 983 block_size, kh, size) < 0) { 984 error_setg(errp, "dump: failed to write kdump sub header"); 985 goto out; 986 } 987 988 /* write note */ 989 s->note_buf = g_malloc0(s->note_size); 990 s->note_buf_offset = 0; 991 992 /* use s->note_buf to store notes temporarily */ 993 write_elf64_notes(buf_write_note, s, errp); 994 if (*errp) { 995 goto out; 996 } 997 998 if (write_buffer(s->fd, offset_note, s->note_buf, 999 s->note_size) < 0) { 1000 error_setg(errp, "dump: failed to write notes"); 1001 goto out; 1002 } 1003 1004 /* get offset of dump_bitmap */ 1005 s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) * 1006 block_size; 1007 1008 /* get offset of page */ 1009 s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) * 1010 block_size; 1011 1012 out: 1013 g_free(dh); 1014 g_free(kh); 1015 g_free(s->note_buf); 1016 } 1017 1018 static void write_dump_header(DumpState *s, Error **errp) 1019 { 1020 if (s->dump_info.d_class == ELFCLASS32) { 1021 create_header32(s, errp); 1022 } else { 1023 create_header64(s, errp); 1024 } 1025 } 1026 1027 static size_t dump_bitmap_get_bufsize(DumpState *s) 1028 { 1029 return s->dump_info.page_size; 1030 } 1031 1032 /* 1033 * set dump_bitmap sequencely. the bit before last_pfn is not allowed to be 1034 * rewritten, so if need to set the first bit, set last_pfn and pfn to 0. 1035 * set_dump_bitmap will always leave the recently set bit un-sync. And setting 1036 * (last bit + sizeof(buf) * 8) to 0 will do flushing the content in buf into 1037 * vmcore, ie. synchronizing un-sync bit into vmcore. 1038 */ 1039 static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value, 1040 uint8_t *buf, DumpState *s) 1041 { 1042 off_t old_offset, new_offset; 1043 off_t offset_bitmap1, offset_bitmap2; 1044 uint32_t byte, bit; 1045 size_t bitmap_bufsize = dump_bitmap_get_bufsize(s); 1046 size_t bits_per_buf = bitmap_bufsize * CHAR_BIT; 1047 1048 /* should not set the previous place */ 1049 assert(last_pfn <= pfn); 1050 1051 /* 1052 * if the bit needed to be set is not cached in buf, flush the data in buf 1053 * to vmcore firstly. 1054 * making new_offset be bigger than old_offset can also sync remained data 1055 * into vmcore. 1056 */ 1057 old_offset = bitmap_bufsize * (last_pfn / bits_per_buf); 1058 new_offset = bitmap_bufsize * (pfn / bits_per_buf); 1059 1060 while (old_offset < new_offset) { 1061 /* calculate the offset and write dump_bitmap */ 1062 offset_bitmap1 = s->offset_dump_bitmap + old_offset; 1063 if (write_buffer(s->fd, offset_bitmap1, buf, 1064 bitmap_bufsize) < 0) { 1065 return -1; 1066 } 1067 1068 /* dump level 1 is chosen, so 1st and 2nd bitmap are same */ 1069 offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap + 1070 old_offset; 1071 if (write_buffer(s->fd, offset_bitmap2, buf, 1072 bitmap_bufsize) < 0) { 1073 return -1; 1074 } 1075 1076 memset(buf, 0, bitmap_bufsize); 1077 old_offset += bitmap_bufsize; 1078 } 1079 1080 /* get the exact place of the bit in the buf, and set it */ 1081 byte = (pfn % bits_per_buf) / CHAR_BIT; 1082 bit = (pfn % bits_per_buf) % CHAR_BIT; 1083 if (value) { 1084 buf[byte] |= 1u << bit; 1085 } else { 1086 buf[byte] &= ~(1u << bit); 1087 } 1088 1089 return 0; 1090 } 1091 1092 static uint64_t dump_paddr_to_pfn(DumpState *s, uint64_t addr) 1093 { 1094 int target_page_shift = ctz32(s->dump_info.page_size); 1095 1096 return (addr >> target_page_shift) - ARCH_PFN_OFFSET; 1097 } 1098 1099 static uint64_t dump_pfn_to_paddr(DumpState *s, uint64_t pfn) 1100 { 1101 int target_page_shift = ctz32(s->dump_info.page_size); 1102 1103 return (pfn + ARCH_PFN_OFFSET) << target_page_shift; 1104 } 1105 1106 /* 1107 * exam every page and return the page frame number and the address of the page. 1108 * bufptr can be NULL. note: the blocks here is supposed to reflect guest-phys 1109 * blocks, so block->target_start and block->target_end should be interal 1110 * multiples of the target page size. 1111 */ 1112 static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr, 1113 uint8_t **bufptr, DumpState *s) 1114 { 1115 GuestPhysBlock *block = *blockptr; 1116 hwaddr addr, target_page_mask = ~((hwaddr)s->dump_info.page_size - 1); 1117 uint8_t *buf; 1118 1119 /* block == NULL means the start of the iteration */ 1120 if (!block) { 1121 block = QTAILQ_FIRST(&s->guest_phys_blocks.head); 1122 *blockptr = block; 1123 assert((block->target_start & ~target_page_mask) == 0); 1124 assert((block->target_end & ~target_page_mask) == 0); 1125 *pfnptr = dump_paddr_to_pfn(s, block->target_start); 1126 if (bufptr) { 1127 *bufptr = block->host_addr; 1128 } 1129 return true; 1130 } 1131 1132 *pfnptr = *pfnptr + 1; 1133 addr = dump_pfn_to_paddr(s, *pfnptr); 1134 1135 if ((addr >= block->target_start) && 1136 (addr + s->dump_info.page_size <= block->target_end)) { 1137 buf = block->host_addr + (addr - block->target_start); 1138 } else { 1139 /* the next page is in the next block */ 1140 block = QTAILQ_NEXT(block, next); 1141 *blockptr = block; 1142 if (!block) { 1143 return false; 1144 } 1145 assert((block->target_start & ~target_page_mask) == 0); 1146 assert((block->target_end & ~target_page_mask) == 0); 1147 *pfnptr = dump_paddr_to_pfn(s, block->target_start); 1148 buf = block->host_addr; 1149 } 1150 1151 if (bufptr) { 1152 *bufptr = buf; 1153 } 1154 1155 return true; 1156 } 1157 1158 static void write_dump_bitmap(DumpState *s, Error **errp) 1159 { 1160 int ret = 0; 1161 uint64_t last_pfn, pfn; 1162 void *dump_bitmap_buf; 1163 size_t num_dumpable; 1164 GuestPhysBlock *block_iter = NULL; 1165 size_t bitmap_bufsize = dump_bitmap_get_bufsize(s); 1166 size_t bits_per_buf = bitmap_bufsize * CHAR_BIT; 1167 1168 /* dump_bitmap_buf is used to store dump_bitmap temporarily */ 1169 dump_bitmap_buf = g_malloc0(bitmap_bufsize); 1170 1171 num_dumpable = 0; 1172 last_pfn = 0; 1173 1174 /* 1175 * exam memory page by page, and set the bit in dump_bitmap corresponded 1176 * to the existing page. 1177 */ 1178 while (get_next_page(&block_iter, &pfn, NULL, s)) { 1179 ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s); 1180 if (ret < 0) { 1181 error_setg(errp, "dump: failed to set dump_bitmap"); 1182 goto out; 1183 } 1184 1185 last_pfn = pfn; 1186 num_dumpable++; 1187 } 1188 1189 /* 1190 * set_dump_bitmap will always leave the recently set bit un-sync. Here we 1191 * set the remaining bits from last_pfn to the end of the bitmap buffer to 1192 * 0. With those set, the un-sync bit will be synchronized into the vmcore. 1193 */ 1194 if (num_dumpable > 0) { 1195 ret = set_dump_bitmap(last_pfn, last_pfn + bits_per_buf, false, 1196 dump_bitmap_buf, s); 1197 if (ret < 0) { 1198 error_setg(errp, "dump: failed to sync dump_bitmap"); 1199 goto out; 1200 } 1201 } 1202 1203 /* number of dumpable pages that will be dumped later */ 1204 s->num_dumpable = num_dumpable; 1205 1206 out: 1207 g_free(dump_bitmap_buf); 1208 } 1209 1210 static void prepare_data_cache(DataCache *data_cache, DumpState *s, 1211 off_t offset) 1212 { 1213 data_cache->fd = s->fd; 1214 data_cache->data_size = 0; 1215 data_cache->buf_size = 4 * dump_bitmap_get_bufsize(s); 1216 data_cache->buf = g_malloc0(data_cache->buf_size); 1217 data_cache->offset = offset; 1218 } 1219 1220 static int write_cache(DataCache *dc, const void *buf, size_t size, 1221 bool flag_sync) 1222 { 1223 /* 1224 * dc->buf_size should not be less than size, otherwise dc will never be 1225 * enough 1226 */ 1227 assert(size <= dc->buf_size); 1228 1229 /* 1230 * if flag_sync is set, synchronize data in dc->buf into vmcore. 1231 * otherwise check if the space is enough for caching data in buf, if not, 1232 * write the data in dc->buf to dc->fd and reset dc->buf 1233 */ 1234 if ((!flag_sync && dc->data_size + size > dc->buf_size) || 1235 (flag_sync && dc->data_size > 0)) { 1236 if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) { 1237 return -1; 1238 } 1239 1240 dc->offset += dc->data_size; 1241 dc->data_size = 0; 1242 } 1243 1244 if (!flag_sync) { 1245 memcpy(dc->buf + dc->data_size, buf, size); 1246 dc->data_size += size; 1247 } 1248 1249 return 0; 1250 } 1251 1252 static void free_data_cache(DataCache *data_cache) 1253 { 1254 g_free(data_cache->buf); 1255 } 1256 1257 static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress) 1258 { 1259 switch (flag_compress) { 1260 case DUMP_DH_COMPRESSED_ZLIB: 1261 return compressBound(page_size); 1262 1263 case DUMP_DH_COMPRESSED_LZO: 1264 /* 1265 * LZO will expand incompressible data by a little amount. Please check 1266 * the following URL to see the expansion calculation: 1267 * http://www.oberhumer.com/opensource/lzo/lzofaq.php 1268 */ 1269 return page_size + page_size / 16 + 64 + 3; 1270 1271 #ifdef CONFIG_SNAPPY 1272 case DUMP_DH_COMPRESSED_SNAPPY: 1273 return snappy_max_compressed_length(page_size); 1274 #endif 1275 } 1276 return 0; 1277 } 1278 1279 static void write_dump_pages(DumpState *s, Error **errp) 1280 { 1281 int ret = 0; 1282 DataCache page_desc, page_data; 1283 size_t len_buf_out, size_out; 1284 #ifdef CONFIG_LZO 1285 lzo_bytep wrkmem = NULL; 1286 #endif 1287 uint8_t *buf_out = NULL; 1288 off_t offset_desc, offset_data; 1289 PageDescriptor pd, pd_zero; 1290 uint8_t *buf; 1291 GuestPhysBlock *block_iter = NULL; 1292 uint64_t pfn_iter; 1293 1294 /* get offset of page_desc and page_data in dump file */ 1295 offset_desc = s->offset_page; 1296 offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable; 1297 1298 prepare_data_cache(&page_desc, s, offset_desc); 1299 prepare_data_cache(&page_data, s, offset_data); 1300 1301 /* prepare buffer to store compressed data */ 1302 len_buf_out = get_len_buf_out(s->dump_info.page_size, s->flag_compress); 1303 assert(len_buf_out != 0); 1304 1305 #ifdef CONFIG_LZO 1306 wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS); 1307 #endif 1308 1309 buf_out = g_malloc(len_buf_out); 1310 1311 /* 1312 * init zero page's page_desc and page_data, because every zero page 1313 * uses the same page_data 1314 */ 1315 pd_zero.size = cpu_to_dump32(s, s->dump_info.page_size); 1316 pd_zero.flags = cpu_to_dump32(s, 0); 1317 pd_zero.offset = cpu_to_dump64(s, offset_data); 1318 pd_zero.page_flags = cpu_to_dump64(s, 0); 1319 buf = g_malloc0(s->dump_info.page_size); 1320 ret = write_cache(&page_data, buf, s->dump_info.page_size, false); 1321 g_free(buf); 1322 if (ret < 0) { 1323 error_setg(errp, "dump: failed to write page data (zero page)"); 1324 goto out; 1325 } 1326 1327 offset_data += s->dump_info.page_size; 1328 1329 /* 1330 * dump memory to vmcore page by page. zero page will all be resided in the 1331 * first page of page section 1332 */ 1333 while (get_next_page(&block_iter, &pfn_iter, &buf, s)) { 1334 /* check zero page */ 1335 if (buffer_is_zero(buf, s->dump_info.page_size)) { 1336 ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor), 1337 false); 1338 if (ret < 0) { 1339 error_setg(errp, "dump: failed to write page desc"); 1340 goto out; 1341 } 1342 } else { 1343 /* 1344 * not zero page, then: 1345 * 1. compress the page 1346 * 2. write the compressed page into the cache of page_data 1347 * 3. get page desc of the compressed page and write it into the 1348 * cache of page_desc 1349 * 1350 * only one compression format will be used here, for 1351 * s->flag_compress is set. But when compression fails to work, 1352 * we fall back to save in plaintext. 1353 */ 1354 size_out = len_buf_out; 1355 if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) && 1356 (compress2(buf_out, (uLongf *)&size_out, buf, 1357 s->dump_info.page_size, Z_BEST_SPEED) == Z_OK) && 1358 (size_out < s->dump_info.page_size)) { 1359 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_ZLIB); 1360 pd.size = cpu_to_dump32(s, size_out); 1361 1362 ret = write_cache(&page_data, buf_out, size_out, false); 1363 if (ret < 0) { 1364 error_setg(errp, "dump: failed to write page data"); 1365 goto out; 1366 } 1367 #ifdef CONFIG_LZO 1368 } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) && 1369 (lzo1x_1_compress(buf, s->dump_info.page_size, buf_out, 1370 (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) && 1371 (size_out < s->dump_info.page_size)) { 1372 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_LZO); 1373 pd.size = cpu_to_dump32(s, size_out); 1374 1375 ret = write_cache(&page_data, buf_out, size_out, false); 1376 if (ret < 0) { 1377 error_setg(errp, "dump: failed to write page data"); 1378 goto out; 1379 } 1380 #endif 1381 #ifdef CONFIG_SNAPPY 1382 } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) && 1383 (snappy_compress((char *)buf, s->dump_info.page_size, 1384 (char *)buf_out, &size_out) == SNAPPY_OK) && 1385 (size_out < s->dump_info.page_size)) { 1386 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_SNAPPY); 1387 pd.size = cpu_to_dump32(s, size_out); 1388 1389 ret = write_cache(&page_data, buf_out, size_out, false); 1390 if (ret < 0) { 1391 error_setg(errp, "dump: failed to write page data"); 1392 goto out; 1393 } 1394 #endif 1395 } else { 1396 /* 1397 * fall back to save in plaintext, size_out should be 1398 * assigned the target's page size 1399 */ 1400 pd.flags = cpu_to_dump32(s, 0); 1401 size_out = s->dump_info.page_size; 1402 pd.size = cpu_to_dump32(s, size_out); 1403 1404 ret = write_cache(&page_data, buf, 1405 s->dump_info.page_size, false); 1406 if (ret < 0) { 1407 error_setg(errp, "dump: failed to write page data"); 1408 goto out; 1409 } 1410 } 1411 1412 /* get and write page desc here */ 1413 pd.page_flags = cpu_to_dump64(s, 0); 1414 pd.offset = cpu_to_dump64(s, offset_data); 1415 offset_data += size_out; 1416 1417 ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false); 1418 if (ret < 0) { 1419 error_setg(errp, "dump: failed to write page desc"); 1420 goto out; 1421 } 1422 } 1423 s->written_size += s->dump_info.page_size; 1424 } 1425 1426 ret = write_cache(&page_desc, NULL, 0, true); 1427 if (ret < 0) { 1428 error_setg(errp, "dump: failed to sync cache for page_desc"); 1429 goto out; 1430 } 1431 ret = write_cache(&page_data, NULL, 0, true); 1432 if (ret < 0) { 1433 error_setg(errp, "dump: failed to sync cache for page_data"); 1434 goto out; 1435 } 1436 1437 out: 1438 free_data_cache(&page_desc); 1439 free_data_cache(&page_data); 1440 1441 #ifdef CONFIG_LZO 1442 g_free(wrkmem); 1443 #endif 1444 1445 g_free(buf_out); 1446 } 1447 1448 static void create_kdump_vmcore(DumpState *s, Error **errp) 1449 { 1450 ERRP_GUARD(); 1451 int ret; 1452 1453 /* 1454 * the kdump-compressed format is: 1455 * File offset 1456 * +------------------------------------------+ 0x0 1457 * | main header (struct disk_dump_header) | 1458 * |------------------------------------------+ block 1 1459 * | sub header (struct kdump_sub_header) | 1460 * |------------------------------------------+ block 2 1461 * | 1st-dump_bitmap | 1462 * |------------------------------------------+ block 2 + X blocks 1463 * | 2nd-dump_bitmap | (aligned by block) 1464 * |------------------------------------------+ block 2 + 2 * X blocks 1465 * | page desc for pfn 0 (struct page_desc) | (aligned by block) 1466 * | page desc for pfn 1 (struct page_desc) | 1467 * | : | 1468 * |------------------------------------------| (not aligned by block) 1469 * | page data (pfn 0) | 1470 * | page data (pfn 1) | 1471 * | : | 1472 * +------------------------------------------+ 1473 */ 1474 1475 ret = write_start_flat_header(s->fd); 1476 if (ret < 0) { 1477 error_setg(errp, "dump: failed to write start flat header"); 1478 return; 1479 } 1480 1481 write_dump_header(s, errp); 1482 if (*errp) { 1483 return; 1484 } 1485 1486 write_dump_bitmap(s, errp); 1487 if (*errp) { 1488 return; 1489 } 1490 1491 write_dump_pages(s, errp); 1492 if (*errp) { 1493 return; 1494 } 1495 1496 ret = write_end_flat_header(s->fd); 1497 if (ret < 0) { 1498 error_setg(errp, "dump: failed to write end flat header"); 1499 return; 1500 } 1501 } 1502 1503 static ram_addr_t get_start_block(DumpState *s) 1504 { 1505 GuestPhysBlock *block; 1506 1507 if (!s->has_filter) { 1508 s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head); 1509 return 0; 1510 } 1511 1512 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) { 1513 if (block->target_start >= s->begin + s->length || 1514 block->target_end <= s->begin) { 1515 /* This block is out of the range */ 1516 continue; 1517 } 1518 1519 s->next_block = block; 1520 if (s->begin > block->target_start) { 1521 s->start = s->begin - block->target_start; 1522 } else { 1523 s->start = 0; 1524 } 1525 return s->start; 1526 } 1527 1528 return -1; 1529 } 1530 1531 static void get_max_mapnr(DumpState *s) 1532 { 1533 GuestPhysBlock *last_block; 1534 1535 last_block = QTAILQ_LAST(&s->guest_phys_blocks.head); 1536 s->max_mapnr = dump_paddr_to_pfn(s, last_block->target_end); 1537 } 1538 1539 static DumpState dump_state_global = { .status = DUMP_STATUS_NONE }; 1540 1541 static void dump_state_prepare(DumpState *s) 1542 { 1543 /* zero the struct, setting status to active */ 1544 *s = (DumpState) { .status = DUMP_STATUS_ACTIVE }; 1545 } 1546 1547 bool qemu_system_dump_in_progress(void) 1548 { 1549 DumpState *state = &dump_state_global; 1550 return (qatomic_read(&state->status) == DUMP_STATUS_ACTIVE); 1551 } 1552 1553 /* calculate total size of memory to be dumped (taking filter into 1554 * acoount.) */ 1555 static int64_t dump_calculate_size(DumpState *s) 1556 { 1557 GuestPhysBlock *block; 1558 int64_t size = 0, total = 0, left = 0, right = 0; 1559 1560 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) { 1561 if (s->has_filter) { 1562 /* calculate the overlapped region. */ 1563 left = MAX(s->begin, block->target_start); 1564 right = MIN(s->begin + s->length, block->target_end); 1565 size = right - left; 1566 size = size > 0 ? size : 0; 1567 } else { 1568 /* count the whole region in */ 1569 size = (block->target_end - block->target_start); 1570 } 1571 total += size; 1572 } 1573 1574 return total; 1575 } 1576 1577 static void vmcoreinfo_update_phys_base(DumpState *s) 1578 { 1579 uint64_t size, note_head_size, name_size, phys_base; 1580 char **lines; 1581 uint8_t *vmci; 1582 size_t i; 1583 1584 if (!note_name_equal(s, s->guest_note, "VMCOREINFO")) { 1585 return; 1586 } 1587 1588 get_note_sizes(s, s->guest_note, ¬e_head_size, &name_size, &size); 1589 note_head_size = ROUND_UP(note_head_size, 4); 1590 1591 vmci = s->guest_note + note_head_size + ROUND_UP(name_size, 4); 1592 *(vmci + size) = '\0'; 1593 1594 lines = g_strsplit((char *)vmci, "\n", -1); 1595 for (i = 0; lines[i]; i++) { 1596 const char *prefix = NULL; 1597 1598 if (s->dump_info.d_machine == EM_X86_64) { 1599 prefix = "NUMBER(phys_base)="; 1600 } else if (s->dump_info.d_machine == EM_AARCH64) { 1601 prefix = "NUMBER(PHYS_OFFSET)="; 1602 } 1603 1604 if (prefix && g_str_has_prefix(lines[i], prefix)) { 1605 if (qemu_strtou64(lines[i] + strlen(prefix), NULL, 16, 1606 &phys_base) < 0) { 1607 warn_report("Failed to read %s", prefix); 1608 } else { 1609 s->dump_info.phys_base = phys_base; 1610 } 1611 break; 1612 } 1613 } 1614 1615 g_strfreev(lines); 1616 } 1617 1618 static void dump_init(DumpState *s, int fd, bool has_format, 1619 DumpGuestMemoryFormat format, bool paging, bool has_filter, 1620 int64_t begin, int64_t length, Error **errp) 1621 { 1622 ERRP_GUARD(); 1623 VMCoreInfoState *vmci = vmcoreinfo_find(); 1624 CPUState *cpu; 1625 int nr_cpus; 1626 int ret; 1627 1628 s->has_format = has_format; 1629 s->format = format; 1630 s->written_size = 0; 1631 1632 /* kdump-compressed is conflict with paging and filter */ 1633 if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) { 1634 assert(!paging && !has_filter); 1635 } 1636 1637 if (runstate_is_running()) { 1638 vm_stop(RUN_STATE_SAVE_VM); 1639 s->resume = true; 1640 } else { 1641 s->resume = false; 1642 } 1643 1644 /* If we use KVM, we should synchronize the registers before we get dump 1645 * info or physmap info. 1646 */ 1647 cpu_synchronize_all_states(); 1648 nr_cpus = 0; 1649 CPU_FOREACH(cpu) { 1650 nr_cpus++; 1651 } 1652 1653 s->fd = fd; 1654 s->has_filter = has_filter; 1655 s->begin = begin; 1656 s->length = length; 1657 1658 memory_mapping_list_init(&s->list); 1659 1660 guest_phys_blocks_init(&s->guest_phys_blocks); 1661 guest_phys_blocks_append(&s->guest_phys_blocks); 1662 s->total_size = dump_calculate_size(s); 1663 #ifdef DEBUG_DUMP_GUEST_MEMORY 1664 fprintf(stderr, "DUMP: total memory to dump: %lu\n", s->total_size); 1665 #endif 1666 1667 /* it does not make sense to dump non-existent memory */ 1668 if (!s->total_size) { 1669 error_setg(errp, "dump: no guest memory to dump"); 1670 goto cleanup; 1671 } 1672 1673 s->start = get_start_block(s); 1674 if (s->start == -1) { 1675 error_setg(errp, QERR_INVALID_PARAMETER, "begin"); 1676 goto cleanup; 1677 } 1678 1679 /* get dump info: endian, class and architecture. 1680 * If the target architecture is not supported, cpu_get_dump_info() will 1681 * return -1. 1682 */ 1683 ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks); 1684 if (ret < 0) { 1685 error_setg(errp, QERR_UNSUPPORTED); 1686 goto cleanup; 1687 } 1688 1689 if (!s->dump_info.page_size) { 1690 s->dump_info.page_size = TARGET_PAGE_SIZE; 1691 } 1692 1693 s->note_size = cpu_get_note_size(s->dump_info.d_class, 1694 s->dump_info.d_machine, nr_cpus); 1695 if (s->note_size < 0) { 1696 error_setg(errp, QERR_UNSUPPORTED); 1697 goto cleanup; 1698 } 1699 1700 /* 1701 * The goal of this block is to (a) update the previously guessed 1702 * phys_base, (b) copy the guest note out of the guest. 1703 * Failure to do so is not fatal for dumping. 1704 */ 1705 if (vmci) { 1706 uint64_t addr, note_head_size, name_size, desc_size; 1707 uint32_t size; 1708 uint16_t format; 1709 1710 note_head_size = s->dump_info.d_class == ELFCLASS32 ? 1711 sizeof(Elf32_Nhdr) : sizeof(Elf64_Nhdr); 1712 1713 format = le16_to_cpu(vmci->vmcoreinfo.guest_format); 1714 size = le32_to_cpu(vmci->vmcoreinfo.size); 1715 addr = le64_to_cpu(vmci->vmcoreinfo.paddr); 1716 if (!vmci->has_vmcoreinfo) { 1717 warn_report("guest note is not present"); 1718 } else if (size < note_head_size || size > MAX_GUEST_NOTE_SIZE) { 1719 warn_report("guest note size is invalid: %" PRIu32, size); 1720 } else if (format != FW_CFG_VMCOREINFO_FORMAT_ELF) { 1721 warn_report("guest note format is unsupported: %" PRIu16, format); 1722 } else { 1723 s->guest_note = g_malloc(size + 1); /* +1 for adding \0 */ 1724 cpu_physical_memory_read(addr, s->guest_note, size); 1725 1726 get_note_sizes(s, s->guest_note, NULL, &name_size, &desc_size); 1727 s->guest_note_size = ELF_NOTE_SIZE(note_head_size, name_size, 1728 desc_size); 1729 if (name_size > MAX_GUEST_NOTE_SIZE || 1730 desc_size > MAX_GUEST_NOTE_SIZE || 1731 s->guest_note_size > size) { 1732 warn_report("Invalid guest note header"); 1733 g_free(s->guest_note); 1734 s->guest_note = NULL; 1735 } else { 1736 vmcoreinfo_update_phys_base(s); 1737 s->note_size += s->guest_note_size; 1738 } 1739 } 1740 } 1741 1742 /* get memory mapping */ 1743 if (paging) { 1744 qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, errp); 1745 if (*errp) { 1746 goto cleanup; 1747 } 1748 } else { 1749 qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks); 1750 } 1751 1752 s->nr_cpus = nr_cpus; 1753 1754 get_max_mapnr(s); 1755 1756 uint64_t tmp; 1757 tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT), 1758 s->dump_info.page_size); 1759 s->len_dump_bitmap = tmp * s->dump_info.page_size; 1760 1761 /* init for kdump-compressed format */ 1762 if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) { 1763 switch (format) { 1764 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB: 1765 s->flag_compress = DUMP_DH_COMPRESSED_ZLIB; 1766 break; 1767 1768 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO: 1769 #ifdef CONFIG_LZO 1770 if (lzo_init() != LZO_E_OK) { 1771 error_setg(errp, "failed to initialize the LZO library"); 1772 goto cleanup; 1773 } 1774 #endif 1775 s->flag_compress = DUMP_DH_COMPRESSED_LZO; 1776 break; 1777 1778 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY: 1779 s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY; 1780 break; 1781 1782 default: 1783 s->flag_compress = 0; 1784 } 1785 1786 return; 1787 } 1788 1789 if (s->has_filter) { 1790 memory_mapping_filter(&s->list, s->begin, s->length); 1791 } 1792 1793 /* 1794 * calculate phdr_num 1795 * 1796 * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow 1797 */ 1798 s->phdr_num = 1; /* PT_NOTE */ 1799 if (s->list.num < UINT16_MAX - 2) { 1800 s->phdr_num += s->list.num; 1801 s->have_section = false; 1802 } else { 1803 s->have_section = true; 1804 s->phdr_num = PN_XNUM; 1805 s->sh_info = 1; /* PT_NOTE */ 1806 1807 /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */ 1808 if (s->list.num <= UINT32_MAX - 1) { 1809 s->sh_info += s->list.num; 1810 } else { 1811 s->sh_info = UINT32_MAX; 1812 } 1813 } 1814 1815 if (s->dump_info.d_class == ELFCLASS64) { 1816 if (s->have_section) { 1817 s->memory_offset = sizeof(Elf64_Ehdr) + 1818 sizeof(Elf64_Phdr) * s->sh_info + 1819 sizeof(Elf64_Shdr) + s->note_size; 1820 } else { 1821 s->memory_offset = sizeof(Elf64_Ehdr) + 1822 sizeof(Elf64_Phdr) * s->phdr_num + s->note_size; 1823 } 1824 } else { 1825 if (s->have_section) { 1826 s->memory_offset = sizeof(Elf32_Ehdr) + 1827 sizeof(Elf32_Phdr) * s->sh_info + 1828 sizeof(Elf32_Shdr) + s->note_size; 1829 } else { 1830 s->memory_offset = sizeof(Elf32_Ehdr) + 1831 sizeof(Elf32_Phdr) * s->phdr_num + s->note_size; 1832 } 1833 } 1834 1835 return; 1836 1837 cleanup: 1838 dump_cleanup(s); 1839 } 1840 1841 /* this operation might be time consuming. */ 1842 static void dump_process(DumpState *s, Error **errp) 1843 { 1844 ERRP_GUARD(); 1845 DumpQueryResult *result = NULL; 1846 1847 if (s->has_format && s->format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) { 1848 #ifdef TARGET_X86_64 1849 create_win_dump(s, errp); 1850 #endif 1851 } else if (s->has_format && s->format != DUMP_GUEST_MEMORY_FORMAT_ELF) { 1852 create_kdump_vmcore(s, errp); 1853 } else { 1854 create_vmcore(s, errp); 1855 } 1856 1857 /* make sure status is written after written_size updates */ 1858 smp_wmb(); 1859 qatomic_set(&s->status, 1860 (*errp ? DUMP_STATUS_FAILED : DUMP_STATUS_COMPLETED)); 1861 1862 /* send DUMP_COMPLETED message (unconditionally) */ 1863 result = qmp_query_dump(NULL); 1864 /* should never fail */ 1865 assert(result); 1866 qapi_event_send_dump_completed(result, !!*errp, (*errp ? 1867 error_get_pretty(*errp) : NULL)); 1868 qapi_free_DumpQueryResult(result); 1869 1870 dump_cleanup(s); 1871 } 1872 1873 static void *dump_thread(void *data) 1874 { 1875 DumpState *s = (DumpState *)data; 1876 dump_process(s, NULL); 1877 return NULL; 1878 } 1879 1880 DumpQueryResult *qmp_query_dump(Error **errp) 1881 { 1882 DumpQueryResult *result = g_new(DumpQueryResult, 1); 1883 DumpState *state = &dump_state_global; 1884 result->status = qatomic_read(&state->status); 1885 /* make sure we are reading status and written_size in order */ 1886 smp_rmb(); 1887 result->completed = state->written_size; 1888 result->total = state->total_size; 1889 return result; 1890 } 1891 1892 void qmp_dump_guest_memory(bool paging, const char *file, 1893 bool has_detach, bool detach, 1894 bool has_begin, int64_t begin, bool has_length, 1895 int64_t length, bool has_format, 1896 DumpGuestMemoryFormat format, Error **errp) 1897 { 1898 ERRP_GUARD(); 1899 const char *p; 1900 int fd = -1; 1901 DumpState *s; 1902 bool detach_p = false; 1903 1904 if (runstate_check(RUN_STATE_INMIGRATE)) { 1905 error_setg(errp, "Dump not allowed during incoming migration."); 1906 return; 1907 } 1908 1909 /* if there is a dump in background, we should wait until the dump 1910 * finished */ 1911 if (qemu_system_dump_in_progress()) { 1912 error_setg(errp, "There is a dump in process, please wait."); 1913 return; 1914 } 1915 1916 /* 1917 * kdump-compressed format need the whole memory dumped, so paging or 1918 * filter is not supported here. 1919 */ 1920 if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) && 1921 (paging || has_begin || has_length)) { 1922 error_setg(errp, "kdump-compressed format doesn't support paging or " 1923 "filter"); 1924 return; 1925 } 1926 if (has_begin && !has_length) { 1927 error_setg(errp, QERR_MISSING_PARAMETER, "length"); 1928 return; 1929 } 1930 if (!has_begin && has_length) { 1931 error_setg(errp, QERR_MISSING_PARAMETER, "begin"); 1932 return; 1933 } 1934 if (has_detach) { 1935 detach_p = detach; 1936 } 1937 1938 /* check whether lzo/snappy is supported */ 1939 #ifndef CONFIG_LZO 1940 if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) { 1941 error_setg(errp, "kdump-lzo is not available now"); 1942 return; 1943 } 1944 #endif 1945 1946 #ifndef CONFIG_SNAPPY 1947 if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) { 1948 error_setg(errp, "kdump-snappy is not available now"); 1949 return; 1950 } 1951 #endif 1952 1953 #ifndef TARGET_X86_64 1954 if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) { 1955 error_setg(errp, "Windows dump is only available for x86-64"); 1956 return; 1957 } 1958 #endif 1959 1960 #if !defined(WIN32) 1961 if (strstart(file, "fd:", &p)) { 1962 fd = monitor_get_fd(monitor_cur(), p, errp); 1963 if (fd == -1) { 1964 return; 1965 } 1966 } 1967 #endif 1968 1969 if (strstart(file, "file:", &p)) { 1970 fd = qemu_open_old(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR); 1971 if (fd < 0) { 1972 error_setg_file_open(errp, errno, p); 1973 return; 1974 } 1975 } 1976 1977 if (fd == -1) { 1978 error_setg(errp, QERR_INVALID_PARAMETER, "protocol"); 1979 return; 1980 } 1981 1982 if (!dump_migration_blocker) { 1983 error_setg(&dump_migration_blocker, 1984 "Live migration disabled: dump-guest-memory in progress"); 1985 } 1986 1987 /* 1988 * Allows even for -only-migratable, but forbid migration during the 1989 * process of dump guest memory. 1990 */ 1991 if (migrate_add_blocker_internal(dump_migration_blocker, errp)) { 1992 /* Remember to release the fd before passing it over to dump state */ 1993 close(fd); 1994 return; 1995 } 1996 1997 s = &dump_state_global; 1998 dump_state_prepare(s); 1999 2000 dump_init(s, fd, has_format, format, paging, has_begin, 2001 begin, length, errp); 2002 if (*errp) { 2003 qatomic_set(&s->status, DUMP_STATUS_FAILED); 2004 return; 2005 } 2006 2007 if (detach_p) { 2008 /* detached dump */ 2009 s->detached = true; 2010 qemu_thread_create(&s->dump_thread, "dump_thread", dump_thread, 2011 s, QEMU_THREAD_DETACHED); 2012 } else { 2013 /* sync dump */ 2014 dump_process(s, errp); 2015 } 2016 } 2017 2018 DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp) 2019 { 2020 DumpGuestMemoryCapability *cap = 2021 g_new0(DumpGuestMemoryCapability, 1); 2022 DumpGuestMemoryFormatList **tail = &cap->formats; 2023 2024 /* elf is always available */ 2025 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_ELF); 2026 2027 /* kdump-zlib is always available */ 2028 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB); 2029 2030 /* add new item if kdump-lzo is available */ 2031 #ifdef CONFIG_LZO 2032 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO); 2033 #endif 2034 2035 /* add new item if kdump-snappy is available */ 2036 #ifdef CONFIG_SNAPPY 2037 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY); 2038 #endif 2039 2040 /* Windows dump is available only if target is x86_64 */ 2041 #ifdef TARGET_X86_64 2042 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_WIN_DMP); 2043 #endif 2044 2045 return cap; 2046 } 2047