/*
 * QEMU dump
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "elf.h"
#include "exec/hwaddr.h"
#include "monitor/monitor.h"
#include "sysemu/kvm.h"
#include "sysemu/dump.h"
#include "sysemu/memory_mapping.h"
#include "sysemu/runstate.h"
#include "sysemu/cpus.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-dump.h"
#include "qapi/qapi-events-dump.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "hw/misc/vmcoreinfo.h"
#include "migration/blocker.h"

#ifdef TARGET_X86_64
#include "win_dump.h"
#endif

#include <zlib.h>
#ifdef CONFIG_LZO
#include <lzo/lzo1x.h>
#endif
#ifdef CONFIG_SNAPPY
#include <snappy-c.h>
#endif
#ifndef ELF_MACHINE_UNAME
#define ELF_MACHINE_UNAME "Unknown"
#endif

#define MAX_GUEST_NOTE_SIZE (1 << 20) /* 1MB should be enough */

static Error *dump_migration_blocker;

#define ELF_NOTE_SIZE(hdr_size, name_size, desc_size)   \
    ((DIV_ROUND_UP((hdr_size), 4) +                     \
      DIV_ROUND_UP((name_size), 4) +                    \
      DIV_ROUND_UP((desc_size), 4)) * 4)
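
/*
 * For example, a note with an Elf64_Nhdr (12 bytes), the 5-byte name
 * "CORE" (including the trailing NUL) and a 9-byte descriptor occupies
 * ELF_NOTE_SIZE(12, 5, 9) = (3 + 2 + 3) * 4 = 32 bytes: each of the
 * three components is padded up to a 4-byte boundary.
 */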

uint16_t cpu_to_dump16(DumpState *s, uint16_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le16(val);
    } else {
        val = cpu_to_be16(val);
    }

    return val;
}

uint32_t cpu_to_dump32(DumpState *s, uint32_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le32(val);
    } else {
        val = cpu_to_be32(val);
    }

    return val;
}

uint64_t cpu_to_dump64(DumpState *s, uint64_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le64(val);
    } else {
        val = cpu_to_be64(val);
    }

    return val;
}

static int dump_cleanup(DumpState *s)
{
    guest_phys_blocks_free(&s->guest_phys_blocks);
    memory_mapping_list_free(&s->list);
    close(s->fd);
    g_free(s->guest_note);
    s->guest_note = NULL;
    if (s->resume) {
        if (s->detached) {
            qemu_mutex_lock_iothread();
        }
        vm_start();
        if (s->detached) {
            qemu_mutex_unlock_iothread();
        }
    }
    migrate_del_blocker(dump_migration_blocker);

    return 0;
}

static int fd_write_vmcore(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;
    size_t written_size;

    written_size = qemu_write_full(s->fd, buf, size);
    if (written_size != size) {
        return -errno;
    }

    return 0;
}

static void write_elf64_header(DumpState *s, Error **errp)
{
    Elf64_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf64_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS64;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header.e_phoff = cpu_to_dump64(s, sizeof(Elf64_Ehdr));
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
    if (s->have_section) {
        uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_to_dump64(s, shoff);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, 1);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "dump: failed to write elf header");
    }
}

static void write_elf32_header(DumpState *s, Error **errp)
{
    Elf32_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf32_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS32;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header.e_phoff = cpu_to_dump32(s, sizeof(Elf32_Ehdr));
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
    if (s->have_section) {
        uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_to_dump32(s, shoff);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, 1);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "dump: failed to write elf header");
    }
}

static void write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
                             int phdr_index, hwaddr offset,
                             hwaddr filesz, Error **errp)
{
    Elf64_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump64(s, offset);
    phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump64(s, filesz);
    phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length);
    /* if the mapping has no virtual address, fall back to the physical one */
    phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr) ?: phdr.p_paddr;

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}

static void write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                             int phdr_index, hwaddr offset,
                             hwaddr filesz, Error **errp)
{
    Elf32_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump32(s, offset);
    phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump32(s, filesz);
    phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length);
    /* if the mapping has no virtual address, fall back to the physical one */
    phdr.p_vaddr =
        cpu_to_dump32(s, memory_mapping->virt_addr) ?: phdr.p_paddr;

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}

static void write_elf64_note(DumpState *s, Error **errp)
{
    Elf64_Phdr phdr;
    hwaddr begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_NOTE);
    phdr.p_offset = cpu_to_dump64(s, begin);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_to_dump64(s, s->note_size);
    phdr.p_memsz = cpu_to_dump64(s, s->note_size);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}

static inline int cpu_index(CPUState *cpu)
{
    return cpu->cpu_index + 1;
}

static void write_guest_note(WriteCoreDumpFunction f, DumpState *s,
                             Error **errp)
{
    int ret;

    if (s->guest_note) {
        ret = f(s->guest_note, s->guest_note_size, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write guest note");
        }
    }
}

static void write_elf64_notes(WriteCoreDumpFunction f, DumpState *s,
                              Error **errp)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf64_note(f, cpu, id, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write elf notes");
            return;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf64_qemunote(f, cpu, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write CPU status");
            return;
        }
    }

    write_guest_note(f, s, errp);
}

static void write_elf32_note(DumpState *s, Error **errp)
{
    hwaddr begin = s->memory_offset - s->note_size;
    Elf32_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_NOTE);
    phdr.p_offset = cpu_to_dump32(s, begin);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_to_dump32(s, s->note_size);
    phdr.p_memsz = cpu_to_dump32(s, s->note_size);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}

static void write_elf32_notes(WriteCoreDumpFunction f, DumpState *s,
                              Error **errp)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf32_note(f, cpu, id, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write elf notes");
            return;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf32_qemunote(f, cpu, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write CPU status");
            return;
        }
    }

    write_guest_note(f, s, errp);
}

static void write_elf_section(DumpState *s, int type, Error **errp)
{
    Elf32_Shdr shdr32;
    Elf64_Shdr shdr64;
    int shdr_size;
    void *shdr;
    int ret;

    if (type == 0) {
        shdr_size = sizeof(Elf32_Shdr);
        memset(&shdr32, 0, shdr_size);
        shdr32.sh_info = cpu_to_dump32(s, s->sh_info);
        shdr = &shdr32;
    } else {
        shdr_size = sizeof(Elf64_Shdr);
        memset(&shdr64, 0, shdr_size);
        shdr64.sh_info = cpu_to_dump32(s, s->sh_info);
        shdr = &shdr64;
    }

    ret = fd_write_vmcore(shdr, shdr_size, s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write section header table");
    }
}
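
/*
 * Background on the single section header written above: when the real
 * program header count does not fit into the 16-bit e_phnum field, ELF
 * stores PN_XNUM (0xffff) there and keeps the true count in the sh_info
 * field of section header 0, which is exactly what write_elf_section()
 * emits; see the s->have_section handling in dump_init().
 */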

static void write_data(DumpState *s, void *buf, int length, Error **errp)
{
    int ret;

    ret = fd_write_vmcore(buf, length, s);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "dump: failed to save memory");
    } else {
        s->written_size += length;
    }
}

/* write the memory to vmcore. 1 page per I/O. */
static void write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
                         int64_t size, Error **errp)
{
    int64_t i;
    Error *local_err = NULL;

    for (i = 0; i < size / s->dump_info.page_size; i++) {
        write_data(s, block->host_addr + start + i * s->dump_info.page_size,
                   s->dump_info.page_size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    if ((size % s->dump_info.page_size) != 0) {
        write_data(s, block->host_addr + start + i * s->dump_info.page_size,
                   size % s->dump_info.page_size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

/* get the memory's offset and size in the vmcore */
static void get_offset_range(hwaddr phys_addr,
                             ram_addr_t mapping_length,
                             DumpState *s,
                             hwaddr *p_offset,
                             hwaddr *p_filesz)
{
    GuestPhysBlock *block;
    hwaddr offset = s->memory_offset;
    int64_t size_in_block, start;

    /* When the memory is not stored into vmcore, offset will be -1 */
    *p_offset = -1;
    *p_filesz = 0;

    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return;
        }
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin <= block->target_start) {
                start = block->target_start;
            } else {
                start = s->begin;
            }

            size_in_block = block->target_end - start;
            if (s->begin + s->length < block->target_end) {
                size_in_block -= block->target_end - (s->begin + s->length);
            }
        } else {
            start = block->target_start;
            size_in_block = block->target_end - block->target_start;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            *p_offset = phys_addr - start + offset;

            /* The offset range mapped from the vmcore file must not spill over
             * the GuestPhysBlock, clamp it. The rest of the mapping will be
             * zero-filled in memory at load time; see
             * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
             */
            *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
                        mapping_length :
                        size_in_block - (phys_addr - start);
            return;
        }

        offset += size_in_block;
    }
}
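
/*
 * For instance, with a 4 KiB page size, an 8 KiB mapping whose physical
 * address lies 4 KiB before the end of its GuestPhysBlock gets
 * *p_filesz = 4 KiB: only the part backed by the block is written to the
 * file, and the loader zero-fills the remaining 4 KiB of the mapping.
 */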

static void write_elf_loads(DumpState *s, Error **errp)
{
    hwaddr offset, filesz;
    MemoryMapping *memory_mapping;
    uint32_t phdr_index = 1;
    uint32_t max_index;
    Error *local_err = NULL;

    if (s->have_section) {
        max_index = s->sh_info;
    } else {
        max_index = s->phdr_num;
    }

    QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
        get_offset_range(memory_mapping->phys_addr,
                         memory_mapping->length,
                         s, &offset, &filesz);
        if (s->dump_info.d_class == ELFCLASS64) {
            write_elf64_load(s, memory_mapping, phdr_index++, offset,
                             filesz, &local_err);
        } else {
            write_elf32_load(s, memory_mapping, phdr_index++, offset,
                             filesz, &local_err);
        }

        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        if (phdr_index >= max_index) {
            break;
        }
    }
}

/* write elf header, PT_NOTE and elf note to vmcore. */
static void dump_begin(DumpState *s, Error **errp)
{
    Error *local_err = NULL;

    /*
     * the vmcore's format is:
     *   --------------
     *   | elf header |
     *   --------------
     *   |  PT_NOTE   |
     *   --------------
     *   |  PT_LOAD   |
     *   --------------
     *   |  ......    |
     *   --------------
     *   |  PT_LOAD   |
     *   --------------
     *   |  sec_hdr   |
     *   --------------
     *   |  elf note  |
     *   --------------
     *   |  memory    |
     *   --------------
     *
     * we only know where the memory is saved after we write elf note into
     * vmcore.
     */

    /* write elf header to vmcore */
    if (s->dump_info.d_class == ELFCLASS64) {
        write_elf64_header(s, &local_err);
    } else {
        write_elf32_header(s, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        /* write PT_NOTE to vmcore */
        write_elf64_note(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write all PT_LOAD to vmcore */
        write_elf_loads(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write section to vmcore */
        if (s->have_section) {
            write_elf_section(s, 1, &local_err);
            if (local_err) {
                error_propagate(errp, local_err);
                return;
            }
        }

        /* write notes to vmcore */
        write_elf64_notes(fd_write_vmcore, s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    } else {
        /* write PT_NOTE to vmcore */
        write_elf32_note(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write all PT_LOAD to vmcore */
        write_elf_loads(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write section to vmcore */
        if (s->have_section) {
            write_elf_section(s, 0, &local_err);
            if (local_err) {
                error_propagate(errp, local_err);
                return;
            }
        }

        /* write notes to vmcore */
        write_elf32_notes(fd_write_vmcore, s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static int get_next_block(DumpState *s, GuestPhysBlock *block)
{
    while (1) {
        block = QTAILQ_NEXT(block, next);
        if (!block) {
            /* no more block */
            return 1;
        }

        s->start = 0;
        s->next_block = block;
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin > block->target_start) {
                s->start = s->begin - block->target_start;
            }
        }

        return 0;
    }
}

/* write all memory to vmcore */
static void dump_iterate(DumpState *s, Error **errp)
{
    GuestPhysBlock *block;
    int64_t size;
    Error *local_err = NULL;

    do {
        block = s->next_block;

        size = block->target_end - block->target_start;
        if (s->has_filter) {
            size -= s->start;
            if (s->begin + s->length < block->target_end) {
                size -= block->target_end - (s->begin + s->length);
            }
        }
        write_memory(s, block, s->start, size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

    } while (!get_next_block(s, block));
}

static void create_vmcore(DumpState *s, Error **errp)
{
    Error *local_err = NULL;

    dump_begin(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    dump_iterate(s, errp);
}
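
/*
 * The kdump-compressed output produced below is wrapped in makedumpfile's
 * "flat" format: a start header carrying MAKEDUMPFILE_SIGNATURE, then a
 * sequence of MakedumpfileDataHeader records (big-endian offset and size)
 * each followed by its data, and finally an end marker whose offset and
 * buf_size both hold END_FLAG_FLAT_HEADER.
 */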

static int write_start_flat_header(int fd)
{
    MakedumpfileHeader *mh;
    int ret = 0;

    QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER);
    mh = g_malloc0(MAX_SIZE_MDF_HEADER);

    memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE,
           MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE));

    mh->type = cpu_to_be64(TYPE_FLAT_HEADER);
    mh->version = cpu_to_be64(VERSION_FLAT_HEADER);

    size_t written_size;
    written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER);
    if (written_size != MAX_SIZE_MDF_HEADER) {
        ret = -1;
    }

    g_free(mh);
    return ret;
}

static int write_end_flat_header(int fd)
{
    MakedumpfileDataHeader mdh;

    mdh.offset = END_FLAG_FLAT_HEADER;
    mdh.buf_size = END_FLAG_FLAT_HEADER;

    size_t written_size;
    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    return 0;
}

static int write_buffer(int fd, off_t offset, const void *buf, size_t size)
{
    size_t written_size;
    MakedumpfileDataHeader mdh;

    mdh.offset = cpu_to_be64(offset);
    mdh.buf_size = cpu_to_be64(size);

    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    written_size = qemu_write_full(fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}

static int buf_write_note(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;

    /* note_buf is not enough */
    if (s->note_buf_offset + size > s->note_size) {
        return -1;
    }

    memcpy(s->note_buf + s->note_buf_offset, buf, size);

    s->note_buf_offset += size;

    return 0;
}
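
/*
 * On-disk layout of a single ELF note, as parsed by get_note_sizes()
 * below:
 *
 *   +---------------------------------------+
 *   | ElfNN_Nhdr (n_namesz/n_descsz/n_type) |
 *   | name (n_namesz bytes, padded to 4)    |
 *   | desc (n_descsz bytes, padded to 4)    |
 *   +---------------------------------------+
 */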

/*
 * This function retrieves various sizes from an ELF note header.
 *
 * @note has to be a valid ELF note. The returned sizes are unmodified
 * (not padded or rounded up to be a multiple of 4).
 */
static void get_note_sizes(DumpState *s, const void *note,
                           uint64_t *note_head_size,
                           uint64_t *name_size,
                           uint64_t *desc_size)
{
    uint64_t note_head_sz;
    uint64_t name_sz;
    uint64_t desc_sz;

    if (s->dump_info.d_class == ELFCLASS64) {
        const Elf64_Nhdr *hdr = note;
        note_head_sz = sizeof(Elf64_Nhdr);
        name_sz = tswap64(hdr->n_namesz);
        desc_sz = tswap64(hdr->n_descsz);
    } else {
        const Elf32_Nhdr *hdr = note;
        note_head_sz = sizeof(Elf32_Nhdr);
        name_sz = tswap32(hdr->n_namesz);
        desc_sz = tswap32(hdr->n_descsz);
    }

    if (note_head_size) {
        *note_head_size = note_head_sz;
    }
    if (name_size) {
        *name_size = name_sz;
    }
    if (desc_size) {
        *desc_size = desc_sz;
    }
}

static bool note_name_equal(DumpState *s,
                            const uint8_t *note, const char *name)
{
    int len = strlen(name) + 1;
    uint64_t head_size, name_size;

    get_note_sizes(s, note, &head_size, &name_size, NULL);
    head_size = ROUND_UP(head_size, 4);

    return name_size == len && memcmp(note + head_size, name, len) == 0;
}
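
/*
 * Typical use: note_name_equal(s, s->guest_note, "VMCOREINFO") matches a
 * guest note whose name field holds the 11 bytes "VMCOREINFO\0"; the
 * comparison deliberately includes the terminating NUL.
 */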

/* write common header, sub header and elf note to vmcore */
static void create_header32(DumpState *s, Error **errp)
{
    DiskDumpHeader32 *dh = NULL;
    KdumpSubHeader32 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;
    Error *local_err = NULL;

    /* write common header, version 6 of the kdump-compressed format */
    size = sizeof(DiskDumpHeader32);
    dh = g_malloc0(size);

    memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN);
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = s->dump_info.page_size;
    dh->block_size = cpu_to_dump32(s, block_size);
    sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        error_setg(errp, "dump: failed to write disk dump header");
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader32);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump32(s, s->dump_info.phys_base);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    if (s->guest_note &&
        note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo;

        get_note_sizes(s, s->guest_note,
                       &hsize, &name_size, &size_vmcoreinfo_desc);
        offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size +
            (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4;
        kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo);
        kh->size_vmcoreinfo = cpu_to_dump32(s, size_vmcoreinfo_desc);
    }

    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump32(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        error_setg(errp, "dump: failed to write kdump sub header");
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    write_elf32_notes(buf_write_note, s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }
    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        error_setg(errp, "dump: failed to write notes");
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                            block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);
}
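
/*
 * The offset_vmcoreinfo computation above points at the descriptor of the
 * guest note, which is the last note in the buffer: skip the note header
 * and the name, each padded to 4 bytes. For a 32-bit dump with the name
 * "VMCOREINFO\0" that is 12 + 12 = 24 bytes past the start of the note.
 */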

/* write common header, sub header and elf note to vmcore */
static void create_header64(DumpState *s, Error **errp)
{
    DiskDumpHeader64 *dh = NULL;
    KdumpSubHeader64 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;
    Error *local_err = NULL;

    /* write common header, version 6 of the kdump-compressed format */
    size = sizeof(DiskDumpHeader64);
    dh = g_malloc0(size);

    memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN);
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = s->dump_info.page_size;
    dh->block_size = cpu_to_dump32(s, block_size);
    sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        error_setg(errp, "dump: failed to write disk dump header");
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader64);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump64(s, s->dump_info.phys_base);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    if (s->guest_note &&
        note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo;

        get_note_sizes(s, s->guest_note,
                       &hsize, &name_size, &size_vmcoreinfo_desc);
        offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size +
            (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4;
        kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo);
        kh->size_vmcoreinfo = cpu_to_dump64(s, size_vmcoreinfo_desc);
    }

    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump64(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        error_setg(errp, "dump: failed to write kdump sub header");
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    write_elf64_notes(buf_write_note, s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }

    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        error_setg(errp, "dump: failed to write notes");
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                            block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);
}

static void write_dump_header(DumpState *s, Error **errp)
{
    if (s->dump_info.d_class == ELFCLASS32) {
        create_header32(s, errp);
    } else {
        create_header64(s, errp);
    }
}

static size_t dump_bitmap_get_bufsize(DumpState *s)
{
    return s->dump_info.page_size;
}
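
/*
 * The two dump bitmaps are written one buffer (one target page) at a
 * time. With a 4 KiB page, one buffer covers 4096 * 8 = 32768 page-frame
 * bits, so a given pfn lives in chunk pfn / 32768 at byte
 * (pfn % 32768) / 8 within that chunk.
 */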

/*
 * set dump_bitmap sequentially. the bit before last_pfn is not allowed to be
 * rewritten, so if need to set the first bit, set last_pfn and pfn to 0.
 * set_dump_bitmap always leaves the most recently set bit unsynchronized;
 * setting (last bit + sizeof(buf) * 8) to 0 flushes the content of buf into
 * the vmcore, i.e. it synchronizes the unsynchronized bits into the vmcore.
 */
static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value,
                           uint8_t *buf, DumpState *s)
{
    off_t old_offset, new_offset;
    off_t offset_bitmap1, offset_bitmap2;
    uint32_t byte, bit;
    size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
    size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;

    /* should not set the previous place */
    assert(last_pfn <= pfn);

    /*
     * if the bit needed to be set is not cached in buf, flush the data in buf
     * to vmcore first.
     * making new_offset bigger than old_offset also syncs remaining data
     * into vmcore.
     */
    old_offset = bitmap_bufsize * (last_pfn / bits_per_buf);
    new_offset = bitmap_bufsize * (pfn / bits_per_buf);

    while (old_offset < new_offset) {
        /* calculate the offset and write dump_bitmap */
        offset_bitmap1 = s->offset_dump_bitmap + old_offset;
        if (write_buffer(s->fd, offset_bitmap1, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        /* dump level 1 is chosen, so 1st and 2nd bitmap are same */
        offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap +
                         old_offset;
        if (write_buffer(s->fd, offset_bitmap2, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        memset(buf, 0, bitmap_bufsize);
        old_offset += bitmap_bufsize;
    }

    /* get the exact place of the bit in the buf, and set it */
    byte = (pfn % bits_per_buf) / CHAR_BIT;
    bit = (pfn % bits_per_buf) % CHAR_BIT;
    if (value) {
        buf[byte] |= 1u << bit;
    } else {
        buf[byte] &= ~(1u << bit);
    }

    return 0;
}

static uint64_t dump_paddr_to_pfn(DumpState *s, uint64_t addr)
{
    int target_page_shift = ctz32(s->dump_info.page_size);

    return (addr >> target_page_shift) - ARCH_PFN_OFFSET;
}

static uint64_t dump_pfn_to_paddr(DumpState *s, uint64_t pfn)
{
    int target_page_shift = ctz32(s->dump_info.page_size);

    return (pfn + ARCH_PFN_OFFSET) << target_page_shift;
}
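
/*
 * Example: with a 4 KiB page size (page shift 12) and an ARCH_PFN_OFFSET
 * of 0, physical address 0x12345000 maps to pfn 0x12345 and back.
 */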

/*
 * Examine every page and return the page frame number and the address of the
 * page. bufptr can be NULL. note: the blocks here are supposed to reflect
 * guest-phys blocks, so block->target_start and block->target_end should be
 * integral multiples of the target page size.
 */
static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
                          uint8_t **bufptr, DumpState *s)
{
    GuestPhysBlock *block = *blockptr;
    hwaddr addr, target_page_mask = ~((hwaddr)s->dump_info.page_size - 1);
    uint8_t *buf;

    /* block == NULL means the start of the iteration */
    if (!block) {
        block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        *blockptr = block;
        assert((block->target_start & ~target_page_mask) == 0);
        assert((block->target_end & ~target_page_mask) == 0);
        *pfnptr = dump_paddr_to_pfn(s, block->target_start);
        if (bufptr) {
            *bufptr = block->host_addr;
        }
        return true;
    }

    *pfnptr = *pfnptr + 1;
    addr = dump_pfn_to_paddr(s, *pfnptr);

    if ((addr >= block->target_start) &&
        (addr + s->dump_info.page_size <= block->target_end)) {
        buf = block->host_addr + (addr - block->target_start);
    } else {
        /* the next page is in the next block */
        block = QTAILQ_NEXT(block, next);
        *blockptr = block;
        if (!block) {
            return false;
        }
        assert((block->target_start & ~target_page_mask) == 0);
        assert((block->target_end & ~target_page_mask) == 0);
        *pfnptr = dump_paddr_to_pfn(s, block->target_start);
        buf = block->host_addr;
    }

    if (bufptr) {
        *bufptr = buf;
    }

    return true;
}

static void write_dump_bitmap(DumpState *s, Error **errp)
{
    int ret = 0;
    uint64_t last_pfn, pfn;
    void *dump_bitmap_buf;
    size_t num_dumpable;
    GuestPhysBlock *block_iter = NULL;
    size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
    size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;

    /* dump_bitmap_buf is used to store dump_bitmap temporarily */
    dump_bitmap_buf = g_malloc0(bitmap_bufsize);

    num_dumpable = 0;
    last_pfn = 0;

    /*
     * examine memory page by page, and set the bit in dump_bitmap
     * corresponding to each existing page.
     */
    while (get_next_page(&block_iter, &pfn, NULL, s)) {
        ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to set dump_bitmap");
            goto out;
        }

        last_pfn = pfn;
        num_dumpable++;
    }

    /*
     * set_dump_bitmap will always leave the recently set bit unsynchronized.
     * Here we set the remaining bits from last_pfn to the end of the bitmap
     * buffer to 0. With those set, the unsynchronized bit will be
     * synchronized into the vmcore.
     */
    if (num_dumpable > 0) {
        ret = set_dump_bitmap(last_pfn, last_pfn + bits_per_buf, false,
                              dump_bitmap_buf, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to sync dump_bitmap");
            goto out;
        }
    }

    /* number of dumpable pages that will be dumped later */
    s->num_dumpable = num_dumpable;

out:
    g_free(dump_bitmap_buf);
}

static void prepare_data_cache(DataCache *data_cache, DumpState *s,
                               off_t offset)
{
    data_cache->fd = s->fd;
    data_cache->data_size = 0;
    data_cache->buf_size = 4 * dump_bitmap_get_bufsize(s);
    data_cache->buf = g_malloc0(data_cache->buf_size);
    data_cache->offset = offset;
}

static int write_cache(DataCache *dc, const void *buf, size_t size,
                       bool flag_sync)
{
    /*
     * dc->buf_size should not be less than size, otherwise dc will never be
     * enough
     */
    assert(size <= dc->buf_size);

    /*
     * if flag_sync is set, synchronize data in dc->buf into vmcore.
     * otherwise check if the space is enough for caching data in buf, if not,
     * write the data in dc->buf to dc->fd and reset dc->buf
     */
    if ((!flag_sync && dc->data_size + size > dc->buf_size) ||
        (flag_sync && dc->data_size > 0)) {
        if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) {
            return -1;
        }

        dc->offset += dc->data_size;
        dc->data_size = 0;
    }

    if (!flag_sync) {
        memcpy(dc->buf + dc->data_size, buf, size);
        dc->data_size += size;
    }

    return 0;
}

static void free_data_cache(DataCache *data_cache)
{
    g_free(data_cache->buf);
}
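
/*
 * Each compressor may expand incompressible input, so the scratch buffer
 * sized by get_len_buf_out() below must cover the algorithm's documented
 * worst case. For example, for a 4 KiB page the LZO bound works out to
 * 4096 + 4096 / 16 + 64 + 3 = 4419 bytes.
 */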

static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
{
    switch (flag_compress) {
    case DUMP_DH_COMPRESSED_ZLIB:
        return compressBound(page_size);

    case DUMP_DH_COMPRESSED_LZO:
        /*
         * LZO will expand incompressible data by a little amount. Please check
         * the following URL to see the expansion calculation:
         * http://www.oberhumer.com/opensource/lzo/lzofaq.php
         */
        return page_size + page_size / 16 + 64 + 3;

#ifdef CONFIG_SNAPPY
    case DUMP_DH_COMPRESSED_SNAPPY:
        return snappy_max_compressed_length(page_size);
#endif
    }
    return 0;
}

static void write_dump_pages(DumpState *s, Error **errp)
{
    int ret = 0;
    DataCache page_desc, page_data;
    size_t len_buf_out, size_out;
#ifdef CONFIG_LZO
    lzo_bytep wrkmem = NULL;
#endif
    uint8_t *buf_out = NULL;
    off_t offset_desc, offset_data;
    PageDescriptor pd, pd_zero;
    uint8_t *buf;
    GuestPhysBlock *block_iter = NULL;
    uint64_t pfn_iter;

    /* get offset of page_desc and page_data in dump file */
    offset_desc = s->offset_page;
    offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable;

    prepare_data_cache(&page_desc, s, offset_desc);
    prepare_data_cache(&page_data, s, offset_data);

    /* prepare buffer to store compressed data */
    len_buf_out = get_len_buf_out(s->dump_info.page_size, s->flag_compress);
    assert(len_buf_out != 0);

#ifdef CONFIG_LZO
    wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS);
#endif

    buf_out = g_malloc(len_buf_out);

    /*
     * init zero page's page_desc and page_data, because every zero page
     * uses the same page_data
     */
    pd_zero.size = cpu_to_dump32(s, s->dump_info.page_size);
    pd_zero.flags = cpu_to_dump32(s, 0);
    pd_zero.offset = cpu_to_dump64(s, offset_data);
    pd_zero.page_flags = cpu_to_dump64(s, 0);
    buf = g_malloc0(s->dump_info.page_size);
    ret = write_cache(&page_data, buf, s->dump_info.page_size, false);
    g_free(buf);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write page data (zero page)");
        goto out;
    }

    offset_data += s->dump_info.page_size;
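
    /*
     * From here on, two streams are filled in parallel through the
     * caches: page_desc receives one fixed-size PageDescriptor per dumped
     * page, and page_data receives the (possibly compressed) page
     * contents. All zero pages share the single zero-filled slot written
     * above, so their descriptors are identical copies of pd_zero.
     */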

    /*
     * dump memory to vmcore page by page. all zero pages reside in the
     * first page of the page section
     */
    while (get_next_page(&block_iter, &pfn_iter, &buf, s)) {
        /* check zero page */
        if (buffer_is_zero(buf, s->dump_info.page_size)) {
            ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor),
                              false);
            if (ret < 0) {
                error_setg(errp, "dump: failed to write page desc");
                goto out;
            }
        } else {
            /*
             * not zero page, then:
             * 1. compress the page
             * 2. write the compressed page into the cache of page_data
             * 3. get page desc of the compressed page and write it into the
             *    cache of page_desc
             *
             * only one compression format is used here, selected by
             * s->flag_compress. But when compression fails to work, we fall
             * back to saving the page in plaintext.
             */
            size_out = len_buf_out;
            if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) &&
                (compress2(buf_out, (uLongf *)&size_out, buf,
                           s->dump_info.page_size, Z_BEST_SPEED) == Z_OK) &&
                (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_ZLIB);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#ifdef CONFIG_LZO
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) &&
                       (lzo1x_1_compress(buf, s->dump_info.page_size, buf_out,
                                         (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) &&
                       (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_LZO);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#endif
#ifdef CONFIG_SNAPPY
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) &&
                       (snappy_compress((char *)buf, s->dump_info.page_size,
                                        (char *)buf_out, &size_out) == SNAPPY_OK) &&
                       (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_SNAPPY);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#endif
            } else {
                /*
                 * fall back to save in plaintext, size_out should be
                 * assigned the target's page size
                 */
                pd.flags = cpu_to_dump32(s, 0);
                size_out = s->dump_info.page_size;
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf,
                                  s->dump_info.page_size, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
            }

            /* get and write page desc here */
            pd.page_flags = cpu_to_dump64(s, 0);
            pd.offset = cpu_to_dump64(s, offset_data);
            offset_data += size_out;

            ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false);
            if (ret < 0) {
                error_setg(errp, "dump: failed to write page desc");
                goto out;
            }
        }
        s->written_size += s->dump_info.page_size;
    }

    ret = write_cache(&page_desc, NULL, 0, true);
    if (ret < 0) {
        error_setg(errp, "dump: failed to sync cache for page_desc");
        goto out;
    }
    ret = write_cache(&page_data, NULL, 0, true);
    if (ret < 0) {
        error_setg(errp, "dump: failed to sync cache for page_data");
        goto out;
    }

out:
    free_data_cache(&page_desc);
    free_data_cache(&page_data);

#ifdef CONFIG_LZO
    g_free(wrkmem);
#endif

    g_free(buf_out);
}

static void create_kdump_vmcore(DumpState *s, Error **errp)
{
    int ret;
    Error *local_err = NULL;

    /*
     * the kdump-compressed format is:
     *                                              File offset
     *  +------------------------------------------+ 0x0
     *  |    main header (struct disk_dump_header) |
     *  |------------------------------------------+ block 1
     *  |    sub header (struct kdump_sub_header)  |
     *  |------------------------------------------+ block 2
     *  |            1st-dump_bitmap               |
     *  |------------------------------------------+ block 2 + X blocks
     *  |            2nd-dump_bitmap               | (aligned by block)
     *  |------------------------------------------+ block 2 + 2 * X blocks
     *  |  page desc for pfn 0 (struct page_desc)  | (aligned by block)
     *  |  page desc for pfn 1 (struct page_desc)  |
     *  |                  :                       |
     *  |------------------------------------------| (not aligned by block)
     *  |         page data (pfn 0)                |
     *  |         page data (pfn 1)                |
     *  |                  :                       |
     *  +------------------------------------------+
     */

    ret = write_start_flat_header(s->fd);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write start flat header");
        return;
    }

    write_dump_header(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    write_dump_bitmap(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    write_dump_pages(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    ret = write_end_flat_header(s->fd);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write end flat header");
        return;
    }
}

static ram_addr_t get_start_block(DumpState *s)
{
    GuestPhysBlock *block;

    if (!s->has_filter) {
        s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        return 0;
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (block->target_start >= s->begin + s->length ||
            block->target_end <= s->begin) {
            /* This block is out of the range */
            continue;
        }

        s->next_block = block;
        if (s->begin > block->target_start) {
            s->start = s->begin - block->target_start;
        } else {
            s->start = 0;
        }
        return s->start;
    }

    return -1;
}

static void get_max_mapnr(DumpState *s)
{
    GuestPhysBlock *last_block;

    last_block = QTAILQ_LAST(&s->guest_phys_blocks.head);
    s->max_mapnr = dump_paddr_to_pfn(s, last_block->target_end);
}

static DumpState dump_state_global = { .status = DUMP_STATUS_NONE };

static void dump_state_prepare(DumpState *s)
{
    /* zero the struct, setting status to active */
    *s = (DumpState) { .status = DUMP_STATUS_ACTIVE };
}

bool qemu_system_dump_in_progress(void)
{
    DumpState *state = &dump_state_global;
    return (qatomic_read(&state->status) == DUMP_STATUS_ACTIVE);
}
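
/*
 * Dump status moves from DUMP_STATUS_NONE to DUMP_STATUS_ACTIVE in
 * dump_state_prepare(), and to DUMP_STATUS_COMPLETED or
 * DUMP_STATUS_FAILED in dump_process(); it is read and written with
 * qatomic operations because a detached dump runs in its own thread.
 */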

/* calculate total size of memory to be dumped (taking filter into
 * account.) */
static int64_t dump_calculate_size(DumpState *s)
{
    GuestPhysBlock *block;
    int64_t size = 0, total = 0, left = 0, right = 0;

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (s->has_filter) {
            /* calculate the overlapped region. */
            left = MAX(s->begin, block->target_start);
            right = MIN(s->begin + s->length, block->target_end);
            size = right - left;
            size = size > 0 ? size : 0;
        } else {
            /* count the whole region in */
            size = (block->target_end - block->target_start);
        }
        total += size;
    }

    return total;
}

static void vmcoreinfo_update_phys_base(DumpState *s)
{
    uint64_t size, note_head_size, name_size, phys_base;
    char **lines;
    uint8_t *vmci;
    size_t i;

    if (!note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        return;
    }

    get_note_sizes(s, s->guest_note, &note_head_size, &name_size, &size);
    note_head_size = ROUND_UP(note_head_size, 4);

    vmci = s->guest_note + note_head_size + ROUND_UP(name_size, 4);
    *(vmci + size) = '\0';

    lines = g_strsplit((char *)vmci, "\n", -1);
    for (i = 0; lines[i]; i++) {
        const char *prefix = NULL;

        if (s->dump_info.d_machine == EM_X86_64) {
            prefix = "NUMBER(phys_base)=";
        } else if (s->dump_info.d_machine == EM_AARCH64) {
            prefix = "NUMBER(PHYS_OFFSET)=";
        }

        if (prefix && g_str_has_prefix(lines[i], prefix)) {
            if (qemu_strtou64(lines[i] + strlen(prefix), NULL, 16,
                              &phys_base) < 0) {
                warn_report("Failed to read %s", prefix);
            } else {
                s->dump_info.phys_base = phys_base;
            }
            break;
        }
    }

    g_strfreev(lines);
}
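
/*
 * The VMCOREINFO descriptor is a set of newline-separated "KEY=value"
 * lines; a guest may expose, for example, a line such as
 * "NUMBER(phys_base)=<hex>", whose value the code above parses as
 * hexadecimal (the "<hex>" placeholder stands for a guest-provided
 * number).
 */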

static void dump_init(DumpState *s, int fd, bool has_format,
                      DumpGuestMemoryFormat format, bool paging, bool has_filter,
                      int64_t begin, int64_t length, Error **errp)
{
    VMCoreInfoState *vmci = vmcoreinfo_find();
    CPUState *cpu;
    int nr_cpus;
    Error *err = NULL;
    int ret;

    s->has_format = has_format;
    s->format = format;
    s->written_size = 0;

    /* kdump-compressed conflicts with paging and filter */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        assert(!paging && !has_filter);
    }

    if (runstate_is_running()) {
        vm_stop(RUN_STATE_SAVE_VM);
        s->resume = true;
    } else {
        s->resume = false;
    }

    /* If we use KVM, we should synchronize the registers before we get dump
     * info or physmap info.
     */
    cpu_synchronize_all_states();
    nr_cpus = 0;
    CPU_FOREACH(cpu) {
        nr_cpus++;
    }

    s->fd = fd;
    s->has_filter = has_filter;
    s->begin = begin;
    s->length = length;

    memory_mapping_list_init(&s->list);

    guest_phys_blocks_init(&s->guest_phys_blocks);
    guest_phys_blocks_append(&s->guest_phys_blocks);
    s->total_size = dump_calculate_size(s);
#ifdef DEBUG_DUMP_GUEST_MEMORY
    fprintf(stderr, "DUMP: total memory to dump: %lu\n", s->total_size);
#endif

    /* it does not make sense to dump non-existent memory */
    if (!s->total_size) {
        error_setg(errp, "dump: no guest memory to dump");
        goto cleanup;
    }

    s->start = get_start_block(s);
    if (s->start == -1) {
        error_setg(errp, QERR_INVALID_PARAMETER, "begin");
        goto cleanup;
    }

    /* get dump info: endian, class and architecture.
     * If the target architecture is not supported, cpu_get_dump_info() will
     * return -1.
     */
    ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
    if (ret < 0) {
        error_setg(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    if (!s->dump_info.page_size) {
        s->dump_info.page_size = TARGET_PAGE_SIZE;
    }

    s->note_size = cpu_get_note_size(s->dump_info.d_class,
                                     s->dump_info.d_machine, nr_cpus);
    if (s->note_size < 0) {
        error_setg(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    /*
     * The goal of this block is to (a) update the previously guessed
     * phys_base, (b) copy the guest note out of the guest.
     * Failure to do so is not fatal for dumping.
     */
    if (vmci) {
        uint64_t addr, note_head_size, name_size, desc_size;
        uint32_t size;
        uint16_t format;

        note_head_size = s->dump_info.d_class == ELFCLASS32 ?
            sizeof(Elf32_Nhdr) : sizeof(Elf64_Nhdr);

        format = le16_to_cpu(vmci->vmcoreinfo.guest_format);
        size = le32_to_cpu(vmci->vmcoreinfo.size);
        addr = le64_to_cpu(vmci->vmcoreinfo.paddr);
        if (!vmci->has_vmcoreinfo) {
            warn_report("guest note is not present");
        } else if (size < note_head_size || size > MAX_GUEST_NOTE_SIZE) {
            warn_report("guest note size is invalid: %" PRIu32, size);
        } else if (format != FW_CFG_VMCOREINFO_FORMAT_ELF) {
            warn_report("guest note format is unsupported: %" PRIu16, format);
        } else {
            s->guest_note = g_malloc(size + 1); /* +1 for adding \0 */
            cpu_physical_memory_read(addr, s->guest_note, size);

            get_note_sizes(s, s->guest_note, NULL, &name_size, &desc_size);
            s->guest_note_size = ELF_NOTE_SIZE(note_head_size, name_size,
                                               desc_size);
            if (name_size > MAX_GUEST_NOTE_SIZE ||
                desc_size > MAX_GUEST_NOTE_SIZE ||
                s->guest_note_size > size) {
                warn_report("Invalid guest note header");
                g_free(s->guest_note);
                s->guest_note = NULL;
            } else {
                vmcoreinfo_update_phys_base(s);
                s->note_size += s->guest_note_size;
            }
        }
    }

    /* get memory mapping */
    if (paging) {
        qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            goto cleanup;
        }
    } else {
        qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
    }

    s->nr_cpus = nr_cpus;

    get_max_mapnr(s);

    uint64_t tmp;
    tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT),
                       s->dump_info.page_size);
    s->len_dump_bitmap = tmp * s->dump_info.page_size;
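
    /*
     * Example: a 4 GiB guest with 4 KiB pages has max_mapnr = 1048576
     * page frames, needing 131072 bytes of bitmap; that is already a
     * multiple of the page size (32 pages), so len_dump_bitmap = 131072.
     * The file stores two such bitmaps back to back.
     */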

    /* init for kdump-compressed format */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        switch (format) {
        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
            s->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
#ifdef CONFIG_LZO
            if (lzo_init() != LZO_E_OK) {
                error_setg(errp, "failed to initialize the LZO library");
                goto cleanup;
            }
#endif
            s->flag_compress = DUMP_DH_COMPRESSED_LZO;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
            s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
            break;

        default:
            s->flag_compress = 0;
        }

        return;
    }

    if (s->has_filter) {
        memory_mapping_filter(&s->list, s->begin, s->length);
    }

    /*
     * calculate phdr_num
     *
     * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
     */
    s->phdr_num = 1; /* PT_NOTE */
    if (s->list.num < UINT16_MAX - 2) {
        s->phdr_num += s->list.num;
        s->have_section = false;
    } else {
        s->have_section = true;
        s->phdr_num = PN_XNUM;
        s->sh_info = 1; /* PT_NOTE */

        /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
        if (s->list.num <= UINT32_MAX - 1) {
            s->sh_info += s->list.num;
        } else {
            s->sh_info = UINT32_MAX;
        }
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->sh_info +
                               sizeof(Elf64_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
        }
    } else {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->sh_info +
                               sizeof(Elf32_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
        }
    }

    return;

cleanup:
    dump_cleanup(s);
}
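
/*
 * For a 64-bit dump without the section-header workaround, the
 * memory_offset computed above works out to, e.g. for one PT_NOTE plus
 * two PT_LOAD headers:
 *   64 (Elf64_Ehdr) + 3 * 56 (Elf64_Phdr) + note_size
 * so all headers and notes precede the memory contents in the file.
 */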

/* this operation might be time consuming. */
static void dump_process(DumpState *s, Error **errp)
{
    Error *local_err = NULL;
    DumpQueryResult *result = NULL;

    if (s->has_format && s->format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) {
#ifdef TARGET_X86_64
        create_win_dump(s, &local_err);
#endif
    } else if (s->has_format && s->format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        create_kdump_vmcore(s, &local_err);
    } else {
        create_vmcore(s, &local_err);
    }

    /* make sure status is written after written_size updates */
    smp_wmb();
    qatomic_set(&s->status,
                (local_err ? DUMP_STATUS_FAILED : DUMP_STATUS_COMPLETED));

    /* send DUMP_COMPLETED message (unconditionally) */
    result = qmp_query_dump(NULL);
    /* should never fail */
    assert(result);
    qapi_event_send_dump_completed(result, !!local_err, (local_err ?
                                   error_get_pretty(local_err) : NULL));
    qapi_free_DumpQueryResult(result);

    error_propagate(errp, local_err);
    dump_cleanup(s);
}

static void *dump_thread(void *data)
{
    DumpState *s = (DumpState *)data;
    dump_process(s, NULL);
    return NULL;
}

DumpQueryResult *qmp_query_dump(Error **errp)
{
    DumpQueryResult *result = g_new(DumpQueryResult, 1);
    DumpState *state = &dump_state_global;
    result->status = qatomic_read(&state->status);
    /* make sure we are reading status and written_size in order */
    smp_rmb();
    result->completed = state->written_size;
    result->total = state->total_size;
    return result;
}
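
/*
 * Illustrative QMP exchange while a detached dump is running (field names
 * follow DumpQueryResult above; the numbers are made up):
 *   -> { "execute": "query-dump" }
 *   <- { "return": { "status": "active", "completed": 1048576,
 *                    "total": 4294967296 } }
 */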

void qmp_dump_guest_memory(bool paging, const char *file,
                           bool has_detach, bool detach,
                           bool has_begin, int64_t begin, bool has_length,
                           int64_t length, bool has_format,
                           DumpGuestMemoryFormat format, Error **errp)
{
    const char *p;
    int fd = -1;
    DumpState *s;
    Error *local_err = NULL;
    bool detach_p = false;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Dump not allowed during incoming migration.");
        return;
    }

    /* if there is a dump in background, we should wait until the dump
     * has finished */
    if (qemu_system_dump_in_progress()) {
        error_setg(errp, "There is a dump in process, please wait.");
        return;
    }

    /*
     * the kdump-compressed format needs the whole memory dumped, so paging or
     * filter is not supported here.
     */
    if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) &&
        (paging || has_begin || has_length)) {
        error_setg(errp, "kdump-compressed format doesn't support paging or "
                         "filter");
        return;
    }
    if (has_begin && !has_length) {
        error_setg(errp, QERR_MISSING_PARAMETER, "length");
        return;
    }
    if (!has_begin && has_length) {
        error_setg(errp, QERR_MISSING_PARAMETER, "begin");
        return;
    }
    if (has_detach) {
        detach_p = detach;
    }

    /* check whether lzo/snappy is supported */
#ifndef CONFIG_LZO
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) {
        error_setg(errp, "kdump-lzo is not available now");
        return;
    }
#endif

#ifndef CONFIG_SNAPPY
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) {
        error_setg(errp, "kdump-snappy is not available now");
        return;
    }
#endif

#ifndef TARGET_X86_64
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) {
        error_setg(errp, "Windows dump is only available for x86-64");
        return;
    }
#endif

#if !defined(WIN32)
    if (strstart(file, "fd:", &p)) {
        fd = monitor_get_fd(monitor_cur(), p, errp);
        if (fd == -1) {
            return;
        }
    }
#endif

    if (strstart(file, "file:", &p)) {
        fd = qemu_open_old(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
        if (fd < 0) {
            error_setg_file_open(errp, errno, p);
            return;
        }
    }

    if (fd == -1) {
        error_setg(errp, QERR_INVALID_PARAMETER, "protocol");
        return;
    }

    if (!dump_migration_blocker) {
        error_setg(&dump_migration_blocker,
                   "Live migration disabled: dump-guest-memory in progress");
    }

    /*
     * Allows even for -only-migratable, but forbid migration during the
     * process of dump guest memory.
     */
    if (migrate_add_blocker_internal(dump_migration_blocker, errp)) {
        /* Remember to release the fd before passing it over to dump state */
        close(fd);
        return;
    }

    s = &dump_state_global;
    dump_state_prepare(s);

    dump_init(s, fd, has_format, format, paging, has_begin,
              begin, length, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        qatomic_set(&s->status, DUMP_STATUS_FAILED);
        return;
    }

    if (detach_p) {
        /* detached dump */
        s->detached = true;
        qemu_thread_create(&s->dump_thread, "dump_thread", dump_thread,
                           s, QEMU_THREAD_DETACHED);
    } else {
        /* sync dump */
        dump_process(s, errp);
    }
}

DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp)
{
    DumpGuestMemoryCapability *cap =
                                  g_new0(DumpGuestMemoryCapability, 1);
    DumpGuestMemoryFormatList **tail = &cap->formats;

    /* elf is always available */
    QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_ELF);

    /* kdump-zlib is always available */
    QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB);

    /* add new item if kdump-lzo is available */
#ifdef CONFIG_LZO
    QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO);
#endif

    /* add new item if kdump-snappy is available */
#ifdef CONFIG_SNAPPY
    QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY);
#endif

    /* Windows dump is available only if target is x86_64 */
#ifdef TARGET_X86_64
    QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_WIN_DMP);
#endif

    return cap;
}