1 /* 2 * QEMU dump 3 * 4 * Copyright Fujitsu, Corp. 2011, 2012 5 * 6 * Authors: 7 * Wen Congyang <wency@cn.fujitsu.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2 or later. 10 * See the COPYING file in the top-level directory. 11 * 12 */ 13 14 #include "qemu/osdep.h" 15 #include "qemu/cutils.h" 16 #include "elf.h" 17 #include "qemu/bswap.h" 18 #include "exec/target_page.h" 19 #include "monitor/monitor.h" 20 #include "sysemu/dump.h" 21 #include "sysemu/runstate.h" 22 #include "sysemu/cpus.h" 23 #include "qapi/error.h" 24 #include "qapi/qapi-commands-dump.h" 25 #include "qapi/qapi-events-dump.h" 26 #include "qapi/qmp/qerror.h" 27 #include "qemu/error-report.h" 28 #include "qemu/main-loop.h" 29 #include "hw/misc/vmcoreinfo.h" 30 #include "migration/blocker.h" 31 #include "hw/core/cpu.h" 32 #include "win_dump.h" 33 34 #include <zlib.h> 35 #ifdef CONFIG_LZO 36 #include <lzo/lzo1x.h> 37 #endif 38 #ifdef CONFIG_SNAPPY 39 #include <snappy-c.h> 40 #endif 41 #ifndef ELF_MACHINE_UNAME 42 #define ELF_MACHINE_UNAME "Unknown" 43 #endif 44 45 #define MAX_GUEST_NOTE_SIZE (1 << 20) /* 1MB should be enough */ 46 47 static Error *dump_migration_blocker; 48 49 #define ELF_NOTE_SIZE(hdr_size, name_size, desc_size) \ 50 ((DIV_ROUND_UP((hdr_size), 4) + \ 51 DIV_ROUND_UP((name_size), 4) + \ 52 DIV_ROUND_UP((desc_size), 4)) * 4) 53 54 static inline bool dump_is_64bit(DumpState *s) 55 { 56 return s->dump_info.d_class == ELFCLASS64; 57 } 58 59 static inline bool dump_has_filter(DumpState *s) 60 { 61 return s->filter_area_length > 0; 62 } 63 64 uint16_t cpu_to_dump16(DumpState *s, uint16_t val) 65 { 66 if (s->dump_info.d_endian == ELFDATA2LSB) { 67 val = cpu_to_le16(val); 68 } else { 69 val = cpu_to_be16(val); 70 } 71 72 return val; 73 } 74 75 uint32_t cpu_to_dump32(DumpState *s, uint32_t val) 76 { 77 if (s->dump_info.d_endian == ELFDATA2LSB) { 78 val = cpu_to_le32(val); 79 } else { 80 val = cpu_to_be32(val); 81 } 82 83 return val; 84 } 85 86 uint64_t cpu_to_dump64(DumpState *s, uint64_t val) 87 { 88 if (s->dump_info.d_endian == ELFDATA2LSB) { 89 val = cpu_to_le64(val); 90 } else { 91 val = cpu_to_be64(val); 92 } 93 94 return val; 95 } 96 97 static int dump_cleanup(DumpState *s) 98 { 99 guest_phys_blocks_free(&s->guest_phys_blocks); 100 memory_mapping_list_free(&s->list); 101 close(s->fd); 102 g_free(s->guest_note); 103 g_array_unref(s->string_table_buf); 104 s->guest_note = NULL; 105 if (s->resume) { 106 if (s->detached) { 107 qemu_mutex_lock_iothread(); 108 } 109 vm_start(); 110 if (s->detached) { 111 qemu_mutex_unlock_iothread(); 112 } 113 } 114 migrate_del_blocker(dump_migration_blocker); 115 116 return 0; 117 } 118 119 static int fd_write_vmcore(const void *buf, size_t size, void *opaque) 120 { 121 DumpState *s = opaque; 122 size_t written_size; 123 124 written_size = qemu_write_full(s->fd, buf, size); 125 if (written_size != size) { 126 return -errno; 127 } 128 129 return 0; 130 } 131 132 static void prepare_elf64_header(DumpState *s, Elf64_Ehdr *elf_header) 133 { 134 /* 135 * phnum in the elf header is 16 bit, if we have more segments we 136 * set phnum to PN_XNUM and write the real number of segments to a 137 * special section. 138 */ 139 uint16_t phnum = MIN(s->phdr_num, PN_XNUM); 140 141 memset(elf_header, 0, sizeof(Elf64_Ehdr)); 142 memcpy(elf_header, ELFMAG, SELFMAG); 143 elf_header->e_ident[EI_CLASS] = ELFCLASS64; 144 elf_header->e_ident[EI_DATA] = s->dump_info.d_endian; 145 elf_header->e_ident[EI_VERSION] = EV_CURRENT; 146 elf_header->e_type = cpu_to_dump16(s, ET_CORE); 147 elf_header->e_machine = cpu_to_dump16(s, s->dump_info.d_machine); 148 elf_header->e_version = cpu_to_dump32(s, EV_CURRENT); 149 elf_header->e_ehsize = cpu_to_dump16(s, sizeof(elf_header)); 150 elf_header->e_phoff = cpu_to_dump64(s, s->phdr_offset); 151 elf_header->e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr)); 152 elf_header->e_phnum = cpu_to_dump16(s, phnum); 153 elf_header->e_shoff = cpu_to_dump64(s, s->shdr_offset); 154 elf_header->e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr)); 155 elf_header->e_shnum = cpu_to_dump16(s, s->shdr_num); 156 elf_header->e_shstrndx = cpu_to_dump16(s, s->shdr_num - 1); 157 } 158 159 static void prepare_elf32_header(DumpState *s, Elf32_Ehdr *elf_header) 160 { 161 /* 162 * phnum in the elf header is 16 bit, if we have more segments we 163 * set phnum to PN_XNUM and write the real number of segments to a 164 * special section. 165 */ 166 uint16_t phnum = MIN(s->phdr_num, PN_XNUM); 167 168 memset(elf_header, 0, sizeof(Elf32_Ehdr)); 169 memcpy(elf_header, ELFMAG, SELFMAG); 170 elf_header->e_ident[EI_CLASS] = ELFCLASS32; 171 elf_header->e_ident[EI_DATA] = s->dump_info.d_endian; 172 elf_header->e_ident[EI_VERSION] = EV_CURRENT; 173 elf_header->e_type = cpu_to_dump16(s, ET_CORE); 174 elf_header->e_machine = cpu_to_dump16(s, s->dump_info.d_machine); 175 elf_header->e_version = cpu_to_dump32(s, EV_CURRENT); 176 elf_header->e_ehsize = cpu_to_dump16(s, sizeof(elf_header)); 177 elf_header->e_phoff = cpu_to_dump32(s, s->phdr_offset); 178 elf_header->e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr)); 179 elf_header->e_phnum = cpu_to_dump16(s, phnum); 180 elf_header->e_shoff = cpu_to_dump32(s, s->shdr_offset); 181 elf_header->e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr)); 182 elf_header->e_shnum = cpu_to_dump16(s, s->shdr_num); 183 elf_header->e_shstrndx = cpu_to_dump16(s, s->shdr_num - 1); 184 } 185 186 static void write_elf_header(DumpState *s, Error **errp) 187 { 188 Elf32_Ehdr elf32_header; 189 Elf64_Ehdr elf64_header; 190 size_t header_size; 191 void *header_ptr; 192 int ret; 193 194 /* The NULL header and the shstrtab are always defined */ 195 assert(s->shdr_num >= 2); 196 if (dump_is_64bit(s)) { 197 prepare_elf64_header(s, &elf64_header); 198 header_size = sizeof(elf64_header); 199 header_ptr = &elf64_header; 200 } else { 201 prepare_elf32_header(s, &elf32_header); 202 header_size = sizeof(elf32_header); 203 header_ptr = &elf32_header; 204 } 205 206 ret = fd_write_vmcore(header_ptr, header_size, s); 207 if (ret < 0) { 208 error_setg_errno(errp, -ret, "dump: failed to write elf header"); 209 } 210 } 211 212 static void write_elf64_load(DumpState *s, MemoryMapping *memory_mapping, 213 int phdr_index, hwaddr offset, 214 hwaddr filesz, Error **errp) 215 { 216 Elf64_Phdr phdr; 217 int ret; 218 219 memset(&phdr, 0, sizeof(Elf64_Phdr)); 220 phdr.p_type = cpu_to_dump32(s, PT_LOAD); 221 phdr.p_offset = cpu_to_dump64(s, offset); 222 phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr); 223 phdr.p_filesz = cpu_to_dump64(s, filesz); 224 phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length); 225 phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr) ?: phdr.p_paddr; 226 227 assert(memory_mapping->length >= filesz); 228 229 ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s); 230 if (ret < 0) { 231 error_setg_errno(errp, -ret, 232 "dump: failed to write program header table"); 233 } 234 } 235 236 static void write_elf32_load(DumpState *s, MemoryMapping *memory_mapping, 237 int phdr_index, hwaddr offset, 238 hwaddr filesz, Error **errp) 239 { 240 Elf32_Phdr phdr; 241 int ret; 242 243 memset(&phdr, 0, sizeof(Elf32_Phdr)); 244 phdr.p_type = cpu_to_dump32(s, PT_LOAD); 245 phdr.p_offset = cpu_to_dump32(s, offset); 246 phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr); 247 phdr.p_filesz = cpu_to_dump32(s, filesz); 248 phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length); 249 phdr.p_vaddr = 250 cpu_to_dump32(s, memory_mapping->virt_addr) ?: phdr.p_paddr; 251 252 assert(memory_mapping->length >= filesz); 253 254 ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s); 255 if (ret < 0) { 256 error_setg_errno(errp, -ret, 257 "dump: failed to write program header table"); 258 } 259 } 260 261 static void prepare_elf64_phdr_note(DumpState *s, Elf64_Phdr *phdr) 262 { 263 memset(phdr, 0, sizeof(*phdr)); 264 phdr->p_type = cpu_to_dump32(s, PT_NOTE); 265 phdr->p_offset = cpu_to_dump64(s, s->note_offset); 266 phdr->p_paddr = 0; 267 phdr->p_filesz = cpu_to_dump64(s, s->note_size); 268 phdr->p_memsz = cpu_to_dump64(s, s->note_size); 269 phdr->p_vaddr = 0; 270 } 271 272 static inline int cpu_index(CPUState *cpu) 273 { 274 return cpu->cpu_index + 1; 275 } 276 277 static void write_guest_note(WriteCoreDumpFunction f, DumpState *s, 278 Error **errp) 279 { 280 int ret; 281 282 if (s->guest_note) { 283 ret = f(s->guest_note, s->guest_note_size, s); 284 if (ret < 0) { 285 error_setg(errp, "dump: failed to write guest note"); 286 } 287 } 288 } 289 290 static void write_elf64_notes(WriteCoreDumpFunction f, DumpState *s, 291 Error **errp) 292 { 293 CPUState *cpu; 294 int ret; 295 int id; 296 297 CPU_FOREACH(cpu) { 298 id = cpu_index(cpu); 299 ret = cpu_write_elf64_note(f, cpu, id, s); 300 if (ret < 0) { 301 error_setg(errp, "dump: failed to write elf notes"); 302 return; 303 } 304 } 305 306 CPU_FOREACH(cpu) { 307 ret = cpu_write_elf64_qemunote(f, cpu, s); 308 if (ret < 0) { 309 error_setg(errp, "dump: failed to write CPU status"); 310 return; 311 } 312 } 313 314 write_guest_note(f, s, errp); 315 } 316 317 static void prepare_elf32_phdr_note(DumpState *s, Elf32_Phdr *phdr) 318 { 319 memset(phdr, 0, sizeof(*phdr)); 320 phdr->p_type = cpu_to_dump32(s, PT_NOTE); 321 phdr->p_offset = cpu_to_dump32(s, s->note_offset); 322 phdr->p_paddr = 0; 323 phdr->p_filesz = cpu_to_dump32(s, s->note_size); 324 phdr->p_memsz = cpu_to_dump32(s, s->note_size); 325 phdr->p_vaddr = 0; 326 } 327 328 static void write_elf32_notes(WriteCoreDumpFunction f, DumpState *s, 329 Error **errp) 330 { 331 CPUState *cpu; 332 int ret; 333 int id; 334 335 CPU_FOREACH(cpu) { 336 id = cpu_index(cpu); 337 ret = cpu_write_elf32_note(f, cpu, id, s); 338 if (ret < 0) { 339 error_setg(errp, "dump: failed to write elf notes"); 340 return; 341 } 342 } 343 344 CPU_FOREACH(cpu) { 345 ret = cpu_write_elf32_qemunote(f, cpu, s); 346 if (ret < 0) { 347 error_setg(errp, "dump: failed to write CPU status"); 348 return; 349 } 350 } 351 352 write_guest_note(f, s, errp); 353 } 354 355 static void write_elf_phdr_note(DumpState *s, Error **errp) 356 { 357 Elf32_Phdr phdr32; 358 Elf64_Phdr phdr64; 359 void *phdr; 360 size_t size; 361 int ret; 362 363 if (dump_is_64bit(s)) { 364 prepare_elf64_phdr_note(s, &phdr64); 365 size = sizeof(phdr64); 366 phdr = &phdr64; 367 } else { 368 prepare_elf32_phdr_note(s, &phdr32); 369 size = sizeof(phdr32); 370 phdr = &phdr32; 371 } 372 373 ret = fd_write_vmcore(phdr, size, s); 374 if (ret < 0) { 375 error_setg_errno(errp, -ret, 376 "dump: failed to write program header table"); 377 } 378 } 379 380 static void prepare_elf_section_hdr_zero(DumpState *s) 381 { 382 if (dump_is_64bit(s)) { 383 Elf64_Shdr *shdr64 = s->elf_section_hdrs; 384 385 shdr64->sh_info = cpu_to_dump32(s, s->phdr_num); 386 } else { 387 Elf32_Shdr *shdr32 = s->elf_section_hdrs; 388 389 shdr32->sh_info = cpu_to_dump32(s, s->phdr_num); 390 } 391 } 392 393 static void prepare_elf_section_hdr_string(DumpState *s, void *buff) 394 { 395 uint64_t index = s->string_table_buf->len; 396 const char strtab[] = ".shstrtab"; 397 Elf32_Shdr shdr32 = {}; 398 Elf64_Shdr shdr64 = {}; 399 int shdr_size; 400 void *shdr; 401 402 g_array_append_vals(s->string_table_buf, strtab, sizeof(strtab)); 403 if (dump_is_64bit(s)) { 404 shdr_size = sizeof(Elf64_Shdr); 405 shdr64.sh_type = SHT_STRTAB; 406 shdr64.sh_offset = s->section_offset + s->elf_section_data_size; 407 shdr64.sh_name = index; 408 shdr64.sh_size = s->string_table_buf->len; 409 shdr = &shdr64; 410 } else { 411 shdr_size = sizeof(Elf32_Shdr); 412 shdr32.sh_type = SHT_STRTAB; 413 shdr32.sh_offset = s->section_offset + s->elf_section_data_size; 414 shdr32.sh_name = index; 415 shdr32.sh_size = s->string_table_buf->len; 416 shdr = &shdr32; 417 } 418 memcpy(buff, shdr, shdr_size); 419 } 420 421 static bool prepare_elf_section_hdrs(DumpState *s, Error **errp) 422 { 423 size_t len, sizeof_shdr; 424 void *buff_hdr; 425 426 /* 427 * Section ordering: 428 * - HDR zero 429 * - Arch section hdrs 430 * - String table hdr 431 */ 432 sizeof_shdr = dump_is_64bit(s) ? sizeof(Elf64_Shdr) : sizeof(Elf32_Shdr); 433 len = sizeof_shdr * s->shdr_num; 434 s->elf_section_hdrs = g_malloc0(len); 435 buff_hdr = s->elf_section_hdrs; 436 437 /* 438 * The first section header is ALWAYS a special initial section 439 * header. 440 * 441 * The header should be 0 with one exception being that if 442 * phdr_num is PN_XNUM then the sh_info field contains the real 443 * number of segment entries. 444 * 445 * As we zero allocate the buffer we will only need to modify 446 * sh_info for the PN_XNUM case. 447 */ 448 if (s->phdr_num >= PN_XNUM) { 449 prepare_elf_section_hdr_zero(s); 450 } 451 buff_hdr += sizeof_shdr; 452 453 /* Add architecture defined section headers */ 454 if (s->dump_info.arch_sections_write_hdr_fn 455 && s->shdr_num > 2) { 456 buff_hdr += s->dump_info.arch_sections_write_hdr_fn(s, buff_hdr); 457 458 if (s->shdr_num >= SHN_LORESERVE) { 459 error_setg_errno(errp, EINVAL, 460 "dump: too many architecture defined sections"); 461 return false; 462 } 463 } 464 465 /* 466 * String table is the last section since strings are added via 467 * arch_sections_write_hdr(). 468 */ 469 prepare_elf_section_hdr_string(s, buff_hdr); 470 return true; 471 } 472 473 static void write_elf_section_headers(DumpState *s, Error **errp) 474 { 475 size_t sizeof_shdr = dump_is_64bit(s) ? sizeof(Elf64_Shdr) : sizeof(Elf32_Shdr); 476 int ret; 477 478 if (!prepare_elf_section_hdrs(s, errp)) { 479 return; 480 } 481 482 ret = fd_write_vmcore(s->elf_section_hdrs, s->shdr_num * sizeof_shdr, s); 483 if (ret < 0) { 484 error_setg_errno(errp, -ret, "dump: failed to write section headers"); 485 } 486 487 g_free(s->elf_section_hdrs); 488 } 489 490 static void write_elf_sections(DumpState *s, Error **errp) 491 { 492 int ret; 493 494 if (s->elf_section_data_size) { 495 /* Write architecture section data */ 496 ret = fd_write_vmcore(s->elf_section_data, 497 s->elf_section_data_size, s); 498 if (ret < 0) { 499 error_setg_errno(errp, -ret, 500 "dump: failed to write architecture section data"); 501 return; 502 } 503 } 504 505 /* Write string table */ 506 ret = fd_write_vmcore(s->string_table_buf->data, 507 s->string_table_buf->len, s); 508 if (ret < 0) { 509 error_setg_errno(errp, -ret, "dump: failed to write string table data"); 510 } 511 } 512 513 static void write_data(DumpState *s, void *buf, int length, Error **errp) 514 { 515 int ret; 516 517 ret = fd_write_vmcore(buf, length, s); 518 if (ret < 0) { 519 error_setg_errno(errp, -ret, "dump: failed to save memory"); 520 } else { 521 s->written_size += length; 522 } 523 } 524 525 /* write the memory to vmcore. 1 page per I/O. */ 526 static void write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start, 527 int64_t size, Error **errp) 528 { 529 ERRP_GUARD(); 530 int64_t i; 531 532 for (i = 0; i < size / s->dump_info.page_size; i++) { 533 write_data(s, block->host_addr + start + i * s->dump_info.page_size, 534 s->dump_info.page_size, errp); 535 if (*errp) { 536 return; 537 } 538 } 539 540 if ((size % s->dump_info.page_size) != 0) { 541 write_data(s, block->host_addr + start + i * s->dump_info.page_size, 542 size % s->dump_info.page_size, errp); 543 if (*errp) { 544 return; 545 } 546 } 547 } 548 549 /* get the memory's offset and size in the vmcore */ 550 static void get_offset_range(hwaddr phys_addr, 551 ram_addr_t mapping_length, 552 DumpState *s, 553 hwaddr *p_offset, 554 hwaddr *p_filesz) 555 { 556 GuestPhysBlock *block; 557 hwaddr offset = s->memory_offset; 558 int64_t size_in_block, start; 559 560 /* When the memory is not stored into vmcore, offset will be -1 */ 561 *p_offset = -1; 562 *p_filesz = 0; 563 564 if (dump_has_filter(s)) { 565 if (phys_addr < s->filter_area_begin || 566 phys_addr >= s->filter_area_begin + s->filter_area_length) { 567 return; 568 } 569 } 570 571 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) { 572 if (dump_has_filter(s)) { 573 if (block->target_start >= s->filter_area_begin + s->filter_area_length || 574 block->target_end <= s->filter_area_begin) { 575 /* This block is out of the range */ 576 continue; 577 } 578 579 if (s->filter_area_begin <= block->target_start) { 580 start = block->target_start; 581 } else { 582 start = s->filter_area_begin; 583 } 584 585 size_in_block = block->target_end - start; 586 if (s->filter_area_begin + s->filter_area_length < block->target_end) { 587 size_in_block -= block->target_end - (s->filter_area_begin + s->filter_area_length); 588 } 589 } else { 590 start = block->target_start; 591 size_in_block = block->target_end - block->target_start; 592 } 593 594 if (phys_addr >= start && phys_addr < start + size_in_block) { 595 *p_offset = phys_addr - start + offset; 596 597 /* The offset range mapped from the vmcore file must not spill over 598 * the GuestPhysBlock, clamp it. The rest of the mapping will be 599 * zero-filled in memory at load time; see 600 * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>. 601 */ 602 *p_filesz = phys_addr + mapping_length <= start + size_in_block ? 603 mapping_length : 604 size_in_block - (phys_addr - start); 605 return; 606 } 607 608 offset += size_in_block; 609 } 610 } 611 612 static void write_elf_phdr_loads(DumpState *s, Error **errp) 613 { 614 ERRP_GUARD(); 615 hwaddr offset, filesz; 616 MemoryMapping *memory_mapping; 617 uint32_t phdr_index = 1; 618 619 QTAILQ_FOREACH(memory_mapping, &s->list.head, next) { 620 get_offset_range(memory_mapping->phys_addr, 621 memory_mapping->length, 622 s, &offset, &filesz); 623 if (dump_is_64bit(s)) { 624 write_elf64_load(s, memory_mapping, phdr_index++, offset, 625 filesz, errp); 626 } else { 627 write_elf32_load(s, memory_mapping, phdr_index++, offset, 628 filesz, errp); 629 } 630 631 if (*errp) { 632 return; 633 } 634 635 if (phdr_index >= s->phdr_num) { 636 break; 637 } 638 } 639 } 640 641 static void write_elf_notes(DumpState *s, Error **errp) 642 { 643 if (dump_is_64bit(s)) { 644 write_elf64_notes(fd_write_vmcore, s, errp); 645 } else { 646 write_elf32_notes(fd_write_vmcore, s, errp); 647 } 648 } 649 650 /* write elf header, PT_NOTE and elf note to vmcore. */ 651 static void dump_begin(DumpState *s, Error **errp) 652 { 653 ERRP_GUARD(); 654 655 /* 656 * the vmcore's format is: 657 * -------------- 658 * | elf header | 659 * -------------- 660 * | sctn_hdr | 661 * -------------- 662 * | PT_NOTE | 663 * -------------- 664 * | PT_LOAD | 665 * -------------- 666 * | ...... | 667 * -------------- 668 * | PT_LOAD | 669 * -------------- 670 * | elf note | 671 * -------------- 672 * | memory | 673 * -------------- 674 * 675 * we only know where the memory is saved after we write elf note into 676 * vmcore. 677 */ 678 679 /* write elf header to vmcore */ 680 write_elf_header(s, errp); 681 if (*errp) { 682 return; 683 } 684 685 /* write section headers to vmcore */ 686 write_elf_section_headers(s, errp); 687 if (*errp) { 688 return; 689 } 690 691 /* write PT_NOTE to vmcore */ 692 write_elf_phdr_note(s, errp); 693 if (*errp) { 694 return; 695 } 696 697 /* write all PT_LOADs to vmcore */ 698 write_elf_phdr_loads(s, errp); 699 if (*errp) { 700 return; 701 } 702 703 /* write notes to vmcore */ 704 write_elf_notes(s, errp); 705 } 706 707 int64_t dump_filtered_memblock_size(GuestPhysBlock *block, 708 int64_t filter_area_start, 709 int64_t filter_area_length) 710 { 711 int64_t size, left, right; 712 713 /* No filter, return full size */ 714 if (!filter_area_length) { 715 return block->target_end - block->target_start; 716 } 717 718 /* calculate the overlapped region. */ 719 left = MAX(filter_area_start, block->target_start); 720 right = MIN(filter_area_start + filter_area_length, block->target_end); 721 size = right - left; 722 size = size > 0 ? size : 0; 723 724 return size; 725 } 726 727 int64_t dump_filtered_memblock_start(GuestPhysBlock *block, 728 int64_t filter_area_start, 729 int64_t filter_area_length) 730 { 731 if (filter_area_length) { 732 /* return -1 if the block is not within filter area */ 733 if (block->target_start >= filter_area_start + filter_area_length || 734 block->target_end <= filter_area_start) { 735 return -1; 736 } 737 738 if (filter_area_start > block->target_start) { 739 return filter_area_start - block->target_start; 740 } 741 } 742 743 return 0; 744 } 745 746 /* write all memory to vmcore */ 747 static void dump_iterate(DumpState *s, Error **errp) 748 { 749 ERRP_GUARD(); 750 GuestPhysBlock *block; 751 int64_t memblock_size, memblock_start; 752 753 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) { 754 memblock_start = dump_filtered_memblock_start(block, s->filter_area_begin, s->filter_area_length); 755 if (memblock_start == -1) { 756 continue; 757 } 758 759 memblock_size = dump_filtered_memblock_size(block, s->filter_area_begin, s->filter_area_length); 760 761 /* Write the memory to file */ 762 write_memory(s, block, memblock_start, memblock_size, errp); 763 if (*errp) { 764 return; 765 } 766 } 767 } 768 769 static void dump_end(DumpState *s, Error **errp) 770 { 771 int rc; 772 773 if (s->elf_section_data_size) { 774 s->elf_section_data = g_malloc0(s->elf_section_data_size); 775 } 776 777 /* Adds the architecture defined section data to s->elf_section_data */ 778 if (s->dump_info.arch_sections_write_fn && 779 s->elf_section_data_size) { 780 rc = s->dump_info.arch_sections_write_fn(s, s->elf_section_data); 781 if (rc) { 782 error_setg_errno(errp, rc, 783 "dump: failed to get arch section data"); 784 g_free(s->elf_section_data); 785 return; 786 } 787 } 788 789 /* write sections to vmcore */ 790 write_elf_sections(s, errp); 791 } 792 793 static void create_vmcore(DumpState *s, Error **errp) 794 { 795 ERRP_GUARD(); 796 797 dump_begin(s, errp); 798 if (*errp) { 799 return; 800 } 801 802 /* Iterate over memory and dump it to file */ 803 dump_iterate(s, errp); 804 if (*errp) { 805 return; 806 } 807 808 /* Write the section data */ 809 dump_end(s, errp); 810 } 811 812 static int write_start_flat_header(int fd) 813 { 814 MakedumpfileHeader *mh; 815 int ret = 0; 816 817 QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER); 818 mh = g_malloc0(MAX_SIZE_MDF_HEADER); 819 820 memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE, 821 MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE)); 822 823 mh->type = cpu_to_be64(TYPE_FLAT_HEADER); 824 mh->version = cpu_to_be64(VERSION_FLAT_HEADER); 825 826 size_t written_size; 827 written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER); 828 if (written_size != MAX_SIZE_MDF_HEADER) { 829 ret = -1; 830 } 831 832 g_free(mh); 833 return ret; 834 } 835 836 static int write_end_flat_header(int fd) 837 { 838 MakedumpfileDataHeader mdh; 839 840 mdh.offset = END_FLAG_FLAT_HEADER; 841 mdh.buf_size = END_FLAG_FLAT_HEADER; 842 843 size_t written_size; 844 written_size = qemu_write_full(fd, &mdh, sizeof(mdh)); 845 if (written_size != sizeof(mdh)) { 846 return -1; 847 } 848 849 return 0; 850 } 851 852 static int write_buffer(int fd, off_t offset, const void *buf, size_t size) 853 { 854 size_t written_size; 855 MakedumpfileDataHeader mdh; 856 857 mdh.offset = cpu_to_be64(offset); 858 mdh.buf_size = cpu_to_be64(size); 859 860 written_size = qemu_write_full(fd, &mdh, sizeof(mdh)); 861 if (written_size != sizeof(mdh)) { 862 return -1; 863 } 864 865 written_size = qemu_write_full(fd, buf, size); 866 if (written_size != size) { 867 return -1; 868 } 869 870 return 0; 871 } 872 873 static int buf_write_note(const void *buf, size_t size, void *opaque) 874 { 875 DumpState *s = opaque; 876 877 /* note_buf is not enough */ 878 if (s->note_buf_offset + size > s->note_size) { 879 return -1; 880 } 881 882 memcpy(s->note_buf + s->note_buf_offset, buf, size); 883 884 s->note_buf_offset += size; 885 886 return 0; 887 } 888 889 /* 890 * This function retrieves various sizes from an elf header. 891 * 892 * @note has to be a valid ELF note. The return sizes are unmodified 893 * (not padded or rounded up to be multiple of 4). 894 */ 895 static void get_note_sizes(DumpState *s, const void *note, 896 uint64_t *note_head_size, 897 uint64_t *name_size, 898 uint64_t *desc_size) 899 { 900 uint64_t note_head_sz; 901 uint64_t name_sz; 902 uint64_t desc_sz; 903 904 if (dump_is_64bit(s)) { 905 const Elf64_Nhdr *hdr = note; 906 note_head_sz = sizeof(Elf64_Nhdr); 907 name_sz = cpu_to_dump64(s, hdr->n_namesz); 908 desc_sz = cpu_to_dump64(s, hdr->n_descsz); 909 } else { 910 const Elf32_Nhdr *hdr = note; 911 note_head_sz = sizeof(Elf32_Nhdr); 912 name_sz = cpu_to_dump32(s, hdr->n_namesz); 913 desc_sz = cpu_to_dump32(s, hdr->n_descsz); 914 } 915 916 if (note_head_size) { 917 *note_head_size = note_head_sz; 918 } 919 if (name_size) { 920 *name_size = name_sz; 921 } 922 if (desc_size) { 923 *desc_size = desc_sz; 924 } 925 } 926 927 static bool note_name_equal(DumpState *s, 928 const uint8_t *note, const char *name) 929 { 930 int len = strlen(name) + 1; 931 uint64_t head_size, name_size; 932 933 get_note_sizes(s, note, &head_size, &name_size, NULL); 934 head_size = ROUND_UP(head_size, 4); 935 936 return name_size == len && memcmp(note + head_size, name, len) == 0; 937 } 938 939 /* write common header, sub header and elf note to vmcore */ 940 static void create_header32(DumpState *s, Error **errp) 941 { 942 ERRP_GUARD(); 943 DiskDumpHeader32 *dh = NULL; 944 KdumpSubHeader32 *kh = NULL; 945 size_t size; 946 uint32_t block_size; 947 uint32_t sub_hdr_size; 948 uint32_t bitmap_blocks; 949 uint32_t status = 0; 950 uint64_t offset_note; 951 952 /* write common header, the version of kdump-compressed format is 6th */ 953 size = sizeof(DiskDumpHeader32); 954 dh = g_malloc0(size); 955 956 memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN); 957 dh->header_version = cpu_to_dump32(s, 6); 958 block_size = s->dump_info.page_size; 959 dh->block_size = cpu_to_dump32(s, block_size); 960 sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size; 961 sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size); 962 dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size); 963 /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */ 964 dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX)); 965 dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus); 966 bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2; 967 dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks); 968 strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine)); 969 970 if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) { 971 status |= DUMP_DH_COMPRESSED_ZLIB; 972 } 973 #ifdef CONFIG_LZO 974 if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) { 975 status |= DUMP_DH_COMPRESSED_LZO; 976 } 977 #endif 978 #ifdef CONFIG_SNAPPY 979 if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) { 980 status |= DUMP_DH_COMPRESSED_SNAPPY; 981 } 982 #endif 983 dh->status = cpu_to_dump32(s, status); 984 985 if (write_buffer(s->fd, 0, dh, size) < 0) { 986 error_setg(errp, "dump: failed to write disk dump header"); 987 goto out; 988 } 989 990 /* write sub header */ 991 size = sizeof(KdumpSubHeader32); 992 kh = g_malloc0(size); 993 994 /* 64bit max_mapnr_64 */ 995 kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr); 996 kh->phys_base = cpu_to_dump32(s, s->dump_info.phys_base); 997 kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL); 998 999 offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size; 1000 if (s->guest_note && 1001 note_name_equal(s, s->guest_note, "VMCOREINFO")) { 1002 uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo; 1003 1004 get_note_sizes(s, s->guest_note, 1005 &hsize, &name_size, &size_vmcoreinfo_desc); 1006 offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size + 1007 (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4; 1008 kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo); 1009 kh->size_vmcoreinfo = cpu_to_dump32(s, size_vmcoreinfo_desc); 1010 } 1011 1012 kh->offset_note = cpu_to_dump64(s, offset_note); 1013 kh->note_size = cpu_to_dump32(s, s->note_size); 1014 1015 if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS * 1016 block_size, kh, size) < 0) { 1017 error_setg(errp, "dump: failed to write kdump sub header"); 1018 goto out; 1019 } 1020 1021 /* write note */ 1022 s->note_buf = g_malloc0(s->note_size); 1023 s->note_buf_offset = 0; 1024 1025 /* use s->note_buf to store notes temporarily */ 1026 write_elf32_notes(buf_write_note, s, errp); 1027 if (*errp) { 1028 goto out; 1029 } 1030 if (write_buffer(s->fd, offset_note, s->note_buf, 1031 s->note_size) < 0) { 1032 error_setg(errp, "dump: failed to write notes"); 1033 goto out; 1034 } 1035 1036 /* get offset of dump_bitmap */ 1037 s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) * 1038 block_size; 1039 1040 /* get offset of page */ 1041 s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) * 1042 block_size; 1043 1044 out: 1045 g_free(dh); 1046 g_free(kh); 1047 g_free(s->note_buf); 1048 } 1049 1050 /* write common header, sub header and elf note to vmcore */ 1051 static void create_header64(DumpState *s, Error **errp) 1052 { 1053 ERRP_GUARD(); 1054 DiskDumpHeader64 *dh = NULL; 1055 KdumpSubHeader64 *kh = NULL; 1056 size_t size; 1057 uint32_t block_size; 1058 uint32_t sub_hdr_size; 1059 uint32_t bitmap_blocks; 1060 uint32_t status = 0; 1061 uint64_t offset_note; 1062 1063 /* write common header, the version of kdump-compressed format is 6th */ 1064 size = sizeof(DiskDumpHeader64); 1065 dh = g_malloc0(size); 1066 1067 memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN); 1068 dh->header_version = cpu_to_dump32(s, 6); 1069 block_size = s->dump_info.page_size; 1070 dh->block_size = cpu_to_dump32(s, block_size); 1071 sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size; 1072 sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size); 1073 dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size); 1074 /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */ 1075 dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX)); 1076 dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus); 1077 bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2; 1078 dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks); 1079 strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine)); 1080 1081 if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) { 1082 status |= DUMP_DH_COMPRESSED_ZLIB; 1083 } 1084 #ifdef CONFIG_LZO 1085 if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) { 1086 status |= DUMP_DH_COMPRESSED_LZO; 1087 } 1088 #endif 1089 #ifdef CONFIG_SNAPPY 1090 if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) { 1091 status |= DUMP_DH_COMPRESSED_SNAPPY; 1092 } 1093 #endif 1094 dh->status = cpu_to_dump32(s, status); 1095 1096 if (write_buffer(s->fd, 0, dh, size) < 0) { 1097 error_setg(errp, "dump: failed to write disk dump header"); 1098 goto out; 1099 } 1100 1101 /* write sub header */ 1102 size = sizeof(KdumpSubHeader64); 1103 kh = g_malloc0(size); 1104 1105 /* 64bit max_mapnr_64 */ 1106 kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr); 1107 kh->phys_base = cpu_to_dump64(s, s->dump_info.phys_base); 1108 kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL); 1109 1110 offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size; 1111 if (s->guest_note && 1112 note_name_equal(s, s->guest_note, "VMCOREINFO")) { 1113 uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo; 1114 1115 get_note_sizes(s, s->guest_note, 1116 &hsize, &name_size, &size_vmcoreinfo_desc); 1117 offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size + 1118 (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4; 1119 kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo); 1120 kh->size_vmcoreinfo = cpu_to_dump64(s, size_vmcoreinfo_desc); 1121 } 1122 1123 kh->offset_note = cpu_to_dump64(s, offset_note); 1124 kh->note_size = cpu_to_dump64(s, s->note_size); 1125 1126 if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS * 1127 block_size, kh, size) < 0) { 1128 error_setg(errp, "dump: failed to write kdump sub header"); 1129 goto out; 1130 } 1131 1132 /* write note */ 1133 s->note_buf = g_malloc0(s->note_size); 1134 s->note_buf_offset = 0; 1135 1136 /* use s->note_buf to store notes temporarily */ 1137 write_elf64_notes(buf_write_note, s, errp); 1138 if (*errp) { 1139 goto out; 1140 } 1141 1142 if (write_buffer(s->fd, offset_note, s->note_buf, 1143 s->note_size) < 0) { 1144 error_setg(errp, "dump: failed to write notes"); 1145 goto out; 1146 } 1147 1148 /* get offset of dump_bitmap */ 1149 s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) * 1150 block_size; 1151 1152 /* get offset of page */ 1153 s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) * 1154 block_size; 1155 1156 out: 1157 g_free(dh); 1158 g_free(kh); 1159 g_free(s->note_buf); 1160 } 1161 1162 static void write_dump_header(DumpState *s, Error **errp) 1163 { 1164 if (dump_is_64bit(s)) { 1165 create_header64(s, errp); 1166 } else { 1167 create_header32(s, errp); 1168 } 1169 } 1170 1171 static size_t dump_bitmap_get_bufsize(DumpState *s) 1172 { 1173 return s->dump_info.page_size; 1174 } 1175 1176 /* 1177 * set dump_bitmap sequencely. the bit before last_pfn is not allowed to be 1178 * rewritten, so if need to set the first bit, set last_pfn and pfn to 0. 1179 * set_dump_bitmap will always leave the recently set bit un-sync. And setting 1180 * (last bit + sizeof(buf) * 8) to 0 will do flushing the content in buf into 1181 * vmcore, ie. synchronizing un-sync bit into vmcore. 1182 */ 1183 static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value, 1184 uint8_t *buf, DumpState *s) 1185 { 1186 off_t old_offset, new_offset; 1187 off_t offset_bitmap1, offset_bitmap2; 1188 uint32_t byte, bit; 1189 size_t bitmap_bufsize = dump_bitmap_get_bufsize(s); 1190 size_t bits_per_buf = bitmap_bufsize * CHAR_BIT; 1191 1192 /* should not set the previous place */ 1193 assert(last_pfn <= pfn); 1194 1195 /* 1196 * if the bit needed to be set is not cached in buf, flush the data in buf 1197 * to vmcore firstly. 1198 * making new_offset be bigger than old_offset can also sync remained data 1199 * into vmcore. 1200 */ 1201 old_offset = bitmap_bufsize * (last_pfn / bits_per_buf); 1202 new_offset = bitmap_bufsize * (pfn / bits_per_buf); 1203 1204 while (old_offset < new_offset) { 1205 /* calculate the offset and write dump_bitmap */ 1206 offset_bitmap1 = s->offset_dump_bitmap + old_offset; 1207 if (write_buffer(s->fd, offset_bitmap1, buf, 1208 bitmap_bufsize) < 0) { 1209 return -1; 1210 } 1211 1212 /* dump level 1 is chosen, so 1st and 2nd bitmap are same */ 1213 offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap + 1214 old_offset; 1215 if (write_buffer(s->fd, offset_bitmap2, buf, 1216 bitmap_bufsize) < 0) { 1217 return -1; 1218 } 1219 1220 memset(buf, 0, bitmap_bufsize); 1221 old_offset += bitmap_bufsize; 1222 } 1223 1224 /* get the exact place of the bit in the buf, and set it */ 1225 byte = (pfn % bits_per_buf) / CHAR_BIT; 1226 bit = (pfn % bits_per_buf) % CHAR_BIT; 1227 if (value) { 1228 buf[byte] |= 1u << bit; 1229 } else { 1230 buf[byte] &= ~(1u << bit); 1231 } 1232 1233 return 0; 1234 } 1235 1236 static uint64_t dump_paddr_to_pfn(DumpState *s, uint64_t addr) 1237 { 1238 int target_page_shift = ctz32(s->dump_info.page_size); 1239 1240 return (addr >> target_page_shift) - ARCH_PFN_OFFSET; 1241 } 1242 1243 static uint64_t dump_pfn_to_paddr(DumpState *s, uint64_t pfn) 1244 { 1245 int target_page_shift = ctz32(s->dump_info.page_size); 1246 1247 return (pfn + ARCH_PFN_OFFSET) << target_page_shift; 1248 } 1249 1250 /* 1251 * Return the page frame number and the page content in *bufptr. bufptr can be 1252 * NULL. If not NULL, *bufptr must contains a target page size of pre-allocated 1253 * memory. This is not necessarily the memory returned. 1254 */ 1255 static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr, 1256 uint8_t **bufptr, DumpState *s) 1257 { 1258 GuestPhysBlock *block = *blockptr; 1259 uint32_t page_size = s->dump_info.page_size; 1260 uint8_t *buf = NULL, *hbuf; 1261 hwaddr addr; 1262 1263 /* block == NULL means the start of the iteration */ 1264 if (!block) { 1265 block = QTAILQ_FIRST(&s->guest_phys_blocks.head); 1266 *blockptr = block; 1267 addr = block->target_start; 1268 *pfnptr = dump_paddr_to_pfn(s, addr); 1269 } else { 1270 *pfnptr += 1; 1271 addr = dump_pfn_to_paddr(s, *pfnptr); 1272 } 1273 assert(block != NULL); 1274 1275 while (1) { 1276 if (addr >= block->target_start && addr < block->target_end) { 1277 size_t n = MIN(block->target_end - addr, page_size - addr % page_size); 1278 hbuf = block->host_addr + (addr - block->target_start); 1279 if (!buf) { 1280 if (n == page_size) { 1281 /* this is a whole target page, go for it */ 1282 assert(addr % page_size == 0); 1283 buf = hbuf; 1284 break; 1285 } else if (bufptr) { 1286 assert(*bufptr); 1287 buf = *bufptr; 1288 memset(buf, 0, page_size); 1289 } else { 1290 return true; 1291 } 1292 } 1293 1294 memcpy(buf + addr % page_size, hbuf, n); 1295 addr += n; 1296 if (addr % page_size == 0) { 1297 /* we filled up the page */ 1298 break; 1299 } 1300 } else { 1301 /* the next page is in the next block */ 1302 *blockptr = block = QTAILQ_NEXT(block, next); 1303 if (!block) { 1304 break; 1305 } 1306 1307 addr = block->target_start; 1308 /* are we still in the same page? */ 1309 if (dump_paddr_to_pfn(s, addr) != *pfnptr) { 1310 if (buf) { 1311 /* no, but we already filled something earlier, return it */ 1312 break; 1313 } else { 1314 /* else continue from there */ 1315 *pfnptr = dump_paddr_to_pfn(s, addr); 1316 } 1317 } 1318 } 1319 } 1320 1321 if (bufptr) { 1322 *bufptr = buf; 1323 } 1324 1325 return buf != NULL; 1326 } 1327 1328 static void write_dump_bitmap(DumpState *s, Error **errp) 1329 { 1330 int ret = 0; 1331 uint64_t last_pfn, pfn; 1332 void *dump_bitmap_buf; 1333 size_t num_dumpable; 1334 GuestPhysBlock *block_iter = NULL; 1335 size_t bitmap_bufsize = dump_bitmap_get_bufsize(s); 1336 size_t bits_per_buf = bitmap_bufsize * CHAR_BIT; 1337 1338 /* dump_bitmap_buf is used to store dump_bitmap temporarily */ 1339 dump_bitmap_buf = g_malloc0(bitmap_bufsize); 1340 1341 num_dumpable = 0; 1342 last_pfn = 0; 1343 1344 /* 1345 * exam memory page by page, and set the bit in dump_bitmap corresponded 1346 * to the existing page. 1347 */ 1348 while (get_next_page(&block_iter, &pfn, NULL, s)) { 1349 ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s); 1350 if (ret < 0) { 1351 error_setg(errp, "dump: failed to set dump_bitmap"); 1352 goto out; 1353 } 1354 1355 last_pfn = pfn; 1356 num_dumpable++; 1357 } 1358 1359 /* 1360 * set_dump_bitmap will always leave the recently set bit un-sync. Here we 1361 * set the remaining bits from last_pfn to the end of the bitmap buffer to 1362 * 0. With those set, the un-sync bit will be synchronized into the vmcore. 1363 */ 1364 if (num_dumpable > 0) { 1365 ret = set_dump_bitmap(last_pfn, last_pfn + bits_per_buf, false, 1366 dump_bitmap_buf, s); 1367 if (ret < 0) { 1368 error_setg(errp, "dump: failed to sync dump_bitmap"); 1369 goto out; 1370 } 1371 } 1372 1373 /* number of dumpable pages that will be dumped later */ 1374 s->num_dumpable = num_dumpable; 1375 1376 out: 1377 g_free(dump_bitmap_buf); 1378 } 1379 1380 static void prepare_data_cache(DataCache *data_cache, DumpState *s, 1381 off_t offset) 1382 { 1383 data_cache->fd = s->fd; 1384 data_cache->data_size = 0; 1385 data_cache->buf_size = 4 * dump_bitmap_get_bufsize(s); 1386 data_cache->buf = g_malloc0(data_cache->buf_size); 1387 data_cache->offset = offset; 1388 } 1389 1390 static int write_cache(DataCache *dc, const void *buf, size_t size, 1391 bool flag_sync) 1392 { 1393 /* 1394 * dc->buf_size should not be less than size, otherwise dc will never be 1395 * enough 1396 */ 1397 assert(size <= dc->buf_size); 1398 1399 /* 1400 * if flag_sync is set, synchronize data in dc->buf into vmcore. 1401 * otherwise check if the space is enough for caching data in buf, if not, 1402 * write the data in dc->buf to dc->fd and reset dc->buf 1403 */ 1404 if ((!flag_sync && dc->data_size + size > dc->buf_size) || 1405 (flag_sync && dc->data_size > 0)) { 1406 if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) { 1407 return -1; 1408 } 1409 1410 dc->offset += dc->data_size; 1411 dc->data_size = 0; 1412 } 1413 1414 if (!flag_sync) { 1415 memcpy(dc->buf + dc->data_size, buf, size); 1416 dc->data_size += size; 1417 } 1418 1419 return 0; 1420 } 1421 1422 static void free_data_cache(DataCache *data_cache) 1423 { 1424 g_free(data_cache->buf); 1425 } 1426 1427 static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress) 1428 { 1429 switch (flag_compress) { 1430 case DUMP_DH_COMPRESSED_ZLIB: 1431 return compressBound(page_size); 1432 1433 case DUMP_DH_COMPRESSED_LZO: 1434 /* 1435 * LZO will expand incompressible data by a little amount. Please check 1436 * the following URL to see the expansion calculation: 1437 * http://www.oberhumer.com/opensource/lzo/lzofaq.php 1438 */ 1439 return page_size + page_size / 16 + 64 + 3; 1440 1441 #ifdef CONFIG_SNAPPY 1442 case DUMP_DH_COMPRESSED_SNAPPY: 1443 return snappy_max_compressed_length(page_size); 1444 #endif 1445 } 1446 return 0; 1447 } 1448 1449 static void write_dump_pages(DumpState *s, Error **errp) 1450 { 1451 int ret = 0; 1452 DataCache page_desc, page_data; 1453 size_t len_buf_out, size_out; 1454 #ifdef CONFIG_LZO 1455 lzo_bytep wrkmem = NULL; 1456 #endif 1457 uint8_t *buf_out = NULL; 1458 off_t offset_desc, offset_data; 1459 PageDescriptor pd, pd_zero; 1460 uint8_t *buf; 1461 GuestPhysBlock *block_iter = NULL; 1462 uint64_t pfn_iter; 1463 g_autofree uint8_t *page = NULL; 1464 1465 /* get offset of page_desc and page_data in dump file */ 1466 offset_desc = s->offset_page; 1467 offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable; 1468 1469 prepare_data_cache(&page_desc, s, offset_desc); 1470 prepare_data_cache(&page_data, s, offset_data); 1471 1472 /* prepare buffer to store compressed data */ 1473 len_buf_out = get_len_buf_out(s->dump_info.page_size, s->flag_compress); 1474 assert(len_buf_out != 0); 1475 1476 #ifdef CONFIG_LZO 1477 wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS); 1478 #endif 1479 1480 buf_out = g_malloc(len_buf_out); 1481 1482 /* 1483 * init zero page's page_desc and page_data, because every zero page 1484 * uses the same page_data 1485 */ 1486 pd_zero.size = cpu_to_dump32(s, s->dump_info.page_size); 1487 pd_zero.flags = cpu_to_dump32(s, 0); 1488 pd_zero.offset = cpu_to_dump64(s, offset_data); 1489 pd_zero.page_flags = cpu_to_dump64(s, 0); 1490 buf = g_malloc0(s->dump_info.page_size); 1491 ret = write_cache(&page_data, buf, s->dump_info.page_size, false); 1492 g_free(buf); 1493 if (ret < 0) { 1494 error_setg(errp, "dump: failed to write page data (zero page)"); 1495 goto out; 1496 } 1497 1498 offset_data += s->dump_info.page_size; 1499 page = g_malloc(s->dump_info.page_size); 1500 1501 /* 1502 * dump memory to vmcore page by page. zero page will all be resided in the 1503 * first page of page section 1504 */ 1505 for (buf = page; get_next_page(&block_iter, &pfn_iter, &buf, s); buf = page) { 1506 /* check zero page */ 1507 if (buffer_is_zero(buf, s->dump_info.page_size)) { 1508 ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor), 1509 false); 1510 if (ret < 0) { 1511 error_setg(errp, "dump: failed to write page desc"); 1512 goto out; 1513 } 1514 } else { 1515 /* 1516 * not zero page, then: 1517 * 1. compress the page 1518 * 2. write the compressed page into the cache of page_data 1519 * 3. get page desc of the compressed page and write it into the 1520 * cache of page_desc 1521 * 1522 * only one compression format will be used here, for 1523 * s->flag_compress is set. But when compression fails to work, 1524 * we fall back to save in plaintext. 1525 */ 1526 size_out = len_buf_out; 1527 if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) && 1528 (compress2(buf_out, (uLongf *)&size_out, buf, 1529 s->dump_info.page_size, Z_BEST_SPEED) == Z_OK) && 1530 (size_out < s->dump_info.page_size)) { 1531 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_ZLIB); 1532 pd.size = cpu_to_dump32(s, size_out); 1533 1534 ret = write_cache(&page_data, buf_out, size_out, false); 1535 if (ret < 0) { 1536 error_setg(errp, "dump: failed to write page data"); 1537 goto out; 1538 } 1539 #ifdef CONFIG_LZO 1540 } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) && 1541 (lzo1x_1_compress(buf, s->dump_info.page_size, buf_out, 1542 (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) && 1543 (size_out < s->dump_info.page_size)) { 1544 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_LZO); 1545 pd.size = cpu_to_dump32(s, size_out); 1546 1547 ret = write_cache(&page_data, buf_out, size_out, false); 1548 if (ret < 0) { 1549 error_setg(errp, "dump: failed to write page data"); 1550 goto out; 1551 } 1552 #endif 1553 #ifdef CONFIG_SNAPPY 1554 } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) && 1555 (snappy_compress((char *)buf, s->dump_info.page_size, 1556 (char *)buf_out, &size_out) == SNAPPY_OK) && 1557 (size_out < s->dump_info.page_size)) { 1558 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_SNAPPY); 1559 pd.size = cpu_to_dump32(s, size_out); 1560 1561 ret = write_cache(&page_data, buf_out, size_out, false); 1562 if (ret < 0) { 1563 error_setg(errp, "dump: failed to write page data"); 1564 goto out; 1565 } 1566 #endif 1567 } else { 1568 /* 1569 * fall back to save in plaintext, size_out should be 1570 * assigned the target's page size 1571 */ 1572 pd.flags = cpu_to_dump32(s, 0); 1573 size_out = s->dump_info.page_size; 1574 pd.size = cpu_to_dump32(s, size_out); 1575 1576 ret = write_cache(&page_data, buf, 1577 s->dump_info.page_size, false); 1578 if (ret < 0) { 1579 error_setg(errp, "dump: failed to write page data"); 1580 goto out; 1581 } 1582 } 1583 1584 /* get and write page desc here */ 1585 pd.page_flags = cpu_to_dump64(s, 0); 1586 pd.offset = cpu_to_dump64(s, offset_data); 1587 offset_data += size_out; 1588 1589 ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false); 1590 if (ret < 0) { 1591 error_setg(errp, "dump: failed to write page desc"); 1592 goto out; 1593 } 1594 } 1595 s->written_size += s->dump_info.page_size; 1596 } 1597 1598 ret = write_cache(&page_desc, NULL, 0, true); 1599 if (ret < 0) { 1600 error_setg(errp, "dump: failed to sync cache for page_desc"); 1601 goto out; 1602 } 1603 ret = write_cache(&page_data, NULL, 0, true); 1604 if (ret < 0) { 1605 error_setg(errp, "dump: failed to sync cache for page_data"); 1606 goto out; 1607 } 1608 1609 out: 1610 free_data_cache(&page_desc); 1611 free_data_cache(&page_data); 1612 1613 #ifdef CONFIG_LZO 1614 g_free(wrkmem); 1615 #endif 1616 1617 g_free(buf_out); 1618 } 1619 1620 static void create_kdump_vmcore(DumpState *s, Error **errp) 1621 { 1622 ERRP_GUARD(); 1623 int ret; 1624 1625 /* 1626 * the kdump-compressed format is: 1627 * File offset 1628 * +------------------------------------------+ 0x0 1629 * | main header (struct disk_dump_header) | 1630 * |------------------------------------------+ block 1 1631 * | sub header (struct kdump_sub_header) | 1632 * |------------------------------------------+ block 2 1633 * | 1st-dump_bitmap | 1634 * |------------------------------------------+ block 2 + X blocks 1635 * | 2nd-dump_bitmap | (aligned by block) 1636 * |------------------------------------------+ block 2 + 2 * X blocks 1637 * | page desc for pfn 0 (struct page_desc) | (aligned by block) 1638 * | page desc for pfn 1 (struct page_desc) | 1639 * | : | 1640 * |------------------------------------------| (not aligned by block) 1641 * | page data (pfn 0) | 1642 * | page data (pfn 1) | 1643 * | : | 1644 * +------------------------------------------+ 1645 */ 1646 1647 ret = write_start_flat_header(s->fd); 1648 if (ret < 0) { 1649 error_setg(errp, "dump: failed to write start flat header"); 1650 return; 1651 } 1652 1653 write_dump_header(s, errp); 1654 if (*errp) { 1655 return; 1656 } 1657 1658 write_dump_bitmap(s, errp); 1659 if (*errp) { 1660 return; 1661 } 1662 1663 write_dump_pages(s, errp); 1664 if (*errp) { 1665 return; 1666 } 1667 1668 ret = write_end_flat_header(s->fd); 1669 if (ret < 0) { 1670 error_setg(errp, "dump: failed to write end flat header"); 1671 return; 1672 } 1673 } 1674 1675 static int validate_start_block(DumpState *s) 1676 { 1677 GuestPhysBlock *block; 1678 1679 if (!dump_has_filter(s)) { 1680 return 0; 1681 } 1682 1683 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) { 1684 /* This block is out of the range */ 1685 if (block->target_start >= s->filter_area_begin + s->filter_area_length || 1686 block->target_end <= s->filter_area_begin) { 1687 continue; 1688 } 1689 return 0; 1690 } 1691 1692 return -1; 1693 } 1694 1695 static void get_max_mapnr(DumpState *s) 1696 { 1697 GuestPhysBlock *last_block; 1698 1699 last_block = QTAILQ_LAST(&s->guest_phys_blocks.head); 1700 s->max_mapnr = dump_paddr_to_pfn(s, last_block->target_end); 1701 } 1702 1703 static DumpState dump_state_global = { .status = DUMP_STATUS_NONE }; 1704 1705 static void dump_state_prepare(DumpState *s) 1706 { 1707 /* zero the struct, setting status to active */ 1708 *s = (DumpState) { .status = DUMP_STATUS_ACTIVE }; 1709 } 1710 1711 bool qemu_system_dump_in_progress(void) 1712 { 1713 DumpState *state = &dump_state_global; 1714 return (qatomic_read(&state->status) == DUMP_STATUS_ACTIVE); 1715 } 1716 1717 /* 1718 * calculate total size of memory to be dumped (taking filter into 1719 * account.) 1720 */ 1721 static int64_t dump_calculate_size(DumpState *s) 1722 { 1723 GuestPhysBlock *block; 1724 int64_t total = 0; 1725 1726 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) { 1727 total += dump_filtered_memblock_size(block, 1728 s->filter_area_begin, 1729 s->filter_area_length); 1730 } 1731 1732 return total; 1733 } 1734 1735 static void vmcoreinfo_update_phys_base(DumpState *s) 1736 { 1737 uint64_t size, note_head_size, name_size, phys_base; 1738 char **lines; 1739 uint8_t *vmci; 1740 size_t i; 1741 1742 if (!note_name_equal(s, s->guest_note, "VMCOREINFO")) { 1743 return; 1744 } 1745 1746 get_note_sizes(s, s->guest_note, ¬e_head_size, &name_size, &size); 1747 note_head_size = ROUND_UP(note_head_size, 4); 1748 1749 vmci = s->guest_note + note_head_size + ROUND_UP(name_size, 4); 1750 *(vmci + size) = '\0'; 1751 1752 lines = g_strsplit((char *)vmci, "\n", -1); 1753 for (i = 0; lines[i]; i++) { 1754 const char *prefix = NULL; 1755 1756 if (s->dump_info.d_machine == EM_X86_64) { 1757 prefix = "NUMBER(phys_base)="; 1758 } else if (s->dump_info.d_machine == EM_AARCH64) { 1759 prefix = "NUMBER(PHYS_OFFSET)="; 1760 } 1761 1762 if (prefix && g_str_has_prefix(lines[i], prefix)) { 1763 if (qemu_strtou64(lines[i] + strlen(prefix), NULL, 16, 1764 &phys_base) < 0) { 1765 warn_report("Failed to read %s", prefix); 1766 } else { 1767 s->dump_info.phys_base = phys_base; 1768 } 1769 break; 1770 } 1771 } 1772 1773 g_strfreev(lines); 1774 } 1775 1776 static void dump_init(DumpState *s, int fd, bool has_format, 1777 DumpGuestMemoryFormat format, bool paging, bool has_filter, 1778 int64_t begin, int64_t length, Error **errp) 1779 { 1780 ERRP_GUARD(); 1781 VMCoreInfoState *vmci = vmcoreinfo_find(); 1782 CPUState *cpu; 1783 int nr_cpus; 1784 int ret; 1785 1786 s->has_format = has_format; 1787 s->format = format; 1788 s->written_size = 0; 1789 1790 /* kdump-compressed is conflict with paging and filter */ 1791 if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) { 1792 assert(!paging && !has_filter); 1793 } 1794 1795 if (runstate_is_running()) { 1796 vm_stop(RUN_STATE_SAVE_VM); 1797 s->resume = true; 1798 } else { 1799 s->resume = false; 1800 } 1801 1802 /* If we use KVM, we should synchronize the registers before we get dump 1803 * info or physmap info. 1804 */ 1805 cpu_synchronize_all_states(); 1806 nr_cpus = 0; 1807 CPU_FOREACH(cpu) { 1808 nr_cpus++; 1809 } 1810 1811 s->fd = fd; 1812 if (has_filter && !length) { 1813 error_setg(errp, QERR_INVALID_PARAMETER, "length"); 1814 goto cleanup; 1815 } 1816 s->filter_area_begin = begin; 1817 s->filter_area_length = length; 1818 1819 /* First index is 0, it's the special null name */ 1820 s->string_table_buf = g_array_new(FALSE, TRUE, 1); 1821 /* 1822 * Allocate the null name, due to the clearing option set to true 1823 * it will be 0. 1824 */ 1825 g_array_set_size(s->string_table_buf, 1); 1826 1827 memory_mapping_list_init(&s->list); 1828 1829 guest_phys_blocks_init(&s->guest_phys_blocks); 1830 guest_phys_blocks_append(&s->guest_phys_blocks); 1831 s->total_size = dump_calculate_size(s); 1832 #ifdef DEBUG_DUMP_GUEST_MEMORY 1833 fprintf(stderr, "DUMP: total memory to dump: %lu\n", s->total_size); 1834 #endif 1835 1836 /* it does not make sense to dump non-existent memory */ 1837 if (!s->total_size) { 1838 error_setg(errp, "dump: no guest memory to dump"); 1839 goto cleanup; 1840 } 1841 1842 /* Is the filter filtering everything? */ 1843 if (validate_start_block(s) == -1) { 1844 error_setg(errp, QERR_INVALID_PARAMETER, "begin"); 1845 goto cleanup; 1846 } 1847 1848 /* get dump info: endian, class and architecture. 1849 * If the target architecture is not supported, cpu_get_dump_info() will 1850 * return -1. 1851 */ 1852 ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks); 1853 if (ret < 0) { 1854 error_setg(errp, 1855 "dumping guest memory is not supported on this target"); 1856 goto cleanup; 1857 } 1858 1859 if (!s->dump_info.page_size) { 1860 s->dump_info.page_size = qemu_target_page_size(); 1861 } 1862 1863 s->note_size = cpu_get_note_size(s->dump_info.d_class, 1864 s->dump_info.d_machine, nr_cpus); 1865 assert(s->note_size >= 0); 1866 1867 /* 1868 * The goal of this block is to (a) update the previously guessed 1869 * phys_base, (b) copy the guest note out of the guest. 1870 * Failure to do so is not fatal for dumping. 1871 */ 1872 if (vmci) { 1873 uint64_t addr, note_head_size, name_size, desc_size; 1874 uint32_t size; 1875 uint16_t format; 1876 1877 note_head_size = dump_is_64bit(s) ? 1878 sizeof(Elf64_Nhdr) : sizeof(Elf32_Nhdr); 1879 1880 format = le16_to_cpu(vmci->vmcoreinfo.guest_format); 1881 size = le32_to_cpu(vmci->vmcoreinfo.size); 1882 addr = le64_to_cpu(vmci->vmcoreinfo.paddr); 1883 if (!vmci->has_vmcoreinfo) { 1884 warn_report("guest note is not present"); 1885 } else if (size < note_head_size || size > MAX_GUEST_NOTE_SIZE) { 1886 warn_report("guest note size is invalid: %" PRIu32, size); 1887 } else if (format != FW_CFG_VMCOREINFO_FORMAT_ELF) { 1888 warn_report("guest note format is unsupported: %" PRIu16, format); 1889 } else { 1890 s->guest_note = g_malloc(size + 1); /* +1 for adding \0 */ 1891 cpu_physical_memory_read(addr, s->guest_note, size); 1892 1893 get_note_sizes(s, s->guest_note, NULL, &name_size, &desc_size); 1894 s->guest_note_size = ELF_NOTE_SIZE(note_head_size, name_size, 1895 desc_size); 1896 if (name_size > MAX_GUEST_NOTE_SIZE || 1897 desc_size > MAX_GUEST_NOTE_SIZE || 1898 s->guest_note_size > size) { 1899 warn_report("Invalid guest note header"); 1900 g_free(s->guest_note); 1901 s->guest_note = NULL; 1902 } else { 1903 vmcoreinfo_update_phys_base(s); 1904 s->note_size += s->guest_note_size; 1905 } 1906 } 1907 } 1908 1909 /* get memory mapping */ 1910 if (paging) { 1911 qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, errp); 1912 if (*errp) { 1913 goto cleanup; 1914 } 1915 } else { 1916 qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks); 1917 } 1918 1919 s->nr_cpus = nr_cpus; 1920 1921 get_max_mapnr(s); 1922 1923 uint64_t tmp; 1924 tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT), 1925 s->dump_info.page_size); 1926 s->len_dump_bitmap = tmp * s->dump_info.page_size; 1927 1928 /* init for kdump-compressed format */ 1929 if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) { 1930 switch (format) { 1931 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB: 1932 s->flag_compress = DUMP_DH_COMPRESSED_ZLIB; 1933 break; 1934 1935 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO: 1936 #ifdef CONFIG_LZO 1937 if (lzo_init() != LZO_E_OK) { 1938 error_setg(errp, "failed to initialize the LZO library"); 1939 goto cleanup; 1940 } 1941 #endif 1942 s->flag_compress = DUMP_DH_COMPRESSED_LZO; 1943 break; 1944 1945 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY: 1946 s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY; 1947 break; 1948 1949 default: 1950 s->flag_compress = 0; 1951 } 1952 1953 return; 1954 } 1955 1956 if (dump_has_filter(s)) { 1957 memory_mapping_filter(&s->list, s->filter_area_begin, s->filter_area_length); 1958 } 1959 1960 /* 1961 * The first section header is always a special one in which most 1962 * fields are 0. The section header string table is also always 1963 * set. 1964 */ 1965 s->shdr_num = 2; 1966 1967 /* 1968 * Adds the number of architecture sections to shdr_num and sets 1969 * elf_section_data_size so we know the offsets and sizes of all 1970 * parts. 1971 */ 1972 if (s->dump_info.arch_sections_add_fn) { 1973 s->dump_info.arch_sections_add_fn(s); 1974 } 1975 1976 /* 1977 * calculate shdr_num so we know the offsets and sizes of all 1978 * parts. 1979 * Calculate phdr_num 1980 * 1981 * The absolute maximum amount of phdrs is UINT32_MAX - 1 as 1982 * sh_info is 32 bit. There's special handling once we go over 1983 * UINT16_MAX - 1 but that is handled in the ehdr and section 1984 * code. 1985 */ 1986 s->phdr_num = 1; /* Reserve PT_NOTE */ 1987 if (s->list.num <= UINT32_MAX - 1) { 1988 s->phdr_num += s->list.num; 1989 } else { 1990 s->phdr_num = UINT32_MAX; 1991 } 1992 1993 /* 1994 * Now that the number of section and program headers is known we 1995 * can calculate the offsets of the headers and data. 1996 */ 1997 if (dump_is_64bit(s)) { 1998 s->shdr_offset = sizeof(Elf64_Ehdr); 1999 s->phdr_offset = s->shdr_offset + sizeof(Elf64_Shdr) * s->shdr_num; 2000 s->note_offset = s->phdr_offset + sizeof(Elf64_Phdr) * s->phdr_num; 2001 } else { 2002 s->shdr_offset = sizeof(Elf32_Ehdr); 2003 s->phdr_offset = s->shdr_offset + sizeof(Elf32_Shdr) * s->shdr_num; 2004 s->note_offset = s->phdr_offset + sizeof(Elf32_Phdr) * s->phdr_num; 2005 } 2006 s->memory_offset = s->note_offset + s->note_size; 2007 s->section_offset = s->memory_offset + s->total_size; 2008 2009 return; 2010 2011 cleanup: 2012 dump_cleanup(s); 2013 } 2014 2015 /* this operation might be time consuming. */ 2016 static void dump_process(DumpState *s, Error **errp) 2017 { 2018 ERRP_GUARD(); 2019 DumpQueryResult *result = NULL; 2020 2021 if (s->has_format && s->format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) { 2022 create_win_dump(s, errp); 2023 } else if (s->has_format && s->format != DUMP_GUEST_MEMORY_FORMAT_ELF) { 2024 create_kdump_vmcore(s, errp); 2025 } else { 2026 create_vmcore(s, errp); 2027 } 2028 2029 /* make sure status is written after written_size updates */ 2030 smp_wmb(); 2031 qatomic_set(&s->status, 2032 (*errp ? DUMP_STATUS_FAILED : DUMP_STATUS_COMPLETED)); 2033 2034 /* send DUMP_COMPLETED message (unconditionally) */ 2035 result = qmp_query_dump(NULL); 2036 /* should never fail */ 2037 assert(result); 2038 qapi_event_send_dump_completed(result, 2039 *errp ? error_get_pretty(*errp) : NULL); 2040 qapi_free_DumpQueryResult(result); 2041 2042 dump_cleanup(s); 2043 } 2044 2045 static void *dump_thread(void *data) 2046 { 2047 DumpState *s = (DumpState *)data; 2048 dump_process(s, NULL); 2049 return NULL; 2050 } 2051 2052 DumpQueryResult *qmp_query_dump(Error **errp) 2053 { 2054 DumpQueryResult *result = g_new(DumpQueryResult, 1); 2055 DumpState *state = &dump_state_global; 2056 result->status = qatomic_read(&state->status); 2057 /* make sure we are reading status and written_size in order */ 2058 smp_rmb(); 2059 result->completed = state->written_size; 2060 result->total = state->total_size; 2061 return result; 2062 } 2063 2064 void qmp_dump_guest_memory(bool paging, const char *file, 2065 bool has_detach, bool detach, 2066 bool has_begin, int64_t begin, bool has_length, 2067 int64_t length, bool has_format, 2068 DumpGuestMemoryFormat format, Error **errp) 2069 { 2070 ERRP_GUARD(); 2071 const char *p; 2072 int fd = -1; 2073 DumpState *s; 2074 bool detach_p = false; 2075 2076 if (runstate_check(RUN_STATE_INMIGRATE)) { 2077 error_setg(errp, "Dump not allowed during incoming migration."); 2078 return; 2079 } 2080 2081 /* if there is a dump in background, we should wait until the dump 2082 * finished */ 2083 if (qemu_system_dump_in_progress()) { 2084 error_setg(errp, "There is a dump in process, please wait."); 2085 return; 2086 } 2087 2088 /* 2089 * kdump-compressed format need the whole memory dumped, so paging or 2090 * filter is not supported here. 2091 */ 2092 if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) && 2093 (paging || has_begin || has_length)) { 2094 error_setg(errp, "kdump-compressed format doesn't support paging or " 2095 "filter"); 2096 return; 2097 } 2098 if (has_begin && !has_length) { 2099 error_setg(errp, QERR_MISSING_PARAMETER, "length"); 2100 return; 2101 } 2102 if (!has_begin && has_length) { 2103 error_setg(errp, QERR_MISSING_PARAMETER, "begin"); 2104 return; 2105 } 2106 if (has_detach) { 2107 detach_p = detach; 2108 } 2109 2110 /* check whether lzo/snappy is supported */ 2111 #ifndef CONFIG_LZO 2112 if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) { 2113 error_setg(errp, "kdump-lzo is not available now"); 2114 return; 2115 } 2116 #endif 2117 2118 #ifndef CONFIG_SNAPPY 2119 if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) { 2120 error_setg(errp, "kdump-snappy is not available now"); 2121 return; 2122 } 2123 #endif 2124 2125 if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP 2126 && !win_dump_available(errp)) { 2127 return; 2128 } 2129 2130 #if !defined(WIN32) 2131 if (strstart(file, "fd:", &p)) { 2132 fd = monitor_get_fd(monitor_cur(), p, errp); 2133 if (fd == -1) { 2134 return; 2135 } 2136 } 2137 #endif 2138 2139 if (strstart(file, "file:", &p)) { 2140 fd = qemu_open_old(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR); 2141 if (fd < 0) { 2142 error_setg_file_open(errp, errno, p); 2143 return; 2144 } 2145 } 2146 2147 if (fd == -1) { 2148 error_setg(errp, QERR_INVALID_PARAMETER, "protocol"); 2149 return; 2150 } 2151 2152 if (!dump_migration_blocker) { 2153 error_setg(&dump_migration_blocker, 2154 "Live migration disabled: dump-guest-memory in progress"); 2155 } 2156 2157 /* 2158 * Allows even for -only-migratable, but forbid migration during the 2159 * process of dump guest memory. 2160 */ 2161 if (migrate_add_blocker_internal(dump_migration_blocker, errp)) { 2162 /* Remember to release the fd before passing it over to dump state */ 2163 close(fd); 2164 return; 2165 } 2166 2167 s = &dump_state_global; 2168 dump_state_prepare(s); 2169 2170 dump_init(s, fd, has_format, format, paging, has_begin, 2171 begin, length, errp); 2172 if (*errp) { 2173 qatomic_set(&s->status, DUMP_STATUS_FAILED); 2174 return; 2175 } 2176 2177 if (detach_p) { 2178 /* detached dump */ 2179 s->detached = true; 2180 qemu_thread_create(&s->dump_thread, "dump_thread", dump_thread, 2181 s, QEMU_THREAD_DETACHED); 2182 } else { 2183 /* sync dump */ 2184 dump_process(s, errp); 2185 } 2186 } 2187 2188 DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp) 2189 { 2190 DumpGuestMemoryCapability *cap = 2191 g_new0(DumpGuestMemoryCapability, 1); 2192 DumpGuestMemoryFormatList **tail = &cap->formats; 2193 2194 /* elf is always available */ 2195 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_ELF); 2196 2197 /* kdump-zlib is always available */ 2198 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB); 2199 2200 /* add new item if kdump-lzo is available */ 2201 #ifdef CONFIG_LZO 2202 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO); 2203 #endif 2204 2205 /* add new item if kdump-snappy is available */ 2206 #ifdef CONFIG_SNAPPY 2207 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY); 2208 #endif 2209 2210 if (win_dump_available(NULL)) { 2211 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_WIN_DMP); 2212 } 2213 2214 return cap; 2215 } 2216