Lines matching "+full:segment +full:-no-remap" (fs/proc/vmcore.c)
1 // SPDX-License-Identifier: GPL-2.0-only
74 INIT_LIST_HEAD(&cb->next); in register_vmcore_cb()
76 list_add_tail(&cb->next, &vmcore_cb_list); in register_vmcore_cb()
90 list_del_rcu(&cb->next); in unregister_vmcore_cb()
111 if (unlikely(!cb->pfn_is_ram)) in pfn_is_ram()
113 ret = cb->pfn_is_ram(cb, pfn); in pfn_is_ram()
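
Lines 74-113 above are the vmcore callback machinery: a driver registers a struct vmcore_cb (declared in include/linux/crash_dump.h) whose pfn_is_ram hook reports pfns that should read back as zeroes, and pfn_is_ram() polls each registered callback in turn. A minimal sketch of a registrant; my_pfn_backed() and the module names are hypothetical stand-ins for the driver's own bookkeeping:

    #include <linux/crash_dump.h>
    #include <linux/module.h>

    /* Hypothetical bookkeeping: does this pfn hold real data? */
    static bool my_pfn_backed(unsigned long pfn)
    {
            return true;    /* a real driver consults its own state */
    }

    static bool my_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
    {
            /* Returning false makes /proc/vmcore substitute zeroes
             * for this pfn instead of reading old memory. */
            return my_pfn_backed(pfn);
    }

    static struct vmcore_cb my_vmcore_cb = {
            .pfn_is_ram = my_pfn_is_ram,
    };

    static int __init my_init(void)
    {
            register_vmcore_cb(&my_vmcore_cb);      /* line 76: list_add_tail */
            return 0;
    }

    static void __exit my_exit(void)
    {
            unregister_vmcore_cb(&my_vmcore_cb);    /* line 90: list_del_rcu */
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");
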
147 if (count > (PAGE_SIZE - offset)) in read_from_oldmem()
148 nr_bytes = PAGE_SIZE - offset; in read_from_oldmem()
166 return -EFAULT; in read_from_oldmem()
170 count -= nr_bytes; in read_from_oldmem()
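
Lines 147-170 are read_from_oldmem()'s page-granular copy loop: each pass is clamped so it never crosses a page boundary, and only the first page can start at a non-zero offset. The same arithmetic in isolation; copy_page_chunk() is an illustrative stand-in for the copy into the output iterator:

    /* Stand-in for copying nr_bytes from (pfn, offset) to the caller. */
    static void copy_page_chunk(unsigned long pfn, size_t offset, size_t nr_bytes);

    static size_t read_chunks(unsigned long pfn, size_t offset, size_t count)
    {
            size_t read = 0;

            while (count) {
                    size_t nr_bytes = count;

                    if (nr_bytes > PAGE_SIZE - offset)      /* lines 147-148: */
                            nr_bytes = PAGE_SIZE - offset;  /* clamp to page end */

                    copy_page_chunk(pfn, offset, nr_bytes);

                    read += nr_bytes;
                    count -= nr_bytes;                      /* line 170 */
                    offset = 0;             /* later pages start at offset 0 */
                    pfn++;
            }
            return read;
    }
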
252 if (start < offset + dump->size) { in vmcoredd_copy_dumps()
253 tsz = min(offset + (u64)dump->size - start, (u64)size); in vmcoredd_copy_dumps()
254 buf = dump->buf + start - offset; in vmcoredd_copy_dumps()
256 ret = -EFAULT; in vmcoredd_copy_dumps()
260 size -= tsz; in vmcoredd_copy_dumps()
267 offset += dump->size; in vmcoredd_copy_dumps()
287 if (start < offset + dump->size) { in vmcoredd_mmap_dumps()
288 tsz = min(offset + (u64)dump->size - start, (u64)size); in vmcoredd_mmap_dumps()
289 buf = dump->buf + start - offset; in vmcoredd_mmap_dumps()
292 ret = -EFAULT; in vmcoredd_mmap_dumps()
296 size -= tsz; in vmcoredd_mmap_dumps()
304 offset += dump->size; in vmcoredd_mmap_dumps()
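
vmcoredd_copy_dumps() and vmcoredd_mmap_dumps() (lines 252-304) walk vmcoredd_list, on which the device dumps sit back to back, and intersect the caller's window [start, start + size) with each dump's [offset, offset + dump->size). The clamp on lines 253/288 is plain interval intersection; a standalone model (dump_overlap() is not a kernel function):

    /* How many bytes of the request [start, start + size) fall inside a
     * dump occupying [offset, offset + dsize)? The real loops can assume
     * start >= offset because they advance through the list in order. */
    static u64 dump_overlap(u64 start, u64 size, u64 offset, u64 dsize)
    {
            if (start >= offset + dsize || start + size <= offset)
                    return 0;               /* no overlap */
            return min(offset + dsize - start, size);
    }
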
327 iov_iter_truncate(iter, vmcore_size - *fpos); in __read_vmcore()
331 tsz = min(elfcorebuf_sz - (size_t)*fpos, iov_iter_count(iter)); in __read_vmcore()
333 return -EFAULT; in __read_vmcore()
342 /* Read ELF note segment */ in __read_vmcore()
348 * completely and we will end up with zero-filled data in __read_vmcore()
350 * then try to decode this zero-filled data as valid notes in __read_vmcore()
352 * the other ELF notes ensures that zero-filled data can be in __read_vmcore()
358 tsz = min(elfcorebuf_sz + vmcoredd_orig_sz - in __read_vmcore()
360 start = *fpos - elfcorebuf_sz; in __read_vmcore()
362 return -EFAULT; in __read_vmcore()
374 tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, in __read_vmcore()
376 kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz; in __read_vmcore()
378 return -EFAULT; in __read_vmcore()
391 if (*fpos < m->offset + m->size) { in __read_vmcore()
393 m->offset + m->size - *fpos, in __read_vmcore()
395 start = m->paddr + *fpos - m->offset; in __read_vmcore()
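
__read_vmcore() (lines 327-395) dispatches each file position across the regions that make up /proc/vmcore: the ELF and program headers in elfcorebuf, the device-dump notes, the merged ELF note segment, and finally the old-memory chunks on vmcore_list. A hedged model of that dispatch; note that elfnotes_sz, as in the file itself, already includes vmcoredd_orig_sz:

    /* Size globals from vmcore.c; extern here only for the sketch. */
    extern size_t elfcorebuf_sz, elfnotes_sz, vmcoredd_orig_sz;

    enum vmcore_region { R_HEADERS, R_DEVICE_DUMPS, R_NOTES, R_OLDMEM };

    static enum vmcore_region classify(loff_t fpos)
    {
            if (fpos < elfcorebuf_sz)
                    return R_HEADERS;       /* ELF header + program headers */
            if (fpos < elfcorebuf_sz + vmcoredd_orig_sz)
                    return R_DEVICE_DUMPS;  /* vmcoredd note buffers */
            if (fpos < elfcorebuf_sz + elfnotes_sz)
                    return R_NOTES;         /* merged ELF note segment */
            return R_OLDMEM;                /* PT_LOAD data on vmcore_list */
    }
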
416 return __read_vmcore(iter, &iocb->ki_pos); in read_vmcore()
429 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in mmap_vmcore_fault()
430 pgoff_t index = vmf->pgoff; in mmap_vmcore_fault()
455 vmf->page = page; in mmap_vmcore_fault()
463 * vmcore_alloc_buf - allocate buffer in vmalloc memory
467 * the buffer to user-space by means of remap_vmalloc_range().
470 * disabled and there's no need to allow users to mmap the buffer.
484 * non-contiguous objects (ELF header, ELF note segment and memory
486 * virtually contiguous user-space in ELF layout.
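
The two comment fragments above (vmcore_alloc_buf()'s kernel-doc and the CONFIG_MMU note) pin down the constraint: remap_vmalloc_range() refuses areas allocated without VM_USERMAP, so with an MMU the buffer must come from vmalloc_user(). A sketch consistent with those comments (the in-tree body is close to this):

    static void *vmcore_alloc_buf(size_t size)
    {
    #ifdef CONFIG_MMU
            /* vmalloc_user() zeroes the area and sets VM_USERMAP, which
             * remap_vmalloc_range() checks for at mmap time. */
            return vmalloc_user(size);
    #else
            /* mmap_vmcore() is compiled out without an MMU, so a plain
             * zeroed vmalloc is sufficient. */
            return vzalloc(size);
    #endif
    }
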
495 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
504 * Returns zero on success, -EAGAIN on failure.
521 * We hit a page that is not RAM. Remap the contiguous in remap_oldmem_pfn_checked()
522 * region between pos_start and pos-1 and replace in remap_oldmem_pfn_checked()
523 * the non-RAM page at pos with the zero page. in remap_oldmem_pfn_checked()
526 /* Remap contiguous region */ in remap_oldmem_pfn_checked()
527 map_size = (pos - pos_start) << PAGE_SHIFT; in remap_oldmem_pfn_checked()
534 /* Remap the zero page */ in remap_oldmem_pfn_checked()
544 /* Remap the rest */ in remap_oldmem_pfn_checked()
545 map_size = (pos - pos_start) << PAGE_SHIFT; in remap_oldmem_pfn_checked()
552 do_munmap(vma->vm_mm, from, len, NULL); in remap_oldmem_pfn_checked()
553 return -EAGAIN; in remap_oldmem_pfn_checked()
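
remap_oldmem_pfn_checked() (lines 495-553) scans the pfn range once: contiguous RAM runs are flushed with remap_oldmem_pfn_range(), every pfn the callbacks report as non-RAM is replaced by the shared zero page, and any failure unwinds the partial mapping with do_munmap() before returning -EAGAIN. A condensed sketch of the loop (declarations and the final flush after the loop are elided):

    for (pos = pos_start; pos < pos_end; ++pos) {
            if (pfn_is_ram(pos))
                    continue;               /* extend the current run */

            if (pos > pos_start) {          /* flush the RAM run [pos_start, pos) */
                    map_size = (pos - pos_start) << PAGE_SHIFT;
                    if (remap_oldmem_pfn_range(vma, from + len, pos_start,
                                               map_size, prot))
                            goto fail;
                    len += map_size;
            }
            /* Substitute the shared zero page for the non-RAM pfn. */
            if (remap_oldmem_pfn_range(vma, from + len, my_zero_pfn(0),
                                       PAGE_SIZE, prot))
                    goto fail;
            len += PAGE_SIZE;
            pos_start = pos + 1;            /* next run starts after it */
    }
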
577 size_t size = vma->vm_end - vma->vm_start; in mmap_vmcore()
581 start = (u64)vma->vm_pgoff << PAGE_SHIFT; in mmap_vmcore()
585 return -EINVAL; in mmap_vmcore()
587 if (vma->vm_flags & (VM_WRITE | VM_EXEC)) in mmap_vmcore()
588 return -EPERM; in mmap_vmcore()
591 vma->vm_ops = &vmcore_mmap_ops; in mmap_vmcore()
598 tsz = min(elfcorebuf_sz - (size_t)start, size); in mmap_vmcore()
600 if (remap_pfn_range(vma, vma->vm_start, pfn, tsz, in mmap_vmcore()
601 vma->vm_page_prot)) in mmap_vmcore()
602 return -EAGAIN; in mmap_vmcore()
603 size -= tsz; in mmap_vmcore()
616 * completely and we will end up with zero-filled data in mmap_vmcore()
618 * then try to decode this zero-filled data as valid notes in mmap_vmcore()
620 * the other ELF notes ensures that zero-filled data can be in mmap_vmcore()
630 tsz = min(elfcorebuf_sz + vmcoredd_orig_sz - in mmap_vmcore()
632 start_off = start - elfcorebuf_sz; in mmap_vmcore()
633 if (vmcoredd_mmap_dumps(vma, vma->vm_start + len, in mmap_vmcore()
637 size -= tsz; in mmap_vmcore()
648 tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size); in mmap_vmcore()
649 kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz; in mmap_vmcore()
650 if (remap_vmalloc_range_partial(vma, vma->vm_start + len, in mmap_vmcore()
654 size -= tsz; in mmap_vmcore()
663 if (start < m->offset + m->size) { in mmap_vmcore()
667 m->offset + m->size - start, size); in mmap_vmcore()
668 paddr = m->paddr + start - m->offset; in mmap_vmcore()
669 if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len, in mmap_vmcore()
671 vma->vm_page_prot)) in mmap_vmcore()
673 size -= tsz; in mmap_vmcore()
684 do_munmap(vma->vm_mm, vma->vm_start, len, NULL); in mmap_vmcore()
685 return -EAGAIN; in mmap_vmcore()
690 return -ENOSYS; in mmap_vmcore()
714 size += m->size; in get_vmcore_size()
720 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
726 * note segment.
735 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in update_note_header_size_elf64()
738 if (phdr_ptr->p_type != PT_NOTE) in update_note_header_size_elf64()
740 max_sz = phdr_ptr->p_memsz; in update_note_header_size_elf64()
741 offset = phdr_ptr->p_offset; in update_note_header_size_elf64()
744 return -ENOMEM; in update_note_header_size_elf64()
751 while (nhdr_ptr->n_namesz != 0) { in update_note_header_size_elf64()
753 (((u64)nhdr_ptr->n_namesz + 3) & ~3) + in update_note_header_size_elf64()
754 (((u64)nhdr_ptr->n_descsz + 3) & ~3); in update_note_header_size_elf64()
757 nhdr_ptr->n_namesz, nhdr_ptr->n_descsz); in update_note_header_size_elf64()
764 phdr_ptr->p_memsz = real_sz; in update_note_header_size_elf64()
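
The while loop on lines 751-764 sizes the note segment by walking it: each Elf64_Nhdr is followed by its name and its descriptor, each padded to a 4-byte boundary, and the walk stops at a zero n_namesz or when the next note would run past p_memsz. The per-note stride in isolation (elf_note_size() is not a kernel helper):

    #include <linux/elf.h>

    /* Stride of one ELF note, as computed on lines 752-754. */
    static u64 elf_note_size(const Elf64_Nhdr *nhdr)
    {
            return sizeof(Elf64_Nhdr) +                  /* 12 bytes */
                   (((u64)nhdr->n_namesz + 3) & ~3) +    /* name, padded */
                   (((u64)nhdr->n_descsz + 3) & ~3);     /* desc, padded */
    }

    /* e.g. an x86_64 "CORE" prstatus note: n_namesz = 5 pads to 8,
     * n_descsz = 336 is already aligned, so 12 + 8 + 336 = 356 bytes. */
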
774 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
775 * headers and the sum of the real sizes of their ELF note segment headers and
784 * @sz_ptnote in its phdr->p_memsz.
788 * and each of the PT_NOTE program headers has an actual ELF note segment
800 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in get_note_number_and_size_elf64()
801 if (phdr_ptr->p_type != PT_NOTE) in get_note_number_and_size_elf64()
804 *sz_ptnote += phdr_ptr->p_memsz; in get_note_number_and_size_elf64()
811 * copy_notes_elf64 - copy ELF note segments in a given buffer
816 * This function is used to copy the ELF note segment of the 1st kernel
819 * real ELF note segment headers and data.
823 * and each of the PT_NOTE program headers has an actual ELF note segment
833 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in copy_notes_elf64()
835 if (phdr_ptr->p_type != PT_NOTE) in copy_notes_elf64()
837 offset = phdr_ptr->p_offset; in copy_notes_elf64()
838 rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz, in copy_notes_elf64()
842 notes_buf += phdr_ptr->p_memsz; in copy_notes_elf64()
871 return -ENOMEM; in merge_note_headers_elf64()
881 (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr); in merge_note_headers_elf64()
893 i = (nr_ptnote - 1) * sizeof(Elf64_Phdr); in merge_note_headers_elf64()
894 *elfsz = *elfsz - i; in merge_note_headers_elf64()
895 memmove(tmp, tmp + i, *elfsz - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)); in merge_note_headers_elf64()
900 ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1; in merge_note_headers_elf64()
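
After the notes are copied out, merge_note_headers_elf64() (lines 871-900) compacts the program-header table: all nr_ptnote PT_NOTE entries collapse into a single merged one, so (nr_ptnote - 1) header slots disappear and e_phnum shrinks to match. The arithmetic on lines 893-900, annotated (tmp points just past the ELF header and the one slot that now holds the merged PT_NOTE entry):

    i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);       /* bytes to drop */
    *elfsz = *elfsz - i;                            /* new header-blob size */
    /* Slide the PT_LOAD headers that follow up over the gap. */
    memmove(tmp, tmp + i,
            *elfsz - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr));
    /* All PT_NOTE entries are now represented by the single merged one. */
    ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
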
911 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
917 * note segment.
926 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in update_note_header_size_elf32()
929 if (phdr_ptr->p_type != PT_NOTE) in update_note_header_size_elf32()
931 max_sz = phdr_ptr->p_memsz; in update_note_header_size_elf32()
932 offset = phdr_ptr->p_offset; in update_note_header_size_elf32()
935 return -ENOMEM; in update_note_header_size_elf32()
942 while (nhdr_ptr->n_namesz != 0) { in update_note_header_size_elf32()
944 (((u64)nhdr_ptr->n_namesz + 3) & ~3) + in update_note_header_size_elf32()
945 (((u64)nhdr_ptr->n_descsz + 3) & ~3); in update_note_header_size_elf32()
948 nhdr_ptr->n_namesz, nhdr_ptr->n_descsz); in update_note_header_size_elf32()
955 phdr_ptr->p_memsz = real_sz; in update_note_header_size_elf32()
965 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
966 * headers and the sum of the real sizes of their ELF note segment headers and
975 * @sz_ptnote in its phdr->p_memsz.
979 * and each of the PT_NOTE program headers has an actual ELF note segment
991 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in get_note_number_and_size_elf32()
992 if (phdr_ptr->p_type != PT_NOTE) in get_note_number_and_size_elf32()
995 *sz_ptnote += phdr_ptr->p_memsz; in get_note_number_and_size_elf32()
1002 * copy_notes_elf32 - copy ELF note segments in a given buffer
1007 * This function is used to copy the ELF note segment of the 1st kernel
1010 * real ELF note segment headers and data.
1014 * and each of the PT_NOTE program headers has an actual ELF note segment
1024 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in copy_notes_elf32()
1026 if (phdr_ptr->p_type != PT_NOTE) in copy_notes_elf32()
1028 offset = phdr_ptr->p_offset; in copy_notes_elf32()
1029 rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz, in copy_notes_elf32()
1033 notes_buf += phdr_ptr->p_memsz; in copy_notes_elf32()
1062 return -ENOMEM; in merge_note_headers_elf32()
1072 (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr); in merge_note_headers_elf32()
1084 i = (nr_ptnote - 1) * sizeof(Elf32_Phdr); in merge_note_headers_elf32()
1085 *elfsz = *elfsz - i; in merge_note_headers_elf32()
1086 memmove(tmp, tmp + i, *elfsz - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)); in merge_note_headers_elf32()
1091 ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1; in merge_note_headers_elf32()
1117 /* Skip ELF header, program headers and ELF note segment. */ in process_ptload_program_headers_elf64()
1120 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in process_ptload_program_headers_elf64()
1123 if (phdr_ptr->p_type != PT_LOAD) in process_ptload_program_headers_elf64()
1126 paddr = phdr_ptr->p_offset; in process_ptload_program_headers_elf64()
1128 end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE); in process_ptload_program_headers_elf64()
1129 size = end - start; in process_ptload_program_headers_elf64()
1134 return -ENOMEM; in process_ptload_program_headers_elf64()
1135 new->paddr = start; in process_ptload_program_headers_elf64()
1136 new->size = size; in process_ptload_program_headers_elf64()
1137 list_add_tail(&new->list, vc_list); in process_ptload_program_headers_elf64()
1140 phdr_ptr->p_offset = vmcore_off + (paddr - start); in process_ptload_program_headers_elf64()
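
process_ptload_program_headers_elf64() (lines 1117-1141) exports each PT_LOAD chunk page-aligned: the physical span is widened outward to page boundaries for the vmcore_list entry, while p_offset keeps the sub-page displacement so readers still find the segment at its exact bytes. A worked example of lines 1126-1140 with made-up numbers:

    /* Hypothetical segment: paddr = 0x1234, p_memsz = 0x100, 4 KiB pages. */
    start = rounddown(0x1234, 0x1000);              /* 0x1000 */
    end   = roundup(0x1234 + 0x100, 0x1000);        /* 0x2000 */
    size  = end - start;                            /* 0x1000, one full page */

    /* vmcore_list records the widened span [0x1000, 0x2000)... */
    new->paddr = start;
    new->size  = size;

    /* ...but the file offset keeps the 0x234 in-page displacement. */
    phdr_ptr->p_offset = vmcore_off + (0x1234 - start);
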
1160 /* Skip ELF header, program headers and ELF note segment. */ in process_ptload_program_headers_elf32()
1163 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) { in process_ptload_program_headers_elf32()
1166 if (phdr_ptr->p_type != PT_LOAD) in process_ptload_program_headers_elf32()
1169 paddr = phdr_ptr->p_offset; in process_ptload_program_headers_elf32()
1171 end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE); in process_ptload_program_headers_elf32()
1172 size = end - start; in process_ptload_program_headers_elf32()
1177 return -ENOMEM; in process_ptload_program_headers_elf32()
1178 new->paddr = start; in process_ptload_program_headers_elf32()
1179 new->size = size; in process_ptload_program_headers_elf32()
1180 list_add_tail(&new->list, vc_list); in process_ptload_program_headers_elf32()
1183 phdr_ptr->p_offset = vmcore_off + (paddr - start); in process_ptload_program_headers_elf32()
1196 /* Skip ELF header, program headers and ELF note segment. */ in set_vmcore_list_offsets()
1200 m->offset = vmcore_off; in set_vmcore_list_offsets()
1201 vmcore_off += m->size; in set_vmcore_list_offsets()
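
set_vmcore_list_offsets() (lines 1196-1201) then assigns each old-memory chunk its position in the file by accumulating sizes past the header and note regions:

    /* Chunks are laid out back to back after the ELF headers and the
     * merged note segment (lines 1196-1201). */
    vmcore_off = elfsz + elfnotes_sz;
    list_for_each_entry(m, vc_list, list) {
            m->offset = vmcore_off;
            vmcore_off += m->size;
    }
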
1237 return -EINVAL; in parse_crash_elf64_headers()
1247 return -ENOMEM; in parse_crash_elf64_headers()
1293 return -EINVAL; in parse_crash_elf32_headers()
1302 return -ENOMEM; in parse_crash_elf32_headers()
1336 return -EINVAL; in parse_crash_elf_headers()
1349 return -EINVAL; in parse_crash_elf_headers()
1361 * vmcoredd_write_header - Write vmcore device dump header at the
1374 vdd_hdr->n_namesz = sizeof(vdd_hdr->name); in vmcoredd_write_header()
1375 vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name); in vmcoredd_write_header()
1376 vdd_hdr->n_type = NT_VMCOREDD; in vmcoredd_write_header()
1378 strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME, in vmcoredd_write_header()
1379 sizeof(vdd_hdr->name)); in vmcoredd_write_header()
1380 memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name)); in vmcoredd_write_header()
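
vmcoredd_write_header() (lines 1361-1380) lays a standard ELF note over the front of each device-dump buffer. The header it fills is struct vmcoredd_header from include/uapi/linux/vmcore.h; the dump's name rides at the start of the note descriptor, which is why n_descsz on line 1375 is the data size plus sizeof(dump_name):

    /* Layout being filled on lines 1374-1380 (include/uapi/linux/vmcore.h). */
    struct vmcoredd_header {
            __u32 n_namesz;         /* = sizeof(name) = 8 */
            __u32 n_descsz;         /* = data size + sizeof(dump_name) */
            __u32 n_type;           /* = NT_VMCOREDD */
            __u8  name[8];          /* "LINUX\0\0\0" (VMCOREDD_NOTE_NAME) */
            __u8  dump_name[VMCOREDD_MAX_NAME_BYTES];  /* device identifier */
    };
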
1384 * vmcoredd_update_program_headers - Update all ELF program headers
1407 for (i = 0; i < ehdr->e_phnum; i++, phdr++) { in vmcoredd_update_program_headers()
1408 if (phdr->p_type == PT_NOTE) { in vmcoredd_update_program_headers()
1410 phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz; in vmcoredd_update_program_headers()
1411 phdr->p_filesz = phdr->p_memsz; in vmcoredd_update_program_headers()
1415 start = rounddown(phdr->p_offset, PAGE_SIZE); in vmcoredd_update_program_headers()
1416 end = roundup(phdr->p_offset + phdr->p_memsz, in vmcoredd_update_program_headers()
1418 size = end - start; in vmcoredd_update_program_headers()
1419 phdr->p_offset = vmcore_off + (phdr->p_offset - start); in vmcoredd_update_program_headers()
1427 for (i = 0; i < ehdr->e_phnum; i++, phdr++) { in vmcoredd_update_program_headers()
1428 if (phdr->p_type == PT_NOTE) { in vmcoredd_update_program_headers()
1430 phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz; in vmcoredd_update_program_headers()
1431 phdr->p_filesz = phdr->p_memsz; in vmcoredd_update_program_headers()
1435 start = rounddown(phdr->p_offset, PAGE_SIZE); in vmcoredd_update_program_headers()
1436 end = roundup(phdr->p_offset + phdr->p_memsz, in vmcoredd_update_program_headers()
1438 size = end - start; in vmcoredd_update_program_headers()
1439 phdr->p_offset = vmcore_off + (phdr->p_offset - start); in vmcoredd_update_program_headers()
1446 * vmcoredd_update_size - Update the total size of the device dumps and update
1466 proc_vmcore->size = vmcore_size; in vmcoredd_update_size()
1470 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
1486 return -EINVAL; in vmcore_add_device_dump()
1489 if (!data || !strlen(data->dump_name) || in vmcore_add_device_dump()
1490 !data->vmcoredd_callback || !data->size) in vmcore_add_device_dump()
1491 return -EINVAL; in vmcore_add_device_dump()
1495 ret = -ENOMEM; in vmcore_add_device_dump()
1500 data_size = roundup(sizeof(struct vmcoredd_header) + data->size, in vmcore_add_device_dump()
1506 ret = -ENOMEM; in vmcore_add_device_dump()
1510 vmcoredd_write_header(buf, data, data_size - in vmcore_add_device_dump()
1514 ret = data->vmcoredd_callback(data, buf + in vmcore_add_device_dump()
1519 dump->buf = buf; in vmcore_add_device_dump()
1520 dump->size = data_size; in vmcore_add_device_dump()
1524 list_add_tail(&dump->list, &vmcoredd_list); in vmcore_add_device_dump()
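
vmcore_add_device_dump() (lines 1470-1524) is the driver-facing entry point: the caller supplies a struct vmcoredd_data naming the dump, its size, and a collection callback; the core allocates the buffer, writes the header, invokes the callback, and queues the result on vmcoredd_list. A hedged sketch of a caller; the my_nic names and the trivial collection body are illustrative only:

    #include <linux/crash_dump.h>
    #include <linux/printk.h>
    #include <linux/sizes.h>
    #include <linux/string.h>

    static int my_nic_collect(struct vmcoredd_data *data, void *buf)
    {
            /* Fill buf with up to data->size bytes of device state. */
            memset(buf, 0, data->size);     /* stand-in for real collection */
            return 0;
    }

    static struct vmcoredd_data my_nic_dump = {
            .dump_name         = "my_nic",       /* non-empty (line 1489) */
            .size              = SZ_1M,          /* rounded to PAGE_SIZE (line 1500) */
            .vmcoredd_callback = my_nic_collect, /* required (line 1490) */
    };

    /* Called from the driver while running in the kdump kernel; fails
     * with -EINVAL unless the vmcore supports device dumps. */
    static void my_nic_report_crash(void)
    {
            if (vmcore_add_device_dump(&my_nic_dump))
                    pr_warn("my_nic: failed to add device dump\n");
    }
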
1549 list_del(&dump->list); in vmcore_free_device_dumps()
1550 vfree(dump->buf); in vmcore_free_device_dumps()
1583 proc_vmcore->size = vmcore_size; in vmcore_init()
1601 list_del(&m->list); in vmcore_cleanup()