// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/ia64/kernel/machine_kexec.c
 *
 * Handle transition of Linux booting another kernel
 * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
 * Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com>
 * Copyright (C) 2006 Intel Corp, Zou Nan hai <nanhai.zou@intel.com>
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/efi.h>
#include <linux/numa.h>
#include <linux/mmzone.h>

#include <asm/numa.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/delay.h>
#include <asm/meminit.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/mca.h>

typedef void (*relocate_new_kernel_t)(
					unsigned long indirection_page,
					unsigned long start_address,
					struct ia64_boot_param *boot_param,
					unsigned long pal_addr) __noreturn;

struct kimage *ia64_kimage;

struct resource efi_memmap_res = {
	.name  = "EFI Memory Map",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

struct resource boot_param_res = {
	.name  = "Boot parameter",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};


/*
 * Do whatever setup is needed on the image and the reboot code buffer
 * to allow us to avoid allocations later.
 */
int machine_kexec_prepare(struct kimage *image)
{
	void *control_code_buffer;
	const unsigned long *func;

	func = (unsigned long *)&relocate_new_kernel;
	/* Pre-load control code buffer to minimize work in kexec path */
	control_code_buffer = page_address(image->control_code_page);
	memcpy((void *)control_code_buffer, (const void *)func[0],
			relocate_new_kernel_size);
	flush_icache_range((unsigned long)control_code_buffer,
			(unsigned long)control_code_buffer + relocate_new_kernel_size);
	ia64_kimage = image;

	return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
{
	struct kimage *image = arg;
	relocate_new_kernel_t rnk;
	void *pal_addr = efi_get_pal_addr();
	unsigned long code_addr;
	int ii;
	u64 fp, gp;
	ia64_fptr_t *init_handler = (ia64_fptr_t *)ia64_os_init_on_kdump;

	BUG_ON(!image);
	code_addr = (unsigned long)page_address(image->control_code_page);
	if (image->type == KEXEC_TYPE_CRASH) {
		crash_save_this_cpu();
		current->thread.ksp = (__u64)info->sw - 16;

		/* Register noop init handler */
		fp = ia64_tpa(init_handler->fp);
		gp = ia64_tpa(ia64_getreg(_IA64_REG_GP));
		ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, fp, gp, 0, fp, gp, 0);
	} else {
		/* Unregister init handlers of current kernel */
		ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, 0, 0, 0, 0, 0, 0);
	}

	/* Unregister mca handler - No more recovery on current kernel */
	ia64_sal_set_vectors(SAL_VECTOR_OS_MCA, 0, 0, 0, 0, 0, 0);

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	/* Mask CMC and Performance Monitor interrupts */
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* Mask ITV and Local Redirect Registers */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);

	/* terminate possible nested in-service interrupts */
	for (ii = 0; ii < 16; ii++)
		ia64_eoi();

	/* unmask TPR and clear any pending interrupts */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
	ia64_srlz_d();
	while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
		ia64_eoi();
	/*
	 * ia64 indirect calls go through a function descriptor; taking the
	 * address of code_addr provides one whose entry-point slot holds the
	 * copied control code.
	 */
	rnk = (relocate_new_kernel_t)&code_addr;
	(*rnk)(image->head, image->start, ia64_boot_param,
			GRANULEROUNDDOWN((unsigned long) pal_addr));
	BUG();
}

void machine_kexec(struct kimage *image)
{
	BUG_ON(!image);
	unw_init_running(ia64_machine_kexec, image);
	for(;;);
}

void arch_crash_save_vmcoreinfo(void)
{
#if defined(CONFIG_DISCONTIGMEM) || defined(CONFIG_SPARSEMEM)
	VMCOREINFO_SYMBOL(pgdat_list);
	VMCOREINFO_LENGTH(pgdat_list, MAX_NUMNODES);
#endif
#ifdef CONFIG_NUMA
	VMCOREINFO_SYMBOL(node_memblk);
	VMCOREINFO_LENGTH(node_memblk, NR_NODE_MEMBLKS);
	VMCOREINFO_STRUCT_SIZE(node_memblk_s);
	VMCOREINFO_OFFSET(node_memblk_s, start_paddr);
	VMCOREINFO_OFFSET(node_memblk_s, size);
#endif
#if CONFIG_PGTABLE_LEVELS == 3
	VMCOREINFO_CONFIG(PGTABLE_3);
#elif CONFIG_PGTABLE_LEVELS == 4
	VMCOREINFO_CONFIG(PGTABLE_4);
#endif
}