/*
 * machine_kexec.c - handle transition of Linux booting another kernel
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/mach-types.h>
#include <asm/smp_plat.h>
#include <asm/system_misc.h>

extern const unsigned char relocate_new_kernel[];
extern const unsigned int relocate_new_kernel_size;

extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;
extern unsigned long kexec_mach_type;
extern unsigned long kexec_boot_atags;

static atomic_t waiting_for_crash_ipi;

/*
 * Provide a dummy crash_notes definition until crash dump support arrives
 * on ARM. This prevents breakage of the crash_notes attribute in
 * kernel/ksysfs.c.
 */

int machine_kexec_prepare(struct kimage *image)
{
	struct kexec_segment *current_segment;
	__be32 header;
	int i, err;

	/*
	 * Validate that if the current HW supports SMP, then the SW supports
	 * and implements CPU hotplug for the current HW. If not, we won't be
	 * able to kexec reliably, so fail the prepare operation.
	 */
	if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug())
		return -EINVAL;

	/*
	 * There is no segment at the default ATAGs address; try to locate
	 * a dtb using its magic number instead.
	 */
	for (i = 0; i < image->nr_segments; i++) {
		current_segment = &image->segment[i];

		if (!memblock_is_region_memory(current_segment->mem,
					       current_segment->memsz))
			return -EINVAL;

		err = get_user(header, (__be32 *)current_segment->buf);
		if (err)
			return err;

		if (be32_to_cpu(header) == OF_DT_HEADER)
			kexec_boot_atags = current_segment->mem;
	}
	return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
}

void machine_crash_nonpanic_core(void *unused)
{
	struct pt_regs regs;

	crash_setup_regs(&regs, NULL);
	printk(KERN_DEBUG "CPU %u will stop doing anything useful since another CPU has crashed\n",
	       smp_processor_id());
	crash_save_cpu(&regs, smp_processor_id());
	flush_cache_all();

	set_cpu_online(smp_processor_id(), false);
	atomic_dec(&waiting_for_crash_ipi);
	while (1)
		cpu_relax();
}

static void machine_kexec_mask_interrupts(void)
{
	unsigned int i;
	struct irq_desc *desc;

	for_each_irq_desc(i, desc) {
		struct irq_chip *chip;

		chip = irq_desc_get_chip(desc);
		if (!chip)
			continue;

		if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
			chip->irq_eoi(&desc->irq_data);

		if (chip->irq_mask)
			chip->irq_mask(&desc->irq_data);

		if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
			chip->irq_disable(&desc->irq_data);
	}
}

void machine_crash_shutdown(struct pt_regs *regs)
{
	unsigned long msecs;

	local_irq_disable();

	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
	smp_call_function(machine_crash_nonpanic_core, NULL, false);
	msecs = 1000; /* Wait at most a second for the other cpus to stop */
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
		mdelay(1);
		msecs--;
	}
	if (atomic_read(&waiting_for_crash_ipi) > 0)
		printk(KERN_WARNING "Non-crashing CPUs did not react to IPI\n");

	crash_save_cpu(regs, smp_processor_id());
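	/*
	 * Quiesce the interrupt controllers: EOI anything still in progress
	 * and mask/disable every line so the crashdump kernel starts from a
	 * clean interrupt state.
	 */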
	machine_kexec_mask_interrupts();

	printk(KERN_INFO "Loading crashdump kernel...\n");
}

/*
 * Function pointer to optional machine-specific reinitialization
 */
void (*kexec_reinit)(void);

void machine_kexec(struct kimage *image)
{
	unsigned long page_list;
	unsigned long reboot_code_buffer_phys;
	void *reboot_code_buffer;

	/*
	 * This can only happen if machine_shutdown() failed to disable some
	 * CPU, and that can only happen if the checks in
	 * machine_kexec_prepare() were not correct. If this fails, we can't
	 * reliably kexec anyway, so BUG_ON is appropriate.
	 */
	BUG_ON(num_online_cpus() > 1);

	page_list = image->head & PAGE_MASK;

	/* We need both the virtual and the physical address of the page here. */
	reboot_code_buffer_phys =
		page_to_pfn(image->control_code_page) << PAGE_SHIFT;
	reboot_code_buffer = page_address(image->control_code_page);

	/* Prepare parameters for reboot_code_buffer */
	kexec_start_address = image->start;
	kexec_indirection_page = page_list;
	kexec_mach_type = machine_arch_type;
	/* Fall back to the conventional ATAGs location if no dtb was found. */
	if (!kexec_boot_atags)
		kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET
				   + KEXEC_ARM_ATAGS_OFFSET;

	/* Copy our kernel relocation code to the control code page. */
	memcpy(reboot_code_buffer,
	       relocate_new_kernel, relocate_new_kernel_size);

	flush_icache_range((unsigned long)reboot_code_buffer,
			   (unsigned long)reboot_code_buffer +
			   KEXEC_CONTROL_PAGE_SIZE);
	printk(KERN_INFO "Bye!\n");

	if (kexec_reinit)
		kexec_reinit();

	soft_restart(reboot_code_buffer_phys);
}