/*
 * Copyright (C) Paul Mackerras 1997.
 *
 * Updates for PPC64 by Todd Inglett, Dave Engebretsen & Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <stdarg.h>
#include <stddef.h>
#include "elf.h"
#include "page.h"
#include "string.h"
#include "stdio.h"
#include "prom.h"
#include "zlib.h"

extern void flush_cache(void *, unsigned long);

/* Value picked to match that used by yaboot */
#define PROG_START	0x01400000
#define RAM_END		(512 << 20)	/* FIXME: use OF */
#define ONE_MB		0x100000

extern char _start[];
extern char __bss_start[];
extern char _end[];
extern char _vmlinux_start[];
extern char _vmlinux_end[];
extern char _initrd_start[];
extern char _initrd_end[];

struct addr_range {
	unsigned long addr;
	unsigned long size;
	unsigned long memsize;
};
static struct addr_range vmlinux;
static struct addr_range vmlinuz;
static struct addr_range initrd;

static unsigned long elfoffset;

static char scratch[46912];	/* scratch space for gunzip, from zlib_inflate_workspacesize() */
static char elfheader[256];

typedef void (*kernel_entry_t)(unsigned long, unsigned long, void *, void *);

#undef DEBUG

static unsigned long claim_base;

/* gzip flag byte bits */
#define HEAD_CRC	2
#define EXTRA_FIELD	4
#define ORIG_NAME	8
#define COMMENT		0x10
#define RESERVED	0xe0

/*
 * Decompress the gzip stream at src (*lenp bytes) into dst, which holds
 * at most dstlen bytes.  On return, *lenp is the number of bytes written
 * to dst.
 */
static void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp)
{
	z_stream s;
	int r, i, flags;

	/* skip the gzip header (including any optional fields) */
	i = 10;
	flags = src[3];
	if (src[2] != Z_DEFLATED || (flags & RESERVED) != 0) {
		printf("bad gzipped data\n\r");
		exit();
	}
	if ((flags & EXTRA_FIELD) != 0)
		i = 12 + src[10] + (src[11] << 8);
	if ((flags & ORIG_NAME) != 0)
		while (src[i++] != 0)
			;
	if ((flags & COMMENT) != 0)
		while (src[i++] != 0)
			;
	if ((flags & HEAD_CRC) != 0)
		i += 2;
	if (i >= *lenp) {
		printf("gunzip: ran out of data in header\n\r");
		exit();
	}

	if (zlib_inflate_workspacesize() > sizeof(scratch)) {
		printf("gunzip needs more mem\n");
		exit();
	}
	memset(&s, 0, sizeof(s));
	s.workspace = scratch;
	r = zlib_inflateInit2(&s, -MAX_WBITS);
	if (r != Z_OK) {
		printf("inflateInit2 returned %d\n\r", r);
		exit();
	}
	s.next_in = src + i;
	s.avail_in = *lenp - i;
	s.next_out = dst;
	s.avail_out = dstlen;
	r = zlib_inflate(&s, Z_FULL_FLUSH);
	if (r != Z_OK && r != Z_STREAM_END) {
		printf("inflate returned %d msg: %s\n\r", r, s.msg);
		exit();
	}
	*lenp = s.next_out - (unsigned char *)dst;
	zlib_inflateEnd(&s);
}

/*
 * Claim memory from Open Firmware, starting at claim_base and walking up
 * in 1 MB steps until a claim succeeds.  Returns the claimed address, or
 * 0 if nothing could be claimed below RAM_END.
 */
static unsigned long try_claim(unsigned long size)
{
	unsigned long addr = 0;

	for (; claim_base < RAM_END; claim_base += ONE_MB) {
#ifdef DEBUG
		printf("    trying: 0x%08lx\n\r", claim_base);
#endif
		addr = (unsigned long)claim(claim_base, size, 0);
		if ((void *)addr != (void *)-1)
			break;
	}
	if (addr == 0)
		return 0;
	claim_base = PAGE_ALIGN(claim_base + size);
	return addr;
}

/*
 * Check whether hdr is a big-endian 64-bit PPC ELF executable; if so,
 * record the offset and sizes of its first loadable segment in elfoffset
 * and vmlinux.
 */
static int is_elf64(void *hdr)
{
	Elf64_Ehdr *elf64 = hdr;
	Elf64_Phdr *elf64ph;
	unsigned int i;

	if (!(elf64->e_ident[EI_MAG0] == ELFMAG0 &&
	      elf64->e_ident[EI_MAG1] == ELFMAG1 &&
	      elf64->e_ident[EI_MAG2] == ELFMAG2 &&
	      elf64->e_ident[EI_MAG3] == ELFMAG3 &&
	      elf64->e_ident[EI_CLASS] == ELFCLASS64 &&
	      elf64->e_ident[EI_DATA] == ELFDATA2MSB &&
	      elf64->e_type == ET_EXEC &&
	      elf64->e_machine == EM_PPC64))
		return 0;

	elf64ph = (Elf64_Phdr *)((unsigned long)elf64 +
				 (unsigned long)elf64->e_phoff);
	for (i = 0; i < (unsigned int)elf64->e_phnum; i++, elf64ph++)
		if (elf64ph->p_type == PT_LOAD && elf64ph->p_offset != 0)
			break;
	if (i >= (unsigned int)elf64->e_phnum)
		return 0;

	elfoffset = (unsigned long)elf64ph->p_offset;
	vmlinux.size = (unsigned long)elf64ph->p_filesz + elfoffset;
	vmlinux.memsize = (unsigned long)elf64ph->p_memsz + elfoffset;
	return 1;
}

/*
 * Same check for a big-endian 32-bit PPC ELF executable.
 */
static int is_elf32(void *hdr)
{
	Elf32_Ehdr *elf32 = hdr;
	Elf32_Phdr *elf32ph;
	unsigned int i;

	if (!(elf32->e_ident[EI_MAG0] == ELFMAG0 &&
	      elf32->e_ident[EI_MAG1] == ELFMAG1 &&
	      elf32->e_ident[EI_MAG2] == ELFMAG2 &&
	      elf32->e_ident[EI_MAG3] == ELFMAG3 &&
	      elf32->e_ident[EI_CLASS] == ELFCLASS32 &&
	      elf32->e_ident[EI_DATA] == ELFDATA2MSB &&
	      elf32->e_type == ET_EXEC &&
	      elf32->e_machine == EM_PPC))
		return 0;

	elf32 = (Elf32_Ehdr *)elfheader;
	elf32ph = (Elf32_Phdr *)((unsigned long)elf32 + elf32->e_phoff);
	for (i = 0; i < elf32->e_phnum; i++, elf32ph++)
		if (elf32ph->p_type == PT_LOAD && elf32ph->p_offset != 0)
			break;
	if (i >= elf32->e_phnum)
		return 0;

	elfoffset = elf32ph->p_offset;
	vmlinux.size = elf32ph->p_filesz + elf32ph->p_offset;
	vmlinux.memsize = elf32ph->p_memsz + elf32ph->p_offset;
	return 1;
}

void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
{
	int len;
	kernel_entry_t kernel_entry;

	memset(__bss_start, 0, _end - __bss_start);

	prom = (int (*)(void *))promptr;
	chosen_handle = finddevice("/chosen");
	if (chosen_handle == (void *)-1)
		exit();
	if (getprop(chosen_handle, "stdout", &stdout, sizeof(stdout)) != 4)
		exit();
	stderr = stdout;
	if (getprop(chosen_handle, "stdin", &stdin, sizeof(stdin)) != 4)
		exit();

	printf("\n\rzImage starting: loaded at 0x%p (sp: 0x%p)\n\r", _start, sp);

	vmlinuz.addr = (unsigned long)_vmlinux_start;
	vmlinuz.size = (unsigned long)(_vmlinux_end - _vmlinux_start);

	/* gunzip the ELF header of the kernel */
	if (*(unsigned short *)vmlinuz.addr == 0x1f8b) {
		len = vmlinuz.size;
		gunzip(elfheader, sizeof(elfheader),
		       (unsigned char *)vmlinuz.addr, &len);
	} else
		memcpy(elfheader, (const void *)vmlinuz.addr, sizeof(elfheader));

	if (!is_elf64(elfheader) && !is_elf32(elfheader)) {
		printf("Error: not a valid PPC32 or PPC64 ELF file!\n\r");
		exit();
	}

	/*
	 * The first available claim_base must be above the end of the
	 * loaded kernel wrapper file (_start to _end includes the
	 * initrd image if it is present) and rounded up to a nice
	 * 1 MB boundary for good measure.
	 */
	claim_base = _ALIGN_UP((unsigned long)_end, ONE_MB);

#if defined(PROG_START)
	/*
	 * Maintain a "magic" minimum address.  This keeps some older
	 * firmware platforms running.
	 */
	if (claim_base < PROG_START)
		claim_base = PROG_START;
#endif

	/* We need to claim the memsize plus the file offset since gzip
	 * will expand the header (file offset), then the kernel, then
	 * possible rubbish we don't care about.
	 * But the kernel bss must be claimed (it will be zeroed by the
	 * kernel itself).
	 */
	printf("Allocating 0x%lx bytes for kernel ...\n\r", vmlinux.memsize);
	vmlinux.addr = try_claim(vmlinux.memsize);
	if (vmlinux.addr == 0) {
		printf("Can't allocate memory for kernel image !\n\r");
		exit();
	}

	/*
	 * Now we try to claim memory for the initrd (and copy it there)
	 */
	initrd.size = (unsigned long)(_initrd_end - _initrd_start);
	initrd.memsize = initrd.size;
	if (initrd.size > 0) {
		printf("Allocating 0x%lx bytes for initrd ...\n\r", initrd.size);
		initrd.addr = try_claim(initrd.size);
		if (initrd.addr == 0) {
			printf("Can't allocate memory for initial ramdisk !\n\r");
			exit();
		}
		a1 = initrd.addr;
		a2 = initrd.size;
		printf("initial ramdisk moving 0x%lx <- 0x%lx (0x%lx bytes)\n\r",
		       initrd.addr, (unsigned long)_initrd_start, initrd.size);
		memmove((void *)initrd.addr, (void *)_initrd_start, initrd.size);
		printf("initrd head: 0x%lx\n\r", *((unsigned long *)initrd.addr));
	}

	/* gunzip the kernel, if it is compressed */
	if (*(unsigned short *)vmlinuz.addr == 0x1f8b) {
		printf("gunzipping (0x%lx <- 0x%lx:0x%0lx)...",
		       vmlinux.addr, vmlinuz.addr, vmlinuz.addr + vmlinuz.size);
		len = vmlinuz.size;
		gunzip((void *)vmlinux.addr, vmlinux.memsize,
		       (unsigned char *)vmlinuz.addr, &len);
		printf("done 0x%x bytes\n\r", len);
	} else {
		memmove((void *)vmlinux.addr, (void *)vmlinuz.addr, vmlinuz.size);
	}

	/* Skip over the ELF header */
#ifdef DEBUG
	printf("... skipping 0x%lx bytes of ELF header\n\r",
	       elfoffset);
#endif
	vmlinux.addr += elfoffset;

	flush_cache((void *)vmlinux.addr, vmlinux.size);

	kernel_entry = (kernel_entry_t)vmlinux.addr;
#ifdef DEBUG
	printf("kernel:\n\r"
	       "        entry addr = 0x%lx\n\r"
	       "        a1         = 0x%lx,\n\r"
	       "        a2         = 0x%lx,\n\r"
	       "        prom       = 0x%lx,\n\r"
	       "        bi_recs    = 0x%lx,\n\r",
	       (unsigned long)kernel_entry, a1, a2,
	       (unsigned long)prom, (unsigned long)NULL);
#endif

	kernel_entry(a1, a2, prom, NULL);

	printf("Error: Linux kernel returned to zImage bootloader!\n\r");

	exit();
}