/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 0.9
 * April 30, 1999
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999-2003 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 *
 * None of the EFI Runtime Services are implemented yet, as EFI only
 * supports physical mode addressing on SoftSDV.  This is to be fixed
 * in a future version.  --drummond 1999-07-20
 *
 * Implemented EFI runtime services and virtual mode calls.  --davidm
 *
 * Goutham Rao: <goutham.rao@intel.com>
 *	Skip non-WB memory and ignore empty memory ranges.
 */
#include <linux/module.h>
#include <linux/bootmem.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/efi.h>
#include <linux/kexec.h>
#include <linux/mm.h>

#include <asm/io.h>
#include <asm/kregs.h>
#include <asm/meminit.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mca.h>
#include <asm/tlbflush.h>

#define EFI_DEBUG	0

extern efi_status_t efi_call_phys (void *, ...);

struct efi efi;
EXPORT_SYMBOL(efi);
static efi_runtime_services_t *runtime;
static u64 mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;

#define efi_call_virt(f, args...)	(*(f))(args)

#define STUB_GET_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \
{ \
        struct ia64_fpreg fr[6]; \
        efi_time_cap_t *atc = NULL; \
        efi_status_t ret; \
 \
        if (tc) \
                atc = adjust_arg(tc); \
        ia64_save_scratch_fpregs(fr); \
        ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), \
                                adjust_arg(tm), atc); \
        ia64_load_scratch_fpregs(fr); \
        return ret; \
}

#define STUB_SET_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_time (efi_time_t *tm) \
{ \
        struct ia64_fpreg fr[6]; \
        efi_status_t ret; \
 \
        ia64_save_scratch_fpregs(fr); \
        ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), \
                                adjust_arg(tm)); \
        ia64_load_scratch_fpregs(fr); \
        return ret; \
}

#define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, \
                          efi_time_t *tm) \
{ \
        struct ia64_fpreg fr[6]; \
        efi_status_t ret; \
 \
        ia64_save_scratch_fpregs(fr); \
        ret = efi_call_##prefix( \
                (efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \
                adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \
        ia64_load_scratch_fpregs(fr); \
        return ret; \
}

#define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \
{ \
        struct ia64_fpreg fr[6]; \
        efi_time_t *atm = NULL; \
        efi_status_t ret; \
 \
        if (tm) \
                atm = adjust_arg(tm); \
        ia64_save_scratch_fpregs(fr); \
        ret = efi_call_##prefix( \
                (efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \
                enabled, atm); \
        ia64_load_scratch_fpregs(fr); \
        return ret; \
}

#define STUB_GET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \
                       unsigned long *data_size, void *data) \
{ \
        struct ia64_fpreg fr[6]; \
        u32 *aattr = NULL; \
        efi_status_t ret; \
 \
        if (attr) \
                aattr = adjust_arg(attr); \
        ia64_save_scratch_fpregs(fr); \
        ret = efi_call_##prefix( \
                (efi_get_variable_t *) __va(runtime->get_variable), \
                adjust_arg(name), adjust_arg(vendor), aattr, \
                adjust_arg(data_size), adjust_arg(data)); \
        ia64_load_scratch_fpregs(fr); \
        return ret; \
}

#define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, \
                            efi_guid_t *vendor) \
{ \
        struct ia64_fpreg fr[6]; \
        efi_status_t ret; \
 \
        ia64_save_scratch_fpregs(fr); \
        ret = efi_call_##prefix( \
                (efi_get_next_variable_t *) __va(runtime->get_next_variable), \
                adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \
        ia64_load_scratch_fpregs(fr); \
        return ret; \
}

#define STUB_SET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, \
                       unsigned long attr, unsigned long data_size, \
                       void *data) \
{ \
        struct ia64_fpreg fr[6]; \
        efi_status_t ret; \
 \
        ia64_save_scratch_fpregs(fr); \
        ret = efi_call_##prefix( \
                (efi_set_variable_t *) __va(runtime->set_variable), \
                adjust_arg(name), adjust_arg(vendor), attr, data_size, \
                adjust_arg(data)); \
        ia64_load_scratch_fpregs(fr); \
        return ret; \
}

#define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_high_mono_count (u32 *count) \
{ \
        struct ia64_fpreg fr[6]; \
        efi_status_t ret; \
 \
        ia64_save_scratch_fpregs(fr); \
        ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \
                                __va(runtime->get_next_high_mono_count), \
                                adjust_arg(count)); \
        ia64_load_scratch_fpregs(fr); \
        return ret; \
}

#define STUB_RESET_SYSTEM(prefix, adjust_arg) \
static void \
prefix##_reset_system (int reset_type, efi_status_t status, \
                       unsigned long data_size, efi_char16_t *data) \
{ \
        struct ia64_fpreg fr[6]; \
        efi_char16_t *adata = NULL; \
 \
        if (data) \
                adata = adjust_arg(data); \
 \
        ia64_save_scratch_fpregs(fr); \
        efi_call_##prefix( \
                (efi_reset_system_t *) __va(runtime->reset_system), \
                reset_type, status, data_size, adata); \
        /* should not return, but just in case... */ \
        ia64_load_scratch_fpregs(fr); \
}
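
/*
 * Each STUB_* macro above generates a thin wrapper that saves and
 * restores the scratch floating-point registers around the firmware
 * call, since EFI may clobber them.  The adjust_arg parameter lets one
 * body serve both calling environments: phys_ptr converts pointers to
 * physical addresses for pre-virtual-mode calls, while id passes them
 * through unchanged once SetVirtualAddressMap() has been invoked.
 *
 * For illustration only (a sketch, not compiled), STUB_SET_TIME(phys,
 * phys_ptr) expands to roughly:
 *
 *	static efi_status_t
 *	phys_set_time (efi_time_t *tm)
 *	{
 *		struct ia64_fpreg fr[6];
 *		efi_status_t ret;
 *
 *		ia64_save_scratch_fpregs(fr);
 *		ret = efi_call_phys((efi_set_time_t *)
 *				    __va(runtime->set_time), phys_ptr(tm));
 *		ia64_load_scratch_fpregs(fr);
 *		return ret;
 *	}
 */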

#define phys_ptr(arg)	((__typeof__(arg)) ia64_tpa(arg))

STUB_GET_TIME(phys, phys_ptr)
STUB_SET_TIME(phys, phys_ptr)
STUB_GET_WAKEUP_TIME(phys, phys_ptr)
STUB_SET_WAKEUP_TIME(phys, phys_ptr)
STUB_GET_VARIABLE(phys, phys_ptr)
STUB_GET_NEXT_VARIABLE(phys, phys_ptr)
STUB_SET_VARIABLE(phys, phys_ptr)
STUB_GET_NEXT_HIGH_MONO_COUNT(phys, phys_ptr)
STUB_RESET_SYSTEM(phys, phys_ptr)

#define id(arg)	arg

STUB_GET_TIME(virt, id)
STUB_SET_TIME(virt, id)
STUB_GET_WAKEUP_TIME(virt, id)
STUB_SET_WAKEUP_TIME(virt, id)
STUB_GET_VARIABLE(virt, id)
STUB_GET_NEXT_VARIABLE(virt, id)
STUB_SET_VARIABLE(virt, id)
STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id)
STUB_RESET_SYSTEM(virt, id)

void
efi_gettimeofday (struct timespec *ts)
{
        efi_time_t tm;

        if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS) {
                memset(ts, 0, sizeof(*ts));
                return;
        }

        ts->tv_sec = mktime(tm.year, tm.month, tm.day,
                            tm.hour, tm.minute, tm.second);
        ts->tv_nsec = tm.nanosecond;
}

static int
is_memory_available (efi_memory_desc_t *md)
{
        if (!(md->attribute & EFI_MEMORY_WB))
                return 0;

        switch (md->type) {
        case EFI_LOADER_CODE:
        case EFI_LOADER_DATA:
        case EFI_BOOT_SERVICES_CODE:
        case EFI_BOOT_SERVICES_DATA:
        case EFI_CONVENTIONAL_MEMORY:
                return 1;
        }
        return 0;
}

typedef struct kern_memdesc {
        u64 attribute;
        u64 start;
        u64 num_pages;
} kern_memdesc_t;

static kern_memdesc_t *kern_memmap;

#define efi_md_size(md)	(md->num_pages << EFI_PAGE_SHIFT)

static inline u64
kmd_end(kern_memdesc_t *kmd)
{
        return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT));
}

static inline u64
efi_md_end(efi_memory_desc_t *md)
{
        return (md->phys_addr + efi_md_size(md));
}

static inline int
efi_wb(efi_memory_desc_t *md)
{
        return (md->attribute & EFI_MEMORY_WB);
}

static inline int
efi_uc(efi_memory_desc_t *md)
{
        return (md->attribute & EFI_MEMORY_UC);
}

static void
walk (efi_freemem_callback_t callback, void *arg, u64 attr)
{
        kern_memdesc_t *k;
        u64 start, end, voff;

        voff = (attr == EFI_MEMORY_WB) ? PAGE_OFFSET : __IA64_UNCACHED_OFFSET;
        for (k = kern_memmap; k->start != ~0UL; k++) {
                if (k->attribute != attr)
                        continue;
                start = PAGE_ALIGN(k->start);
                end = (k->start + (k->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK;
                if (start < end)
                        if ((*callback)(start + voff, end + voff, arg) < 0)
                                return;
        }
}

/*
 * Walk the EFI memory map and call CALLBACK once for each EFI memory
 * descriptor that has memory that is available for OS use.
 */
void
efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
{
        walk(callback, arg, EFI_MEMORY_WB);
}

/*
 * Walk the EFI memory map and call CALLBACK once for each EFI memory
 * descriptor that has memory that is available for the uncached
 * allocator.
 */
void
efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg)
{
        walk(callback, arg, EFI_MEMORY_UC);
}
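
/*
 * A minimal sketch of how the walk interface is consumed (the caller
 * and the count_pages name here are hypothetical, not part of this
 * file): the callback receives virtual [start, end) ranges and returns
 * a negative value to terminate the walk early.
 */
#if 0	/* illustration only */
static int
count_pages (u64 start, u64 end, void *arg)
{
        unsigned long *count = arg;

        *count += (end - start) >> PAGE_SHIFT;
        return 0;	/* non-negative: keep walking */
}
/* usage: efi_memmap_walk(count_pages, &n_pages); */
#endif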

/*
 * Look for the PAL_CODE region reported by EFI and map it using an
 * ITR to enable safe PAL calls in virtual mode.  See the IA-64
 * Processor Abstraction Layer chapter (chapter 11) in the ADAG.
 */
void *
efi_get_pal_addr (void)
{
        void *efi_map_start, *efi_map_end, *p;
        efi_memory_desc_t *md;
        u64 efi_desc_size;
        int pal_code_count = 0;
        u64 vaddr, mask;

        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;

        for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
                md = p;
                if (md->type != EFI_PAL_CODE)
                        continue;

                if (++pal_code_count > 1) {
                        printk(KERN_ERR "Too many EFI Pal Code memory ranges, "
                               "dropped @ %llx\n", md->phys_addr);
                        continue;
                }
                /*
                 * The only ITLB entry in region 7 that is used is the one
                 * installed by __start().  That entry covers a 64MB range.
                 */
                mask = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
                vaddr = PAGE_OFFSET + md->phys_addr;

                /*
                 * We must check that the PAL mapping won't overlap with the
                 * kernel mapping.
                 *
                 * PAL code is guaranteed to be aligned on a power of 2 between
                 * 4k and 256KB, so that only one ITR is needed to map it.
                 * This implies that the PAL code is always aligned on its
                 * size, i.e., the closest matching page size supported by the
                 * TLB.  Therefore PAL code is guaranteed never to cross a
                 * 64MB boundary unless it is bigger than 64MB (very
                 * unlikely!).  So for now the following test is enough to
                 * determine whether or not we need a dedicated ITR for the
                 * PAL code.
                 */
                if ((vaddr & mask) == (KERNEL_START & mask)) {
                        printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
                               __func__);
                        continue;
                }

                if (efi_md_size(md) > IA64_GRANULE_SIZE)
                        panic("Whoa!  PAL code size bigger than a granule!");

#if EFI_DEBUG
                mask = ~((1 << IA64_GRANULE_SHIFT) - 1);

                printk(KERN_INFO "CPU %d: mapping PAL code "
                       "[0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
                       smp_processor_id(), md->phys_addr,
                       md->phys_addr + efi_md_size(md),
                       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
#endif
                return __va(md->phys_addr);
        }
        printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
               __func__);
        return NULL;
}
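
/*
 * Worked example of the overlap test above (the numbers are
 * hypothetical): assuming the 64MB kernel ITR noted above, so
 * KERNEL_TR_PAGE_SHIFT == 26, mask clears the low 26 bits and thus
 * identifies 64MB-aligned regions.  PAL code at physical 0x4000000
 * maps to vaddr = PAGE_OFFSET + 0x4000000; if (vaddr & mask) equals
 * (KERNEL_START & mask), the kernel ITR installed by __start() already
 * covers the PAL code and no dedicated ITR is needed.
 */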

static u8 __init palo_checksum(u8 *buffer, u32 length)
{
        u8 sum = 0;
        u8 *end = buffer + length;

        while (buffer < end)
                sum = (u8) (sum + *(buffer++));

        return sum;
}

/*
 * Parse and handle the PALO table, which is published at:
 * http://www.dig64.org/home/DIG64_PALO_R1_0.pdf
 */
static void __init handle_palo(unsigned long palo_phys)
{
        struct palo_table *palo = __va(palo_phys);
        u8 checksum;

        if (strncmp(palo->signature, PALO_SIG, sizeof(PALO_SIG) - 1)) {
                printk(KERN_INFO "PALO signature incorrect.\n");
                return;
        }

        checksum = palo_checksum((u8 *)palo, palo->length);
        if (checksum) {
                printk(KERN_INFO "PALO checksum incorrect.\n");
                return;
        }

        setup_ptcg_sem(palo->max_tlb_purges, NPTCG_FROM_PALO);
}
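
/*
 * Checksum example (hypothetical numbers): the byte-wise sum of the
 * whole PALO table, including the checksum byte itself, must be 0
 * modulo 256.  If all the other bytes sum to 0x7c9, the checksum byte
 * must be 0x100 - (0x7c9 & 0xff) = 0x37, and palo_checksum() then
 * returns 0 for the intact table.
 */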

void
efi_map_pal_code (void)
{
        void *pal_vaddr = efi_get_pal_addr ();
        u64 psr;

        if (!pal_vaddr)
                return;

        /*
         * Cannot write to CRx with PSR.ic=1
         */
        psr = ia64_clear_ic();
        ia64_itr(0x1, IA64_TR_PALCODE,
                 GRANULEROUNDDOWN((unsigned long) pal_vaddr),
                 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
                 IA64_GRANULE_SHIFT);
        paravirt_dv_serialize_data();
        ia64_set_psr(psr);		/* restore psr */
}

void __init
efi_init (void)
{
        void *efi_map_start, *efi_map_end;
        efi_config_table_t *config_tables;
        efi_char16_t *c16;
        u64 efi_desc_size;
        char *cp, vendor[100] = "unknown";
        int i;
        unsigned long palo_phys;

        /*
         * It's too early to be able to use the standard kernel command line
         * support...
         */
        for (cp = boot_command_line; *cp; ) {
                if (memcmp(cp, "mem=", 4) == 0) {
                        mem_limit = memparse(cp + 4, &cp);
                } else if (memcmp(cp, "max_addr=", 9) == 0) {
                        max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
                } else if (memcmp(cp, "min_addr=", 9) == 0) {
                        min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
                } else {
                        while (*cp != ' ' && *cp)
                                ++cp;
                        while (*cp == ' ')
                                ++cp;
                }
        }
        if (min_addr != 0UL)
                printk(KERN_INFO "Ignoring memory below %lluMB\n",
                       min_addr >> 20);
        if (max_addr != ~0UL)
                printk(KERN_INFO "Ignoring memory above %lluMB\n",
                       max_addr >> 20);

        efi.systab = __va(ia64_boot_param->efi_systab);

        /*
         * Verify the EFI Table
         */
        if (efi.systab == NULL)
                panic("Whoa! Can't find EFI system table.\n");
        if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
                panic("Whoa! EFI system table signature incorrect\n");
        if ((efi.systab->hdr.revision >> 16) == 0)
                printk(KERN_WARNING "Warning: EFI system table version "
                       "%d.%02d, expected 1.00 or greater\n",
                       efi.systab->hdr.revision >> 16,
                       efi.systab->hdr.revision & 0xffff);

        config_tables = __va(efi.systab->tables);

        /* Show what we know for posterity */
        c16 = __va(efi.systab->fw_vendor);
        if (c16) {
                for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
                        vendor[i] = *c16++;
                vendor[i] = '\0';
        }

        printk(KERN_INFO "EFI v%u.%.02u by %s:",
               efi.systab->hdr.revision >> 16,
               efi.systab->hdr.revision & 0xffff, vendor);

        efi.mps        = EFI_INVALID_TABLE_ADDR;
        efi.acpi       = EFI_INVALID_TABLE_ADDR;
        efi.acpi20     = EFI_INVALID_TABLE_ADDR;
        efi.smbios     = EFI_INVALID_TABLE_ADDR;
        efi.sal_systab = EFI_INVALID_TABLE_ADDR;
        efi.boot_info  = EFI_INVALID_TABLE_ADDR;
        efi.hcdp       = EFI_INVALID_TABLE_ADDR;
        efi.uga        = EFI_INVALID_TABLE_ADDR;

        palo_phys      = EFI_INVALID_TABLE_ADDR;

        for (i = 0; i < (int) efi.systab->nr_tables; i++) {
                if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
                        efi.mps = config_tables[i].table;
                        printk(" MPS=0x%lx", config_tables[i].table);
                } else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
                        efi.acpi20 = config_tables[i].table;
                        printk(" ACPI 2.0=0x%lx", config_tables[i].table);
                } else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
                        efi.acpi = config_tables[i].table;
                        printk(" ACPI=0x%lx", config_tables[i].table);
                } else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
                        efi.smbios = config_tables[i].table;
                        printk(" SMBIOS=0x%lx", config_tables[i].table);
                } else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) {
                        efi.sal_systab = config_tables[i].table;
                        printk(" SALsystab=0x%lx", config_tables[i].table);
                } else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
                        efi.hcdp = config_tables[i].table;
                        printk(" HCDP=0x%lx", config_tables[i].table);
                } else if (efi_guidcmp(config_tables[i].guid,
                                       PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID) == 0) {
                        palo_phys = config_tables[i].table;
                        printk(" PALO=0x%lx", config_tables[i].table);
                }
        }
        printk("\n");

        if (palo_phys != EFI_INVALID_TABLE_ADDR)
                handle_palo(palo_phys);

        runtime = __va(efi.systab->runtime);
        efi.get_time = phys_get_time;
        efi.set_time = phys_set_time;
        efi.get_wakeup_time = phys_get_wakeup_time;
        efi.set_wakeup_time = phys_set_wakeup_time;
        efi.get_variable = phys_get_variable;
        efi.get_next_variable = phys_get_next_variable;
        efi.set_variable = phys_set_variable;
        efi.get_next_high_mono_count = phys_get_next_high_mono_count;
        efi.reset_system = phys_reset_system;

        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;

#if EFI_DEBUG
        /* print EFI memory map: */
        {
                efi_memory_desc_t *md;
                void *p;

                for (i = 0, p = efi_map_start; p < efi_map_end;
                     ++i, p += efi_desc_size)
                {
                        const char *unit;
                        unsigned long size;

                        md = p;
                        size = md->num_pages << EFI_PAGE_SHIFT;

                        if ((size >> 40) > 0) {
                                size >>= 40;
                                unit = "TB";
                        } else if ((size >> 30) > 0) {
                                size >>= 30;
                                unit = "GB";
                        } else if ((size >> 20) > 0) {
                                size >>= 20;
                                unit = "MB";
                        } else {
                                size >>= 10;
                                unit = "KB";
                        }

                        printk("mem%02d: type=%2u, attr=0x%016lx, "
                               "range=[0x%016lx-0x%016lx) (%4lu%s)\n",
                               i, md->type, md->attribute, md->phys_addr,
                               md->phys_addr + efi_md_size(md), size, unit);
                }
        }
#endif

        efi_map_pal_code();
        efi_enter_virtual_mode();
}
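
/*
 * Example of the revision decoding used in efi_init() above (encoding
 * per the EFI spec): hdr.revision carries the major version in the
 * upper 16 bits and the minor version in the lower 16 bits, so a
 * value of 0x0001000a is printed as "EFI v1.10".
 */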

void
efi_enter_virtual_mode (void)
{
        void *efi_map_start, *efi_map_end, *p;
        efi_memory_desc_t *md;
        efi_status_t status;
        u64 efi_desc_size;

        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;

        for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
                md = p;
                if (md->attribute & EFI_MEMORY_RUNTIME) {
                        /*
                         * Some descriptors have multiple bits set, so the
                         * order of the tests is relevant.
                         */
                        if (md->attribute & EFI_MEMORY_WB) {
                                md->virt_addr = (u64) __va(md->phys_addr);
                        } else if (md->attribute & EFI_MEMORY_UC) {
                                md->virt_addr = (u64) ioremap(md->phys_addr, 0);
                        } else if (md->attribute & EFI_MEMORY_WC) {
#if 0
                                md->virt_addr = ia64_remap(md->phys_addr,
                                                           (_PAGE_A |
                                                            _PAGE_P |
                                                            _PAGE_D |
                                                            _PAGE_MA_WC |
                                                            _PAGE_PL_0 |
                                                            _PAGE_AR_RW));
#else
                                printk(KERN_INFO "EFI_MEMORY_WC mapping\n");
                                md->virt_addr = (u64) ioremap(md->phys_addr, 0);
#endif
                        } else if (md->attribute & EFI_MEMORY_WT) {
#if 0
                                md->virt_addr = ia64_remap(md->phys_addr,
                                                           (_PAGE_A |
                                                            _PAGE_P |
                                                            _PAGE_D |
                                                            _PAGE_MA_WT |
                                                            _PAGE_PL_0 |
                                                            _PAGE_AR_RW));
#else
                                printk(KERN_INFO "EFI_MEMORY_WT mapping\n");
                                md->virt_addr = (u64) ioremap(md->phys_addr, 0);
#endif
                        }
                }
        }

        status = efi_call_phys(__va(runtime->set_virtual_address_map),
                               ia64_boot_param->efi_memmap_size,
                               efi_desc_size,
                               ia64_boot_param->efi_memdesc_version,
                               ia64_boot_param->efi_memmap);
        if (status != EFI_SUCCESS) {
                printk(KERN_WARNING "warning: unable to switch EFI into "
                       "virtual mode (status=%lu)\n", status);
                return;
        }

        /*
         * Now that EFI is in virtual mode, we call the EFI functions more
         * efficiently:
         */
        efi.get_time = virt_get_time;
        efi.set_time = virt_set_time;
        efi.get_wakeup_time = virt_get_wakeup_time;
        efi.set_wakeup_time = virt_set_wakeup_time;
        efi.get_variable = virt_get_variable;
        efi.get_next_variable = virt_get_next_variable;
        efi.set_variable = virt_set_variable;
        efi.get_next_high_mono_count = virt_get_next_high_mono_count;
        efi.reset_system = virt_reset_system;
}
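
/*
 * Note on the switch above: per the EFI specification,
 * SetVirtualAddressMap() may be invoked only once, and only through
 * the physical-mode entry point, which is why this one call goes
 * through efi_call_phys() while everything afterwards uses the
 * virt_* stubs.
 */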

/*
 * Walk the EFI memory map looking for the I/O port range.  There can
 * only be one entry of this type; other I/O port ranges should be
 * described via ACPI.
 */
u64
efi_get_iobase (void)
{
        void *efi_map_start, *efi_map_end, *p;
        efi_memory_desc_t *md;
        u64 efi_desc_size;

        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;

        for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
                md = p;
                if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
                        if (md->attribute & EFI_MEMORY_UC)
                                return md->phys_addr;
                }
        }
        return 0;
}

static struct kern_memdesc *
kern_memory_descriptor (unsigned long phys_addr)
{
        struct kern_memdesc *md;

        for (md = kern_memmap; md->start != ~0UL; md++) {
                if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
                        return md;
        }
        return NULL;
}

static efi_memory_desc_t *
efi_memory_descriptor (unsigned long phys_addr)
{
        void *efi_map_start, *efi_map_end, *p;
        efi_memory_desc_t *md;
        u64 efi_desc_size;

        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;

        for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
                md = p;

                if (phys_addr - md->phys_addr < efi_md_size(md))
                        return md;
        }
        return NULL;
}

static int
efi_memmap_intersects (unsigned long phys_addr, unsigned long size)
{
        void *efi_map_start, *efi_map_end, *p;
        efi_memory_desc_t *md;
        u64 efi_desc_size;
        unsigned long end;

        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;

        end = phys_addr + size;

        for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
                md = p;
                if (md->phys_addr < end && efi_md_end(md) > phys_addr)
                        return 1;
        }
        return 0;
}

u32
efi_mem_type (unsigned long phys_addr)
{
        efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);

        if (md)
                return md->type;
        return 0;
}

u64
efi_mem_attributes (unsigned long phys_addr)
{
        efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);

        if (md)
                return md->attribute;
        return 0;
}
EXPORT_SYMBOL(efi_mem_attributes);

u64
efi_mem_attribute (unsigned long phys_addr, unsigned long size)
{
        unsigned long end = phys_addr + size;
        efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
        u64 attr;

        if (!md)
                return 0;

        /*
         * EFI_MEMORY_RUNTIME is not a memory attribute; it just tells
         * the kernel that firmware needs this region mapped.
         */
        attr = md->attribute & ~EFI_MEMORY_RUNTIME;
        do {
                unsigned long md_end = efi_md_end(md);

                if (end <= md_end)
                        return attr;

                md = efi_memory_descriptor(md_end);
                if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr)
                        return 0;
        } while (md);
        return 0;	/* never reached */
}
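
/*
 * Example (hypothetical memory map): if [0x100000, 0x200000) and
 * [0x200000, 0x300000) are adjacent descriptors and both carry
 * EFI_MEMORY_WB, then efi_mem_attribute(0x180000, 0x100000) returns
 * EFI_MEMORY_WB even though the range crosses a descriptor boundary;
 * had the second descriptor differed in attributes, the result would
 * be 0.
 */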

u64
kern_mem_attribute (unsigned long phys_addr, unsigned long size)
{
        unsigned long end = phys_addr + size;
        struct kern_memdesc *md;
        u64 attr;

        /*
         * This is a hack for ioremap calls before we set up kern_memmap.
         * Maybe we should do efi_memmap_init() earlier instead.
         */
        if (!kern_memmap) {
                attr = efi_mem_attribute(phys_addr, size);
                if (attr & EFI_MEMORY_WB)
                        return EFI_MEMORY_WB;
                return 0;
        }

        md = kern_memory_descriptor(phys_addr);
        if (!md)
                return 0;

        attr = md->attribute;
        do {
                unsigned long md_end = kmd_end(md);

                if (end <= md_end)
                        return attr;

                md = kern_memory_descriptor(md_end);
                if (!md || md->attribute != attr)
                        return 0;
        } while (md);
        return 0;	/* never reached */
}
EXPORT_SYMBOL(kern_mem_attribute);

int
valid_phys_addr_range (unsigned long phys_addr, unsigned long size)
{
        u64 attr;

        /*
         * /dev/mem reads and writes use copy_to_user(), which implicitly
         * uses a granule-sized kernel identity mapping.  It's really
         * only safe to do this for regions in kern_memmap.  For more
         * details, see Documentation/ia64/aliasing.txt.
         */
        attr = kern_mem_attribute(phys_addr, size);
        if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
                return 1;
        return 0;
}

int
valid_mmap_phys_addr_range (unsigned long pfn, unsigned long size)
{
        unsigned long phys_addr = pfn << PAGE_SHIFT;
        u64 attr;

        attr = efi_mem_attribute(phys_addr, size);

        /*
         * /dev/mem mmap uses normal user pages, so we don't need the entire
         * granule, but the entire region we're mapping must support the same
         * attribute.
         */
        if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
                return 1;

        /*
         * Intel firmware doesn't tell us about all the MMIO regions, so
         * in general we have to allow mmap requests.  But if EFI *does*
         * tell us about anything inside this region, we should deny it.
         * The user can always map a smaller region to avoid the overlap.
         */
        if (efi_memmap_intersects(phys_addr, size))
                return 0;

        return 1;
}
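
/*
 * Together, the two validators above implement the policy from
 * Documentation/ia64/aliasing.txt: /dev/mem read()/write() goes
 * through the granule-sized kernel identity mapping and so must stay
 * within kern_memmap, whereas mmap creates fresh user mappings and
 * only requires consistent attributes across the EFI descriptors it
 * touches.
 */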

pgprot_t
phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size,
                     pgprot_t vma_prot)
{
        unsigned long phys_addr = pfn << PAGE_SHIFT;
        u64 attr;

        /*
         * For /dev/mem mmap, we use user mappings, but if the region is
         * in kern_memmap (and hence may be covered by a kernel mapping),
         * we must use the same attribute as the kernel mapping.
         */
        attr = kern_mem_attribute(phys_addr, size);
        if (attr & EFI_MEMORY_WB)
                return pgprot_cacheable(vma_prot);
        else if (attr & EFI_MEMORY_UC)
                return pgprot_noncached(vma_prot);

        /*
         * Some chipsets don't support UC access to memory.  If
         * WB is supported, we prefer that.
         */
        if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
                return pgprot_cacheable(vma_prot);

        return pgprot_noncached(vma_prot);
}

int __init
efi_uart_console_only(void)
{
        efi_status_t status;
        char *s, name[] = "ConOut";
        efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
        efi_char16_t *utf16, name_utf16[32];
        unsigned char data[1024];
        unsigned long size = sizeof(data);
        struct efi_generic_dev_path *hdr, *end_addr;
        int uart = 0;

        /* Convert to UTF-16 */
        utf16 = name_utf16;
        s = name;
        while (*s)
                *utf16++ = *s++ & 0x7f;
        *utf16 = 0;

        status = efi.get_variable(name_utf16, &guid, NULL, &size, data);
        if (status != EFI_SUCCESS) {
                printk(KERN_ERR "No EFI %s variable?\n", name);
                return 0;
        }

        hdr = (struct efi_generic_dev_path *) data;
        end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size);
        while (hdr < end_addr) {
                if (hdr->type == EFI_DEV_MSG &&
                    hdr->sub_type == EFI_DEV_MSG_UART)
                        uart = 1;
                else if (hdr->type == EFI_DEV_END_PATH ||
                         hdr->type == EFI_DEV_END_PATH2) {
                        if (!uart)
                                return 0;
                        if (hdr->sub_type == EFI_DEV_END_ENTIRE)
                                return 1;
                        uart = 0;
                }
                hdr = (struct efi_generic_dev_path *)((u8 *) hdr + hdr->length);
        }
        printk(KERN_ERR "Malformed %s value\n", name);
        return 0;
}
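
/*
 * Illustrative ConOut walks (hypothetical variable contents): a device
 * path of Msg/UART -> End-Entire makes efi_uart_console_only() return
 * 1, while Msg/UART -> End-Instance -> Msg/Vga -> End-Entire returns
 * 0, because the second instance reaches its end marker without a
 * UART node.
 */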

/*
 * Look for the first granule-aligned memory descriptor that is big
 * enough to hold the EFI memory map.  Make sure this descriptor is at
 * least granule sized so it does not get trimmed.
 */
struct kern_memdesc *
find_memmap_space (void)
{
        u64 contig_low = 0, contig_high = 0;
        u64 as = 0, ae;
        void *efi_map_start, *efi_map_end, *p, *q;
        efi_memory_desc_t *md, *pmd = NULL, *check_md;
        u64 space_needed, efi_desc_size;
        unsigned long total_mem = 0;

        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;

        /*
         * Worst case: we need 3 kernel descriptors for each efi descriptor
         * (if every entry has a WB part in the middle, and UC head and tail),
         * plus one for the end marker.
         */
        space_needed = sizeof(kern_memdesc_t) *
                (3 * (ia64_boot_param->efi_memmap_size/efi_desc_size) + 1);

        for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
                md = p;
                if (!efi_wb(md)) {
                        continue;
                }
                if (pmd == NULL || !efi_wb(pmd) ||
                    efi_md_end(pmd) != md->phys_addr) {
                        contig_low = GRANULEROUNDUP(md->phys_addr);
                        contig_high = efi_md_end(md);
                        for (q = p + efi_desc_size; q < efi_map_end;
                             q += efi_desc_size) {
                                check_md = q;
                                if (!efi_wb(check_md))
                                        break;
                                if (contig_high != check_md->phys_addr)
                                        break;
                                contig_high = efi_md_end(check_md);
                        }
                        contig_high = GRANULEROUNDDOWN(contig_high);
                }
                if (!is_memory_available(md) || md->type == EFI_LOADER_DATA)
                        continue;

                /* Round ends inward to granule boundaries */
                as = max(contig_low, md->phys_addr);
                ae = min(contig_high, efi_md_end(md));

                /* keep within max_addr= and min_addr= command line arg */
                as = max(as, min_addr);
                ae = min(ae, max_addr);
                if (ae <= as)
                        continue;

                /* avoid going over mem= command line arg */
                if (total_mem + (ae - as) > mem_limit)
                        ae -= total_mem + (ae - as) - mem_limit;

                if (ae <= as)
                        continue;

                if (ae - as > space_needed)
                        break;
        }
        if (p >= efi_map_end)
                panic("Can't allocate space for kernel memory descriptors");

        return __va(as);
}
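
/*
 * Why three kernel descriptors per EFI descriptor in the worst case:
 * granule-rounding an EFI entry inward can leave a head and a tail
 * outside the usable WB interior, and efi_memmap_init() below hands
 * those trimmings to the uncached allocator when the entry is also
 * EFI_MEMORY_UC, yielding up to UC head + WB middle + UC tail per
 * entry.
 */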

/*
 * Walk the EFI memory map and gather all memory available for the
 * kernel to use.  We can allocate partial granules only if the
 * unavailable parts exist, and are WB.
 */
unsigned long
efi_memmap_init(u64 *s, u64 *e)
{
        struct kern_memdesc *k, *prev = NULL;
        u64 contig_low = 0, contig_high = 0;
        u64 as, ae, lim;
        void *efi_map_start, *efi_map_end, *p, *q;
        efi_memory_desc_t *md, *pmd = NULL, *check_md;
        u64 efi_desc_size;
        unsigned long total_mem = 0;

        k = kern_memmap = find_memmap_space();

        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;

        for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
                md = p;
                if (!efi_wb(md)) {
                        if (efi_uc(md) &&
                            (md->type == EFI_CONVENTIONAL_MEMORY ||
                             md->type == EFI_BOOT_SERVICES_DATA)) {
                                k->attribute = EFI_MEMORY_UC;
                                k->start = md->phys_addr;
                                k->num_pages = md->num_pages;
                                k++;
                        }
                        continue;
                }
                if (pmd == NULL || !efi_wb(pmd) ||
                    efi_md_end(pmd) != md->phys_addr) {
                        contig_low = GRANULEROUNDUP(md->phys_addr);
                        contig_high = efi_md_end(md);
                        for (q = p + efi_desc_size; q < efi_map_end;
                             q += efi_desc_size) {
                                check_md = q;
                                if (!efi_wb(check_md))
                                        break;
                                if (contig_high != check_md->phys_addr)
                                        break;
                                contig_high = efi_md_end(check_md);
                        }
                        contig_high = GRANULEROUNDDOWN(contig_high);
                }
                if (!is_memory_available(md))
                        continue;

#ifdef CONFIG_CRASH_DUMP
                /* saved_max_pfn should ignore max_addr= command line arg */
                if (saved_max_pfn < (efi_md_end(md) >> PAGE_SHIFT))
                        saved_max_pfn = (efi_md_end(md) >> PAGE_SHIFT);
#endif
                /*
                 * Round ends inward to granule boundaries
                 * Give trimmings to uncached allocator
                 */
                if (md->phys_addr < contig_low) {
                        lim = min(efi_md_end(md), contig_low);
                        if (efi_uc(md)) {
                                if (k > kern_memmap &&
                                    (k-1)->attribute == EFI_MEMORY_UC &&
                                    kmd_end(k-1) == md->phys_addr) {
                                        (k-1)->num_pages +=
                                                (lim - md->phys_addr)
                                                >> EFI_PAGE_SHIFT;
                                } else {
                                        k->attribute = EFI_MEMORY_UC;
                                        k->start = md->phys_addr;
                                        k->num_pages = (lim - md->phys_addr)
                                                >> EFI_PAGE_SHIFT;
                                        k++;
                                }
                        }
                        as = contig_low;
                } else
                        as = md->phys_addr;

                if (efi_md_end(md) > contig_high) {
                        lim = max(md->phys_addr, contig_high);
                        if (efi_uc(md)) {
                                if (lim == md->phys_addr && k > kern_memmap &&
                                    (k-1)->attribute == EFI_MEMORY_UC &&
                                    kmd_end(k-1) == md->phys_addr) {
                                        (k-1)->num_pages += md->num_pages;
                                } else {
                                        k->attribute = EFI_MEMORY_UC;
                                        k->start = lim;
                                        k->num_pages = (efi_md_end(md) - lim)
                                                >> EFI_PAGE_SHIFT;
                                        k++;
                                }
                        }
                        ae = contig_high;
                } else
                        ae = efi_md_end(md);

                /* keep within max_addr= and min_addr= command line arg */
                as = max(as, min_addr);
                ae = min(ae, max_addr);
                if (ae <= as)
                        continue;

                /* avoid going over mem= command line arg */
                if (total_mem + (ae - as) > mem_limit)
                        ae -= total_mem + (ae - as) - mem_limit;

                if (ae <= as)
                        continue;
                if (prev && kmd_end(prev) == md->phys_addr) {
                        prev->num_pages += (ae - as) >> EFI_PAGE_SHIFT;
                        total_mem += ae - as;
                        continue;
                }
                k->attribute = EFI_MEMORY_WB;
                k->start = as;
                k->num_pages = (ae - as) >> EFI_PAGE_SHIFT;
                total_mem += ae - as;
                prev = k++;
        }
        k->start = ~0L;		/* end-marker */

        /* reserve the memory we are using for kern_memmap */
        *s = (u64) kern_memmap;
        *e = (u64) ++k;

        return total_mem;
}
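
/*
 * Sketch of the expected boot-time usage (the caller shown is an
 * assumption, cf. the arch setup code, not something defined here):
 *
 *	u64 start, end;
 *
 *	total = efi_memmap_init(&start, &end);
 *	// [start, end) now holds the kern_memdesc_t array and must be
 *	// added to the reserved regions before free memory is handed
 *	// to the allocator.
 */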

void
efi_initialize_iomem_resources(struct resource *code_resource,
                               struct resource *data_resource,
                               struct resource *bss_resource)
{
        struct resource *res;
        void *efi_map_start, *efi_map_end, *p;
        efi_memory_desc_t *md;
        u64 efi_desc_size;
        char *name;
        unsigned long flags;

        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;

        res = NULL;

        for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
                md = p;

                if (md->num_pages == 0) /* should not happen */
                        continue;

                flags = IORESOURCE_MEM | IORESOURCE_BUSY;
                switch (md->type) {

                case EFI_MEMORY_MAPPED_IO:
                case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
                        continue;

                case EFI_LOADER_CODE:
                case EFI_LOADER_DATA:
                case EFI_BOOT_SERVICES_DATA:
                case EFI_BOOT_SERVICES_CODE:
                case EFI_CONVENTIONAL_MEMORY:
                        if (md->attribute & EFI_MEMORY_WP) {
                                name = "System ROM";
                                flags |= IORESOURCE_READONLY;
                        } else if (md->attribute == EFI_MEMORY_UC)
                                name = "Uncached RAM";
                        else
                                name = "System RAM";
                        break;

                case EFI_ACPI_MEMORY_NVS:
                        name = "ACPI Non-volatile Storage";
                        break;

                case EFI_UNUSABLE_MEMORY:
                        name = "reserved";
                        flags |= IORESOURCE_DISABLED;
                        break;

                case EFI_RESERVED_TYPE:
                case EFI_RUNTIME_SERVICES_CODE:
                case EFI_RUNTIME_SERVICES_DATA:
                case EFI_ACPI_RECLAIM_MEMORY:
                default:
                        name = "reserved";
                        break;
                }

                if ((res = kzalloc(sizeof(struct resource),
                                   GFP_KERNEL)) == NULL) {
                        printk(KERN_ERR
                               "failed to allocate resource for iomem\n");
                        return;
                }

                res->name = name;
                res->start = md->phys_addr;
                res->end = md->phys_addr + efi_md_size(md) - 1;
                res->flags = flags;

                if (insert_resource(&iomem_resource, res) < 0)
                        kfree(res);
                else {
                        /*
                         * We don't know which region contains
                         * kernel data so we try it repeatedly and
                         * let the resource manager test it.
                         */
                        insert_resource(res, code_resource);
                        insert_resource(res, data_resource);
                        insert_resource(res, bss_resource);
#ifdef CONFIG_KEXEC
                        insert_resource(res, &efi_memmap_res);
                        insert_resource(res, &boot_param_res);
                        if (crashk_res.end > crashk_res.start)
                                insert_resource(res, &crashk_res);
#endif
                }
        }
}

#ifdef CONFIG_KEXEC
/*
 * Find a block of memory aligned to 64M, excluding the reserved
 * regions; rsvd_regions are sorted.
 */
unsigned long __init
kdump_find_rsvd_region (unsigned long size, struct rsvd_region *r, int n)
{
        int i;
        u64 start, end;
        u64 alignment = 1UL << _PAGE_SIZE_64M;
        void *efi_map_start, *efi_map_end, *p;
        efi_memory_desc_t *md;
        u64 efi_desc_size;

        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;

        for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
                md = p;
                if (!efi_wb(md))
                        continue;
                start = ALIGN(md->phys_addr, alignment);
                end = efi_md_end(md);
                for (i = 0; i < n; i++) {
                        if (__pa(r[i].start) >= start && __pa(r[i].end) < end) {
                                if (__pa(r[i].start) > start + size)
                                        return start;
                                start = ALIGN(__pa(r[i].end), alignment);
                                if (i < n - 1 &&
                                    __pa(r[i+1].start) < start + size)
                                        continue;
                                else
                                        break;
                        }
                }
                if (end > start + size)
                        return start;
        }

        printk(KERN_WARNING
               "Cannot reserve 0x%lx bytes of memory for crashdump\n", size);
        return ~0UL;
}
#endif

#ifdef CONFIG_CRASH_DUMP
/* Locate the size of the EFI descriptor found at a certain address */
unsigned long __init
vmcore_find_descriptor_size (unsigned long address)
{
        void *efi_map_start, *efi_map_end, *p;
        efi_memory_desc_t *md;
        u64 efi_desc_size;
        unsigned long ret = 0;

        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;

        for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
                md = p;
                if (efi_wb(md) && md->type == EFI_LOADER_DATA
                    && md->phys_addr == address) {
                        ret = efi_md_size(md);
                        break;
                }
        }

        if (ret == 0)
                printk(KERN_WARNING "Cannot locate EFI vmcore descriptor\n");

        return ret;
}
#endif