1 /* 2 * Extensible Firmware Interface 3 * 4 * Based on Extensible Firmware Interface Specification version 0.9 5 * April 30, 1999 6 * 7 * Copyright (C) 1999 VA Linux Systems 8 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> 9 * Copyright (C) 1999-2003 Hewlett-Packard Co. 10 * David Mosberger-Tang <davidm@hpl.hp.com> 11 * Stephane Eranian <eranian@hpl.hp.com> 12 * (c) Copyright 2006 Hewlett-Packard Development Company, L.P. 13 * Bjorn Helgaas <bjorn.helgaas@hp.com> 14 * 15 * All EFI Runtime Services are not implemented yet as EFI only 16 * supports physical mode addressing on SoftSDV. This is to be fixed 17 * in a future version. --drummond 1999-07-20 18 * 19 * Implemented EFI runtime services and virtual mode calls. --davidm 20 * 21 * Goutham Rao: <goutham.rao@intel.com> 22 * Skip non-WB memory and ignore empty memory ranges. 23 */ 24 #include <linux/module.h> 25 #include <linux/bootmem.h> 26 #include <linux/kernel.h> 27 #include <linux/init.h> 28 #include <linux/types.h> 29 #include <linux/time.h> 30 #include <linux/efi.h> 31 #include <linux/kexec.h> 32 #include <linux/mm.h> 33 34 #include <asm/io.h> 35 #include <asm/kregs.h> 36 #include <asm/meminit.h> 37 #include <asm/pgtable.h> 38 #include <asm/processor.h> 39 #include <asm/mca.h> 40 #include <asm/tlbflush.h> 41 42 #define EFI_DEBUG 0 43 44 extern efi_status_t efi_call_phys (void *, ...); 45 46 struct efi efi; 47 EXPORT_SYMBOL(efi); 48 static efi_runtime_services_t *runtime; 49 static unsigned long mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL; 50 51 #define efi_call_virt(f, args...) (*(f))(args) 52 53 #define STUB_GET_TIME(prefix, adjust_arg) \ 54 static efi_status_t \ 55 prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \ 56 { \ 57 struct ia64_fpreg fr[6]; \ 58 efi_time_cap_t *atc = NULL; \ 59 efi_status_t ret; \ 60 \ 61 if (tc) \ 62 atc = adjust_arg(tc); \ 63 ia64_save_scratch_fpregs(fr); \ 64 ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), \ 65 adjust_arg(tm), atc); \ 66 ia64_load_scratch_fpregs(fr); \ 67 return ret; \ 68 } 69 70 #define STUB_SET_TIME(prefix, adjust_arg) \ 71 static efi_status_t \ 72 prefix##_set_time (efi_time_t *tm) \ 73 { \ 74 struct ia64_fpreg fr[6]; \ 75 efi_status_t ret; \ 76 \ 77 ia64_save_scratch_fpregs(fr); \ 78 ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), \ 79 adjust_arg(tm)); \ 80 ia64_load_scratch_fpregs(fr); \ 81 return ret; \ 82 } 83 84 #define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \ 85 static efi_status_t \ 86 prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, \ 87 efi_time_t *tm) \ 88 { \ 89 struct ia64_fpreg fr[6]; \ 90 efi_status_t ret; \ 91 \ 92 ia64_save_scratch_fpregs(fr); \ 93 ret = efi_call_##prefix( \ 94 (efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \ 95 adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \ 96 ia64_load_scratch_fpregs(fr); \ 97 return ret; \ 98 } 99 100 #define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \ 101 static efi_status_t \ 102 prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \ 103 { \ 104 struct ia64_fpreg fr[6]; \ 105 efi_time_t *atm = NULL; \ 106 efi_status_t ret; \ 107 \ 108 if (tm) \ 109 atm = adjust_arg(tm); \ 110 ia64_save_scratch_fpregs(fr); \ 111 ret = efi_call_##prefix( \ 112 (efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \ 113 enabled, atm); \ 114 ia64_load_scratch_fpregs(fr); \ 115 return ret; \ 116 } 117 118 #define STUB_GET_VARIABLE(prefix, adjust_arg) \ 119 static efi_status_t \ 120 prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \ 121 unsigned long *data_size, void *data) \ 122 { \ 123 struct ia64_fpreg fr[6]; \ 124 u32 *aattr = NULL; \ 125 efi_status_t ret; \ 126 \ 127 if (attr) \ 128 aattr = adjust_arg(attr); \ 129 ia64_save_scratch_fpregs(fr); \ 130 ret = efi_call_##prefix( \ 131 (efi_get_variable_t *) __va(runtime->get_variable), \ 132 adjust_arg(name), adjust_arg(vendor), aattr, \ 133 adjust_arg(data_size), adjust_arg(data)); \ 134 ia64_load_scratch_fpregs(fr); \ 135 return ret; \ 136 } 137 138 #define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \ 139 static efi_status_t \ 140 prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, \ 141 efi_guid_t *vendor) \ 142 { \ 143 struct ia64_fpreg fr[6]; \ 144 efi_status_t ret; \ 145 \ 146 ia64_save_scratch_fpregs(fr); \ 147 ret = efi_call_##prefix( \ 148 (efi_get_next_variable_t *) __va(runtime->get_next_variable), \ 149 adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \ 150 ia64_load_scratch_fpregs(fr); \ 151 return ret; \ 152 } 153 154 #define STUB_SET_VARIABLE(prefix, adjust_arg) \ 155 static efi_status_t \ 156 prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, \ 157 unsigned long attr, unsigned long data_size, \ 158 void *data) \ 159 { \ 160 struct ia64_fpreg fr[6]; \ 161 efi_status_t ret; \ 162 \ 163 ia64_save_scratch_fpregs(fr); \ 164 ret = efi_call_##prefix( \ 165 (efi_set_variable_t *) __va(runtime->set_variable), \ 166 adjust_arg(name), adjust_arg(vendor), attr, data_size, \ 167 adjust_arg(data)); \ 168 ia64_load_scratch_fpregs(fr); \ 169 return ret; \ 170 } 171 172 #define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \ 173 static efi_status_t \ 174 prefix##_get_next_high_mono_count (u32 *count) \ 175 { \ 176 struct ia64_fpreg fr[6]; \ 177 efi_status_t ret; \ 178 \ 179 ia64_save_scratch_fpregs(fr); \ 180 ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \ 181 __va(runtime->get_next_high_mono_count), \ 182 adjust_arg(count)); \ 183 ia64_load_scratch_fpregs(fr); \ 184 return ret; \ 185 } 186 187 #define STUB_RESET_SYSTEM(prefix, adjust_arg) \ 188 static void \ 189 prefix##_reset_system (int reset_type, efi_status_t status, \ 190 unsigned long data_size, efi_char16_t *data) \ 191 { \ 192 struct ia64_fpreg fr[6]; \ 193 efi_char16_t *adata = NULL; \ 194 \ 195 if (data) \ 196 adata = adjust_arg(data); \ 197 \ 198 ia64_save_scratch_fpregs(fr); \ 199 efi_call_##prefix( \ 200 (efi_reset_system_t *) __va(runtime->reset_system), \ 201 reset_type, status, data_size, adata); \ 202 /* should not return, but just in case... */ \ 203 ia64_load_scratch_fpregs(fr); \ 204 } 205 206 #define phys_ptr(arg) ((__typeof__(arg)) ia64_tpa(arg)) 207 208 STUB_GET_TIME(phys, phys_ptr) 209 STUB_SET_TIME(phys, phys_ptr) 210 STUB_GET_WAKEUP_TIME(phys, phys_ptr) 211 STUB_SET_WAKEUP_TIME(phys, phys_ptr) 212 STUB_GET_VARIABLE(phys, phys_ptr) 213 STUB_GET_NEXT_VARIABLE(phys, phys_ptr) 214 STUB_SET_VARIABLE(phys, phys_ptr) 215 STUB_GET_NEXT_HIGH_MONO_COUNT(phys, phys_ptr) 216 STUB_RESET_SYSTEM(phys, phys_ptr) 217 218 #define id(arg) arg 219 220 STUB_GET_TIME(virt, id) 221 STUB_SET_TIME(virt, id) 222 STUB_GET_WAKEUP_TIME(virt, id) 223 STUB_SET_WAKEUP_TIME(virt, id) 224 STUB_GET_VARIABLE(virt, id) 225 STUB_GET_NEXT_VARIABLE(virt, id) 226 STUB_SET_VARIABLE(virt, id) 227 STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id) 228 STUB_RESET_SYSTEM(virt, id) 229 230 void 231 efi_gettimeofday (struct timespec *ts) 232 { 233 efi_time_t tm; 234 235 if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS) { 236 memset(ts, 0, sizeof(*ts)); 237 return; 238 } 239 240 ts->tv_sec = mktime(tm.year, tm.month, tm.day, 241 tm.hour, tm.minute, tm.second); 242 ts->tv_nsec = tm.nanosecond; 243 } 244 245 static int 246 is_memory_available (efi_memory_desc_t *md) 247 { 248 if (!(md->attribute & EFI_MEMORY_WB)) 249 return 0; 250 251 switch (md->type) { 252 case EFI_LOADER_CODE: 253 case EFI_LOADER_DATA: 254 case EFI_BOOT_SERVICES_CODE: 255 case EFI_BOOT_SERVICES_DATA: 256 case EFI_CONVENTIONAL_MEMORY: 257 return 1; 258 } 259 return 0; 260 } 261 262 typedef struct kern_memdesc { 263 u64 attribute; 264 u64 start; 265 u64 num_pages; 266 } kern_memdesc_t; 267 268 static kern_memdesc_t *kern_memmap; 269 270 #define efi_md_size(md) (md->num_pages << EFI_PAGE_SHIFT) 271 272 static inline u64 273 kmd_end(kern_memdesc_t *kmd) 274 { 275 return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT)); 276 } 277 278 static inline u64 279 efi_md_end(efi_memory_desc_t *md) 280 { 281 return (md->phys_addr + efi_md_size(md)); 282 } 283 284 static inline int 285 efi_wb(efi_memory_desc_t *md) 286 { 287 return (md->attribute & EFI_MEMORY_WB); 288 } 289 290 static inline int 291 efi_uc(efi_memory_desc_t *md) 292 { 293 return (md->attribute & EFI_MEMORY_UC); 294 } 295 296 static void 297 walk (efi_freemem_callback_t callback, void *arg, u64 attr) 298 { 299 kern_memdesc_t *k; 300 u64 start, end, voff; 301 302 voff = (attr == EFI_MEMORY_WB) ? PAGE_OFFSET : __IA64_UNCACHED_OFFSET; 303 for (k = kern_memmap; k->start != ~0UL; k++) { 304 if (k->attribute != attr) 305 continue; 306 start = PAGE_ALIGN(k->start); 307 end = (k->start + (k->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK; 308 if (start < end) 309 if ((*callback)(start + voff, end + voff, arg) < 0) 310 return; 311 } 312 } 313 314 /* 315 * Walk the EFI memory map and call CALLBACK once for each EFI memory 316 * descriptor that has memory that is available for OS use. 317 */ 318 void 319 efi_memmap_walk (efi_freemem_callback_t callback, void *arg) 320 { 321 walk(callback, arg, EFI_MEMORY_WB); 322 } 323 324 /* 325 * Walk the EFI memory map and call CALLBACK once for each EFI memory 326 * descriptor that has memory that is available for uncached allocator. 327 */ 328 void 329 efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg) 330 { 331 walk(callback, arg, EFI_MEMORY_UC); 332 } 333 334 /* 335 * Look for the PAL_CODE region reported by EFI and map it using an 336 * ITR to enable safe PAL calls in virtual mode. See IA-64 Processor 337 * Abstraction Layer chapter 11 in ADAG 338 */ 339 void * 340 efi_get_pal_addr (void) 341 { 342 void *efi_map_start, *efi_map_end, *p; 343 efi_memory_desc_t *md; 344 u64 efi_desc_size; 345 int pal_code_count = 0; 346 u64 vaddr, mask; 347 348 efi_map_start = __va(ia64_boot_param->efi_memmap); 349 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; 350 efi_desc_size = ia64_boot_param->efi_memdesc_size; 351 352 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { 353 md = p; 354 if (md->type != EFI_PAL_CODE) 355 continue; 356 357 if (++pal_code_count > 1) { 358 printk(KERN_ERR "Too many EFI Pal Code memory ranges, " 359 "dropped @ %lx\n", md->phys_addr); 360 continue; 361 } 362 /* 363 * The only ITLB entry in region 7 that is used is the one 364 * installed by __start(). That entry covers a 64MB range. 365 */ 366 mask = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1); 367 vaddr = PAGE_OFFSET + md->phys_addr; 368 369 /* 370 * We must check that the PAL mapping won't overlap with the 371 * kernel mapping. 372 * 373 * PAL code is guaranteed to be aligned on a power of 2 between 374 * 4k and 256KB and that only one ITR is needed to map it. This 375 * implies that the PAL code is always aligned on its size, 376 * i.e., the closest matching page size supported by the TLB. 377 * Therefore PAL code is guaranteed never to cross a 64MB unless 378 * it is bigger than 64MB (very unlikely!). So for now the 379 * following test is enough to determine whether or not we need 380 * a dedicated ITR for the PAL code. 381 */ 382 if ((vaddr & mask) == (KERNEL_START & mask)) { 383 printk(KERN_INFO "%s: no need to install ITR for PAL code\n", 384 __func__); 385 continue; 386 } 387 388 if (efi_md_size(md) > IA64_GRANULE_SIZE) 389 panic("Whoa! PAL code size bigger than a granule!"); 390 391 #if EFI_DEBUG 392 mask = ~((1 << IA64_GRANULE_SHIFT) - 1); 393 394 printk(KERN_INFO "CPU %d: mapping PAL code " 395 "[0x%lx-0x%lx) into [0x%lx-0x%lx)\n", 396 smp_processor_id(), md->phys_addr, 397 md->phys_addr + efi_md_size(md), 398 vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE); 399 #endif 400 return __va(md->phys_addr); 401 } 402 printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n", 403 __func__); 404 return NULL; 405 } 406 407 408 static u8 __init palo_checksum(u8 *buffer, u32 length) 409 { 410 u8 sum = 0; 411 u8 *end = buffer + length; 412 413 while (buffer < end) 414 sum = (u8) (sum + *(buffer++)); 415 416 return sum; 417 } 418 419 /* 420 * Parse and handle PALO table which is published at: 421 * http://www.dig64.org/home/DIG64_PALO_R1_0.pdf 422 */ 423 static void __init handle_palo(unsigned long palo_phys) 424 { 425 struct palo_table *palo = __va(palo_phys); 426 u8 checksum; 427 428 if (strncmp(palo->signature, PALO_SIG, sizeof(PALO_SIG) - 1)) { 429 printk(KERN_INFO "PALO signature incorrect.\n"); 430 return; 431 } 432 433 checksum = palo_checksum((u8 *)palo, palo->length); 434 if (checksum) { 435 printk(KERN_INFO "PALO checksum incorrect.\n"); 436 return; 437 } 438 439 setup_ptcg_sem(palo->max_tlb_purges, NPTCG_FROM_PALO); 440 } 441 442 void 443 efi_map_pal_code (void) 444 { 445 void *pal_vaddr = efi_get_pal_addr (); 446 u64 psr; 447 448 if (!pal_vaddr) 449 return; 450 451 /* 452 * Cannot write to CRx with PSR.ic=1 453 */ 454 psr = ia64_clear_ic(); 455 ia64_itr(0x1, IA64_TR_PALCODE, 456 GRANULEROUNDDOWN((unsigned long) pal_vaddr), 457 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)), 458 IA64_GRANULE_SHIFT); 459 ia64_set_psr(psr); /* restore psr */ 460 } 461 462 void __init 463 efi_init (void) 464 { 465 void *efi_map_start, *efi_map_end; 466 efi_config_table_t *config_tables; 467 efi_char16_t *c16; 468 u64 efi_desc_size; 469 char *cp, vendor[100] = "unknown"; 470 int i; 471 unsigned long palo_phys; 472 473 /* 474 * It's too early to be able to use the standard kernel command line 475 * support... 476 */ 477 for (cp = boot_command_line; *cp; ) { 478 if (memcmp(cp, "mem=", 4) == 0) { 479 mem_limit = memparse(cp + 4, &cp); 480 } else if (memcmp(cp, "max_addr=", 9) == 0) { 481 max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp)); 482 } else if (memcmp(cp, "min_addr=", 9) == 0) { 483 min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp)); 484 } else { 485 while (*cp != ' ' && *cp) 486 ++cp; 487 while (*cp == ' ') 488 ++cp; 489 } 490 } 491 if (min_addr != 0UL) 492 printk(KERN_INFO "Ignoring memory below %luMB\n", 493 min_addr >> 20); 494 if (max_addr != ~0UL) 495 printk(KERN_INFO "Ignoring memory above %luMB\n", 496 max_addr >> 20); 497 498 efi.systab = __va(ia64_boot_param->efi_systab); 499 500 /* 501 * Verify the EFI Table 502 */ 503 if (efi.systab == NULL) 504 panic("Whoa! Can't find EFI system table.\n"); 505 if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) 506 panic("Whoa! EFI system table signature incorrect\n"); 507 if ((efi.systab->hdr.revision >> 16) == 0) 508 printk(KERN_WARNING "Warning: EFI system table version " 509 "%d.%02d, expected 1.00 or greater\n", 510 efi.systab->hdr.revision >> 16, 511 efi.systab->hdr.revision & 0xffff); 512 513 config_tables = __va(efi.systab->tables); 514 515 /* Show what we know for posterity */ 516 c16 = __va(efi.systab->fw_vendor); 517 if (c16) { 518 for (i = 0;i < (int) sizeof(vendor) - 1 && *c16; ++i) 519 vendor[i] = *c16++; 520 vendor[i] = '\0'; 521 } 522 523 printk(KERN_INFO "EFI v%u.%.02u by %s:", 524 efi.systab->hdr.revision >> 16, 525 efi.systab->hdr.revision & 0xffff, vendor); 526 527 efi.mps = EFI_INVALID_TABLE_ADDR; 528 efi.acpi = EFI_INVALID_TABLE_ADDR; 529 efi.acpi20 = EFI_INVALID_TABLE_ADDR; 530 efi.smbios = EFI_INVALID_TABLE_ADDR; 531 efi.sal_systab = EFI_INVALID_TABLE_ADDR; 532 efi.boot_info = EFI_INVALID_TABLE_ADDR; 533 efi.hcdp = EFI_INVALID_TABLE_ADDR; 534 efi.uga = EFI_INVALID_TABLE_ADDR; 535 536 palo_phys = EFI_INVALID_TABLE_ADDR; 537 538 for (i = 0; i < (int) efi.systab->nr_tables; i++) { 539 if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) { 540 efi.mps = config_tables[i].table; 541 printk(" MPS=0x%lx", config_tables[i].table); 542 } else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) { 543 efi.acpi20 = config_tables[i].table; 544 printk(" ACPI 2.0=0x%lx", config_tables[i].table); 545 } else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) { 546 efi.acpi = config_tables[i].table; 547 printk(" ACPI=0x%lx", config_tables[i].table); 548 } else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) { 549 efi.smbios = config_tables[i].table; 550 printk(" SMBIOS=0x%lx", config_tables[i].table); 551 } else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) { 552 efi.sal_systab = config_tables[i].table; 553 printk(" SALsystab=0x%lx", config_tables[i].table); 554 } else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) { 555 efi.hcdp = config_tables[i].table; 556 printk(" HCDP=0x%lx", config_tables[i].table); 557 } else if (efi_guidcmp(config_tables[i].guid, 558 PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID) == 0) { 559 palo_phys = config_tables[i].table; 560 printk(" PALO=0x%lx", config_tables[i].table); 561 } 562 } 563 printk("\n"); 564 565 if (palo_phys != EFI_INVALID_TABLE_ADDR) 566 handle_palo(palo_phys); 567 568 runtime = __va(efi.systab->runtime); 569 efi.get_time = phys_get_time; 570 efi.set_time = phys_set_time; 571 efi.get_wakeup_time = phys_get_wakeup_time; 572 efi.set_wakeup_time = phys_set_wakeup_time; 573 efi.get_variable = phys_get_variable; 574 efi.get_next_variable = phys_get_next_variable; 575 efi.set_variable = phys_set_variable; 576 efi.get_next_high_mono_count = phys_get_next_high_mono_count; 577 efi.reset_system = phys_reset_system; 578 579 efi_map_start = __va(ia64_boot_param->efi_memmap); 580 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; 581 efi_desc_size = ia64_boot_param->efi_memdesc_size; 582 583 #if EFI_DEBUG 584 /* print EFI memory map: */ 585 { 586 efi_memory_desc_t *md; 587 void *p; 588 589 for (i = 0, p = efi_map_start; p < efi_map_end; 590 ++i, p += efi_desc_size) 591 { 592 const char *unit; 593 unsigned long size; 594 595 md = p; 596 size = md->num_pages << EFI_PAGE_SHIFT; 597 598 if ((size >> 40) > 0) { 599 size >>= 40; 600 unit = "TB"; 601 } else if ((size >> 30) > 0) { 602 size >>= 30; 603 unit = "GB"; 604 } else if ((size >> 20) > 0) { 605 size >>= 20; 606 unit = "MB"; 607 } else { 608 size >>= 10; 609 unit = "KB"; 610 } 611 612 printk("mem%02d: type=%2u, attr=0x%016lx, " 613 "range=[0x%016lx-0x%016lx) (%4lu%s)\n", 614 i, md->type, md->attribute, md->phys_addr, 615 md->phys_addr + efi_md_size(md), size, unit); 616 } 617 } 618 #endif 619 620 efi_map_pal_code(); 621 efi_enter_virtual_mode(); 622 } 623 624 void 625 efi_enter_virtual_mode (void) 626 { 627 void *efi_map_start, *efi_map_end, *p; 628 efi_memory_desc_t *md; 629 efi_status_t status; 630 u64 efi_desc_size; 631 632 efi_map_start = __va(ia64_boot_param->efi_memmap); 633 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; 634 efi_desc_size = ia64_boot_param->efi_memdesc_size; 635 636 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { 637 md = p; 638 if (md->attribute & EFI_MEMORY_RUNTIME) { 639 /* 640 * Some descriptors have multiple bits set, so the 641 * order of the tests is relevant. 642 */ 643 if (md->attribute & EFI_MEMORY_WB) { 644 md->virt_addr = (u64) __va(md->phys_addr); 645 } else if (md->attribute & EFI_MEMORY_UC) { 646 md->virt_addr = (u64) ioremap(md->phys_addr, 0); 647 } else if (md->attribute & EFI_MEMORY_WC) { 648 #if 0 649 md->virt_addr = ia64_remap(md->phys_addr, 650 (_PAGE_A | 651 _PAGE_P | 652 _PAGE_D | 653 _PAGE_MA_WC | 654 _PAGE_PL_0 | 655 _PAGE_AR_RW)); 656 #else 657 printk(KERN_INFO "EFI_MEMORY_WC mapping\n"); 658 md->virt_addr = (u64) ioremap(md->phys_addr, 0); 659 #endif 660 } else if (md->attribute & EFI_MEMORY_WT) { 661 #if 0 662 md->virt_addr = ia64_remap(md->phys_addr, 663 (_PAGE_A | 664 _PAGE_P | 665 _PAGE_D | 666 _PAGE_MA_WT | 667 _PAGE_PL_0 | 668 _PAGE_AR_RW)); 669 #else 670 printk(KERN_INFO "EFI_MEMORY_WT mapping\n"); 671 md->virt_addr = (u64) ioremap(md->phys_addr, 0); 672 #endif 673 } 674 } 675 } 676 677 status = efi_call_phys(__va(runtime->set_virtual_address_map), 678 ia64_boot_param->efi_memmap_size, 679 efi_desc_size, 680 ia64_boot_param->efi_memdesc_version, 681 ia64_boot_param->efi_memmap); 682 if (status != EFI_SUCCESS) { 683 printk(KERN_WARNING "warning: unable to switch EFI into " 684 "virtual mode (status=%lu)\n", status); 685 return; 686 } 687 688 /* 689 * Now that EFI is in virtual mode, we call the EFI functions more 690 * efficiently: 691 */ 692 efi.get_time = virt_get_time; 693 efi.set_time = virt_set_time; 694 efi.get_wakeup_time = virt_get_wakeup_time; 695 efi.set_wakeup_time = virt_set_wakeup_time; 696 efi.get_variable = virt_get_variable; 697 efi.get_next_variable = virt_get_next_variable; 698 efi.set_variable = virt_set_variable; 699 efi.get_next_high_mono_count = virt_get_next_high_mono_count; 700 efi.reset_system = virt_reset_system; 701 } 702 703 /* 704 * Walk the EFI memory map looking for the I/O port range. There can only be 705 * one entry of this type, other I/O port ranges should be described via ACPI. 706 */ 707 u64 708 efi_get_iobase (void) 709 { 710 void *efi_map_start, *efi_map_end, *p; 711 efi_memory_desc_t *md; 712 u64 efi_desc_size; 713 714 efi_map_start = __va(ia64_boot_param->efi_memmap); 715 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; 716 efi_desc_size = ia64_boot_param->efi_memdesc_size; 717 718 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { 719 md = p; 720 if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) { 721 if (md->attribute & EFI_MEMORY_UC) 722 return md->phys_addr; 723 } 724 } 725 return 0; 726 } 727 728 static struct kern_memdesc * 729 kern_memory_descriptor (unsigned long phys_addr) 730 { 731 struct kern_memdesc *md; 732 733 for (md = kern_memmap; md->start != ~0UL; md++) { 734 if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT)) 735 return md; 736 } 737 return NULL; 738 } 739 740 static efi_memory_desc_t * 741 efi_memory_descriptor (unsigned long phys_addr) 742 { 743 void *efi_map_start, *efi_map_end, *p; 744 efi_memory_desc_t *md; 745 u64 efi_desc_size; 746 747 efi_map_start = __va(ia64_boot_param->efi_memmap); 748 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; 749 efi_desc_size = ia64_boot_param->efi_memdesc_size; 750 751 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { 752 md = p; 753 754 if (phys_addr - md->phys_addr < efi_md_size(md)) 755 return md; 756 } 757 return NULL; 758 } 759 760 static int 761 efi_memmap_intersects (unsigned long phys_addr, unsigned long size) 762 { 763 void *efi_map_start, *efi_map_end, *p; 764 efi_memory_desc_t *md; 765 u64 efi_desc_size; 766 unsigned long end; 767 768 efi_map_start = __va(ia64_boot_param->efi_memmap); 769 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; 770 efi_desc_size = ia64_boot_param->efi_memdesc_size; 771 772 end = phys_addr + size; 773 774 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { 775 md = p; 776 if (md->phys_addr < end && efi_md_end(md) > phys_addr) 777 return 1; 778 } 779 return 0; 780 } 781 782 u32 783 efi_mem_type (unsigned long phys_addr) 784 { 785 efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); 786 787 if (md) 788 return md->type; 789 return 0; 790 } 791 792 u64 793 efi_mem_attributes (unsigned long phys_addr) 794 { 795 efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); 796 797 if (md) 798 return md->attribute; 799 return 0; 800 } 801 EXPORT_SYMBOL(efi_mem_attributes); 802 803 u64 804 efi_mem_attribute (unsigned long phys_addr, unsigned long size) 805 { 806 unsigned long end = phys_addr + size; 807 efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); 808 u64 attr; 809 810 if (!md) 811 return 0; 812 813 /* 814 * EFI_MEMORY_RUNTIME is not a memory attribute; it just tells 815 * the kernel that firmware needs this region mapped. 816 */ 817 attr = md->attribute & ~EFI_MEMORY_RUNTIME; 818 do { 819 unsigned long md_end = efi_md_end(md); 820 821 if (end <= md_end) 822 return attr; 823 824 md = efi_memory_descriptor(md_end); 825 if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr) 826 return 0; 827 } while (md); 828 return 0; /* never reached */ 829 } 830 831 u64 832 kern_mem_attribute (unsigned long phys_addr, unsigned long size) 833 { 834 unsigned long end = phys_addr + size; 835 struct kern_memdesc *md; 836 u64 attr; 837 838 /* 839 * This is a hack for ioremap calls before we set up kern_memmap. 840 * Maybe we should do efi_memmap_init() earlier instead. 841 */ 842 if (!kern_memmap) { 843 attr = efi_mem_attribute(phys_addr, size); 844 if (attr & EFI_MEMORY_WB) 845 return EFI_MEMORY_WB; 846 return 0; 847 } 848 849 md = kern_memory_descriptor(phys_addr); 850 if (!md) 851 return 0; 852 853 attr = md->attribute; 854 do { 855 unsigned long md_end = kmd_end(md); 856 857 if (end <= md_end) 858 return attr; 859 860 md = kern_memory_descriptor(md_end); 861 if (!md || md->attribute != attr) 862 return 0; 863 } while (md); 864 return 0; /* never reached */ 865 } 866 EXPORT_SYMBOL(kern_mem_attribute); 867 868 int 869 valid_phys_addr_range (unsigned long phys_addr, unsigned long size) 870 { 871 u64 attr; 872 873 /* 874 * /dev/mem reads and writes use copy_to_user(), which implicitly 875 * uses a granule-sized kernel identity mapping. It's really 876 * only safe to do this for regions in kern_memmap. For more 877 * details, see Documentation/ia64/aliasing.txt. 878 */ 879 attr = kern_mem_attribute(phys_addr, size); 880 if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC) 881 return 1; 882 return 0; 883 } 884 885 int 886 valid_mmap_phys_addr_range (unsigned long pfn, unsigned long size) 887 { 888 unsigned long phys_addr = pfn << PAGE_SHIFT; 889 u64 attr; 890 891 attr = efi_mem_attribute(phys_addr, size); 892 893 /* 894 * /dev/mem mmap uses normal user pages, so we don't need the entire 895 * granule, but the entire region we're mapping must support the same 896 * attribute. 897 */ 898 if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC) 899 return 1; 900 901 /* 902 * Intel firmware doesn't tell us about all the MMIO regions, so 903 * in general we have to allow mmap requests. But if EFI *does* 904 * tell us about anything inside this region, we should deny it. 905 * The user can always map a smaller region to avoid the overlap. 906 */ 907 if (efi_memmap_intersects(phys_addr, size)) 908 return 0; 909 910 return 1; 911 } 912 913 pgprot_t 914 phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, 915 pgprot_t vma_prot) 916 { 917 unsigned long phys_addr = pfn << PAGE_SHIFT; 918 u64 attr; 919 920 /* 921 * For /dev/mem mmap, we use user mappings, but if the region is 922 * in kern_memmap (and hence may be covered by a kernel mapping), 923 * we must use the same attribute as the kernel mapping. 924 */ 925 attr = kern_mem_attribute(phys_addr, size); 926 if (attr & EFI_MEMORY_WB) 927 return pgprot_cacheable(vma_prot); 928 else if (attr & EFI_MEMORY_UC) 929 return pgprot_noncached(vma_prot); 930 931 /* 932 * Some chipsets don't support UC access to memory. If 933 * WB is supported, we prefer that. 934 */ 935 if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB) 936 return pgprot_cacheable(vma_prot); 937 938 return pgprot_noncached(vma_prot); 939 } 940 941 int __init 942 efi_uart_console_only(void) 943 { 944 efi_status_t status; 945 char *s, name[] = "ConOut"; 946 efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID; 947 efi_char16_t *utf16, name_utf16[32]; 948 unsigned char data[1024]; 949 unsigned long size = sizeof(data); 950 struct efi_generic_dev_path *hdr, *end_addr; 951 int uart = 0; 952 953 /* Convert to UTF-16 */ 954 utf16 = name_utf16; 955 s = name; 956 while (*s) 957 *utf16++ = *s++ & 0x7f; 958 *utf16 = 0; 959 960 status = efi.get_variable(name_utf16, &guid, NULL, &size, data); 961 if (status != EFI_SUCCESS) { 962 printk(KERN_ERR "No EFI %s variable?\n", name); 963 return 0; 964 } 965 966 hdr = (struct efi_generic_dev_path *) data; 967 end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size); 968 while (hdr < end_addr) { 969 if (hdr->type == EFI_DEV_MSG && 970 hdr->sub_type == EFI_DEV_MSG_UART) 971 uart = 1; 972 else if (hdr->type == EFI_DEV_END_PATH || 973 hdr->type == EFI_DEV_END_PATH2) { 974 if (!uart) 975 return 0; 976 if (hdr->sub_type == EFI_DEV_END_ENTIRE) 977 return 1; 978 uart = 0; 979 } 980 hdr = (struct efi_generic_dev_path *)((u8 *) hdr + hdr->length); 981 } 982 printk(KERN_ERR "Malformed %s value\n", name); 983 return 0; 984 } 985 986 /* 987 * Look for the first granule aligned memory descriptor memory 988 * that is big enough to hold EFI memory map. Make sure this 989 * descriptor is atleast granule sized so it does not get trimmed 990 */ 991 struct kern_memdesc * 992 find_memmap_space (void) 993 { 994 u64 contig_low=0, contig_high=0; 995 u64 as = 0, ae; 996 void *efi_map_start, *efi_map_end, *p, *q; 997 efi_memory_desc_t *md, *pmd = NULL, *check_md; 998 u64 space_needed, efi_desc_size; 999 unsigned long total_mem = 0; 1000 1001 efi_map_start = __va(ia64_boot_param->efi_memmap); 1002 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; 1003 efi_desc_size = ia64_boot_param->efi_memdesc_size; 1004 1005 /* 1006 * Worst case: we need 3 kernel descriptors for each efi descriptor 1007 * (if every entry has a WB part in the middle, and UC head and tail), 1008 * plus one for the end marker. 1009 */ 1010 space_needed = sizeof(kern_memdesc_t) * 1011 (3 * (ia64_boot_param->efi_memmap_size/efi_desc_size) + 1); 1012 1013 for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) { 1014 md = p; 1015 if (!efi_wb(md)) { 1016 continue; 1017 } 1018 if (pmd == NULL || !efi_wb(pmd) || 1019 efi_md_end(pmd) != md->phys_addr) { 1020 contig_low = GRANULEROUNDUP(md->phys_addr); 1021 contig_high = efi_md_end(md); 1022 for (q = p + efi_desc_size; q < efi_map_end; 1023 q += efi_desc_size) { 1024 check_md = q; 1025 if (!efi_wb(check_md)) 1026 break; 1027 if (contig_high != check_md->phys_addr) 1028 break; 1029 contig_high = efi_md_end(check_md); 1030 } 1031 contig_high = GRANULEROUNDDOWN(contig_high); 1032 } 1033 if (!is_memory_available(md) || md->type == EFI_LOADER_DATA) 1034 continue; 1035 1036 /* Round ends inward to granule boundaries */ 1037 as = max(contig_low, md->phys_addr); 1038 ae = min(contig_high, efi_md_end(md)); 1039 1040 /* keep within max_addr= and min_addr= command line arg */ 1041 as = max(as, min_addr); 1042 ae = min(ae, max_addr); 1043 if (ae <= as) 1044 continue; 1045 1046 /* avoid going over mem= command line arg */ 1047 if (total_mem + (ae - as) > mem_limit) 1048 ae -= total_mem + (ae - as) - mem_limit; 1049 1050 if (ae <= as) 1051 continue; 1052 1053 if (ae - as > space_needed) 1054 break; 1055 } 1056 if (p >= efi_map_end) 1057 panic("Can't allocate space for kernel memory descriptors"); 1058 1059 return __va(as); 1060 } 1061 1062 /* 1063 * Walk the EFI memory map and gather all memory available for kernel 1064 * to use. We can allocate partial granules only if the unavailable 1065 * parts exist, and are WB. 1066 */ 1067 unsigned long 1068 efi_memmap_init(unsigned long *s, unsigned long *e) 1069 { 1070 struct kern_memdesc *k, *prev = NULL; 1071 u64 contig_low=0, contig_high=0; 1072 u64 as, ae, lim; 1073 void *efi_map_start, *efi_map_end, *p, *q; 1074 efi_memory_desc_t *md, *pmd = NULL, *check_md; 1075 u64 efi_desc_size; 1076 unsigned long total_mem = 0; 1077 1078 k = kern_memmap = find_memmap_space(); 1079 1080 efi_map_start = __va(ia64_boot_param->efi_memmap); 1081 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; 1082 efi_desc_size = ia64_boot_param->efi_memdesc_size; 1083 1084 for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) { 1085 md = p; 1086 if (!efi_wb(md)) { 1087 if (efi_uc(md) && 1088 (md->type == EFI_CONVENTIONAL_MEMORY || 1089 md->type == EFI_BOOT_SERVICES_DATA)) { 1090 k->attribute = EFI_MEMORY_UC; 1091 k->start = md->phys_addr; 1092 k->num_pages = md->num_pages; 1093 k++; 1094 } 1095 continue; 1096 } 1097 if (pmd == NULL || !efi_wb(pmd) || 1098 efi_md_end(pmd) != md->phys_addr) { 1099 contig_low = GRANULEROUNDUP(md->phys_addr); 1100 contig_high = efi_md_end(md); 1101 for (q = p + efi_desc_size; q < efi_map_end; 1102 q += efi_desc_size) { 1103 check_md = q; 1104 if (!efi_wb(check_md)) 1105 break; 1106 if (contig_high != check_md->phys_addr) 1107 break; 1108 contig_high = efi_md_end(check_md); 1109 } 1110 contig_high = GRANULEROUNDDOWN(contig_high); 1111 } 1112 if (!is_memory_available(md)) 1113 continue; 1114 1115 #ifdef CONFIG_CRASH_DUMP 1116 /* saved_max_pfn should ignore max_addr= command line arg */ 1117 if (saved_max_pfn < (efi_md_end(md) >> PAGE_SHIFT)) 1118 saved_max_pfn = (efi_md_end(md) >> PAGE_SHIFT); 1119 #endif 1120 /* 1121 * Round ends inward to granule boundaries 1122 * Give trimmings to uncached allocator 1123 */ 1124 if (md->phys_addr < contig_low) { 1125 lim = min(efi_md_end(md), contig_low); 1126 if (efi_uc(md)) { 1127 if (k > kern_memmap && 1128 (k-1)->attribute == EFI_MEMORY_UC && 1129 kmd_end(k-1) == md->phys_addr) { 1130 (k-1)->num_pages += 1131 (lim - md->phys_addr) 1132 >> EFI_PAGE_SHIFT; 1133 } else { 1134 k->attribute = EFI_MEMORY_UC; 1135 k->start = md->phys_addr; 1136 k->num_pages = (lim - md->phys_addr) 1137 >> EFI_PAGE_SHIFT; 1138 k++; 1139 } 1140 } 1141 as = contig_low; 1142 } else 1143 as = md->phys_addr; 1144 1145 if (efi_md_end(md) > contig_high) { 1146 lim = max(md->phys_addr, contig_high); 1147 if (efi_uc(md)) { 1148 if (lim == md->phys_addr && k > kern_memmap && 1149 (k-1)->attribute == EFI_MEMORY_UC && 1150 kmd_end(k-1) == md->phys_addr) { 1151 (k-1)->num_pages += md->num_pages; 1152 } else { 1153 k->attribute = EFI_MEMORY_UC; 1154 k->start = lim; 1155 k->num_pages = (efi_md_end(md) - lim) 1156 >> EFI_PAGE_SHIFT; 1157 k++; 1158 } 1159 } 1160 ae = contig_high; 1161 } else 1162 ae = efi_md_end(md); 1163 1164 /* keep within max_addr= and min_addr= command line arg */ 1165 as = max(as, min_addr); 1166 ae = min(ae, max_addr); 1167 if (ae <= as) 1168 continue; 1169 1170 /* avoid going over mem= command line arg */ 1171 if (total_mem + (ae - as) > mem_limit) 1172 ae -= total_mem + (ae - as) - mem_limit; 1173 1174 if (ae <= as) 1175 continue; 1176 if (prev && kmd_end(prev) == md->phys_addr) { 1177 prev->num_pages += (ae - as) >> EFI_PAGE_SHIFT; 1178 total_mem += ae - as; 1179 continue; 1180 } 1181 k->attribute = EFI_MEMORY_WB; 1182 k->start = as; 1183 k->num_pages = (ae - as) >> EFI_PAGE_SHIFT; 1184 total_mem += ae - as; 1185 prev = k++; 1186 } 1187 k->start = ~0L; /* end-marker */ 1188 1189 /* reserve the memory we are using for kern_memmap */ 1190 *s = (u64)kern_memmap; 1191 *e = (u64)++k; 1192 1193 return total_mem; 1194 } 1195 1196 void 1197 efi_initialize_iomem_resources(struct resource *code_resource, 1198 struct resource *data_resource, 1199 struct resource *bss_resource) 1200 { 1201 struct resource *res; 1202 void *efi_map_start, *efi_map_end, *p; 1203 efi_memory_desc_t *md; 1204 u64 efi_desc_size; 1205 char *name; 1206 unsigned long flags; 1207 1208 efi_map_start = __va(ia64_boot_param->efi_memmap); 1209 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; 1210 efi_desc_size = ia64_boot_param->efi_memdesc_size; 1211 1212 res = NULL; 1213 1214 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { 1215 md = p; 1216 1217 if (md->num_pages == 0) /* should not happen */ 1218 continue; 1219 1220 flags = IORESOURCE_MEM | IORESOURCE_BUSY; 1221 switch (md->type) { 1222 1223 case EFI_MEMORY_MAPPED_IO: 1224 case EFI_MEMORY_MAPPED_IO_PORT_SPACE: 1225 continue; 1226 1227 case EFI_LOADER_CODE: 1228 case EFI_LOADER_DATA: 1229 case EFI_BOOT_SERVICES_DATA: 1230 case EFI_BOOT_SERVICES_CODE: 1231 case EFI_CONVENTIONAL_MEMORY: 1232 if (md->attribute & EFI_MEMORY_WP) { 1233 name = "System ROM"; 1234 flags |= IORESOURCE_READONLY; 1235 } else { 1236 name = "System RAM"; 1237 } 1238 break; 1239 1240 case EFI_ACPI_MEMORY_NVS: 1241 name = "ACPI Non-volatile Storage"; 1242 break; 1243 1244 case EFI_UNUSABLE_MEMORY: 1245 name = "reserved"; 1246 flags |= IORESOURCE_DISABLED; 1247 break; 1248 1249 case EFI_RESERVED_TYPE: 1250 case EFI_RUNTIME_SERVICES_CODE: 1251 case EFI_RUNTIME_SERVICES_DATA: 1252 case EFI_ACPI_RECLAIM_MEMORY: 1253 default: 1254 name = "reserved"; 1255 break; 1256 } 1257 1258 if ((res = kzalloc(sizeof(struct resource), 1259 GFP_KERNEL)) == NULL) { 1260 printk(KERN_ERR 1261 "failed to allocate resource for iomem\n"); 1262 return; 1263 } 1264 1265 res->name = name; 1266 res->start = md->phys_addr; 1267 res->end = md->phys_addr + efi_md_size(md) - 1; 1268 res->flags = flags; 1269 1270 if (insert_resource(&iomem_resource, res) < 0) 1271 kfree(res); 1272 else { 1273 /* 1274 * We don't know which region contains 1275 * kernel data so we try it repeatedly and 1276 * let the resource manager test it. 1277 */ 1278 insert_resource(res, code_resource); 1279 insert_resource(res, data_resource); 1280 insert_resource(res, bss_resource); 1281 #ifdef CONFIG_KEXEC 1282 insert_resource(res, &efi_memmap_res); 1283 insert_resource(res, &boot_param_res); 1284 if (crashk_res.end > crashk_res.start) 1285 insert_resource(res, &crashk_res); 1286 #endif 1287 } 1288 } 1289 } 1290 1291 #ifdef CONFIG_KEXEC 1292 /* find a block of memory aligned to 64M exclude reserved regions 1293 rsvd_regions are sorted 1294 */ 1295 unsigned long __init 1296 kdump_find_rsvd_region (unsigned long size, struct rsvd_region *r, int n) 1297 { 1298 int i; 1299 u64 start, end; 1300 u64 alignment = 1UL << _PAGE_SIZE_64M; 1301 void *efi_map_start, *efi_map_end, *p; 1302 efi_memory_desc_t *md; 1303 u64 efi_desc_size; 1304 1305 efi_map_start = __va(ia64_boot_param->efi_memmap); 1306 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; 1307 efi_desc_size = ia64_boot_param->efi_memdesc_size; 1308 1309 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { 1310 md = p; 1311 if (!efi_wb(md)) 1312 continue; 1313 start = ALIGN(md->phys_addr, alignment); 1314 end = efi_md_end(md); 1315 for (i = 0; i < n; i++) { 1316 if (__pa(r[i].start) >= start && __pa(r[i].end) < end) { 1317 if (__pa(r[i].start) > start + size) 1318 return start; 1319 start = ALIGN(__pa(r[i].end), alignment); 1320 if (i < n-1 && 1321 __pa(r[i+1].start) < start + size) 1322 continue; 1323 else 1324 break; 1325 } 1326 } 1327 if (end > start + size) 1328 return start; 1329 } 1330 1331 printk(KERN_WARNING 1332 "Cannot reserve 0x%lx byte of memory for crashdump\n", size); 1333 return ~0UL; 1334 } 1335 #endif 1336 1337 #ifdef CONFIG_PROC_VMCORE 1338 /* locate the size find a the descriptor at a certain address */ 1339 unsigned long __init 1340 vmcore_find_descriptor_size (unsigned long address) 1341 { 1342 void *efi_map_start, *efi_map_end, *p; 1343 efi_memory_desc_t *md; 1344 u64 efi_desc_size; 1345 unsigned long ret = 0; 1346 1347 efi_map_start = __va(ia64_boot_param->efi_memmap); 1348 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; 1349 efi_desc_size = ia64_boot_param->efi_memdesc_size; 1350 1351 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { 1352 md = p; 1353 if (efi_wb(md) && md->type == EFI_LOADER_DATA 1354 && md->phys_addr == address) { 1355 ret = efi_md_size(md); 1356 break; 1357 } 1358 } 1359 1360 if (ret == 0) 1361 printk(KERN_WARNING "Cannot locate EFI vmcore descriptor\n"); 1362 1363 return ret; 1364 } 1365 #endif 1366