// SPDX-License-Identifier: GPL-2.0
/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 0.9
 * April 30, 1999
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999-2003 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 *
 * All EFI Runtime Services are not implemented yet as EFI only
 * supports physical mode addressing on SoftSDV. This is to be fixed
 * in a future version.  --drummond 1999-07-20
 *
 * Implemented EFI runtime services and virtual mode calls.  --davidm
 *
 * Goutham Rao: <goutham.rao@intel.com>
 *	Skip non-WB memory and ignore empty memory ranges.
 */
#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/crash_dump.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/efi.h>
#include <linux/kexec.h>
#include <linux/mm.h>

#include <asm/efi.h>
#include <asm/io.h>
#include <asm/kregs.h>
#include <asm/meminit.h>
#include <asm/processor.h>
#include <asm/mca.h>
#include <asm/sal.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

#define EFI_DEBUG	0

#define ESI_TABLE_GUID \
	EFI_GUID(0x43EA58DC, 0xCF28, 0x4b06, 0xB3, \
		 0x91, 0xB7, 0x50, 0x59, 0x34, 0x2B, 0xD4)

static unsigned long mps_phys = EFI_INVALID_TABLE_ADDR;
static __initdata unsigned long palo_phys;

unsigned long __initdata esi_phys = EFI_INVALID_TABLE_ADDR;
unsigned long hcdp_phys = EFI_INVALID_TABLE_ADDR;
unsigned long sal_systab_phys = EFI_INVALID_TABLE_ADDR;

static const efi_config_table_type_t arch_tables[] __initconst = {
	{ESI_TABLE_GUID,				&esi_phys,	"ESI" },
	{HCDP_TABLE_GUID,				&hcdp_phys,	"HCDP" },
	{MPS_TABLE_GUID,				&mps_phys,	"MPS" },
	{PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID,	&palo_phys,	"PALO" },
	{SAL_SYSTEM_TABLE_GUID,				&sal_systab_phys, "SALsystab" },
	{},
};

extern efi_status_t efi_call_phys (void *, ...);

static efi_runtime_services_t *runtime;
static u64 mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;

#define efi_call_virt(f, args...)	(*(f))(args)
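
/*
 * Each EFI runtime service below is wrapped twice by the STUB_* macros:
 * once with a "phys" prefix, for calls made while the firmware still
 * expects physical addresses (pointer arguments are converted via
 * phys_ptr()/ia64_tpa()), and once with a "virt" prefix, for use after
 * SetVirtualAddressMap() (arguments are passed through unchanged via id()).
 * The scratch floating-point registers are saved and restored around each
 * call because the firmware is free to clobber them.
 */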

#define STUB_GET_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_time_cap_t *atc = NULL; \
	efi_status_t ret; \
 \
	if (tc) \
		atc = adjust_arg(tc); \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), \
				adjust_arg(tm), atc); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_SET_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_time (efi_time_t *tm) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
 \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), \
				adjust_arg(tm)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, \
			  efi_time_t *tm) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
 \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix( \
		(efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \
		adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_time_t *atm = NULL; \
	efi_status_t ret; \
 \
	if (tm) \
		atm = adjust_arg(tm); \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix( \
		(efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \
		enabled, atm); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_GET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \
		       unsigned long *data_size, void *data) \
{ \
	struct ia64_fpreg fr[6]; \
	u32 *aattr = NULL; \
	efi_status_t ret; \
 \
	if (attr) \
		aattr = adjust_arg(attr); \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix( \
		(efi_get_variable_t *) __va(runtime->get_variable), \
		adjust_arg(name), adjust_arg(vendor), aattr, \
		adjust_arg(data_size), adjust_arg(data)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, \
			    efi_guid_t *vendor) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
 \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix( \
		(efi_get_next_variable_t *) __va(runtime->get_next_variable), \
		adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_SET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, \
		       u32 attr, unsigned long data_size, \
		       void *data) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
 \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix( \
		(efi_set_variable_t *) __va(runtime->set_variable), \
		adjust_arg(name), adjust_arg(vendor), attr, data_size, \
		adjust_arg(data)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_high_mono_count (u32 *count) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
 \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \
				__va(runtime->get_next_high_mono_count), \
				adjust_arg(count)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_RESET_SYSTEM(prefix, adjust_arg) \
static void \
prefix##_reset_system (int reset_type, efi_status_t status, \
		       unsigned long data_size, efi_char16_t *data) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_char16_t *adata = NULL; \
 \
	if (data) \
		adata = adjust_arg(data); \
 \
	ia64_save_scratch_fpregs(fr); \
	efi_call_##prefix( \
		(efi_reset_system_t *) __va(runtime->reset_system), \
		reset_type, status, data_size, adata); \
	/* should not return, but just in case... */ \
	ia64_load_scratch_fpregs(fr); \
}

#define phys_ptr(arg)	((__typeof__(arg)) ia64_tpa(arg))

STUB_GET_TIME(phys, phys_ptr)
STUB_SET_TIME(phys, phys_ptr)
STUB_GET_WAKEUP_TIME(phys, phys_ptr)
STUB_SET_WAKEUP_TIME(phys, phys_ptr)
STUB_GET_VARIABLE(phys, phys_ptr)
STUB_GET_NEXT_VARIABLE(phys, phys_ptr)
STUB_SET_VARIABLE(phys, phys_ptr)
STUB_GET_NEXT_HIGH_MONO_COUNT(phys, phys_ptr)
STUB_RESET_SYSTEM(phys, phys_ptr)

#define id(arg)	arg

STUB_GET_TIME(virt, id)
STUB_SET_TIME(virt, id)
STUB_GET_WAKEUP_TIME(virt, id)
STUB_SET_WAKEUP_TIME(virt, id)
STUB_GET_VARIABLE(virt, id)
STUB_GET_NEXT_VARIABLE(virt, id)
STUB_SET_VARIABLE(virt, id)
STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id)
STUB_RESET_SYSTEM(virt, id)

void
efi_gettimeofday (struct timespec64 *ts)
{
	efi_time_t tm;

	if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS) {
		memset(ts, 0, sizeof(*ts));
		return;
	}

	ts->tv_sec = mktime64(tm.year, tm.month, tm.day,
			      tm.hour, tm.minute, tm.second);
	ts->tv_nsec = tm.nanosecond;
}

static int
is_memory_available (efi_memory_desc_t *md)
{
	if (!(md->attribute & EFI_MEMORY_WB))
		return 0;

	switch (md->type) {
	case EFI_LOADER_CODE:
	case EFI_LOADER_DATA:
	case EFI_BOOT_SERVICES_CODE:
	case EFI_BOOT_SERVICES_DATA:
	case EFI_CONVENTIONAL_MEMORY:
		return 1;
	}
	return 0;
}

typedef struct kern_memdesc {
	u64 attribute;
	u64 start;
	u64 num_pages;
} kern_memdesc_t;

static kern_memdesc_t *kern_memmap;

#define efi_md_size(md)	(md->num_pages << EFI_PAGE_SHIFT)

static inline u64
kmd_end(kern_memdesc_t *kmd)
{
	return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT));
}

static inline u64
efi_md_end(efi_memory_desc_t *md)
{
	return (md->phys_addr + efi_md_size(md));
}

static inline int
efi_wb(efi_memory_desc_t *md)
{
	return (md->attribute & EFI_MEMORY_WB);
}

static inline int
efi_uc(efi_memory_desc_t *md)
{
	return (md->attribute & EFI_MEMORY_UC);
}

static void
walk (efi_freemem_callback_t callback, void *arg, u64 attr)
{
	kern_memdesc_t *k;
	u64 start, end, voff;

	voff = (attr == EFI_MEMORY_WB) ? PAGE_OFFSET : __IA64_UNCACHED_OFFSET;
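	/*
	 * The callback is handed virtual addresses: WB ranges are offset
	 * into the cached identity mapping at PAGE_OFFSET, UC ranges into
	 * the uncached mapping at __IA64_UNCACHED_OFFSET.
	 */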
	for (k = kern_memmap; k->start != ~0UL; k++) {
		if (k->attribute != attr)
			continue;
		start = PAGE_ALIGN(k->start);
		end = (k->start + (k->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK;
		if (start < end)
			if ((*callback)(start + voff, end + voff, arg) < 0)
				return;
	}
}

/*
 * Walk the EFI memory map and call CALLBACK once for each EFI memory
 * descriptor that has memory that is available for OS use.
 */
void
efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
{
	walk(callback, arg, EFI_MEMORY_WB);
}

/*
 * Walk the EFI memory map and call CALLBACK once for each EFI memory
 * descriptor that has memory that is available for the uncached allocator.
 */
void
efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg)
{
	walk(callback, arg, EFI_MEMORY_UC);
}

/*
 * Look for the PAL_CODE region reported by EFI and map it using an
 * ITR to enable safe PAL calls in virtual mode.  See IA-64 Processor
 * Abstraction Layer chapter 11 in ADAG
 */
void *
efi_get_pal_addr (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;
	int pal_code_count = 0;
	u64 vaddr, mask;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->type != EFI_PAL_CODE)
			continue;

		if (++pal_code_count > 1) {
			printk(KERN_ERR "Too many EFI Pal Code memory ranges, "
			       "dropped @ %llx\n", md->phys_addr);
			continue;
		}
		/*
		 * The only ITLB entry in region 7 that is used is the one
		 * installed by __start().  That entry covers a 64MB range.
		 */
		mask = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
		vaddr = PAGE_OFFSET + md->phys_addr;

		/*
		 * We must check that the PAL mapping won't overlap with the
		 * kernel mapping.
		 *
		 * PAL code is guaranteed to be aligned on a power of 2 between
		 * 4k and 256KB, so only one ITR is needed to map it. This
		 * implies that the PAL code is always aligned on its size,
		 * i.e., the closest matching page size supported by the TLB.
		 * Therefore PAL code is guaranteed never to cross a 64MB
		 * boundary unless it is bigger than 64MB (very unlikely!).
		 * So for now the following test is enough to determine whether
		 * or not we need a dedicated ITR for the PAL code.
		 */
		if ((vaddr & mask) == (KERNEL_START & mask)) {
			printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
			       __func__);
			continue;
		}

		if (efi_md_size(md) > IA64_GRANULE_SIZE)
			panic("Whoa!  PAL code size bigger than a granule!");

#if EFI_DEBUG
		mask = ~((1 << IA64_GRANULE_SHIFT) - 1);

		printk(KERN_INFO "CPU %d: mapping PAL code "
		       "[0x%llx-0x%llx) into [0x%llx-0x%llx)\n",
		       smp_processor_id(), md->phys_addr,
		       md->phys_addr + efi_md_size(md),
		       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
#endif
		return __va(md->phys_addr);
	}
	printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
	       __func__);
	return NULL;
}


static u8 __init palo_checksum(u8 *buffer, u32 length)
{
	u8 sum = 0;
	u8 *end = buffer + length;

	while (buffer < end)
		sum = (u8) (sum + *(buffer++));

	return sum;
}

/*
 * Parse and handle PALO table which is published at:
 * http://www.dig64.org/home/DIG64_PALO_R1_0.pdf
 */
static void __init handle_palo(unsigned long phys_addr)
{
	struct palo_table *palo = __va(phys_addr);
	u8 checksum;

	if (strncmp(palo->signature, PALO_SIG, sizeof(PALO_SIG) - 1)) {
		printk(KERN_INFO "PALO signature incorrect.\n");
		return;
	}

	checksum = palo_checksum((u8 *)palo, palo->length);
	if (checksum) {
		printk(KERN_INFO "PALO checksum incorrect.\n");
		return;
	}

	setup_ptcg_sem(palo->max_tlb_purges, NPTCG_FROM_PALO);
}

void
efi_map_pal_code (void)
{
	void *pal_vaddr = efi_get_pal_addr ();
	u64 psr;

	if (!pal_vaddr)
		return;

	/*
	 * Cannot write to CRx with PSR.ic=1
	 */
	psr = ia64_clear_ic();
	ia64_itr(0x1, IA64_TR_PALCODE,
		 GRANULEROUNDDOWN((unsigned long) pal_vaddr),
		 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
		 IA64_GRANULE_SHIFT);
	ia64_set_psr(psr);		/* restore psr */
}

void __init
efi_init (void)
{
	const efi_system_table_t *efi_systab;
	void *efi_map_start, *efi_map_end;
	u64 efi_desc_size;
	char *cp;

	set_bit(EFI_BOOT, &efi.flags);
	set_bit(EFI_64BIT, &efi.flags);

	/*
	 * It's too early to be able to use the standard kernel command line
	 * support...
	 */
	for (cp = boot_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			mem_limit = memparse(cp + 4, &cp);
		} else if (memcmp(cp, "max_addr=", 9) == 0) {
			max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
		} else if (memcmp(cp, "min_addr=", 9) == 0) {
			min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}
	if (min_addr != 0UL)
		printk(KERN_INFO "Ignoring memory below %lluMB\n",
		       min_addr >> 20);
	if (max_addr != ~0UL)
		printk(KERN_INFO "Ignoring memory above %lluMB\n",
		       max_addr >> 20);

	efi_systab = __va(ia64_boot_param->efi_systab);

	/*
	 * Verify the EFI Table
	 */
	if (efi_systab == NULL)
		panic("Whoa! Can't find EFI system table.\n");
	if (efi_systab_check_header(&efi_systab->hdr, 1))
		panic("Whoa! EFI system table signature incorrect\n");

	efi_systab_report_header(&efi_systab->hdr, efi_systab->fw_vendor);

	palo_phys = EFI_INVALID_TABLE_ADDR;

	if (efi_config_parse_tables(__va(efi_systab->tables),
				    efi_systab->nr_tables,
				    arch_tables) != 0)
		return;

	if (palo_phys != EFI_INVALID_TABLE_ADDR)
		handle_palo(palo_phys);

	runtime = __va(efi_systab->runtime);
	efi.get_time = phys_get_time;
	efi.set_time = phys_set_time;
	efi.get_wakeup_time = phys_get_wakeup_time;
	efi.set_wakeup_time = phys_set_wakeup_time;
	efi.get_variable = phys_get_variable;
	efi.get_next_variable = phys_get_next_variable;
	efi.set_variable = phys_set_variable;
	efi.get_next_high_mono_count = phys_get_next_high_mono_count;
	efi.reset_system = phys_reset_system;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

#if EFI_DEBUG
	/* print EFI memory map: */
	{
		efi_memory_desc_t *md;
		void *p;
		unsigned int i;

		for (i = 0, p = efi_map_start; p < efi_map_end;
		     ++i, p += efi_desc_size)
		{
			const char *unit;
			unsigned long size;
			char buf[64];

			md = p;
			size = md->num_pages << EFI_PAGE_SHIFT;

			if ((size >> 40) > 0) {
				size >>= 40;
				unit = "TB";
			} else if ((size >> 30) > 0) {
				size >>= 30;
				unit = "GB";
			} else if ((size >> 20) > 0) {
				size >>= 20;
				unit = "MB";
			} else {
				size >>= 10;
				unit = "KB";
			}

			printk("mem%02d: %s "
			       "range=[0x%016llx-0x%016llx) (%4lu%s)\n",
			       i, efi_md_typeattr_format(buf, sizeof(buf), md),
			       md->phys_addr,
			       md->phys_addr + efi_md_size(md), size, unit);
		}
	}
#endif

	efi_map_pal_code();
	efi_enter_virtual_mode();
}

void
efi_enter_virtual_mode (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	efi_status_t status;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->attribute & EFI_MEMORY_RUNTIME) {
			/*
			 * Some descriptors have multiple bits set, so the
			 * order of the tests is relevant.
			 */
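			/*
			 * WB-capable regions reuse the cached kernel
			 * identity mapping; everything else (UC, and the
			 * WC/WT fallbacks below) is mapped uncacheable
			 * via ioremap().
			 */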
			if (md->attribute & EFI_MEMORY_WB) {
				md->virt_addr = (u64) __va(md->phys_addr);
			} else if (md->attribute & EFI_MEMORY_UC) {
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
			} else if (md->attribute & EFI_MEMORY_WC) {
#if 0
				md->virt_addr = ia64_remap(md->phys_addr,
							   (_PAGE_A |
							    _PAGE_P |
							    _PAGE_D |
							    _PAGE_MA_WC |
							    _PAGE_PL_0 |
							    _PAGE_AR_RW));
#else
				printk(KERN_INFO "EFI_MEMORY_WC mapping\n");
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
#endif
			} else if (md->attribute & EFI_MEMORY_WT) {
#if 0
				md->virt_addr = ia64_remap(md->phys_addr,
							   (_PAGE_A |
							    _PAGE_P |
							    _PAGE_D |
							    _PAGE_MA_WT |
							    _PAGE_PL_0 |
							    _PAGE_AR_RW));
#else
				printk(KERN_INFO "EFI_MEMORY_WT mapping\n");
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
#endif
			}
		}
	}

	status = efi_call_phys(__va(runtime->set_virtual_address_map),
			       ia64_boot_param->efi_memmap_size,
			       efi_desc_size,
			       ia64_boot_param->efi_memdesc_version,
			       ia64_boot_param->efi_memmap);
	if (status != EFI_SUCCESS) {
		printk(KERN_WARNING "warning: unable to switch EFI into "
		       "virtual mode (status=%lu)\n", status);
		return;
	}

	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);

	/*
	 * Now that EFI is in virtual mode, we call the EFI functions more
	 * efficiently:
	 */
	efi.get_time = virt_get_time;
	efi.set_time = virt_set_time;
	efi.get_wakeup_time = virt_get_wakeup_time;
	efi.set_wakeup_time = virt_set_wakeup_time;
	efi.get_variable = virt_get_variable;
	efi.get_next_variable = virt_get_next_variable;
	efi.set_variable = virt_set_variable;
	efi.get_next_high_mono_count = virt_get_next_high_mono_count;
	efi.reset_system = virt_reset_system;
}

/*
 * Walk the EFI memory map looking for the I/O port range.  There can only be
 * one entry of this type; other I/O port ranges should be described via ACPI.
 */
u64
efi_get_iobase (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
			if (md->attribute & EFI_MEMORY_UC)
				return md->phys_addr;
		}
	}
	return 0;
}

static struct kern_memdesc *
kern_memory_descriptor (unsigned long phys_addr)
{
	struct kern_memdesc *md;

	for (md = kern_memmap; md->start != ~0UL; md++) {
		if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
			return md;
	}
	return NULL;
}

static efi_memory_desc_t *
efi_memory_descriptor (unsigned long phys_addr)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;

		if (phys_addr - md->phys_addr < efi_md_size(md))
			return md;
	}
	return NULL;
}

static int
efi_memmap_intersects (unsigned long phys_addr, unsigned long size)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;
	unsigned long end;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	end = phys_addr + size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->phys_addr < end && efi_md_end(md) > phys_addr)
			return 1;
	}
	return 0;
}

int
efi_mem_type (unsigned long phys_addr)
{
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);

	if (md)
		return md->type;
	return -EINVAL;
}

u64
efi_mem_attributes (unsigned long phys_addr)
{
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);

	if (md)
		return md->attribute;
	return 0;
}
EXPORT_SYMBOL(efi_mem_attributes);

u64
efi_mem_attribute (unsigned long phys_addr, unsigned long size)
{
	unsigned long end = phys_addr + size;
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
	u64 attr;

	if (!md)
		return 0;

	/*
	 * EFI_MEMORY_RUNTIME is not a memory attribute; it just tells
	 * the kernel that firmware needs this region mapped.
	 */
	attr = md->attribute & ~EFI_MEMORY_RUNTIME;
	do {
		unsigned long md_end = efi_md_end(md);

		if (end <= md_end)
			return attr;

		md = efi_memory_descriptor(md_end);
		if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr)
			return 0;
	} while (md);
	return 0;	/* never reached */
}

u64
kern_mem_attribute (unsigned long phys_addr, unsigned long size)
{
	unsigned long end = phys_addr + size;
	struct kern_memdesc *md;
	u64 attr;

	/*
	 * This is a hack for ioremap calls before we set up kern_memmap.
	 * Maybe we should do efi_memmap_init() earlier instead.
	 */
	if (!kern_memmap) {
		attr = efi_mem_attribute(phys_addr, size);
		if (attr & EFI_MEMORY_WB)
			return EFI_MEMORY_WB;
		return 0;
	}

	md = kern_memory_descriptor(phys_addr);
	if (!md)
		return 0;

	attr = md->attribute;
	do {
		unsigned long md_end = kmd_end(md);

		if (end <= md_end)
			return attr;

		md = kern_memory_descriptor(md_end);
		if (!md || md->attribute != attr)
			return 0;
	} while (md);
	return 0;	/* never reached */
}

int
valid_phys_addr_range (phys_addr_t phys_addr, unsigned long size)
{
	u64 attr;

	/*
	 * /dev/mem reads and writes use copy_to_user(), which implicitly
	 * uses a granule-sized kernel identity mapping.  It's really
	 * only safe to do this for regions in kern_memmap.  For more
	 * details, see Documentation/ia64/aliasing.rst.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
		return 1;
	return 0;
}

int
valid_mmap_phys_addr_range (unsigned long pfn, unsigned long size)
{
	unsigned long phys_addr = pfn << PAGE_SHIFT;
	u64 attr;

	attr = efi_mem_attribute(phys_addr, size);

	/*
	 * /dev/mem mmap uses normal user pages, so we don't need the entire
	 * granule, but the entire region we're mapping must support the same
	 * attribute.
	 */
	if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
		return 1;

	/*
	 * Intel firmware doesn't tell us about all the MMIO regions, so
	 * in general we have to allow mmap requests.  But if EFI *does*
	 * tell us about anything inside this region, we should deny it.
	 * The user can always map a smaller region to avoid the overlap.
	 */
	if (efi_memmap_intersects(phys_addr, size))
		return 0;

	return 1;
}

pgprot_t
phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size,
		     pgprot_t vma_prot)
{
	unsigned long phys_addr = pfn << PAGE_SHIFT;
	u64 attr;

	/*
	 * For /dev/mem mmap, we use user mappings, but if the region is
	 * in kern_memmap (and hence may be covered by a kernel mapping),
	 * we must use the same attribute as the kernel mapping.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return pgprot_cacheable(vma_prot);
	else if (attr & EFI_MEMORY_UC)
		return pgprot_noncached(vma_prot);

	/*
	 * Some chipsets don't support UC access to memory.  If
	 * WB is supported, we prefer that.
	 */
	if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
		return pgprot_cacheable(vma_prot);

	return pgprot_noncached(vma_prot);
}

int __init
efi_uart_console_only(void)
{
	efi_status_t status;
	char *s, name[] = "ConOut";
	efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
	efi_char16_t *utf16, name_utf16[32];
	unsigned char data[1024];
	unsigned long size = sizeof(data);
	struct efi_generic_dev_path *hdr, *end_addr;
	int uart = 0;

	/* Convert to UTF-16 */
	utf16 = name_utf16;
	s = name;
	while (*s)
		*utf16++ = *s++ & 0x7f;
	*utf16 = 0;

	status = efi.get_variable(name_utf16, &guid, NULL, &size, data);
	if (status != EFI_SUCCESS) {
		printk(KERN_ERR "No EFI %s variable?\n", name);
		return 0;
	}

	hdr = (struct efi_generic_dev_path *) data;
	end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size);
	while (hdr < end_addr) {
		if (hdr->type == EFI_DEV_MSG &&
		    hdr->sub_type == EFI_DEV_MSG_UART)
			uart = 1;
		else if (hdr->type == EFI_DEV_END_PATH ||
			 hdr->type == EFI_DEV_END_PATH2) {
			if (!uart)
				return 0;
			if (hdr->sub_type == EFI_DEV_END_ENTIRE)
				return 1;
			uart = 0;
		}
		hdr = (struct efi_generic_dev_path *)((u8 *) hdr + hdr->length);
	}
	printk(KERN_ERR "Malformed %s value\n", name);
	return 0;
}

/*
 * Look for the first granule-aligned memory descriptor that is big enough
 * to hold the EFI memory map.  Make sure this descriptor is at least
 * granule sized so it does not get trimmed.
 */
struct kern_memdesc *
find_memmap_space (void)
{
	u64 contig_low = 0, contig_high = 0;
	u64 as = 0, ae;
	void *efi_map_start, *efi_map_end, *p, *q;
	efi_memory_desc_t *md, *pmd = NULL, *check_md;
	u64 space_needed, efi_desc_size;
	unsigned long total_mem = 0;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	/*
	 * Worst case: we need 3 kernel descriptors for each efi descriptor
	 * (if every entry has a WB part in the middle, and UC head and tail),
	 * plus one for the end marker.
	 */
	space_needed = sizeof(kern_memdesc_t) *
		(3 * (ia64_boot_param->efi_memmap_size/efi_desc_size) + 1);

	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
		md = p;
		if (!efi_wb(md)) {
			continue;
		}
		if (pmd == NULL || !efi_wb(pmd) ||
		    efi_md_end(pmd) != md->phys_addr) {
			contig_low = GRANULEROUNDUP(md->phys_addr);
			contig_high = efi_md_end(md);
			for (q = p + efi_desc_size; q < efi_map_end;
			     q += efi_desc_size) {
				check_md = q;
				if (!efi_wb(check_md))
					break;
				if (contig_high != check_md->phys_addr)
					break;
				contig_high = efi_md_end(check_md);
			}
			contig_high = GRANULEROUNDDOWN(contig_high);
		}
		if (!is_memory_available(md) || md->type == EFI_LOADER_DATA)
			continue;

		/* Round ends inward to granule boundaries */
		as = max(contig_low, md->phys_addr);
		ae = min(contig_high, efi_md_end(md));

		/* keep within max_addr= and min_addr= command line arg */
		as = max(as, min_addr);
		ae = min(ae, max_addr);
		if (ae <= as)
			continue;

		/* avoid going over mem= command line arg */
		if (total_mem + (ae - as) > mem_limit)
			ae -= total_mem + (ae - as) - mem_limit;

		if (ae <= as)
			continue;

		if (ae - as > space_needed)
			break;
	}
	if (p >= efi_map_end)
		panic("Can't allocate space for kernel memory descriptors");

	return __va(as);
}

/*
 * Walk the EFI memory map and gather all memory available for kernel
 * to use.  We can allocate partial granules only if the unavailable
 * parts exist, and are WB.
 */
unsigned long
efi_memmap_init(u64 *s, u64 *e)
{
	struct kern_memdesc *k, *prev = NULL;
	u64 contig_low = 0, contig_high = 0;
	u64 as, ae, lim;
	void *efi_map_start, *efi_map_end, *p, *q;
	efi_memory_desc_t *md, *pmd = NULL, *check_md;
	u64 efi_desc_size;
	unsigned long total_mem = 0;

	k = kern_memmap = find_memmap_space();

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
		md = p;
		if (!efi_wb(md)) {
			if (efi_uc(md) &&
			    (md->type == EFI_CONVENTIONAL_MEMORY ||
			     md->type == EFI_BOOT_SERVICES_DATA)) {
				k->attribute = EFI_MEMORY_UC;
				k->start = md->phys_addr;
				k->num_pages = md->num_pages;
				k++;
			}
			continue;
		}
		if (pmd == NULL || !efi_wb(pmd) ||
		    efi_md_end(pmd) != md->phys_addr) {
			contig_low = GRANULEROUNDUP(md->phys_addr);
			contig_high = efi_md_end(md);
			for (q = p + efi_desc_size; q < efi_map_end;
			     q += efi_desc_size) {
				check_md = q;
				if (!efi_wb(check_md))
					break;
				if (contig_high != check_md->phys_addr)
					break;
				contig_high = efi_md_end(check_md);
			}
			contig_high = GRANULEROUNDDOWN(contig_high);
		}
		if (!is_memory_available(md))
			continue;

		/*
		 * Round ends inward to granule boundaries
		 * Give trimmings to uncached allocator
		 */
		if (md->phys_addr < contig_low) {
			lim = min(efi_md_end(md), contig_low);
			if (efi_uc(md)) {
				if (k > kern_memmap &&
				    (k-1)->attribute == EFI_MEMORY_UC &&
				    kmd_end(k-1) == md->phys_addr) {
					(k-1)->num_pages +=
						(lim - md->phys_addr)
						>> EFI_PAGE_SHIFT;
				} else {
					k->attribute = EFI_MEMORY_UC;
					k->start = md->phys_addr;
					k->num_pages = (lim - md->phys_addr)
						>> EFI_PAGE_SHIFT;
					k++;
				}
			}
			as = contig_low;
		} else
			as = md->phys_addr;

		if (efi_md_end(md) > contig_high) {
			lim = max(md->phys_addr, contig_high);
			if (efi_uc(md)) {
				if (lim == md->phys_addr && k > kern_memmap &&
				    (k-1)->attribute == EFI_MEMORY_UC &&
				    kmd_end(k-1) == md->phys_addr) {
					(k-1)->num_pages += md->num_pages;
				} else {
					k->attribute = EFI_MEMORY_UC;
					k->start = lim;
					k->num_pages = (efi_md_end(md) - lim)
						>> EFI_PAGE_SHIFT;
					k++;
				}
			}
			ae = contig_high;
		} else
			ae = efi_md_end(md);

		/* keep within max_addr= and min_addr= command line arg */
		as = max(as, min_addr);
		ae = min(ae, max_addr);
		if (ae <= as)
			continue;

		/* avoid going over mem= command line arg */
		if (total_mem + (ae - as) > mem_limit)
			ae -= total_mem + (ae - as) - mem_limit;

		if (ae <= as)
			continue;
		if (prev && kmd_end(prev) == md->phys_addr) {
			prev->num_pages += (ae - as) >> EFI_PAGE_SHIFT;
			total_mem += ae - as;
			continue;
		}
		k->attribute = EFI_MEMORY_WB;
		k->start = as;
		k->num_pages = (ae - as) >> EFI_PAGE_SHIFT;
		total_mem += ae - as;
		prev = k++;
	}
	k->start = ~0L;	/* end-marker */

	/* reserve the memory we are using for kern_memmap */
	*s = (u64)kern_memmap;
	*e = (u64)++k;

	return total_mem;
}

void
efi_initialize_iomem_resources(struct resource *code_resource,
			       struct resource *data_resource,
			       struct resource *bss_resource)
{
	struct resource *res;
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;
	char *name;
	unsigned long flags, desc;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	res = NULL;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;

		if (md->num_pages == 0) /* should not happen */
			continue;

		flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		desc = IORES_DESC_NONE;

		switch (md->type) {

		case EFI_MEMORY_MAPPED_IO:
		case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
			continue;

		case EFI_LOADER_CODE:
		case EFI_LOADER_DATA:
		case EFI_BOOT_SERVICES_DATA:
		case EFI_BOOT_SERVICES_CODE:
		case EFI_CONVENTIONAL_MEMORY:
			if (md->attribute & EFI_MEMORY_WP) {
				name = "System ROM";
				flags |= IORESOURCE_READONLY;
			} else if (md->attribute == EFI_MEMORY_UC) {
				name = "Uncached RAM";
			} else {
				name = "System RAM";
				flags |= IORESOURCE_SYSRAM;
			}
			break;

		case EFI_ACPI_MEMORY_NVS:
			name = "ACPI Non-volatile Storage";
			desc = IORES_DESC_ACPI_NV_STORAGE;
			break;

		case EFI_UNUSABLE_MEMORY:
			name = "reserved";
			flags |= IORESOURCE_DISABLED;
			break;

		case EFI_PERSISTENT_MEMORY:
			name = "Persistent Memory";
			desc = IORES_DESC_PERSISTENT_MEMORY;
			break;

		case EFI_RESERVED_TYPE:
		case EFI_RUNTIME_SERVICES_CODE:
		case EFI_RUNTIME_SERVICES_DATA:
		case EFI_ACPI_RECLAIM_MEMORY:
		default:
			name = "reserved";
			break;
		}

		if ((res = kzalloc(sizeof(struct resource),
				   GFP_KERNEL)) == NULL) {
			printk(KERN_ERR
			       "failed to allocate resource for iomem\n");
"failed to allocate resource for iomem\n"); 1247 return; 1248 } 1249 1250 res->name = name; 1251 res->start = md->phys_addr; 1252 res->end = md->phys_addr + efi_md_size(md) - 1; 1253 res->flags = flags; 1254 res->desc = desc; 1255 1256 if (insert_resource(&iomem_resource, res) < 0) 1257 kfree(res); 1258 else { 1259 /* 1260 * We don't know which region contains 1261 * kernel data so we try it repeatedly and 1262 * let the resource manager test it. 1263 */ 1264 insert_resource(res, code_resource); 1265 insert_resource(res, data_resource); 1266 insert_resource(res, bss_resource); 1267 #ifdef CONFIG_KEXEC 1268 insert_resource(res, &efi_memmap_res); 1269 insert_resource(res, &boot_param_res); 1270 if (crashk_res.end > crashk_res.start) 1271 insert_resource(res, &crashk_res); 1272 #endif 1273 } 1274 } 1275 } 1276 1277 #ifdef CONFIG_KEXEC 1278 /* find a block of memory aligned to 64M exclude reserved regions 1279 rsvd_regions are sorted 1280 */ 1281 unsigned long __init 1282 kdump_find_rsvd_region (unsigned long size, struct rsvd_region *r, int n) 1283 { 1284 int i; 1285 u64 start, end; 1286 u64 alignment = 1UL << _PAGE_SIZE_64M; 1287 void *efi_map_start, *efi_map_end, *p; 1288 efi_memory_desc_t *md; 1289 u64 efi_desc_size; 1290 1291 efi_map_start = __va(ia64_boot_param->efi_memmap); 1292 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; 1293 efi_desc_size = ia64_boot_param->efi_memdesc_size; 1294 1295 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { 1296 md = p; 1297 if (!efi_wb(md)) 1298 continue; 1299 start = ALIGN(md->phys_addr, alignment); 1300 end = efi_md_end(md); 1301 for (i = 0; i < n; i++) { 1302 if (__pa(r[i].start) >= start && __pa(r[i].end) < end) { 1303 if (__pa(r[i].start) > start + size) 1304 return start; 1305 start = ALIGN(__pa(r[i].end), alignment); 1306 if (i < n-1 && 1307 __pa(r[i+1].start) < start + size) 1308 continue; 1309 else 1310 break; 1311 } 1312 } 1313 if (end > start + size) 1314 return start; 1315 } 1316 1317 printk(KERN_WARNING 1318 "Cannot reserve 0x%lx byte of memory for crashdump\n", size); 1319 return ~0UL; 1320 } 1321 #endif 1322 1323 #ifdef CONFIG_CRASH_DUMP 1324 /* locate the size find a the descriptor at a certain address */ 1325 unsigned long __init 1326 vmcore_find_descriptor_size (unsigned long address) 1327 { 1328 void *efi_map_start, *efi_map_end, *p; 1329 efi_memory_desc_t *md; 1330 u64 efi_desc_size; 1331 unsigned long ret = 0; 1332 1333 efi_map_start = __va(ia64_boot_param->efi_memmap); 1334 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; 1335 efi_desc_size = ia64_boot_param->efi_memdesc_size; 1336 1337 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { 1338 md = p; 1339 if (efi_wb(md) && md->type == EFI_LOADER_DATA 1340 && md->phys_addr == address) { 1341 ret = efi_md_size(md); 1342 break; 1343 } 1344 } 1345 1346 if (ret == 0) 1347 printk(KERN_WARNING "Cannot locate EFI vmcore descriptor\n"); 1348 1349 return ret; 1350 } 1351 #endif 1352 1353 char *efi_systab_show_arch(char *str) 1354 { 1355 if (mps_phys != EFI_INVALID_TABLE_ADDR) 1356 str += sprintf(str, "MPS=0x%lx\n", mps_phys); 1357 if (hcdp_phys != EFI_INVALID_TABLE_ADDR) 1358 str += sprintf(str, "HCDP=0x%lx\n", hcdp_phys); 1359 return str; 1360 } 1361