// SPDX-License-Identifier: GPL-2.0
/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 0.9
 * April 30, 1999
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999-2003 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 *
 * Not all EFI Runtime Services are implemented yet, as EFI only
 * supports physical mode addressing on SoftSDV. This is to be fixed
 * in a future version. --drummond 1999-07-20
 *
 * Implemented EFI runtime services and virtual mode calls. --davidm
 *
 * Goutham Rao: <goutham.rao@intel.com>
 *	Skip non-WB memory and ignore empty memory ranges.
 */
#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/crash_dump.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/efi.h>
#include <linux/kexec.h>
#include <linux/mm.h>

#include <asm/io.h>
#include <asm/kregs.h>
#include <asm/meminit.h>
#include <asm/processor.h>
#include <asm/mca.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

#define EFI_DEBUG	0

#define ESI_TABLE_GUID \
	EFI_GUID(0x43EA58DC, 0xCF28, 0x4b06, 0xB3, \
		 0x91, 0xB7, 0x50, 0x59, 0x34, 0x2B, 0xD4)

static unsigned long mps_phys = EFI_INVALID_TABLE_ADDR;
static __initdata unsigned long palo_phys;

unsigned long __initdata esi_phys = EFI_INVALID_TABLE_ADDR;
unsigned long hcdp_phys = EFI_INVALID_TABLE_ADDR;
unsigned long sal_systab_phys = EFI_INVALID_TABLE_ADDR;

static const efi_config_table_type_t arch_tables[] __initconst = {
	{ESI_TABLE_GUID, &esi_phys, "ESI" },
	{HCDP_TABLE_GUID, &hcdp_phys, "HCDP" },
	{MPS_TABLE_GUID, &mps_phys, "MPS" },
	{PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID, &palo_phys, "PALO" },
	{SAL_SYSTEM_TABLE_GUID, &sal_systab_phys, "SALsystab" },
	{},
};

extern efi_status_t efi_call_phys (void *, ...);

static efi_runtime_services_t *runtime;
static u64 mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;

#define efi_call_virt(f, args...)	(*(f))(args)
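
/*
 * The STUB_* macros below generate two flavors of wrapper for each EFI
 * runtime service: phys_* wrappers, used while firmware must still be
 * called in physical mode, and virt_* wrappers for after the switch to
 * virtual mode.  Every wrapper saves and restores the scratch fp
 * registers around the call, since firmware may clobber them.  As a
 * rough sketch, STUB_GET_TIME(phys, phys_ptr) expands to:
 *
 *	static efi_status_t
 *	phys_get_time (efi_time_t *tm, efi_time_cap_t *tc)
 *	{
 *		struct ia64_fpreg fr[6];
 *		efi_time_cap_t *atc = NULL;
 *		efi_status_t ret;
 *
 *		if (tc)
 *			atc = (efi_time_cap_t *) ia64_tpa(tc);
 *		ia64_save_scratch_fpregs(fr);
 *		ret = efi_call_phys((efi_get_time_t *) __va(runtime->get_time),
 *				    (efi_time_t *) ia64_tpa(tm), atc);
 *		ia64_load_scratch_fpregs(fr);
 *		return ret;
 *	}
 */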
#define STUB_GET_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_time_cap_t *atc = NULL; \
	efi_status_t ret; \
	\
	if (tc) \
		atc = adjust_arg(tc); \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), \
				adjust_arg(tm), atc); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_SET_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_time (efi_time_t *tm) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
	\
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), \
				adjust_arg(tm)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, \
			  efi_time_t *tm) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
	\
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix( \
		(efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \
		adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_time_t *atm = NULL; \
	efi_status_t ret; \
	\
	if (tm) \
		atm = adjust_arg(tm); \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix( \
		(efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \
		enabled, atm); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_GET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \
		       unsigned long *data_size, void *data) \
{ \
	struct ia64_fpreg fr[6]; \
	u32 *aattr = NULL; \
	efi_status_t ret; \
	\
	if (attr) \
		aattr = adjust_arg(attr); \
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix( \
		(efi_get_variable_t *) __va(runtime->get_variable), \
		adjust_arg(name), adjust_arg(vendor), aattr, \
		adjust_arg(data_size), adjust_arg(data)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, \
			    efi_guid_t *vendor) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
	\
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix( \
		(efi_get_next_variable_t *) __va(runtime->get_next_variable), \
		adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_SET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, \
		       u32 attr, unsigned long data_size, \
		       void *data) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
	\
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix( \
		(efi_set_variable_t *) __va(runtime->set_variable), \
		adjust_arg(name), adjust_arg(vendor), attr, data_size, \
		adjust_arg(data)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_high_mono_count (u32 *count) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
	\
	ia64_save_scratch_fpregs(fr); \
	ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \
				__va(runtime->get_next_high_mono_count), \
				adjust_arg(count)); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}

#define STUB_RESET_SYSTEM(prefix, adjust_arg) \
static void \
prefix##_reset_system (int reset_type, efi_status_t status, \
		       unsigned long data_size, efi_char16_t *data) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_char16_t *adata = NULL; \
	\
	if (data) \
		adata = adjust_arg(data); \
	\
	ia64_save_scratch_fpregs(fr); \
	efi_call_##prefix( \
		(efi_reset_system_t *) __va(runtime->reset_system), \
		reset_type, status, data_size, adata); \
	/* should not return, but just in case... */ \
	ia64_load_scratch_fpregs(fr); \
}
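
/*
 * Argument-adjustment helpers for the stubs above: phys_ptr() uses
 * ia64_tpa() to translate a kernel virtual pointer into a physical
 * address that firmware can dereference in physical mode; id() leaves
 * pointers untouched for the virt_* stubs used once EFI runs in
 * virtual mode.
 */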
#define phys_ptr(arg)	((__typeof__(arg)) ia64_tpa(arg))

STUB_GET_TIME(phys, phys_ptr)
STUB_SET_TIME(phys, phys_ptr)
STUB_GET_WAKEUP_TIME(phys, phys_ptr)
STUB_SET_WAKEUP_TIME(phys, phys_ptr)
STUB_GET_VARIABLE(phys, phys_ptr)
STUB_GET_NEXT_VARIABLE(phys, phys_ptr)
STUB_SET_VARIABLE(phys, phys_ptr)
STUB_GET_NEXT_HIGH_MONO_COUNT(phys, phys_ptr)
STUB_RESET_SYSTEM(phys, phys_ptr)

#define id(arg)	arg

STUB_GET_TIME(virt, id)
STUB_SET_TIME(virt, id)
STUB_GET_WAKEUP_TIME(virt, id)
STUB_SET_WAKEUP_TIME(virt, id)
STUB_GET_VARIABLE(virt, id)
STUB_GET_NEXT_VARIABLE(virt, id)
STUB_SET_VARIABLE(virt, id)
STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id)
STUB_RESET_SYSTEM(virt, id)

void
efi_gettimeofday (struct timespec64 *ts)
{
	efi_time_t tm;

	if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS) {
		memset(ts, 0, sizeof(*ts));
		return;
	}

	ts->tv_sec = mktime64(tm.year, tm.month, tm.day,
			      tm.hour, tm.minute, tm.second);
	ts->tv_nsec = tm.nanosecond;
}

static int
is_memory_available (efi_memory_desc_t *md)
{
	if (!(md->attribute & EFI_MEMORY_WB))
		return 0;

	switch (md->type) {
	case EFI_LOADER_CODE:
	case EFI_LOADER_DATA:
	case EFI_BOOT_SERVICES_CODE:
	case EFI_BOOT_SERVICES_DATA:
	case EFI_CONVENTIONAL_MEMORY:
		return 1;
	}
	return 0;
}

typedef struct kern_memdesc {
	u64 attribute;
	u64 start;
	u64 num_pages;
} kern_memdesc_t;

static kern_memdesc_t *kern_memmap;

#define efi_md_size(md)	(md->num_pages << EFI_PAGE_SHIFT)

static inline u64
kmd_end(kern_memdesc_t *kmd)
{
	return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT));
}

static inline u64
efi_md_end(efi_memory_desc_t *md)
{
	return (md->phys_addr + efi_md_size(md));
}

static inline int
efi_wb(efi_memory_desc_t *md)
{
	return (md->attribute & EFI_MEMORY_WB);
}

static inline int
efi_uc(efi_memory_desc_t *md)
{
	return (md->attribute & EFI_MEMORY_UC);
}
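
/*
 * walk() hands out the ranges recorded in kern_memmap through the
 * kernel's identity mappings: WB ranges are offset into the cached
 * space at PAGE_OFFSET, UC ranges into the uncached space at
 * __IA64_UNCACHED_OFFSET.  Both ends are rounded inward to page
 * boundaries, so a range smaller than one page is skipped entirely.
 */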
static void
walk (efi_freemem_callback_t callback, void *arg, u64 attr)
{
	kern_memdesc_t *k;
	u64 start, end, voff;

	voff = (attr == EFI_MEMORY_WB) ? PAGE_OFFSET : __IA64_UNCACHED_OFFSET;
	for (k = kern_memmap; k->start != ~0UL; k++) {
		if (k->attribute != attr)
			continue;
		start = PAGE_ALIGN(k->start);
		end = (k->start + (k->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK;
		if (start < end)
			if ((*callback)(start + voff, end + voff, arg) < 0)
				return;
	}
}

/*
 * Walk the EFI memory map and call CALLBACK once for each EFI memory
 * descriptor that has memory that is available for OS use.
 */
void
efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
{
	walk(callback, arg, EFI_MEMORY_WB);
}

/*
 * Walk the EFI memory map and call CALLBACK once for each EFI memory
 * descriptor that has memory that is available for the uncached
 * allocator.
 */
void
efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg)
{
	walk(callback, arg, EFI_MEMORY_UC);
}

/*
 * Look for the PAL_CODE region reported by EFI and map it using an
 * ITR to enable safe PAL calls in virtual mode.  See IA-64 Processor
 * Abstraction Layer chapter 11 in ADAG
 */
void *
efi_get_pal_addr (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;
	int pal_code_count = 0;
	u64 vaddr, mask;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->type != EFI_PAL_CODE)
			continue;

		if (++pal_code_count > 1) {
			printk(KERN_ERR "Too many EFI PAL Code memory ranges, "
			       "dropped @ %llx\n", md->phys_addr);
			continue;
		}
		/*
		 * The only ITLB entry in region 7 that is used is the one
		 * installed by __start().  That entry covers a 64MB range.
		 */
		mask = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
		vaddr = PAGE_OFFSET + md->phys_addr;

		/*
		 * We must check that the PAL mapping won't overlap with the
		 * kernel mapping.
		 *
		 * PAL code is guaranteed to be aligned on a power of 2 between
		 * 4k and 256KB, so only one ITR is needed to map it.  This
		 * implies that the PAL code is always aligned on its size,
		 * i.e., the closest matching page size supported by the TLB.
		 * Therefore PAL code is guaranteed never to cross a 64MB
		 * boundary unless it is bigger than 64MB (very unlikely!).
		 * So for now the following test is enough to determine
		 * whether or not we need a dedicated ITR for the PAL code.
		 */
		if ((vaddr & mask) == (KERNEL_START & mask)) {
			printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
			       __func__);
			continue;
		}

		if (efi_md_size(md) > IA64_GRANULE_SIZE)
			panic("Whoa!  PAL code size bigger than a granule!");

#if EFI_DEBUG
		mask = ~((1 << IA64_GRANULE_SHIFT) - 1);

		printk(KERN_INFO "CPU %d: mapping PAL code "
		       "[0x%llx-0x%llx) into [0x%llx-0x%llx)\n",
		       smp_processor_id(), md->phys_addr,
		       md->phys_addr + efi_md_size(md),
		       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
#endif
		return __va(md->phys_addr);
	}
	printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
	       __func__);
	return NULL;
}
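
/*
 * The PALO table checksums to zero: the byte sum of the first
 * palo->length bytes (checksum field included) must be 0 mod 256, so
 * palo_checksum() returns 0 for a valid table.  For example, a table
 * whose remaining bytes sum to 0x1FE needs a checksum byte of 0x02.
 */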
static u8 __init palo_checksum(u8 *buffer, u32 length)
{
	u8 sum = 0;
	u8 *end = buffer + length;

	while (buffer < end)
		sum = (u8) (sum + *(buffer++));

	return sum;
}

/*
 * Parse and handle the PALO table, which is published at:
 * http://www.dig64.org/home/DIG64_PALO_R1_0.pdf
 */
static void __init handle_palo(unsigned long phys_addr)
{
	struct palo_table *palo = __va(phys_addr);
	u8 checksum;

	if (strncmp(palo->signature, PALO_SIG, sizeof(PALO_SIG) - 1)) {
		printk(KERN_INFO "PALO signature incorrect.\n");
		return;
	}

	checksum = palo_checksum((u8 *)palo, palo->length);
	if (checksum) {
		printk(KERN_INFO "PALO checksum incorrect.\n");
		return;
	}

	setup_ptcg_sem(palo->max_tlb_purges, NPTCG_FROM_PALO);
}

void
efi_map_pal_code (void)
{
	void *pal_vaddr = efi_get_pal_addr ();
	u64 psr;

	if (!pal_vaddr)
		return;

	/*
	 * Cannot write to CRx with PSR.ic=1
	 */
	psr = ia64_clear_ic();
	ia64_itr(0x1, IA64_TR_PALCODE,
		 GRANULEROUNDDOWN((unsigned long) pal_vaddr),
		 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
		 IA64_GRANULE_SHIFT);
	ia64_set_psr(psr);	/* restore psr */
}
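
/*
 * Boot-time setup.  efi_init() runs long before the regular command
 * line machinery, so it scans boot_command_line by hand for mem=,
 * max_addr= and min_addr=, verifies the firmware-provided system
 * table, parses the arch config tables (ESI, HCDP, MPS, PALO, SAL),
 * and installs the phys_* stubs; the switch to virtual mode happens
 * at the very end via efi_enter_virtual_mode().
 */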
void __init
efi_init (void)
{
	const efi_system_table_t *efi_systab;
	void *efi_map_start, *efi_map_end;
	u64 efi_desc_size;
	char *cp;

	set_bit(EFI_BOOT, &efi.flags);
	set_bit(EFI_64BIT, &efi.flags);

	/*
	 * It's too early to be able to use the standard kernel command line
	 * support...
	 */
	for (cp = boot_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			mem_limit = memparse(cp + 4, &cp);
		} else if (memcmp(cp, "max_addr=", 9) == 0) {
			max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
		} else if (memcmp(cp, "min_addr=", 9) == 0) {
			min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}
	if (min_addr != 0UL)
		printk(KERN_INFO "Ignoring memory below %lluMB\n",
		       min_addr >> 20);
	if (max_addr != ~0UL)
		printk(KERN_INFO "Ignoring memory above %lluMB\n",
		       max_addr >> 20);

	efi_systab = __va(ia64_boot_param->efi_systab);

	/*
	 * Verify the EFI Table
	 */
	if (efi_systab == NULL)
		panic("Whoa! Can't find EFI system table.\n");
	if (efi_systab_check_header(&efi_systab->hdr, 1))
		panic("Whoa! EFI system table signature incorrect\n");

	efi_systab_report_header(&efi_systab->hdr, efi_systab->fw_vendor);

	palo_phys = EFI_INVALID_TABLE_ADDR;

	if (efi_config_parse_tables(__va(efi_systab->tables),
				    efi_systab->nr_tables,
				    arch_tables) != 0)
		return;

	if (palo_phys != EFI_INVALID_TABLE_ADDR)
		handle_palo(palo_phys);

	runtime = __va(efi_systab->runtime);
	efi.get_time = phys_get_time;
	efi.set_time = phys_set_time;
	efi.get_wakeup_time = phys_get_wakeup_time;
	efi.set_wakeup_time = phys_set_wakeup_time;
	efi.get_variable = phys_get_variable;
	efi.get_next_variable = phys_get_next_variable;
	efi.set_variable = phys_set_variable;
	efi.get_next_high_mono_count = phys_get_next_high_mono_count;
	efi.reset_system = phys_reset_system;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

#if EFI_DEBUG
	/* print EFI memory map: */
	{
		efi_memory_desc_t *md;
		void *p;
		int i;

		for (i = 0, p = efi_map_start; p < efi_map_end;
		     ++i, p += efi_desc_size)
		{
			const char *unit;
			unsigned long size;
			char buf[64];

			md = p;
			size = md->num_pages << EFI_PAGE_SHIFT;

			if ((size >> 40) > 0) {
				size >>= 40;
				unit = "TB";
			} else if ((size >> 30) > 0) {
				size >>= 30;
				unit = "GB";
			} else if ((size >> 20) > 0) {
				size >>= 20;
				unit = "MB";
			} else {
				size >>= 10;
				unit = "KB";
			}

			printk("mem%02d: %s "
			       "range=[0x%016llx-0x%016llx) (%4lu%s)\n",
			       i, efi_md_typeattr_format(buf, sizeof(buf), md),
			       md->phys_addr,
			       md->phys_addr + efi_md_size(md), size, unit);
		}
	}
#endif

	efi_map_pal_code();
	efi_enter_virtual_mode();
}
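
/*
 * Per the EFI spec, SetVirtualAddressMap() may be called only once and
 * only in physical mode (hence efi_call_phys() below).  The loop first
 * fills in md->virt_addr for the RUNTIME descriptors so that firmware
 * can relocate its internal pointers during the switch.
 */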
void
efi_enter_virtual_mode (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	efi_status_t status;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->attribute & EFI_MEMORY_RUNTIME) {
			/*
			 * Some descriptors have multiple bits set, so the
			 * order of the tests is relevant.
			 */
			if (md->attribute & EFI_MEMORY_WB) {
				md->virt_addr = (u64) __va(md->phys_addr);
			} else if (md->attribute & EFI_MEMORY_UC) {
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
			} else if (md->attribute & EFI_MEMORY_WC) {
#if 0
				md->virt_addr = ia64_remap(md->phys_addr,
							   (_PAGE_A |
							    _PAGE_P |
							    _PAGE_D |
							    _PAGE_MA_WC |
							    _PAGE_PL_0 |
							    _PAGE_AR_RW));
#else
				printk(KERN_INFO "EFI_MEMORY_WC mapping\n");
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
#endif
			} else if (md->attribute & EFI_MEMORY_WT) {
#if 0
				md->virt_addr = ia64_remap(md->phys_addr,
							   (_PAGE_A |
							    _PAGE_P |
							    _PAGE_D |
							    _PAGE_MA_WT |
							    _PAGE_PL_0 |
							    _PAGE_AR_RW));
#else
				printk(KERN_INFO "EFI_MEMORY_WT mapping\n");
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
#endif
			}
		}
	}

	status = efi_call_phys(__va(runtime->set_virtual_address_map),
			       ia64_boot_param->efi_memmap_size,
			       efi_desc_size,
			       ia64_boot_param->efi_memdesc_version,
			       ia64_boot_param->efi_memmap);
	if (status != EFI_SUCCESS) {
		printk(KERN_WARNING "warning: unable to switch EFI into "
		       "virtual mode (status=%lu)\n", status);
		return;
	}

	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);

	/*
	 * Now that EFI is in virtual mode, we call the EFI functions more
	 * efficiently:
	 */
	efi.get_time = virt_get_time;
	efi.set_time = virt_set_time;
	efi.get_wakeup_time = virt_get_wakeup_time;
	efi.set_wakeup_time = virt_set_wakeup_time;
	efi.get_variable = virt_get_variable;
	efi.get_next_variable = virt_get_next_variable;
	efi.set_variable = virt_set_variable;
	efi.get_next_high_mono_count = virt_get_next_high_mono_count;
	efi.reset_system = virt_reset_system;
}
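
/*
 * Note that ia64 has no port I/O instructions: legacy I/O ports are
 * reached through a memory-mapped window, which firmware describes
 * with the single EFI_MEMORY_MAPPED_IO_PORT_SPACE descriptor that
 * efi_get_iobase() returns.
 */
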
/*
 * Walk the EFI memory map looking for the I/O port range.  There can only be
 * one entry of this type, other I/O port ranges should be described via ACPI.
 */
u64
efi_get_iobase (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
			if (md->attribute & EFI_MEMORY_UC)
				return md->phys_addr;
		}
	}
	return 0;
}

static struct kern_memdesc *
kern_memory_descriptor (unsigned long phys_addr)
{
	struct kern_memdesc *md;

	for (md = kern_memmap; md->start != ~0UL; md++) {
		if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
			return md;
	}
	return NULL;
}

static efi_memory_desc_t *
efi_memory_descriptor (unsigned long phys_addr)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;

		if (phys_addr - md->phys_addr < efi_md_size(md))
			return md;
	}
	return NULL;
}

static int
efi_memmap_intersects (unsigned long phys_addr, unsigned long size)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;
	unsigned long end;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	end = phys_addr + size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->phys_addr < end && efi_md_end(md) > phys_addr)
			return 1;
	}
	return 0;
}

int
efi_mem_type (unsigned long phys_addr)
{
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);

	if (md)
		return md->type;
	return -EINVAL;
}

u64
efi_mem_attributes (unsigned long phys_addr)
{
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);

	if (md)
		return md->attribute;
	return 0;
}
EXPORT_SYMBOL(efi_mem_attributes);

u64
efi_mem_attribute (unsigned long phys_addr, unsigned long size)
{
	unsigned long end = phys_addr + size;
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
	u64 attr;

	if (!md)
		return 0;

	/*
	 * EFI_MEMORY_RUNTIME is not a memory attribute; it just tells
	 * the kernel that firmware needs this region mapped.
	 */
	attr = md->attribute & ~EFI_MEMORY_RUNTIME;
	do {
		unsigned long md_end = efi_md_end(md);

		if (end <= md_end)
			return attr;

		md = efi_memory_descriptor(md_end);
		if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr)
			return 0;
	} while (md);
	return 0;	/* never reached */
}
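
/*
 * kern_mem_attribute() is the kern_memmap analogue of
 * efi_mem_attribute() above: both report an attribute only when the
 * entire [phys_addr, phys_addr+size) range carries it, walking across
 * contiguous descriptors and returning 0 on any hole or mismatch.
 * A sketch of typical use, as in the /dev/mem helpers below:
 *
 *	if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
 *		; // the whole range may be accessed cacheably
 */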
u64
kern_mem_attribute (unsigned long phys_addr, unsigned long size)
{
	unsigned long end = phys_addr + size;
	struct kern_memdesc *md;
	u64 attr;

	/*
	 * This is a hack for ioremap calls before we set up kern_memmap.
	 * Maybe we should do efi_memmap_init() earlier instead.
	 */
	if (!kern_memmap) {
		attr = efi_mem_attribute(phys_addr, size);
		if (attr & EFI_MEMORY_WB)
			return EFI_MEMORY_WB;
		return 0;
	}

	md = kern_memory_descriptor(phys_addr);
	if (!md)
		return 0;

	attr = md->attribute;
	do {
		unsigned long md_end = kmd_end(md);

		if (end <= md_end)
			return attr;

		md = kern_memory_descriptor(md_end);
		if (!md || md->attribute != attr)
			return 0;
	} while (md);
	return 0;	/* never reached */
}

int
valid_phys_addr_range (phys_addr_t phys_addr, unsigned long size)
{
	u64 attr;

	/*
	 * /dev/mem reads and writes use copy_to_user(), which implicitly
	 * uses a granule-sized kernel identity mapping.  It's really
	 * only safe to do this for regions in kern_memmap.  For more
	 * details, see Documentation/ia64/aliasing.rst.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
		return 1;
	return 0;
}

int
valid_mmap_phys_addr_range (unsigned long pfn, unsigned long size)
{
	unsigned long phys_addr = pfn << PAGE_SHIFT;
	u64 attr;

	attr = efi_mem_attribute(phys_addr, size);

	/*
	 * /dev/mem mmap uses normal user pages, so we don't need the entire
	 * granule, but the entire region we're mapping must support the same
	 * attribute.
	 */
	if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
		return 1;

	/*
	 * Intel firmware doesn't tell us about all the MMIO regions, so
	 * in general we have to allow mmap requests.  But if EFI *does*
	 * tell us about anything inside this region, we should deny it.
	 * The user can always map a smaller region to avoid the overlap.
	 */
	if (efi_memmap_intersects(phys_addr, size))
		return 0;

	return 1;
}
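
/*
 * The attribute checks matter because mapping the same physical page
 * with conflicting memory attributes risks a machine check on ia64;
 * see Documentation/ia64/aliasing.rst.  phys_mem_access_prot() picks
 * the user mapping attribute accordingly.
 */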
pgprot_t
phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size,
		     pgprot_t vma_prot)
{
	unsigned long phys_addr = pfn << PAGE_SHIFT;
	u64 attr;

	/*
	 * For /dev/mem mmap, we use user mappings, but if the region is
	 * in kern_memmap (and hence may be covered by a kernel mapping),
	 * we must use the same attribute as the kernel mapping.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return pgprot_cacheable(vma_prot);
	else if (attr & EFI_MEMORY_UC)
		return pgprot_noncached(vma_prot);

	/*
	 * Some chipsets don't support UC access to memory.  If
	 * WB is supported, we prefer that.
	 */
	if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
		return pgprot_cacheable(vma_prot);

	return pgprot_noncached(vma_prot);
}

int __init
efi_uart_console_only(void)
{
	efi_status_t status;
	char *s, name[] = "ConOut";
	efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
	efi_char16_t *utf16, name_utf16[32];
	unsigned char data[1024];
	unsigned long size = sizeof(data);
	struct efi_generic_dev_path *hdr, *end_addr;
	int uart = 0;

	/* Convert to UTF-16 */
	utf16 = name_utf16;
	s = name;
	while (*s)
		*utf16++ = *s++ & 0x7f;
	*utf16 = 0;

	status = efi.get_variable(name_utf16, &guid, NULL, &size, data);
	if (status != EFI_SUCCESS) {
		printk(KERN_ERR "No EFI %s variable?\n", name);
		return 0;
	}

	hdr = (struct efi_generic_dev_path *) data;
	end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size);
	while (hdr < end_addr) {
		if (hdr->type == EFI_DEV_MSG &&
		    hdr->sub_type == EFI_DEV_MSG_UART)
			uart = 1;
		else if (hdr->type == EFI_DEV_END_PATH ||
			 hdr->type == EFI_DEV_END_PATH2) {
			if (!uart)
				return 0;
			if (hdr->sub_type == EFI_DEV_END_ENTIRE)
				return 1;
			uart = 0;
		}
		hdr = (struct efi_generic_dev_path *)((u8 *) hdr + hdr->length);
	}
	printk(KERN_ERR "Malformed %s value\n", name);
	return 0;
}
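
/*
 * The remainder of this file builds kern_memmap, the kernel's massaged
 * copy of the EFI memory map: contiguous WB descriptors are merged,
 * range ends are trimmed inward to granule boundaries, and the
 * trimmings are handed to the uncached allocator.
 */
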
/*
 * Look for the first granule-aligned memory descriptor that is big
 * enough to hold the EFI memory map.  Make sure this descriptor is at
 * least granule sized so it does not get trimmed.
 */
struct kern_memdesc *
find_memmap_space (void)
{
	u64 contig_low = 0, contig_high = 0;
	u64 as = 0, ae;
	void *efi_map_start, *efi_map_end, *p, *q;
	efi_memory_desc_t *md, *pmd = NULL, *check_md;
	u64 space_needed, efi_desc_size;
	unsigned long total_mem = 0;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	/*
	 * Worst case: we need 3 kernel descriptors for each efi descriptor
	 * (if every entry has a WB part in the middle, and UC head and tail),
	 * plus one for the end marker.
	 */
	space_needed = sizeof(kern_memdesc_t) *
		(3 * (ia64_boot_param->efi_memmap_size/efi_desc_size) + 1);

	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
		md = p;
		if (!efi_wb(md)) {
			continue;
		}
		if (pmd == NULL || !efi_wb(pmd) ||
		    efi_md_end(pmd) != md->phys_addr) {
			contig_low = GRANULEROUNDUP(md->phys_addr);
			contig_high = efi_md_end(md);
			for (q = p + efi_desc_size; q < efi_map_end;
			     q += efi_desc_size) {
				check_md = q;
				if (!efi_wb(check_md))
					break;
				if (contig_high != check_md->phys_addr)
					break;
				contig_high = efi_md_end(check_md);
			}
			contig_high = GRANULEROUNDDOWN(contig_high);
		}
		if (!is_memory_available(md) || md->type == EFI_LOADER_DATA)
			continue;

		/* Round ends inward to granule boundaries */
		as = max(contig_low, md->phys_addr);
		ae = min(contig_high, efi_md_end(md));

		/* keep within max_addr= and min_addr= command line arg */
		as = max(as, min_addr);
		ae = min(ae, max_addr);
		if (ae <= as)
			continue;

		/* avoid going over mem= command line arg */
		if (total_mem + (ae - as) > mem_limit)
			ae -= total_mem + (ae - as) - mem_limit;

		if (ae <= as)
			continue;

		if (ae - as > space_needed)
			break;
	}
	if (p >= efi_map_end)
		panic("Can't allocate space for kernel memory descriptors");

	return __va(as);
}
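
/*
 * A worked example of the worst case mentioned above, assuming 16MB
 * granules: a contiguous WB+UC range covering [62MB, 194MB) is trimmed
 * to the granule-aligned WB core [64MB, 192MB), while the head
 * [62MB, 64MB) and tail [192MB, 194MB) become two separate UC entries
 * in efi_memmap_init() below, i.e. three kernel descriptors for one
 * EFI descriptor.
 */
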
/*
 * Walk the EFI memory map and gather all memory available for kernel
 * to use.  We can allocate partial granules only if the unavailable
 * parts exist, and are WB.
 */
unsigned long
efi_memmap_init(u64 *s, u64 *e)
{
	struct kern_memdesc *k, *prev = NULL;
	u64 contig_low = 0, contig_high = 0;
	u64 as, ae, lim;
	void *efi_map_start, *efi_map_end, *p, *q;
	efi_memory_desc_t *md, *pmd = NULL, *check_md;
	u64 efi_desc_size;
	unsigned long total_mem = 0;

	k = kern_memmap = find_memmap_space();

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
		md = p;
		if (!efi_wb(md)) {
			if (efi_uc(md) &&
			    (md->type == EFI_CONVENTIONAL_MEMORY ||
			     md->type == EFI_BOOT_SERVICES_DATA)) {
				k->attribute = EFI_MEMORY_UC;
				k->start = md->phys_addr;
				k->num_pages = md->num_pages;
				k++;
			}
			continue;
		}
		if (pmd == NULL || !efi_wb(pmd) ||
		    efi_md_end(pmd) != md->phys_addr) {
			contig_low = GRANULEROUNDUP(md->phys_addr);
			contig_high = efi_md_end(md);
			for (q = p + efi_desc_size; q < efi_map_end;
			     q += efi_desc_size) {
				check_md = q;
				if (!efi_wb(check_md))
					break;
				if (contig_high != check_md->phys_addr)
					break;
				contig_high = efi_md_end(check_md);
			}
			contig_high = GRANULEROUNDDOWN(contig_high);
		}
		if (!is_memory_available(md))
			continue;

		/*
		 * Round ends inward to granule boundaries
		 * Give trimmings to uncached allocator
		 */
		if (md->phys_addr < contig_low) {
			lim = min(efi_md_end(md), contig_low);
			if (efi_uc(md)) {
				if (k > kern_memmap &&
				    (k-1)->attribute == EFI_MEMORY_UC &&
				    kmd_end(k-1) == md->phys_addr) {
					(k-1)->num_pages +=
						(lim - md->phys_addr)
						>> EFI_PAGE_SHIFT;
				} else {
					k->attribute = EFI_MEMORY_UC;
					k->start = md->phys_addr;
					k->num_pages = (lim - md->phys_addr)
						>> EFI_PAGE_SHIFT;
					k++;
				}
			}
			as = contig_low;
		} else
			as = md->phys_addr;

		if (efi_md_end(md) > contig_high) {
			lim = max(md->phys_addr, contig_high);
			if (efi_uc(md)) {
				if (lim == md->phys_addr && k > kern_memmap &&
				    (k-1)->attribute == EFI_MEMORY_UC &&
				    kmd_end(k-1) == md->phys_addr) {
					(k-1)->num_pages += md->num_pages;
				} else {
					k->attribute = EFI_MEMORY_UC;
					k->start = lim;
					k->num_pages = (efi_md_end(md) - lim)
						>> EFI_PAGE_SHIFT;
					k++;
				}
			}
			ae = contig_high;
		} else
			ae = efi_md_end(md);

		/* keep within max_addr= and min_addr= command line arg */
		as = max(as, min_addr);
		ae = min(ae, max_addr);
		if (ae <= as)
			continue;

		/* avoid going over mem= command line arg */
		if (total_mem + (ae - as) > mem_limit)
			ae -= total_mem + (ae - as) - mem_limit;

		if (ae <= as)
			continue;
		if (prev && kmd_end(prev) == md->phys_addr) {
			prev->num_pages += (ae - as) >> EFI_PAGE_SHIFT;
			total_mem += ae - as;
			continue;
		}
		k->attribute = EFI_MEMORY_WB;
		k->start = as;
		k->num_pages = (ae - as) >> EFI_PAGE_SHIFT;
		total_mem += ae - as;
		prev = k++;
	}
	k->start = ~0L;	/* end-marker */

	/* reserve the memory we are using for kern_memmap */
	*s = (u64)kern_memmap;
	*e = (u64)++k;

	return total_mem;
}
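
/*
 * Register the EFI memory map with the resource tree, so the ranges
 * show up in /proc/iomem.  MMIO ranges are skipped here; presumably
 * they are claimed by the drivers that map them.
 */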
void
efi_initialize_iomem_resources(struct resource *code_resource,
			       struct resource *data_resource,
			       struct resource *bss_resource)
{
	struct resource *res;
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;
	char *name;
	unsigned long flags, desc;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	res = NULL;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;

		if (md->num_pages == 0) /* should not happen */
			continue;

		flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		desc = IORES_DESC_NONE;

		switch (md->type) {

		case EFI_MEMORY_MAPPED_IO:
		case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
			continue;

		case EFI_LOADER_CODE:
		case EFI_LOADER_DATA:
		case EFI_BOOT_SERVICES_DATA:
		case EFI_BOOT_SERVICES_CODE:
		case EFI_CONVENTIONAL_MEMORY:
			if (md->attribute & EFI_MEMORY_WP) {
				name = "System ROM";
				flags |= IORESOURCE_READONLY;
			} else if (md->attribute == EFI_MEMORY_UC) {
				name = "Uncached RAM";
			} else {
				name = "System RAM";
				flags |= IORESOURCE_SYSRAM;
			}
			break;

		case EFI_ACPI_MEMORY_NVS:
			name = "ACPI Non-volatile Storage";
			desc = IORES_DESC_ACPI_NV_STORAGE;
			break;

		case EFI_UNUSABLE_MEMORY:
			name = "reserved";
			flags |= IORESOURCE_DISABLED;
			break;

		case EFI_PERSISTENT_MEMORY:
			name = "Persistent Memory";
			desc = IORES_DESC_PERSISTENT_MEMORY;
			break;

		case EFI_RESERVED_TYPE:
		case EFI_RUNTIME_SERVICES_CODE:
		case EFI_RUNTIME_SERVICES_DATA:
		case EFI_ACPI_RECLAIM_MEMORY:
		default:
			name = "reserved";
			break;
		}

		if ((res = kzalloc(sizeof(struct resource),
				   GFP_KERNEL)) == NULL) {
			printk(KERN_ERR
			       "failed to allocate resource for iomem\n");
			return;
		}

		res->name = name;
		res->start = md->phys_addr;
		res->end = md->phys_addr + efi_md_size(md) - 1;
		res->flags = flags;
		res->desc = desc;

		if (insert_resource(&iomem_resource, res) < 0)
			kfree(res);
		else {
			/*
			 * We don't know which region contains
			 * kernel data so we try it repeatedly and
			 * let the resource manager test it.
			 */
			insert_resource(res, code_resource);
			insert_resource(res, data_resource);
			insert_resource(res, bss_resource);
#ifdef CONFIG_KEXEC
			insert_resource(res, &efi_memmap_res);
			insert_resource(res, &boot_param_res);
			if (crashk_res.end > crashk_res.start)
				insert_resource(res, &crashk_res);
#endif
		}
	}
}

#ifdef CONFIG_KEXEC
/*
 * Find a block of memory aligned to 64M that excludes the given
 * reserved regions; rsvd_regions are sorted.
 */
unsigned long __init
kdump_find_rsvd_region (unsigned long size, struct rsvd_region *r, int n)
{
	int i;
	u64 start, end;
	u64 alignment = 1UL << _PAGE_SIZE_64M;
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (!efi_wb(md))
			continue;
		start = ALIGN(md->phys_addr, alignment);
		end = efi_md_end(md);
		for (i = 0; i < n; i++) {
			if (__pa(r[i].start) >= start && __pa(r[i].end) < end) {
				if (__pa(r[i].start) > start + size)
					return start;
				start = ALIGN(__pa(r[i].end), alignment);
				if (i < n-1 &&
				    __pa(r[i+1].start) < start + size)
					continue;
				else
					break;
			}
		}
		if (end > start + size)
			return start;
	}

	printk(KERN_WARNING
	       "Cannot reserve 0x%lx byte of memory for crashdump\n", size);
	return ~0UL;
}
#endif

#ifdef CONFIG_CRASH_DUMP
/* Find the size of the EFI memory descriptor located at the given address */
unsigned long __init
vmcore_find_descriptor_size (unsigned long address)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;
	unsigned long ret = 0;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (efi_wb(md) && md->type == EFI_LOADER_DATA
		    && md->phys_addr == address) {
			ret = efi_md_size(md);
			break;
		}
	}

	if (ret == 0)
		printk(KERN_WARNING "Cannot locate EFI vmcore descriptor\n");

	return ret;
}
#endif

char *efi_systab_show_arch(char *str)
{
	if (mps_phys != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "MPS=0x%lx\n", mps_phys);
	if (hcdp_phys != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "HCDP=0x%lx\n", hcdp_phys);
	return str;
}