/*
 * x86_64 specific EFI support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Bibo Mao <bibo.mao@intel.com>
 *	Chandramouli Narayanan <mouli@linux.intel.com>
 *	Huang Ying <ying.huang@intel.com>
 *
 * Code to convert EFI to E820 map has been implemented in elilo bootloader
 * based on an EFI patch by Edgar Hucek. Based on the E820 map, the page table
 * is set up appropriately for EFI runtime code.
 * - mouli 06/14/2007.
 *
 */

#define pr_fmt(fmt) "efi: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/mc146818rtc.h>
#include <linux/efi.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/ucs2_string.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/e820.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/efi.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/time.h>
#include <asm/pgalloc.h>

/*
 * We allocate runtime services regions bottom-up, starting from -4G, i.e.
 * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
 */
static u64 efi_va = EFI_VA_START;

struct efi_scratch efi_scratch;

static void __init early_code_mapping_set_exec(int executable)
{
	efi_memory_desc_t *md;

	if (!(__supported_pte_mask & _PAGE_NX))
		return;

	/* Make EFI service code area executable */
	for_each_efi_memory_desc(md) {
		if (md->type == EFI_RUNTIME_SERVICES_CODE ||
		    md->type == EFI_BOOT_SERVICES_CODE)
			efi_set_executable(md, executable);
	}
}

pgd_t * __init efi_call_phys_prolog(void)
{
	unsigned long vaddress;
	pgd_t *save_pgd;

	int pgd;
	int n_pgds;

	if (!efi_enabled(EFI_OLD_MEMMAP)) {
		save_pgd = (pgd_t *)read_cr3();
		write_cr3((unsigned long)efi_scratch.efi_pgt);
		goto out;
	}

	early_code_mapping_set_exec(1);

	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
	save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);

	for (pgd = 0; pgd < n_pgds; pgd++) {
		save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
		vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
	}
out:
	__flush_tlb_all();

	return save_pgd;
}

void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
	/*
	 * After the lock is released, the original page table is restored.
	 */
	int pgd_idx;
	int nr_pgds;

	if (!efi_enabled(EFI_OLD_MEMMAP)) {
		write_cr3((unsigned long)save_pgd);
		__flush_tlb_all();
		return;
	}

	nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);

	for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++)
		set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);

	kfree(save_pgd);

	__flush_tlb_all();
	early_code_mapping_set_exec(0);
}
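
/*
 * Top-level page table used for EFI runtime services mappings when
 * EFI_OLD_MEMMAP is not in effect. It is allocated by
 * efi_alloc_page_tables() and populated via efi_setup_page_tables(),
 * efi_map_region() and efi_sync_low_kernel_mappings().
 */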
static pgd_t *efi_pgd;

/*
 * We need our own copy of the higher levels of the page tables
 * because we want to avoid inserting EFI region mappings (EFI_VA_END
 * to EFI_VA_START) into the standard kernel page tables. Everything
 * else can be shared, see efi_sync_low_kernel_mappings().
 */
int __init efi_alloc_page_tables(void)
{
	pgd_t *pgd;
	pud_t *pud;
	gfp_t gfp_mask;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return 0;

	gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
	efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
	if (!efi_pgd)
		return -ENOMEM;

	pgd = efi_pgd + pgd_index(EFI_VA_END);

	pud = pud_alloc_one(NULL, 0);
	if (!pud) {
		free_page((unsigned long)efi_pgd);
		return -ENOMEM;
	}

	pgd_populate(NULL, pgd, pud);

	return 0;
}

/*
 * Add low kernel mappings for passing arguments to EFI functions.
 */
void efi_sync_low_kernel_mappings(void)
{
	unsigned num_entries;
	pgd_t *pgd_k, *pgd_efi;
	pud_t *pud_k, *pud_efi;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return;

	/*
	 * We can share all PGD entries apart from the one entry that
	 * covers the EFI runtime mapping space.
	 *
	 * Make sure the EFI runtime region mappings are guaranteed to
	 * only span a single PGD entry and that the entry also maps
	 * other important kernel regions.
	 */
	BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
	BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
		     (EFI_VA_END & PGDIR_MASK));

	pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
	pgd_k = pgd_offset_k(PAGE_OFFSET);

	num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
	memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);

	/*
	 * We share all the PUD entries apart from those that map the
	 * EFI regions. Copy around them.
	 */
	BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
	BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);

	pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
	pud_efi = pud_offset(pgd_efi, 0);

	pgd_k = pgd_offset_k(EFI_VA_END);
	pud_k = pud_offset(pgd_k, 0);

	num_entries = pud_index(EFI_VA_END);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);

	pud_efi = pud_offset(pgd_efi, EFI_VA_START);
	pud_k = pud_offset(pgd_k, EFI_VA_START);

	num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
}

/*
 * Wrapper for slow_virt_to_phys() that handles NULL addresses.
 */
static inline phys_addr_t
virt_to_phys_or_null_size(void *va, unsigned long size)
{
	bool bad_size;

	if (!va)
		return 0;

	if (virt_addr_valid(va))
		return virt_to_phys(va);

	/*
	 * A fully aligned variable on the stack is guaranteed not to
	 * cross a page boundary. Try to catch strings on the stack by
	 * checking that 'size' is a power of two.
	 */
	bad_size = size > PAGE_SIZE || !is_power_of_2(size);

	WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size);

	return slow_virt_to_phys(va);
}

#define virt_to_phys_or_null(addr) \
	virt_to_phys_or_null_size((addr), sizeof(*(addr)))
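
/*
 * Summary (added comment): build the EFI page table used for runtime
 * services calls. Ident-map the new memory map that will be handed to
 * SetVirtualAddressMap(), and, when a 64-bit kernel runs on 32-bit
 * firmware (mixed mode), also map the kernel text 1:1 and allocate a
 * stack below 4GB so everything the firmware touches is addressable
 * with 32-bit pointers.
 */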
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
	unsigned long pfn, text;
	struct page *page;
	unsigned npages;
	pgd_t *pgd;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return 0;

	efi_scratch.efi_pgt = (pgd_t *)__pa(efi_pgd);
	pgd = efi_pgd;

	/*
	 * It can happen that the physical address of new_memmap lands in memory
	 * which is not mapped in the EFI page table. Therefore we need to go
	 * and ident-map those pages containing the map before calling
	 * phys_efi_set_virtual_address_map().
	 */
	pfn = pa_memmap >> PAGE_SHIFT;
	if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, _PAGE_NX | _PAGE_RW)) {
		pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
		return 1;
	}

	efi_scratch.use_pgd = true;

	/*
	 * When making calls to the firmware everything needs to be 1:1
	 * mapped and addressable with 32-bit pointers. Map the kernel
	 * text and allocate a new stack because we can't rely on the
	 * stack pointer being < 4GB.
	 */
	if (!IS_ENABLED(CONFIG_EFI_MIXED) || efi_is_native())
		return 0;

	page = alloc_page(GFP_KERNEL|__GFP_DMA32);
	if (!page)
		panic("Unable to allocate EFI runtime stack < 4GB\n");

	efi_scratch.phys_stack = virt_to_phys(page_address(page));
	efi_scratch.phys_stack += PAGE_SIZE; /* stack grows down */

	npages = (_etext - _text) >> PAGE_SHIFT;
	text = __pa(_text);
	pfn = text >> PAGE_SHIFT;

	if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, _PAGE_RW)) {
		pr_err("Failed to map kernel text 1:1\n");
		return 1;
	}

	return 0;
}

static void __init __map_region(efi_memory_desc_t *md, u64 va)
{
	unsigned long flags = _PAGE_RW;
	unsigned long pfn;
	pgd_t *pgd = efi_pgd;

	if (!(md->attribute & EFI_MEMORY_WB))
		flags |= _PAGE_PCD;

	pfn = md->phys_addr >> PAGE_SHIFT;
	if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
		pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
			md->phys_addr, va);
}

void __init efi_map_region(efi_memory_desc_t *md)
{
	unsigned long size = md->num_pages << PAGE_SHIFT;
	u64 pa = md->phys_addr;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return old_map_region(md);

	/*
	 * Make sure the 1:1 mappings are present as a catch-all for b0rked
	 * firmware which doesn't update all internal pointers after switching
	 * to virtual mode and would otherwise crap on us.
	 */
	__map_region(md, md->phys_addr);

	/*
	 * Enforce the 1:1 mapping as the default virtual address when
	 * booting in EFI mixed mode, because even though we may be
	 * running a 64-bit kernel, the firmware may only be 32-bit.
	 */
	if (!efi_is_native() && IS_ENABLED(CONFIG_EFI_MIXED)) {
		md->virt_addr = md->phys_addr;
		return;
	}

	efi_va -= size;

	/* Is PA 2M-aligned? */
	if (!(pa & (PMD_SIZE - 1))) {
		efi_va &= PMD_MASK;
	} else {
		u64 pa_offset = pa & (PMD_SIZE - 1);
		u64 prev_va = efi_va;

		/* get us the same offset within this 2M page */
		efi_va = (efi_va & PMD_MASK) + pa_offset;

		if (efi_va > prev_va)
			efi_va -= PMD_SIZE;
	}
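
	/*
	 * Illustrative example (added, not in the original source): if a
	 * region's physical address ends in offset 0x12000 within its 2MB
	 * page, efi_va is adjusted to carry the same 0x12000 offset,
	 * stepping back one PMD_SIZE page if that adjustment would move it
	 * upwards. PA and VA then agree modulo 2MB, so the region can
	 * still be mapped with large pages where possible.
	 */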

	if (efi_va < EFI_VA_END) {
		pr_warn(FW_WARN "VA address range overflow!\n");
		return;
	}

	/* Do the VA map */
	__map_region(md, efi_va);
	md->virt_addr = efi_va;
}

/*
 * kexec kernel will use efi_map_region_fixed to map efi runtime memory ranges.
 * md->virt_addr is the original virtual address which had been mapped in kexec
 * 1st kernel.
 */
void __init efi_map_region_fixed(efi_memory_desc_t *md)
{
	__map_region(md, md->phys_addr);
	__map_region(md, md->virt_addr);
}

void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
				 u32 type, u64 attribute)
{
	unsigned long last_map_pfn;

	if (type == EFI_MEMORY_MAPPED_IO)
		return ioremap(phys_addr, size);

	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
		unsigned long top = last_map_pfn << PAGE_SHIFT;
		efi_ioremap(top, size - (top - phys_addr), type, attribute);
	}

	if (!(attribute & EFI_MEMORY_WB))
		efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);

	return (void __iomem *)__va(phys_addr);
}

void __init parse_efi_setup(u64 phys_addr, u32 data_len)
{
	efi_setup = phys_addr + sizeof(struct setup_data);
}

void __init efi_runtime_update_mappings(void)
{
	unsigned long pfn;
	pgd_t *pgd = efi_pgd;
	efi_memory_desc_t *md;

	if (efi_enabled(EFI_OLD_MEMMAP)) {
		if (__supported_pte_mask & _PAGE_NX)
			runtime_code_page_mkexec();
		return;
	}

	if (!efi_enabled(EFI_NX_PE_DATA))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long pf = 0;

		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;

		if (!(md->attribute & EFI_MEMORY_WB))
			pf |= _PAGE_PCD;

		if ((md->attribute & EFI_MEMORY_XP) ||
		    (md->type == EFI_RUNTIME_SERVICES_DATA))
			pf |= _PAGE_NX;

		if (!(md->attribute & EFI_MEMORY_RO) &&
		    (md->type != EFI_RUNTIME_SERVICES_CODE))
			pf |= _PAGE_RW;

		/* Update the 1:1 mapping */
		pfn = md->phys_addr >> PAGE_SHIFT;
		if (kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf))
			pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
				md->phys_addr, md->virt_addr);

		if (kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf))
			pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
				md->phys_addr, md->virt_addr);
	}
}

void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
	ptdump_walk_pgd_level(NULL, efi_pgd);
#endif
}
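
/*
 * Mixed mode (CONFIG_EFI_MIXED): a 64-bit kernel running on 32-bit EFI
 * firmware. Runtime services are reached through efi64_thunk(), and the
 * thunked wrappers below pass every pointer argument as a 32-bit
 * physical address, since the firmware cannot handle 64-bit pointers.
 */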
#ifdef CONFIG_EFI_MIXED
extern efi_status_t efi64_thunk(u32, ...);

#define runtime_service32(func)						 \
({									 \
	u32 table = (u32)(unsigned long)efi.systab;			 \
	u32 *rt, *___f;							 \
									 \
	rt = (u32 *)(table + offsetof(efi_system_table_32_t, runtime)); \
	___f = (u32 *)(*rt + offsetof(efi_runtime_services_32_t, func)); \
	*___f;								 \
})

/*
 * Switch to the EFI page tables early so that we can access the 1:1
 * runtime services mappings which are not mapped in any other page
 * tables. This function must be called before runtime_service32().
 *
 * Also, disable interrupts because the IDT points to 64-bit handlers,
 * which aren't going to function correctly when we switch to 32-bit.
 */
#define efi_thunk(f, ...)						\
({									\
	efi_status_t __s;						\
	unsigned long __flags;						\
	u32 __func;							\
									\
	local_irq_save(__flags);					\
	arch_efi_call_virt_setup();					\
									\
	__func = runtime_service32(f);					\
	__s = efi64_thunk(__func, __VA_ARGS__);				\
									\
	arch_efi_call_virt_teardown();					\
	local_irq_restore(__flags);					\
									\
	__s;								\
})

efi_status_t efi_thunk_set_virtual_address_map(
	void *phys_set_virtual_address_map,
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map)
{
	efi_status_t status;
	unsigned long flags;
	u32 func;

	efi_sync_low_kernel_mappings();
	local_irq_save(flags);

	efi_scratch.prev_cr3 = read_cr3();
	write_cr3((unsigned long)efi_scratch.efi_pgt);
	__flush_tlb_all();

	func = (u32)(unsigned long)phys_set_virtual_address_map;
	status = efi64_thunk(func, memory_map_size, descriptor_size,
			     descriptor_version, virtual_map);

	write_cr3(efi_scratch.prev_cr3);
	__flush_tlb_all();
	local_irq_restore(flags);

	return status;
}

static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
	efi_status_t status;
	u32 phys_tm, phys_tc;

	spin_lock(&rtc_lock);

	phys_tm = virt_to_phys_or_null(tm);
	phys_tc = virt_to_phys_or_null(tc);

	status = efi_thunk(get_time, phys_tm, phys_tc);

	spin_unlock(&rtc_lock);

	return status;
}

static efi_status_t efi_thunk_set_time(efi_time_t *tm)
{
	efi_status_t status;
	u32 phys_tm;

	spin_lock(&rtc_lock);

	phys_tm = virt_to_phys_or_null(tm);

	status = efi_thunk(set_time, phys_tm);

	spin_unlock(&rtc_lock);

	return status;
}

static efi_status_t
efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
			  efi_time_t *tm)
{
	efi_status_t status;
	u32 phys_enabled, phys_pending, phys_tm;

	spin_lock(&rtc_lock);

	phys_enabled = virt_to_phys_or_null(enabled);
	phys_pending = virt_to_phys_or_null(pending);
	phys_tm = virt_to_phys_or_null(tm);

	status = efi_thunk(get_wakeup_time, phys_enabled,
			   phys_pending, phys_tm);

	spin_unlock(&rtc_lock);

	return status;
}

static efi_status_t
efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
	efi_status_t status;
	u32 phys_tm;

	spin_lock(&rtc_lock);

	phys_tm = virt_to_phys_or_null(tm);

	status = efi_thunk(set_wakeup_time, enabled, phys_tm);

	spin_unlock(&rtc_lock);

	return status;
}

static unsigned long efi_name_size(efi_char16_t *name)
{
	return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1;
}
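
/*
 * Note (added): the variable-service thunks below translate name, vendor
 * and data pointers with virt_to_phys_or_null_size() rather than plain
 * virt_to_phys(), because the arguments may live on the (vmapped) stack;
 * that helper warns if such an object could straddle a page boundary.
 */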

static efi_status_t
efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 *attr, unsigned long *data_size, void *data)
{
	efi_status_t status;
	u32 phys_name, phys_vendor, phys_attr;
	u32 phys_data_size, phys_data;

	phys_data_size = virt_to_phys_or_null(data_size);
	phys_vendor = virt_to_phys_or_null(vendor);
	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_attr = virt_to_phys_or_null(attr);
	phys_data = virt_to_phys_or_null_size(data, *data_size);

	status = efi_thunk(get_variable, phys_name, phys_vendor,
			   phys_attr, phys_data_size, phys_data);

	return status;
}

static efi_status_t
efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 attr, unsigned long data_size, void *data)
{
	u32 phys_name, phys_vendor, phys_data;
	efi_status_t status;

	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_vendor = virt_to_phys_or_null(vendor);
	phys_data = virt_to_phys_or_null_size(data, data_size);

	/* If data_size is > sizeof(u32) we've got problems */
	status = efi_thunk(set_variable, phys_name, phys_vendor,
			   attr, data_size, phys_data);

	return status;
}

static efi_status_t
efi_thunk_get_next_variable(unsigned long *name_size,
			    efi_char16_t *name,
			    efi_guid_t *vendor)
{
	efi_status_t status;
	u32 phys_name_size, phys_name, phys_vendor;

	phys_name_size = virt_to_phys_or_null(name_size);
	phys_vendor = virt_to_phys_or_null(vendor);
	phys_name = virt_to_phys_or_null_size(name, *name_size);

	status = efi_thunk(get_next_variable, phys_name_size,
			   phys_name, phys_vendor);

	return status;
}

static efi_status_t
efi_thunk_get_next_high_mono_count(u32 *count)
{
	efi_status_t status;
	u32 phys_count;

	phys_count = virt_to_phys_or_null(count);
	status = efi_thunk(get_next_high_mono_count, phys_count);

	return status;
}

static void
efi_thunk_reset_system(int reset_type, efi_status_t status,
		       unsigned long data_size, efi_char16_t *data)
{
	u32 phys_data;

	phys_data = virt_to_phys_or_null_size(data, data_size);

	efi_thunk(reset_system, reset_type, status, data_size, phys_data);
}

static efi_status_t
efi_thunk_update_capsule(efi_capsule_header_t **capsules,
			 unsigned long count, unsigned long sg_list)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}
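
/*
 * Note (added): QueryVariableInfo() only exists in runtime services
 * tables from EFI 2.00 system table revisions onwards, hence the
 * version check before thunking to it.
 */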
static efi_status_t
efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
			      u64 *remaining_space,
			      u64 *max_variable_size)
{
	efi_status_t status;
	u32 phys_storage, phys_remaining, phys_max;

	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	phys_storage = virt_to_phys_or_null(storage_space);
	phys_remaining = virt_to_phys_or_null(remaining_space);
	phys_max = virt_to_phys_or_null(max_variable_size);

	status = efi_thunk(query_variable_info, attr, phys_storage,
			   phys_remaining, phys_max);

	return status;
}

static efi_status_t
efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules,
			     unsigned long count, u64 *max_size,
			     int *reset_type)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}

void efi_thunk_runtime_setup(void)
{
	efi.get_time = efi_thunk_get_time;
	efi.set_time = efi_thunk_set_time;
	efi.get_wakeup_time = efi_thunk_get_wakeup_time;
	efi.set_wakeup_time = efi_thunk_set_wakeup_time;
	efi.get_variable = efi_thunk_get_variable;
	efi.get_next_variable = efi_thunk_get_next_variable;
	efi.set_variable = efi_thunk_set_variable;
	efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
	efi.reset_system = efi_thunk_reset_system;
	efi.query_variable_info = efi_thunk_query_variable_info;
	efi.update_capsule = efi_thunk_update_capsule;
	efi.query_capsule_caps = efi_thunk_query_capsule_caps;
}
#endif /* CONFIG_EFI_MIXED */