/*
 * x86_64 specific EFI support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Bibo Mao <bibo.mao@intel.com>
 *	Chandramouli Narayanan <mouli@linux.intel.com>
 *	Huang Ying <ying.huang@intel.com>
 *
 * Code to convert EFI to E820 map has been implemented in the elilo
 * bootloader based on an EFI patch by Edgar Hucek. Based on the E820 map,
 * the page table is set up appropriately for EFI runtime code.
 * - mouli 06/14/2007.
 *
 */

#define pr_fmt(fmt) "efi: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/slab.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/e820.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/efi.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/time.h>
#include <asm/pgalloc.h>

/*
 * We allocate runtime services regions top-down, starting from -4G, i.e.
 * 0xffff_ffff_0000_0000, and limit EFI VA mapping space to 64G.
 */
static u64 efi_va = EFI_VA_START;

struct efi_scratch efi_scratch;

static void __init early_code_mapping_set_exec(int executable)
{
	efi_memory_desc_t *md;
	void *p;

	if (!(__supported_pte_mask & _PAGE_NX))
		return;

	/* Make EFI service code area executable */
	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;
		if (md->type == EFI_RUNTIME_SERVICES_CODE ||
		    md->type == EFI_BOOT_SERVICES_CODE)
			efi_set_executable(md, executable);
	}
}

pgd_t * __init efi_call_phys_prolog(void)
{
	unsigned long vaddress;
	pgd_t *save_pgd;

	int pgd;
	int n_pgds;

	if (!efi_enabled(EFI_OLD_MEMMAP)) {
		save_pgd = (pgd_t *)read_cr3();
		write_cr3((unsigned long)efi_scratch.efi_pgt);
		goto out;
	}

	early_code_mapping_set_exec(1);

	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
	save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);
	if (!save_pgd)
		return NULL;

	for (pgd = 0; pgd < n_pgds; pgd++) {
		save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
		vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
	}
out:
	__flush_tlb_all();

	return save_pgd;
}

void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
	/*
	 * Restore the original page table.
	 */
	int pgd_idx;
	int nr_pgds;

	if (!efi_enabled(EFI_OLD_MEMMAP)) {
		write_cr3((unsigned long)save_pgd);
		__flush_tlb_all();
		return;
	}

	nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);

	for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++)
		set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);

	kfree(save_pgd);

	__flush_tlb_all();
	early_code_mapping_set_exec(0);
}
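/*
 * Typical usage of the prolog/epilog pair around a physical-mode
 * firmware call (a sketch only; the actual call site lives elsewhere
 * in the EFI setup code, around phys_efi_set_virtual_address_map()):
 *
 *	pgd_t *save_pgd = efi_call_phys_prolog();
 *	... call the firmware via the 1:1 mapping ...
 *	efi_call_phys_epilog(save_pgd);
 */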
static pgd_t *efi_pgd;

/*
 * We need our own copy of the higher levels of the page tables
 * because we want to avoid inserting EFI region mappings (EFI_VA_END
 * to EFI_VA_START) into the standard kernel page tables. Everything
 * else can be shared, see efi_sync_low_kernel_mappings().
 */
int __init efi_alloc_page_tables(void)
{
	pgd_t *pgd;
	pud_t *pud;
	gfp_t gfp_mask;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return 0;

	gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO;
	efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
	if (!efi_pgd)
		return -ENOMEM;

	pgd = efi_pgd + pgd_index(EFI_VA_END);

	pud = pud_alloc_one(NULL, 0);
	if (!pud) {
		free_page((unsigned long)efi_pgd);
		return -ENOMEM;
	}

	pgd_populate(NULL, pgd, pud);

	return 0;
}

/*
 * Add low kernel mappings for passing arguments to EFI functions.
 */
void efi_sync_low_kernel_mappings(void)
{
	unsigned num_entries;
	pgd_t *pgd_k, *pgd_efi;
	pud_t *pud_k, *pud_efi;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return;

	/*
	 * We can share all PGD entries apart from the one entry that
	 * covers the EFI runtime mapping space.
	 *
	 * Make sure the EFI runtime region mappings are guaranteed to
	 * only span a single PGD entry and that the entry also maps
	 * other important kernel regions.
	 */
	BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
	BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
			(EFI_VA_END & PGDIR_MASK));

	pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
	pgd_k = pgd_offset_k(PAGE_OFFSET);

	num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
	memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);

	/*
	 * We share all the PUD entries apart from those that map the
	 * EFI regions. Copy around them.
	 */
	BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
	BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);

	pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
	pud_efi = pud_offset(pgd_efi, 0);

	pgd_k = pgd_offset_k(EFI_VA_END);
	pud_k = pud_offset(pgd_k, 0);

	num_entries = pud_index(EFI_VA_END);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);

	pud_efi = pud_offset(pgd_efi, EFI_VA_START);
	pud_k = pud_offset(pgd_k, EFI_VA_START);

	num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
}

int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
	unsigned long pfn, text;
	efi_memory_desc_t *md;
	struct page *page;
	unsigned npages;
	pgd_t *pgd;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return 0;

	efi_scratch.efi_pgt = (pgd_t *)__pa(efi_pgd);
	pgd = efi_pgd;

	/*
	 * It can happen that the physical address of new_memmap lands in memory
	 * which is not mapped in the EFI page table. Therefore we need to go
	 * and ident-map those pages containing the map before calling
	 * phys_efi_set_virtual_address_map().
	 */
	pfn = pa_memmap >> PAGE_SHIFT;
	if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, _PAGE_NX | _PAGE_RW)) {
		pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
		return 1;
	}

	efi_scratch.use_pgd = true;
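	/*
	 * With use_pgd set, the efi_call assembly stub (see the use_pgd
	 * handling in efi_stub_64.S) is expected to switch CR3 to
	 * efi_scratch.efi_pgt around each runtime service call and
	 * restore it afterwards.
	 */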
	/*
	 * When making calls to the firmware everything needs to be 1:1
	 * mapped and addressable with 32-bit pointers. Map the kernel
	 * text and allocate a new stack because we can't rely on the
	 * stack pointer being < 4GB.
	 */
	if (!IS_ENABLED(CONFIG_EFI_MIXED))
		return 0;

	/*
	 * Map all of RAM so that we can access arguments in the 1:1
	 * mapping when making EFI runtime calls.
	 */
	for_each_efi_memory_desc(&memmap, md) {
		if (md->type != EFI_CONVENTIONAL_MEMORY &&
		    md->type != EFI_LOADER_DATA &&
		    md->type != EFI_LOADER_CODE)
			continue;

		pfn = md->phys_addr >> PAGE_SHIFT;
		npages = md->num_pages;

		if (kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, npages, _PAGE_RW)) {
			pr_err("Failed to map 1:1 memory\n");
			return 1;
		}
	}

	page = alloc_page(GFP_KERNEL|__GFP_DMA32);
	if (!page)
		panic("Unable to allocate EFI runtime stack < 4GB\n");

	efi_scratch.phys_stack = virt_to_phys(page_address(page));
	efi_scratch.phys_stack += PAGE_SIZE; /* stack grows down */

	npages = (_etext - _text) >> PAGE_SHIFT;
	text = __pa(_text);
	pfn = text >> PAGE_SHIFT;

	if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, _PAGE_RW)) {
		pr_err("Failed to map kernel text 1:1\n");
		return 1;
	}

	return 0;
}

void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
	kernel_unmap_pages_in_pgd(efi_pgd, pa_memmap, num_pages);
}

static void __init __map_region(efi_memory_desc_t *md, u64 va)
{
	unsigned long flags = _PAGE_RW;
	unsigned long pfn;
	pgd_t *pgd = efi_pgd;

	if (!(md->attribute & EFI_MEMORY_WB))
		flags |= _PAGE_PCD;

	pfn = md->phys_addr >> PAGE_SHIFT;
	if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
		pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
			md->phys_addr, va);
}

void __init efi_map_region(efi_memory_desc_t *md)
{
	unsigned long size = md->num_pages << PAGE_SHIFT;
	u64 pa = md->phys_addr;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return old_map_region(md);

	/*
	 * Make sure the 1:1 mappings are present as a catch-all for b0rked
	 * firmware which doesn't update all internal pointers after switching
	 * to virtual mode and would otherwise crap on us.
	 */
	__map_region(md, md->phys_addr);

	/*
	 * Enforce the 1:1 mapping as the default virtual address when
	 * booting in EFI mixed mode, because even though we may be
	 * running a 64-bit kernel, the firmware may only be 32-bit.
	 */
	if (!efi_is_native() && IS_ENABLED(CONFIG_EFI_MIXED)) {
		md->virt_addr = md->phys_addr;
		return;
	}

	efi_va -= size;

	/* Is PA 2M-aligned? */
	if (!(pa & (PMD_SIZE - 1))) {
		efi_va &= PMD_MASK;
	} else {
		u64 pa_offset = pa & (PMD_SIZE - 1);
		u64 prev_va = efi_va;

		/* get us the same offset within this 2M page */
		efi_va = (efi_va & PMD_MASK) + pa_offset;

		if (efi_va > prev_va)
			efi_va -= PMD_SIZE;
	}

	if (efi_va < EFI_VA_END) {
		pr_warn(FW_WARN "VA address range overflow!\n");
		return;
	}

	/* Do the VA map */
	__map_region(md, efi_va);
	md->virt_addr = efi_va;
}
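/*
 * Illustrative example of the VA allocation above (numbers made up):
 * for a 16M region at PA 0x80201000, which is not 2M-aligned, the
 * offset within the 2M page (0x1000) is preserved and efi_va is
 * rounded down to the previous 2M boundary plus that offset. Keeping
 * PA and VA congruent modulo PMD_SIZE lets the mapping code use 2M
 * pages where possible.
 */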
/*
 * The kexec kernel will use efi_map_region_fixed to map EFI runtime
 * memory ranges. md->virt_addr is the original virtual address which
 * had been mapped in the kexec 1st kernel.
 */
void __init efi_map_region_fixed(efi_memory_desc_t *md)
{
	__map_region(md, md->virt_addr);
}

void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
				 u32 type, u64 attribute)
{
	unsigned long last_map_pfn;

	if (type == EFI_MEMORY_MAPPED_IO)
		return ioremap(phys_addr, size);

	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
		unsigned long top = last_map_pfn << PAGE_SHIFT;
		efi_ioremap(top, size - (top - phys_addr), type, attribute);
	}

	if (!(attribute & EFI_MEMORY_WB))
		efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);

	return (void __iomem *)__va(phys_addr);
}

void __init parse_efi_setup(u64 phys_addr, u32 data_len)
{
	efi_setup = phys_addr + sizeof(struct setup_data);
}

void __init efi_runtime_update_mappings(void)
{
	unsigned long pfn;
	pgd_t *pgd = efi_pgd;
	efi_memory_desc_t *md;
	void *p;

	if (efi_enabled(EFI_OLD_MEMMAP)) {
		if (__supported_pte_mask & _PAGE_NX)
			runtime_code_page_mkexec();
		return;
	}

	if (!efi_enabled(EFI_NX_PE_DATA))
		return;

	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		unsigned long pf = 0;
		md = p;

		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;

		if (!(md->attribute & EFI_MEMORY_WB))
			pf |= _PAGE_PCD;

		if ((md->attribute & EFI_MEMORY_XP) ||
		    (md->type == EFI_RUNTIME_SERVICES_DATA))
			pf |= _PAGE_NX;

		if (!(md->attribute & EFI_MEMORY_RO) &&
		    (md->type != EFI_RUNTIME_SERVICES_CODE))
			pf |= _PAGE_RW;

		/* Update the 1:1 mapping */
		pfn = md->phys_addr >> PAGE_SHIFT;
		if (kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf))
			pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
				md->phys_addr, md->virt_addr);

		if (kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf))
			pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
				md->phys_addr, md->virt_addr);
	}
}

void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
	ptdump_walk_pgd_level(NULL, efi_pgd);
#endif
}

#ifdef CONFIG_EFI_MIXED
extern efi_status_t efi64_thunk(u32, ...);

#define runtime_service32(func)						 \
({									 \
	u32 table = (u32)(unsigned long)efi.systab;			 \
	u32 *rt, *___f;							 \
									 \
	rt = (u32 *)(table + offsetof(efi_system_table_32_t, runtime)); \
	___f = (u32 *)(*rt + offsetof(efi_runtime_services_32_t, func)); \
	*___f;								 \
})
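/*
 * Example: runtime_service32(get_time) reads the 32-bit function
 * pointer for GetTime() out of the firmware's 32-bit runtime services
 * table; efi_thunk() below feeds that pointer to efi64_thunk().
 */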
/*
 * Switch to the EFI page tables early so that we can access the 1:1
 * runtime services mappings which are not mapped in any other page
 * tables. This function must be called before runtime_service32().
 *
 * Also, disable interrupts because the IDT points to 64-bit handlers,
 * which aren't going to function correctly when we switch to 32-bit.
 */
#define efi_thunk(f, ...)						\
({									\
	efi_status_t __s;						\
	unsigned long flags;						\
	u32 func;							\
									\
	efi_sync_low_kernel_mappings();					\
	local_irq_save(flags);						\
									\
	efi_scratch.prev_cr3 = read_cr3();				\
	write_cr3((unsigned long)efi_scratch.efi_pgt);			\
	__flush_tlb_all();						\
									\
	func = runtime_service32(f);					\
	__s = efi64_thunk(func, __VA_ARGS__);				\
									\
	write_cr3(efi_scratch.prev_cr3);				\
	__flush_tlb_all();						\
	local_irq_restore(flags);					\
									\
	__s;								\
})

efi_status_t efi_thunk_set_virtual_address_map(
	void *phys_set_virtual_address_map,
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map)
{
	efi_status_t status;
	unsigned long flags;
	u32 func;

	efi_sync_low_kernel_mappings();
	local_irq_save(flags);

	efi_scratch.prev_cr3 = read_cr3();
	write_cr3((unsigned long)efi_scratch.efi_pgt);
	__flush_tlb_all();

	func = (u32)(unsigned long)phys_set_virtual_address_map;
	status = efi64_thunk(func, memory_map_size, descriptor_size,
			     descriptor_version, virtual_map);

	write_cr3(efi_scratch.prev_cr3);
	__flush_tlb_all();
	local_irq_restore(flags);

	return status;
}

static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
	efi_status_t status;
	u32 phys_tm, phys_tc;

	spin_lock(&rtc_lock);

	phys_tm = virt_to_phys(tm);
	phys_tc = virt_to_phys(tc);

	status = efi_thunk(get_time, phys_tm, phys_tc);

	spin_unlock(&rtc_lock);

	return status;
}

static efi_status_t efi_thunk_set_time(efi_time_t *tm)
{
	efi_status_t status;
	u32 phys_tm;

	spin_lock(&rtc_lock);

	phys_tm = virt_to_phys(tm);

	status = efi_thunk(set_time, phys_tm);

	spin_unlock(&rtc_lock);

	return status;
}

static efi_status_t
efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
			  efi_time_t *tm)
{
	efi_status_t status;
	u32 phys_enabled, phys_pending, phys_tm;

	spin_lock(&rtc_lock);

	phys_enabled = virt_to_phys(enabled);
	phys_pending = virt_to_phys(pending);
	phys_tm = virt_to_phys(tm);

	status = efi_thunk(get_wakeup_time, phys_enabled,
			   phys_pending, phys_tm);

	spin_unlock(&rtc_lock);

	return status;
}

static efi_status_t
efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
	efi_status_t status;
	u32 phys_tm;

	spin_lock(&rtc_lock);

	phys_tm = virt_to_phys(tm);

	status = efi_thunk(set_wakeup_time, enabled, phys_tm);

	spin_unlock(&rtc_lock);

	return status;
}

static efi_status_t
efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 *attr, unsigned long *data_size, void *data)
{
	efi_status_t status;
	u32 phys_name, phys_vendor, phys_attr;
	u32 phys_data_size, phys_data;

	phys_data_size = virt_to_phys(data_size);
	phys_vendor = virt_to_phys(vendor);
	phys_name = virt_to_phys(name);
	phys_attr = virt_to_phys(attr);
	phys_data = virt_to_phys(data);

	status = efi_thunk(get_variable, phys_name, phys_vendor,
			   phys_attr, phys_data_size, phys_data);

	return status;
}
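/*
 * Note that the physical addresses passed to efi_thunk() above and
 * below are truncated to u32: a 32-bit firmware can only dereference
 * pointers below 4GB, so the buffers handed to these services must
 * live in the low 4GB of physical memory for the calls to work.
 */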
static efi_status_t
efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 attr, unsigned long data_size, void *data)
{
	u32 phys_name, phys_vendor, phys_data;
	efi_status_t status;

	phys_name = virt_to_phys(name);
	phys_vendor = virt_to_phys(vendor);
	phys_data = virt_to_phys(data);

	/* If data_size is > sizeof(u32) we've got problems */
	status = efi_thunk(set_variable, phys_name, phys_vendor,
			   attr, data_size, phys_data);

	return status;
}

static efi_status_t
efi_thunk_get_next_variable(unsigned long *name_size,
			    efi_char16_t *name,
			    efi_guid_t *vendor)
{
	efi_status_t status;
	u32 phys_name_size, phys_name, phys_vendor;

	phys_name_size = virt_to_phys(name_size);
	phys_vendor = virt_to_phys(vendor);
	phys_name = virt_to_phys(name);

	status = efi_thunk(get_next_variable, phys_name_size,
			   phys_name, phys_vendor);

	return status;
}

static efi_status_t
efi_thunk_get_next_high_mono_count(u32 *count)
{
	efi_status_t status;
	u32 phys_count;

	phys_count = virt_to_phys(count);
	status = efi_thunk(get_next_high_mono_count, phys_count);

	return status;
}

static void
efi_thunk_reset_system(int reset_type, efi_status_t status,
		       unsigned long data_size, efi_char16_t *data)
{
	u32 phys_data;

	phys_data = virt_to_phys(data);

	efi_thunk(reset_system, reset_type, status, data_size, phys_data);
}

static efi_status_t
efi_thunk_update_capsule(efi_capsule_header_t **capsules,
			 unsigned long count, unsigned long sg_list)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}

static efi_status_t
efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
			      u64 *remaining_space,
			      u64 *max_variable_size)
{
	efi_status_t status;
	u32 phys_storage, phys_remaining, phys_max;

	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	phys_storage = virt_to_phys(storage_space);
	phys_remaining = virt_to_phys(remaining_space);
	phys_max = virt_to_phys(max_variable_size);

	status = efi_thunk(query_variable_info, attr, phys_storage,
			   phys_remaining, phys_max);

	return status;
}

static efi_status_t
efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules,
			     unsigned long count, u64 *max_size,
			     int *reset_type)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}

void efi_thunk_runtime_setup(void)
{
	efi.get_time = efi_thunk_get_time;
	efi.set_time = efi_thunk_set_time;
	efi.get_wakeup_time = efi_thunk_get_wakeup_time;
	efi.set_wakeup_time = efi_thunk_set_wakeup_time;
	efi.get_variable = efi_thunk_get_variable;
	efi.get_next_variable = efi_thunk_get_next_variable;
	efi.set_variable = efi_thunk_set_variable;
	efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
	efi.reset_system = efi_thunk_reset_system;
	efi.query_variable_info = efi_thunk_query_variable_info;
	efi.update_capsule = efi_thunk_update_capsule;
	efi.query_capsule_caps = efi_thunk_query_capsule_caps;
}
#endif /* CONFIG_EFI_MIXED */