1d2912cb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2da58fb65SArd Biesheuvel /*
3da58fb65SArd Biesheuvel * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
4da58fb65SArd Biesheuvel */
5da58fb65SArd Biesheuvel
6da58fb65SArd Biesheuvel #include <linux/efi.h>
769e377b2SArd Biesheuvel #include <linux/memblock.h>
8*8b0d1354SThomas Zimmermann #include <linux/screen_info.h>
9*8b0d1354SThomas Zimmermann
10da58fb65SArd Biesheuvel #include <asm/efi.h>
11da58fb65SArd Biesheuvel #include <asm/mach/map.h>
12da58fb65SArd Biesheuvel #include <asm/mmu_context.h>
13da58fb65SArd Biesheuvel
set_permissions(pte_t * ptep,unsigned long addr,void * data)148b1e0f81SAnshuman Khandual static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
159fc68b71SArd Biesheuvel {
169fc68b71SArd Biesheuvel efi_memory_desc_t *md = data;
179fc68b71SArd Biesheuvel pte_t pte = *ptep;
189fc68b71SArd Biesheuvel
199fc68b71SArd Biesheuvel if (md->attribute & EFI_MEMORY_RO)
209fc68b71SArd Biesheuvel pte = set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
219fc68b71SArd Biesheuvel if (md->attribute & EFI_MEMORY_XP)
229fc68b71SArd Biesheuvel pte = set_pte_bit(pte, __pgprot(L_PTE_XN));
239fc68b71SArd Biesheuvel set_pte_ext(ptep, pte, PTE_EXT_NG);
249fc68b71SArd Biesheuvel return 0;
259fc68b71SArd Biesheuvel }
269fc68b71SArd Biesheuvel
efi_set_mapping_permissions(struct mm_struct * mm,efi_memory_desc_t * md,bool ignored)279fc68b71SArd Biesheuvel int __init efi_set_mapping_permissions(struct mm_struct *mm,
28cf1d2ffcSArd Biesheuvel efi_memory_desc_t *md,
29cf1d2ffcSArd Biesheuvel bool ignored)
309fc68b71SArd Biesheuvel {
319fc68b71SArd Biesheuvel unsigned long base, size;
329fc68b71SArd Biesheuvel
339fc68b71SArd Biesheuvel base = md->virt_addr;
349fc68b71SArd Biesheuvel size = md->num_pages << EFI_PAGE_SHIFT;
359fc68b71SArd Biesheuvel
369fc68b71SArd Biesheuvel /*
379fc68b71SArd Biesheuvel * We can only use apply_to_page_range() if we can guarantee that the
389fc68b71SArd Biesheuvel * entire region was mapped using pages. This should be the case if the
399fc68b71SArd Biesheuvel * region does not cover any naturally aligned SECTION_SIZE sized
409fc68b71SArd Biesheuvel * blocks.
419fc68b71SArd Biesheuvel */
429fc68b71SArd Biesheuvel if (round_down(base + size, SECTION_SIZE) <
439fc68b71SArd Biesheuvel round_up(base, SECTION_SIZE) + SECTION_SIZE)
449fc68b71SArd Biesheuvel return apply_to_page_range(mm, base, size, set_permissions, md);
459fc68b71SArd Biesheuvel
469fc68b71SArd Biesheuvel return 0;
479fc68b71SArd Biesheuvel }
489fc68b71SArd Biesheuvel
efi_create_mapping(struct mm_struct * mm,efi_memory_desc_t * md)49da58fb65SArd Biesheuvel int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
50da58fb65SArd Biesheuvel {
51da58fb65SArd Biesheuvel struct map_desc desc = {
52da58fb65SArd Biesheuvel .virtual = md->virt_addr,
53da58fb65SArd Biesheuvel .pfn = __phys_to_pfn(md->phys_addr),
54da58fb65SArd Biesheuvel .length = md->num_pages * EFI_PAGE_SIZE,
55da58fb65SArd Biesheuvel };
56da58fb65SArd Biesheuvel
57da58fb65SArd Biesheuvel /*
58da58fb65SArd Biesheuvel * Order is important here: memory regions may have all of the
59da58fb65SArd Biesheuvel * bits below set (and usually do), so we check them in order of
60da58fb65SArd Biesheuvel * preference.
61da58fb65SArd Biesheuvel */
62da58fb65SArd Biesheuvel if (md->attribute & EFI_MEMORY_WB)
63da58fb65SArd Biesheuvel desc.type = MT_MEMORY_RWX;
64da58fb65SArd Biesheuvel else if (md->attribute & EFI_MEMORY_WT)
65da58fb65SArd Biesheuvel desc.type = MT_MEMORY_RWX_NONCACHED;
66da58fb65SArd Biesheuvel else if (md->attribute & EFI_MEMORY_WC)
67da58fb65SArd Biesheuvel desc.type = MT_DEVICE_WC;
68da58fb65SArd Biesheuvel else
69da58fb65SArd Biesheuvel desc.type = MT_DEVICE;
70da58fb65SArd Biesheuvel
71da58fb65SArd Biesheuvel create_mapping_late(mm, &desc, true);
729fc68b71SArd Biesheuvel
739fc68b71SArd Biesheuvel /*
749fc68b71SArd Biesheuvel * If stricter permissions were specified, apply them now.
759fc68b71SArd Biesheuvel */
769fc68b71SArd Biesheuvel if (md->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))
77cf1d2ffcSArd Biesheuvel return efi_set_mapping_permissions(mm, md, false);
78da58fb65SArd Biesheuvel return 0;
79da58fb65SArd Biesheuvel }
8069e377b2SArd Biesheuvel
/*
 * Physical address of the CPU entry state table recorded by the EFI stub,
 * or EFI_INVALID_TABLE_ADDR if the firmware did not provide one. Filled
 * in by the generic EFI config-table parser via efi_arch_tables[] below.
 */
static unsigned long __initdata cpu_state_table = EFI_INVALID_TABLE_ADDR;

/* ARM-specific EFI configuration tables matched during efi_init(). */
const efi_config_table_type_t efi_arch_tables[] __initconst = {
	{LINUX_EFI_ARM_CPU_STATE_TABLE_GUID, &cpu_state_table},
	{}
};
8769e377b2SArd Biesheuvel
load_cpu_state_table(void)8869e377b2SArd Biesheuvel static void __init load_cpu_state_table(void)
8969e377b2SArd Biesheuvel {
9069e377b2SArd Biesheuvel if (cpu_state_table != EFI_INVALID_TABLE_ADDR) {
9169e377b2SArd Biesheuvel struct efi_arm_entry_state *state;
9269e377b2SArd Biesheuvel bool dump_state = true;
9369e377b2SArd Biesheuvel
9469e377b2SArd Biesheuvel state = early_memremap_ro(cpu_state_table,
9569e377b2SArd Biesheuvel sizeof(struct efi_arm_entry_state));
9669e377b2SArd Biesheuvel if (state == NULL) {
9769e377b2SArd Biesheuvel pr_warn("Unable to map CPU entry state table.\n");
9869e377b2SArd Biesheuvel return;
9969e377b2SArd Biesheuvel }
10069e377b2SArd Biesheuvel
10169e377b2SArd Biesheuvel if ((state->sctlr_before_ebs & 1) == 0)
10269e377b2SArd Biesheuvel pr_warn(FW_BUG "EFI stub was entered with MMU and Dcache disabled, please fix your firmware!\n");
10369e377b2SArd Biesheuvel else if ((state->sctlr_after_ebs & 1) == 0)
10469e377b2SArd Biesheuvel pr_warn(FW_BUG "ExitBootServices() returned with MMU and Dcache disabled, please fix your firmware!\n");
10569e377b2SArd Biesheuvel else
10669e377b2SArd Biesheuvel dump_state = false;
10769e377b2SArd Biesheuvel
10869e377b2SArd Biesheuvel if (dump_state || efi_enabled(EFI_DBG)) {
10969e377b2SArd Biesheuvel pr_info("CPSR at EFI stub entry : 0x%08x\n",
11069e377b2SArd Biesheuvel state->cpsr_before_ebs);
11169e377b2SArd Biesheuvel pr_info("SCTLR at EFI stub entry : 0x%08x\n",
11269e377b2SArd Biesheuvel state->sctlr_before_ebs);
11369e377b2SArd Biesheuvel pr_info("CPSR after ExitBootServices() : 0x%08x\n",
11469e377b2SArd Biesheuvel state->cpsr_after_ebs);
11569e377b2SArd Biesheuvel pr_info("SCTLR after ExitBootServices(): 0x%08x\n",
11669e377b2SArd Biesheuvel state->sctlr_after_ebs);
11769e377b2SArd Biesheuvel }
11869e377b2SArd Biesheuvel early_memunmap(state, sizeof(struct efi_arm_entry_state));
11969e377b2SArd Biesheuvel }
12069e377b2SArd Biesheuvel }
12169e377b2SArd Biesheuvel
arm_efi_init(void)12269e377b2SArd Biesheuvel void __init arm_efi_init(void)
12369e377b2SArd Biesheuvel {
12469e377b2SArd Biesheuvel efi_init();
12569e377b2SArd Biesheuvel
126732ea9dbSArd Biesheuvel if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI) {
127732ea9dbSArd Biesheuvel /* dummycon on ARM needs non-zero values for columns/lines */
128732ea9dbSArd Biesheuvel screen_info.orig_video_cols = 80;
129732ea9dbSArd Biesheuvel screen_info.orig_video_lines = 25;
130732ea9dbSArd Biesheuvel }
13169e377b2SArd Biesheuvel
13269e377b2SArd Biesheuvel /* ARM does not permit early mappings to persist across paging_init() */
13369e377b2SArd Biesheuvel efi_memmap_unmap();
13469e377b2SArd Biesheuvel
13569e377b2SArd Biesheuvel load_cpu_state_table();
13669e377b2SArd Biesheuvel }
137