xref: /openbmc/linux/arch/arm/kernel/efi.c (revision cf1d2ffcc6f17b422239f6ab34b078945d07f9aa)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <linux/efi.h>
#include <linux/memblock.h>
#include <asm/efi.h>
#include <asm/mach/map.h>
#include <asm/mmu_context.h>

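/*
 * apply_to_page_range() callback: tighten the attributes of a single PTE
 * according to the EFI memory descriptor passed in via @data. EFI_MEMORY_RO
 * yields a read-only PTE and EFI_MEMORY_XP a non-executable one.
 */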
static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
{
	efi_memory_desc_t *md = data;
	pte_t pte = *ptep;

	if (md->attribute & EFI_MEMORY_RO)
		pte = set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
	if (md->attribute & EFI_MEMORY_XP)
		pte = set_pte_bit(pte, __pgprot(L_PTE_XN));
	set_pte_ext(ptep, pte, PTE_EXT_NG);
	return 0;
}

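/*
 * Apply the stricter EFI_MEMORY_RO/EFI_MEMORY_XP attributes to an already
 * mapped region, provided it was mapped with pages rather than sections.
 * The third argument is not used on 32-bit ARM.
 */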
int __init efi_set_mapping_permissions(struct mm_struct *mm,
				       efi_memory_desc_t *md,
				       bool ignored)
{
	unsigned long base, size;

	base = md->virt_addr;
	size = md->num_pages << EFI_PAGE_SHIFT;

	/*
	 * We can only use apply_to_page_range() if we can guarantee that the
	 * entire region was mapped using pages. This should be the case if the
	 * region does not cover any naturally aligned SECTION_SIZE sized
	 * blocks.
	 */
	if (round_down(base + size, SECTION_SIZE) <
	    round_up(base, SECTION_SIZE) + SECTION_SIZE)
		return apply_to_page_range(mm, base, size, set_permissions, md);

	return 0;
}

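/*
 * Map a single EFI memory descriptor into @mm. The memory type is chosen
 * from the descriptor's cacheability attributes, and any RO/XP permission
 * attributes are applied on top of the resulting mapping.
 */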
int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
	struct map_desc desc = {
		.virtual	= md->virt_addr,
		.pfn		= __phys_to_pfn(md->phys_addr),
		.length		= md->num_pages * EFI_PAGE_SIZE,
	};

	/*
	 * Order is important here: memory regions may have all of the
	 * bits below set (and usually do), so we check them in order of
	 * preference.
	 */
	if (md->attribute & EFI_MEMORY_WB)
		desc.type = MT_MEMORY_RWX;
	else if (md->attribute & EFI_MEMORY_WT)
		desc.type = MT_MEMORY_RWX_NONCACHED;
	else if (md->attribute & EFI_MEMORY_WC)
		desc.type = MT_DEVICE_WC;
	else
		desc.type = MT_DEVICE;

	create_mapping_late(mm, &desc, true);

	/*
	 * If stricter permissions were specified, apply them now.
	 */
	if (md->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))
		return efi_set_mapping_permissions(mm, md, false);
	return 0;
}

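/*
 * The EFI stub records the CPU state around ExitBootServices() in a
 * configuration table identified by LINUX_EFI_ARM_CPU_STATE_TABLE_GUID;
 * its address is captured here when the EFI config tables are parsed.
 */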
static unsigned long __initdata cpu_state_table = EFI_INVALID_TABLE_ADDR;

const efi_config_table_type_t efi_arch_tables[] __initconst = {
	{LINUX_EFI_ARM_CPU_STATE_TABLE_GUID, &cpu_state_table},
	{}
};

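/*
 * Map the CPU entry state table recorded by the EFI stub and warn if the
 * firmware entered the stub, or returned from ExitBootServices(), with the
 * MMU and data cache disabled. The raw CPSR/SCTLR values are dumped when a
 * problem is detected or when EFI debugging is enabled.
 */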
static void __init load_cpu_state_table(void)
{
	if (cpu_state_table != EFI_INVALID_TABLE_ADDR) {
		struct efi_arm_entry_state *state;
		bool dump_state = true;

		state = early_memremap_ro(cpu_state_table,
					  sizeof(struct efi_arm_entry_state));
		if (state == NULL) {
			pr_warn("Unable to map CPU entry state table.\n");
			return;
		}

		if ((state->sctlr_before_ebs & 1) == 0)
			pr_warn(FW_BUG "EFI stub was entered with MMU and Dcache disabled, please fix your firmware!\n");
		else if ((state->sctlr_after_ebs & 1) == 0)
			pr_warn(FW_BUG "ExitBootServices() returned with MMU and Dcache disabled, please fix your firmware!\n");
		else
			dump_state = false;

		if (dump_state || efi_enabled(EFI_DBG)) {
			pr_info("CPSR at EFI stub entry        : 0x%08x\n",
				state->cpsr_before_ebs);
			pr_info("SCTLR at EFI stub entry       : 0x%08x\n",
				state->sctlr_before_ebs);
			pr_info("CPSR after ExitBootServices() : 0x%08x\n",
				state->cpsr_after_ebs);
			pr_info("SCTLR after ExitBootServices(): 0x%08x\n",
				state->sctlr_after_ebs);
		}
		early_memunmap(state, sizeof(struct efi_arm_entry_state));
	}
}

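/*
 * Early ARM-specific EFI initialisation: parse the EFI tables and memory map,
 * provide sane dummycon defaults for an EFI framebuffer, tear down the early
 * memory map mapping before paging_init(), and report the CPU entry state
 * recorded by the EFI stub.
 */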
void __init arm_efi_init(void)
{
	efi_init();

	if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI) {
		/* dummycon on ARM needs non-zero values for columns/lines */
		screen_info.orig_video_cols = 80;
		screen_info.orig_video_lines = 25;
	}

	/* ARM does not permit early mappings to persist across paging_init() */
	efi_memmap_unmap();

	load_cpu_state_table();
}