// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <linux/efi.h>
#include <asm/efi.h>
#include <asm/mach/map.h>
#include <asm/mmu_context.h>

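/*
 * Page-walk callback for apply_to_page_range(): tighten the permissions of
 * a single PTE according to the attributes of the EFI memory descriptor
 * passed in via @data.
 */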
static int __init set_permissions(pte_t *ptep, pgtable_t token,
				  unsigned long addr, void *data)
{
	efi_memory_desc_t *md = data;
	pte_t pte = *ptep;

	if (md->attribute & EFI_MEMORY_RO)
		pte = set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
	if (md->attribute & EFI_MEMORY_XP)
		pte = set_pte_bit(pte, __pgprot(L_PTE_XN));
	set_pte_ext(ptep, pte, PTE_EXT_NG);
	return 0;
}

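/*
 * Walk the runtime mapping of an EFI region and apply the RO/XP attributes
 * recorded in its memory descriptor, provided the region is known to be
 * mapped at page granularity.
 */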
int __init efi_set_mapping_permissions(struct mm_struct *mm,
				       efi_memory_desc_t *md)
{
	unsigned long base, size;

	base = md->virt_addr;
	size = md->num_pages << EFI_PAGE_SHIFT;

	/*
	 * We can only use apply_to_page_range() if we can guarantee that the
	 * entire region was mapped using pages. This should be the case if the
	 * region does not cover any naturally aligned SECTION_SIZE sized
	 * blocks.
	 */
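	/*
	 * Illustrative worked example (an addition, assuming the usual 1 MiB
	 * ARM section size): base = 0x7f8f0000 and size = 0x20000 give
	 * round_down(base + size) == round_up(base) == 0x7f900000, so the
	 * test below is true; the region straddles a section boundary but
	 * contains no whole aligned section, hence it was mapped with pages.
	 */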
	if (round_down(base + size, SECTION_SIZE) <
	    round_up(base, SECTION_SIZE) + SECTION_SIZE)
		return apply_to_page_range(mm, base, size, set_permissions, md);

	return 0;
}

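/*
 * Install the runtime mapping for a single EFI memory descriptor, picking
 * the strongest memory type the region's attributes allow.
 */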
int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
	struct map_desc desc = {
		.virtual	= md->virt_addr,
		.pfn		= __phys_to_pfn(md->phys_addr),
		.length		= md->num_pages * EFI_PAGE_SIZE,
	};

	/*
	 * Order is important here: memory regions may have all of the
	 * bits below set (and usually do), so we check them in order of
	 * preference.
	 */
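	/*
	 * For example (illustrative, based on typical UEFI memory maps):
	 * normal RAM usually advertises UC|WC|WT|WB all at once, so checking
	 * WB first maps it as fully cacheable normal memory rather than a
	 * weaker, uncached type.
	 */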
	if (md->attribute & EFI_MEMORY_WB)
		desc.type = MT_MEMORY_RWX;
	else if (md->attribute & EFI_MEMORY_WT)
		desc.type = MT_MEMORY_RWX_NONCACHED;
	else if (md->attribute & EFI_MEMORY_WC)
		desc.type = MT_DEVICE_WC;
	else
		desc.type = MT_DEVICE;

	create_mapping_late(mm, &desc, true);

	/*
	 * If stricter permissions were specified, apply them now.
	 */
	if (md->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))
		return efi_set_mapping_permissions(mm, md);
	return 0;
}
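
/*
 * Hedged usage sketch (an assumption, not part of this file): both entry
 * points above are expected to be driven by the generic ARM EFI runtime
 * support in drivers/firmware/efi/arm-runtime.c, roughly as follows:
 *
 *	for_each_efi_memory_desc(md)
 *		efi_create_mapping(&efi_mm, md);
 *
 *	efi_memattr_apply_permissions(&efi_mm, efi_set_mapping_permissions);
 */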