// SPDX-License-Identifier: GPL-2.0-only
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <linux/cc_platform.h>
#include <linux/efi.h>
#include <linux/pgtable.h>
#include <linux/kmsan.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/efi.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/memtype.h>
#include <asm/setup.h>

#include "physaddr.h"

/*
 * Descriptor controlling ioremap() behavior.
 */
struct ioremap_desc {
	unsigned int flags;
};

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/* Does the range (or a subset of it) contain normal RAM? */
static unsigned int __ioremap_check_ram(struct resource *res)
{
	unsigned long start_pfn, stop_pfn;
	unsigned long i;

	if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
		return 0;

	start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
	stop_pfn = (res->end + 1) >> PAGE_SHIFT;
	if (stop_pfn > start_pfn) {
		for (i = 0; i < (stop_pfn - start_pfn); ++i)
			if (pfn_valid(start_pfn + i) &&
			    !PageReserved(pfn_to_page(start_pfn + i)))
				return IORES_MAP_SYSTEM_RAM;
	}

	return 0;
}

/*
 * In a SEV guest, NONE and RESERVED should not be mapped encrypted because
 * the whole guest memory is already encrypted.
 */
static unsigned int __ioremap_check_encrypted(struct resource *res)
{
	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return 0;

	switch (res->desc) {
	case IORES_DESC_NONE:
	case IORES_DESC_RESERVED:
		break;
	default:
		return IORES_MAP_ENCRYPTED;
	}

	return 0;
}

/*
 * The EFI runtime services data area is not covered by walk_mem_res(), but must
 * be mapped encrypted when SEV is active.
 */
static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
{
	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	if (x86_platform.hyper.is_private_mmio(addr)) {
		desc->flags |= IORES_MAP_ENCRYPTED;
		return;
	}

	if (!IS_ENABLED(CONFIG_EFI))
		return;

	if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
	    (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
	     efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
		desc->flags |= IORES_MAP_ENCRYPTED;
}

static int __ioremap_collect_map_flags(struct resource *res, void *arg)
{
	struct ioremap_desc *desc = arg;

	if (!(desc->flags & IORES_MAP_SYSTEM_RAM))
		desc->flags |= __ioremap_check_ram(res);

	if (!(desc->flags & IORES_MAP_ENCRYPTED))
		desc->flags |= __ioremap_check_encrypted(res);

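	/*
	 * A non-zero return value stops walk_mem_res() early: once both map
	 * flags have been collected, later ranges cannot add anything new.
	 */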
	return ((desc->flags & (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED)) ==
			       (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED));
}

/*
 * To avoid multiple resource walks, this function walks resources marked as
 * IORESOURCE_MEM and IORESOURCE_BUSY, looking for system RAM and/or a
 * resource not described as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
 *
 * After that, deal with misc other ranges in __ioremap_check_other() which do
 * not fall into the above category.
 */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
				struct ioremap_desc *desc)
{
	u64 start, end;

	start = (u64)addr;
	end = start + size - 1;
	memset(desc, 0, sizeof(struct ioremap_desc));

	walk_mem_res(start, end, desc, __ioremap_collect_map_flags);

	__ioremap_check_other(addr, desc);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to smaller pages, toward 4KB,
 * when a mapping range is covered by a non-WB type of MTRR.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *
__ioremap_caller(resource_size_t phys_addr, unsigned long size,
		 enum page_cache_mode pcm, void *caller, bool encrypted)
{
	unsigned long offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct ioremap_desc io_desc;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	__ioremap_check_mem(phys_addr, size, &io_desc);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (io_desc.flags & IORES_MAP_SYSTEM_RAM) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Mask out any bits not part of the actual physical
	 * address, like memory encryption bits.
	 */
	phys_addr &= PHYSICAL_PAGE_MASK;

	retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
						pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap memtype_reserve failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	/*
	 * If the page being mapped is in memory and SEV is active then
	 * make sure the memory encryption attribute is enabled in the
	 * resulting mapping.
	 * In TDX guests, memory is marked private by default. If encryption
	 * is not requested (via the 'encrypted' argument), explicitly set the
	 * decrypt attribute in all IOREMAPPED memory.
	 */
	prot = PAGE_KERNEL_IO;
	if ((io_desc.flags & IORES_MAP_ENCRYPTED) || encrypted)
		prot = pgprot_encrypted(prot);
	else
		prot = pgprot_decrypted(prot);

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (memtype_kernel_map_sync(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	memtype_free(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap);

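/*
 * Illustrative sketch (not part of this file): a typical PCI driver pairs
 * ioremap() with iounmap() around MMIO accesses. The BAR index and the
 * register offset below are hypothetical.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(pci_resource_start(pdev, 0),
 *		       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x10);
 *	(void)readl(regs + 0x10);	   (read back to post the write)
 *	iounmap(regs);
 */
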
/**
 * ioremap_uc     -   map bus memory into CPU space as strongly uncachable
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC.  This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL_GPL(ioremap_uc);

/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
					__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wc);

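/*
 * Illustrative sketch (not part of this file): write-combined mappings suit
 * framebuffer-like memory where streaming writes matter more than read
 * ordering. The BAR index is hypothetical.
 *
 *	void __iomem *fb = ioremap_wc(pci_resource_start(pdev, 1),
 *				      pci_resource_len(pdev, 1));
 *	if (fb) {
 *		memset_io(fb, 0, pci_resource_len(pdev, 1));
 *		iounmap(fb);
 *	}
 */
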
/**
 * ioremap_wt	-	map memory into CPU space write through
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
					__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wt);

void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), true);
}
EXPORT_SYMBOL(ioremap_encrypted);

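/*
 * Illustrative sketch (not part of this file): kdump-style readers of an SEV
 * guest's old memory map pages explicitly encrypted, roughly as the vmcore
 * code does; 'pfn', 'offset' and 'count' are hypothetical.
 *
 *	void __iomem *p = ioremap_encrypted(pfn << PAGE_SHIFT, PAGE_SIZE);
 *	if (p) {
 *		memcpy_fromio(buf, p + offset, count);
 *		iounmap(p);
 *	}
 */
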
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * The PCI/ISA range special-casing was removed from __ioremap()
	 * so this check, in theory, can be removed. However, there are
	 * cases where iounmap() is called for addresses not obtained via
	 * ioremap() (vga16fb for example). Add a warning so that these
	 * cases can be caught and fixed.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
		WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
		return;
	}

	mmiotrace_iounmap(addr);

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	kmsan_iounmap_page_range((unsigned long)addr,
		(unsigned long)addr + get_vm_area_size(p));
	memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* memremap() maps if RAM, otherwise falls back to ioremap() */
	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);

	/* Only add the offset on success and return NULL if memremap() failed */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	memunmap((void *)((unsigned long)addr & PAGE_MASK));
}

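/*
 * Illustrative sketch (not part of this file): the /dev/mem read path wraps
 * its copy in these helpers, roughly:
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	remaining = copy_to_user(buf, ptr, sz);
 *	unxlate_dev_mem_ptr(p, ptr);
 */
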
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * Examine the physical address to determine if it is an area of memory
 * that should be mapped decrypted.  If the memory is not part of the
 * kernel usable area, it was accessed and created decrypted, so these
 * areas should be mapped decrypted. And since the encryption key can
 * change across reboots, persistent memory should also be mapped
 * decrypted.
 *
 * If SEV is active, that implies that BIOS/UEFI also ran encrypted so
 * only persistent memory should be mapped decrypted.
 */
static bool memremap_should_map_decrypted(resource_size_t phys_addr,
					  unsigned long size)
{
	int is_pmem;

	/*
	 * Check if the address is part of a persistent memory region.
	 * This check covers areas added by E820, EFI and ACPI.
	 */
	is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
				    IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem != REGION_DISJOINT)
		return true;

	/*
	 * Check if the non-volatile attribute is set for an EFI
	 * reserved area.
	 */
	if (efi_enabled(EFI_BOOT)) {
		switch (efi_mem_type(phys_addr)) {
		case EFI_RESERVED_TYPE:
			if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
				return true;
			break;
		default:
			break;
		}
	}

	/* Check if the address is outside kernel usable area */
	switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
	case E820_TYPE_RESERVED:
	case E820_TYPE_ACPI:
	case E820_TYPE_NVS:
	case E820_TYPE_UNUSABLE:
		/* For SEV, these areas are encrypted */
		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
			break;
		fallthrough;

	case E820_TYPE_PRAM:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is EFI data. Check
 * it against the boot params structure and EFI tables and memory types.
 */
static bool memremap_is_efi_data(resource_size_t phys_addr,
				 unsigned long size)
{
	u64 paddr;

	/* Check if the address is part of EFI boot/runtime data */
	if (!efi_enabled(EFI_BOOT))
		return false;

	paddr = boot_params.efi_info.efi_memmap_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_memmap;
	if (phys_addr == paddr)
		return true;

	paddr = boot_params.efi_info.efi_systab_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_systab;
	if (phys_addr == paddr)
		return true;

	if (efi_is_table_address(phys_addr))
		return true;

	switch (efi_mem_type(phys_addr)) {
	case EFI_BOOT_SERVICES_DATA:
	case EFI_RUNTIME_SERVICES_DATA:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain.
 */
static bool memremap_is_setup_data(resource_size_t phys_addr,
				   unsigned long size)
{
	struct setup_indirect *indirect;
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = memremap(paddr, sizeof(*data),
				MEMREMAP_WB | MEMREMAP_DEC);
		if (!data) {
			pr_warn("failed to memremap setup_data entry\n");
			return false;
		}

		paddr_next = data->next;
		len = data->len;

		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
			memunmap(data);
			return true;
		}

		if (data->type == SETUP_INDIRECT) {
			memunmap(data);
			data = memremap(paddr, sizeof(*data) + len,
					MEMREMAP_WB | MEMREMAP_DEC);
			if (!data) {
				pr_warn("failed to memremap indirect setup_data\n");
				return false;
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT) {
				paddr = indirect->addr;
				len = indirect->len;
			}
		}

		memunmap(data);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain (early boot version).
 */
static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
						unsigned long size)
{
	struct setup_indirect *indirect;
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len, size;
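		/*
		 * Note: this local 'size' (the length passed to the early
		 * map/unmap calls below) shadows the function parameter of
		 * the same name.
		 */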

		if (phys_addr == paddr)
			return true;

		data = early_memremap_decrypted(paddr, sizeof(*data));
		if (!data) {
			pr_warn("failed to early memremap setup_data entry\n");
			return false;
		}

		size = sizeof(*data);

		paddr_next = data->next;
		len = data->len;

		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
			early_memunmap(data, sizeof(*data));
			return true;
		}

		if (data->type == SETUP_INDIRECT) {
			size += len;
			early_memunmap(data, sizeof(*data));
			data = early_memremap_decrypted(paddr, size);
			if (!data) {
				pr_warn("failed to early memremap indirect setup_data\n");
				return false;
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT) {
				paddr = indirect->addr;
				len = indirect->len;
			}
		}

		early_memunmap(data, size);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
				 unsigned long flags)
{
	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return true;

	if (flags & MEMREMAP_ENC)
		return true;

	if (flags & MEMREMAP_DEC)
		return false;

	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		if (memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			return false;
	}

	return !memremap_should_map_decrypted(phys_addr, size);
}

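/*
 * Illustrative sketch (not part of this file): callers can force the decision
 * made above with the memremap() flags, e.g.
 *
 *	void *va = memremap(phys, len, MEMREMAP_WB | MEMREMAP_DEC);
 *	if (va) {
 *		(access 'va' with a decrypted mapping)
 *		memunmap(va);
 *	}
 */
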
/*
 * Architecture override of __weak function to adjust the protection attributes
 * used when remapping memory. By default, early_memremap() will map the data
 * as encrypted. Determine if an encrypted mapping should not be done and set
 * the appropriate protection attributes.
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					     unsigned long size,
					     pgprot_t prot)
{
	bool encrypted_prot;

	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return prot;

	encrypted_prot = true;

	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		if (early_memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			encrypted_prot = false;
	}

	if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
		encrypted_prot = false;

	return encrypted_prot ? pgprot_encrypted(prot)
			      : pgprot_decrypted(prot);
}

bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{
	return arch_memremap_can_ram_remap(phys_addr, size, 0);
}

/* Remap memory with encryption */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
}

/*
 * Remap memory with encryption and write protection - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	if (!x86_has_pat_wp())
		return NULL;
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
}

/* Remap memory without encryption */
void __init *early_memremap_decrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
}

/*
 * Remap memory without encryption and with write protection - cannot be
 * called before pat_init() is called
 */
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	if (!x86_has_pat_wp())
		return NULL;
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}
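
/*
 * Illustrative sketch (not part of this file): early boot code that must read
 * a structure the firmware or bootloader left unencrypted can use the
 * decrypted variant; 'pa' is a hypothetical physical address.
 *
 *	struct setup_data *sd = early_memremap_decrypted(pa, sizeof(*sd));
 *	if (sd) {
 *		u32 len = sd->len;
 *		early_memunmap(sd, sizeof(*sd));
 *	}
 */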
#endif	/* CONFIG_AMD_MEM_ENCRYPT */

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(addr)];
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

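/*
 * Illustrative sketch (not part of this file): once early_ioremap_init() has
 * run, boot code can temporarily map firmware tables through the fixmap
 * slots; 'table_pa' and 'table_len' are hypothetical.
 *
 *	void __iomem *p = early_ioremap(table_pa, table_len);
 *	if (p) {
 *		(parse the table)
 *		early_iounmap(p, table_len);
 *	}
 */
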
void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	/* Sanitize 'prot' against any unsupported bits: */
	pgprot_val(flags) &= __supported_pte_mask;

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	flush_tlb_one_kernel(addr);
}