xref: /openbmc/linux/arch/x86/mm/ioremap.c (revision 82b5d164)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <linux/mem_encrypt.h>
#include <linux/efi.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/efi.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>
#include <asm/setup.h>

#include "physaddr.h"

/*
 * Descriptor controlling ioremap() behavior.
 */
struct ioremap_desc {
	unsigned int flags;
};

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/* Does the range (or a subset of it) contain normal RAM? */
static unsigned int __ioremap_check_ram(struct resource *res)
{
	unsigned long start_pfn, stop_pfn;
	unsigned long i;

	if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
		return 0;

	start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
	stop_pfn = (res->end + 1) >> PAGE_SHIFT;
	if (stop_pfn > start_pfn) {
		for (i = 0; i < (stop_pfn - start_pfn); ++i)
			if (pfn_valid(start_pfn + i) &&
			    !PageReserved(pfn_to_page(start_pfn + i)))
				return IORES_MAP_SYSTEM_RAM;
	}

	return 0;
}
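
/*
 * Worked example (hypothetical numbers, for illustration only): because
 * start_pfn is rounded up and stop_pfn is derived from res->end + 1, partial
 * pages at the edges of the resource are skipped. A resource covering
 * 0x1800-0x37ff gives start_pfn = 2 and stop_pfn = 3, so only the fully
 * covered page at 0x2000-0x2fff is tested for System RAM.
 */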

/*
 * In a SEV guest, NONE and RESERVED should not be mapped encrypted because
 * the whole memory there is already encrypted.
 */
static unsigned int __ioremap_check_encrypted(struct resource *res)
{
	if (!sev_active())
		return 0;

	switch (res->desc) {
	case IORES_DESC_NONE:
	case IORES_DESC_RESERVED:
		break;
	default:
		return IORES_MAP_ENCRYPTED;
	}

	return 0;
}

static int __ioremap_collect_map_flags(struct resource *res, void *arg)
{
	struct ioremap_desc *desc = arg;

	if (!(desc->flags & IORES_MAP_SYSTEM_RAM))
		desc->flags |= __ioremap_check_ram(res);

	if (!(desc->flags & IORES_MAP_ENCRYPTED))
		desc->flags |= __ioremap_check_encrypted(res);

	return ((desc->flags & (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED)) ==
			       (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED));
}

/*
 * To avoid multiple resource walks, this function walks resources marked as
 * IORESOURCE_MEM and IORESOURCE_BUSY, looking for system RAM and/or a
 * resource that is not described as IORES_DESC_NONE (e.g.
 * IORES_DESC_ACPI_TABLES).
 */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
				struct ioremap_desc *desc)
{
	u64 start, end;

	start = (u64)addr;
	end = start + size - 1;
	memset(desc, 0, sizeof(struct ioremap_desc));

	walk_mem_res(start, end, desc, __ioremap_collect_map_flags);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to using smaller pages, down to
 * 4KB, when a mapping range is covered by non-WB type MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
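/*
 * For instance (a hypothetical request, for illustration): a caller asking
 * for phys_addr 0x10001004 with size 8 ends up with offset = 0x4, a
 * page-aligned mapping of 0x10001000-0x10001fff, and a returned pointer of
 * area->addr + 0x4, so the unaligned request is honoured without the caller
 * having to care about page alignment.
 */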
static void __iomem *
__ioremap_caller(resource_size_t phys_addr, unsigned long size,
		 enum page_cache_mode pcm, void *caller, bool encrypted)
{
	unsigned long offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct ioremap_desc io_desc;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	__ioremap_check_mem(phys_addr, size, &io_desc);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (io_desc.flags & IORES_MAP_SYSTEM_RAM) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	/*
	 * If the page being mapped is in memory and SEV is active then
	 * make sure the memory encryption attribute is enabled in the
	 * resulting mapping.
	 */
	prot = PAGE_KERNEL_IO;
	if ((io_desc.flags & IORES_MAP_ENCRYPTED) || encrypted)
		prot = pgprot_encrypted(prot);

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than a single BAR in the iomem
	 * resource tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform-specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_nocache);
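
/*
 * Sketch of typical driver use (the BAR index and register offset below are
 * made up for illustration; error handling trimmed):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);
 *	...
 *	iounmap(regs);
 */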

/**
 * ioremap_uc     -   map bus memory into CPU space as strongly uncachable
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_uc performs a platform-specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC.  This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular, driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL_GPL(ioremap_uc);
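
/*
 * A sketch of when the strong-UC variant matters (the device is
 * hypothetical): if a BAR happens to be covered by a WC MTRR, the UC-
 * mapping returned by ioremap_nocache() can still end up write-combined,
 * whereas ioremap_uc() keeps it strictly uncached:
 *
 *	mmio = ioremap_uc(pci_resource_start(pdev, bar),
 *			  pci_resource_len(pdev, bar));
 */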

/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
					__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wc);
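
/*
 * Sketch of a common pattern (the framebuffer BAR below is illustrative):
 * streaming writes to device memory such as a framebuffer benefit from a WC
 * mapping, while the register BAR stays uncached:
 *
 *	info->screen_base = ioremap_wc(pci_resource_start(pdev, 1),
 *				       pci_resource_len(pdev, 1));
 */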

/**
 * ioremap_wt	-	map memory into CPU space write through
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
					__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wt);

void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), true);
}
EXPORT_SYMBOL(ioremap_encrypted);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * The PCI/ISA range special-casing was removed from __ioremap()
	 * so this check, in theory, can be removed. However, there are
	 * cases where iounmap() is called for addresses not obtained via
	 * ioremap() (vga16fb for example). Add a warning so that these
	 * cases can be caught and fixed.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
		WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
		return;
	}

	mmiotrace_iounmap(addr);

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

int __init arch_ioremap_p4d_supported(void)
{
	return 0;
}

int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return boot_cpu_has(X86_FEATURE_GBPAGES);
#else
	return 0;
#endif
}

int __init arch_ioremap_pmd_supported(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* memremap() maps if RAM, otherwise falls back to ioremap() */
	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);

	/* Only add the offset on success and return NULL if memremap() failed */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	memunmap((void *)((unsigned long)addr & PAGE_MASK));
}
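
/*
 * A minimal sketch of how a /dev/mem style reader is expected to use the
 * pair above (error handling and the user copy are omitted for brevity):
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *
 *	if (ptr) {
 *		memcpy(buf, ptr, count);
 *		unxlate_dev_mem_ptr(p, ptr);
 *	}
 */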

/*
 * Examine the physical address to determine if it is an area of memory
 * that should be mapped decrypted.  If the memory is not part of the
 * kernel usable area, it was accessed and created decrypted, so these
 * areas should be mapped decrypted. And since the encryption key can
 * change across reboots, persistent memory should also be mapped
 * decrypted.
 *
 * If SEV is active, that implies that BIOS/UEFI also ran encrypted, so
 * only persistent memory should be mapped decrypted.
 */
static bool memremap_should_map_decrypted(resource_size_t phys_addr,
					  unsigned long size)
{
	int is_pmem;

	/*
	 * Check if the address is part of a persistent memory region.
	 * This check covers areas added by E820, EFI and ACPI.
	 */
	is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
				    IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem != REGION_DISJOINT)
		return true;

	/*
	 * Check if the non-volatile attribute is set for an EFI
	 * reserved area.
	 */
	if (efi_enabled(EFI_BOOT)) {
		switch (efi_mem_type(phys_addr)) {
		case EFI_RESERVED_TYPE:
			if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
				return true;
			break;
		default:
			break;
		}
	}

	/* Check if the address is outside kernel usable area */
	switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
	case E820_TYPE_RESERVED:
	case E820_TYPE_ACPI:
	case E820_TYPE_NVS:
	case E820_TYPE_UNUSABLE:
		/* For SEV, these areas are encrypted */
		if (sev_active())
			break;
		/* Fallthrough */

	case E820_TYPE_PRAM:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is EFI data. Check
 * it against the boot params structure and EFI tables and memory types.
 */
static bool memremap_is_efi_data(resource_size_t phys_addr,
				 unsigned long size)
{
	u64 paddr;

	/* Check if the address is part of EFI boot/runtime data */
	if (!efi_enabled(EFI_BOOT))
		return false;

	paddr = boot_params.efi_info.efi_memmap_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_memmap;
	if (phys_addr == paddr)
		return true;

	paddr = boot_params.efi_info.efi_systab_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_systab;
	if (phys_addr == paddr)
		return true;

	if (efi_is_table_address(phys_addr))
		return true;

	switch (efi_mem_type(phys_addr)) {
	case EFI_BOOT_SERVICES_DATA:
	case EFI_RUNTIME_SERVICES_DATA:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain.
 */
static bool memremap_is_setup_data(resource_size_t phys_addr,
				   unsigned long size)
{
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = memremap(paddr, sizeof(*data),
				MEMREMAP_WB | MEMREMAP_DEC);

		paddr_next = data->next;
		len = data->len;

		memunmap(data);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain (early boot version).
 */
static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
						unsigned long size)
{
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = early_memremap_decrypted(paddr, sizeof(*data));

		paddr_next = data->next;
		len = data->len;

		early_memunmap(data, sizeof(*data));

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
				 unsigned long flags)
{
	if (!mem_encrypt_active())
		return true;

	if (flags & MEMREMAP_ENC)
		return true;

	if (flags & MEMREMAP_DEC)
		return false;

	if (sme_active()) {
		if (memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			return false;
	}

	return !memremap_should_map_decrypted(phys_addr, size);
}
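
/*
 * From a caller's point of view, this is what decides whether a plain
 * memremap() of RAM comes back encrypted under SME/SEV. A caller that knows
 * better can force the attribute explicitly, e.g. (illustrative only):
 *
 *	virt = memremap(phys, size, MEMREMAP_WB | MEMREMAP_DEC);
 */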

/*
 * Architecture override of __weak function to adjust the protection attributes
 * used when remapping memory. By default, early_memremap() will map the data
 * as encrypted. Determine if an encrypted mapping should not be done and set
 * the appropriate protection attributes.
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					     unsigned long size,
					     pgprot_t prot)
{
	bool encrypted_prot;

	if (!mem_encrypt_active())
		return prot;

	encrypted_prot = true;

	if (sme_active()) {
		if (early_memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			encrypted_prot = false;
	}

	if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
		encrypted_prot = false;

	return encrypted_prot ? pgprot_encrypted(prot)
			      : pgprot_decrypted(prot);
}

bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{
	return arch_memremap_can_ram_remap(phys_addr, size, 0);
}

#ifdef CONFIG_AMD_MEM_ENCRYPT
/* Remap memory with encryption */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
}

/*
 * Remap memory encrypted and write-protected - cannot be called before
 * pat_init() has run.
 */
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	/* Be sure the write-protect PAT entry is set for write-protect */
	if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
		return NULL;

	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
}

/* Remap memory without encryption */
void __init *early_memremap_decrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
}

/*
 * Remap memory decrypted and write-protected - cannot be called before
 * pat_init() has run.
 */
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	/* Be sure the write-protect PAT entry is set for write-protect */
	if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
		return NULL;

	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}
#endif	/* CONFIG_AMD_MEM_ENCRYPT */

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(addr)];
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	/* Sanitize 'flags' against any unsupported bits: */
	pgprot_val(flags) &= __supported_pte_mask;

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one_kernel(addr);
}
862