xref: /openbmc/linux/arch/x86/mm/ioremap.c (revision 8440bb9b)
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <linux/mem_encrypt.h>
#include <linux/efi.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>
#include <asm/setup.h>

#include "physaddr.h"

struct ioremap_mem_flags {
	bool system_ram;
	bool desc_other;
};

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
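
/*
 * Illustrative use (not called from this file): the PAT code keeps the
 * direct map in sync with an ioremap'ed alias via this helper, e.g.
 * switching a two-page range of the direct mapping to write-combining:
 *
 *	ioremap_change_attr((unsigned long)__va(phys), 2 * PAGE_SIZE,
 *			    _PAGE_CACHE_MODE_WC);
 *
 * Note that size is a byte count and is converted to whole pages.
 */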

static bool __ioremap_check_ram(struct resource *res)
{
	unsigned long start_pfn, stop_pfn;
	unsigned long i;

	if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
		return false;

	start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
	stop_pfn = (res->end + 1) >> PAGE_SHIFT;
	if (stop_pfn > start_pfn) {
		for (i = 0; i < (stop_pfn - start_pfn); ++i)
			if (pfn_valid(start_pfn + i) &&
			    !PageReserved(pfn_to_page(start_pfn + i)))
				return true;
	}

	return false;
}

static int __ioremap_check_desc_other(struct resource *res)
{
	return (res->desc != IORES_DESC_NONE);
}

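/*
 * Callback for walk_mem_res(): accumulate what each resource contributes in
 * *arg and return non-zero (which stops the walk early) once both system RAM
 * and a specifically-described resource have been seen.
 */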
static int __ioremap_res_check(struct resource *res, void *arg)
{
	struct ioremap_mem_flags *flags = arg;

	if (!flags->system_ram)
		flags->system_ram = __ioremap_check_ram(res);

	if (!flags->desc_other)
		flags->desc_other = __ioremap_check_desc_other(res);

	return flags->system_ram && flags->desc_other;
}

/*
 * To avoid multiple resource walks, this function walks resources marked as
 * IORESOURCE_MEM and IORESOURCE_BUSY, looking for system RAM and/or a
 * resource that is not described as IORES_DESC_NONE (e.g.
 * IORES_DESC_ACPI_TABLES).
 */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
				struct ioremap_mem_flags *flags)
{
	u64 start, end;

	start = (u64)addr;
	end = start + size - 1;
	memset(flags, 0, sizeof(*flags));

	walk_mem_res(start, end, flags, __ioremap_res_check);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to smaller pages (down to 4KB)
 * when the mapping range is covered by non-WB type MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm,
		void *caller, bool encrypted)
{
	unsigned long offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct ioremap_mem_flags mem_flags;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	__ioremap_check_mem(phys_addr, size, &mem_flags);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (mem_flags.system_ram) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	/*
	 * If the page being mapped is in memory and SEV is active then
	 * make sure the memory encryption attribute is enabled in the
	 * resulting mapping.
	 */
	prot = PAGE_KERNEL_IO;
	if ((sev_active() && mem_flags.desc_other) || encrypted)
		prot = pgprot_encrypted(prot);

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than one BAR in the iomem resource
	 * tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_nocache);
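
/*
 * Example (illustrative, not part of this file): a typical PCI driver maps a
 * register BAR uncached, accesses it through the mmio helpers and unmaps it
 * again when done. REG_STATUS is a made-up register offset:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + REG_STATUS);
 *	...
 *	iounmap(regs);
 *
 * ioremap_uc(), ioremap_wc() and ioremap_wt() below follow the same pattern
 * with different cache attributes.
 */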

/**
 * ioremap_uc     -   map bus memory into CPU space as strongly uncachable
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC.  This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL_GPL(ioremap_uc);
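
/*
 * Example (illustrative): registers that must never be write-combined, even
 * if an MTRR covering the same BAR is of WC type, can be mapped with a
 * strong UC preference. bar_start/bar_len stand in for the device's real
 * PCI resource:
 *
 *	void __iomem *regs = ioremap_uc(bar_start, bar_len);
 */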

/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
					__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wc);
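
/*
 * Example (illustrative): write combining is typically used for frame
 * buffers and other large, write-mostly apertures. fb_phys/fb_len stand in
 * for a real device aperture:
 *
 *	void __iomem *fb = ioremap_wc(fb_phys, fb_len);
 *	if (fb)
 *		memset_io(fb, 0, fb_len);
 */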

/**
 * ioremap_wt	-	map memory into CPU space write through
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
					__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wt);
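
/*
 * Example (illustrative): write through suits memory-like devices where
 * reads may be satisfied from the CPU cache but every write must reach the
 * device, e.g. a battery-backed SRAM window (sram_phys/sram_len are
 * placeholders):
 *
 *	void __iomem *sram = ioremap_wt(sram_phys, sram_len);
 */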

void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), true);
}
EXPORT_SYMBOL(ioremap_encrypted);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * The PCI/ISA range special-casing was removed from __ioremap()
	 * so this check, in theory, can be removed. However, there are
	 * cases where iounmap() is called for addresses not obtained via
	 * ioremap() (vga16fb for example). Add a warning so that these
	 * cases can be caught and fixed.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
		WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
		return;
	}

	mmiotrace_iounmap(addr);

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return boot_cpu_has(X86_FEATURE_GBPAGES);
#else
	return 0;
#endif
}

int __init arch_ioremap_pmd_supported(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* memremap() maps if RAM, otherwise falls back to ioremap() */
	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);

	/* Only add the offset on success and return NULL if memremap() failed */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}
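
/*
 * Sketch of how the /dev/mem read path uses the pair of helpers above
 * (simplified from drivers/char/mem.c), one page at a time:
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, sz))
 *		err = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */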

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	memunmap((void *)((unsigned long)addr & PAGE_MASK));
}

/*
 * Examine the physical address to determine if it is an area of memory
 * that should be mapped decrypted.  If the memory is not part of the
 * kernel usable area, it was accessed and created decrypted, so these
 * areas should be mapped decrypted. And since the encryption key can
 * change across reboots, persistent memory should also be mapped
 * decrypted.
 *
 * If SEV is active, that implies that BIOS/UEFI also ran encrypted so
 * only persistent memory should be mapped decrypted.
 */
static bool memremap_should_map_decrypted(resource_size_t phys_addr,
					  unsigned long size)
{
	int is_pmem;

	/*
	 * Check if the address is part of a persistent memory region.
	 * This check covers areas added by E820, EFI and ACPI.
	 */
	is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
				    IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem != REGION_DISJOINT)
		return true;

	/*
	 * Check if the non-volatile attribute is set for an EFI
	 * reserved area.
	 */
	if (efi_enabled(EFI_BOOT)) {
		switch (efi_mem_type(phys_addr)) {
		case EFI_RESERVED_TYPE:
			if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
				return true;
			break;
		default:
			break;
		}
	}

	/* Check if the address is outside kernel usable area */
	switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
	case E820_TYPE_RESERVED:
	case E820_TYPE_ACPI:
	case E820_TYPE_NVS:
	case E820_TYPE_UNUSABLE:
		/* For SEV, these areas are encrypted */
		if (sev_active())
			break;
		/* Fallthrough */

	case E820_TYPE_PRAM:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is EFI data. Check
 * it against the boot params structure and EFI tables and memory types.
 */
static bool memremap_is_efi_data(resource_size_t phys_addr,
				 unsigned long size)
{
	u64 paddr;

	/* Check if the address is part of EFI boot/runtime data */
	if (!efi_enabled(EFI_BOOT))
		return false;

	paddr = boot_params.efi_info.efi_memmap_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_memmap;
	if (phys_addr == paddr)
		return true;

	paddr = boot_params.efi_info.efi_systab_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_systab;
	if (phys_addr == paddr)
		return true;

	if (efi_is_table_address(phys_addr))
		return true;

	switch (efi_mem_type(phys_addr)) {
	case EFI_BOOT_SERVICES_DATA:
	case EFI_RUNTIME_SERVICES_DATA:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain.
 */
static bool memremap_is_setup_data(resource_size_t phys_addr,
				   unsigned long size)
{
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = memremap(paddr, sizeof(*data),
				MEMREMAP_WB | MEMREMAP_DEC);

		paddr_next = data->next;
		len = data->len;

		memunmap(data);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain (early boot version).
 */
static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
						unsigned long size)
{
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = early_memremap_decrypted(paddr, sizeof(*data));

		paddr_next = data->next;
		len = data->len;

		early_memunmap(data, sizeof(*data));

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
				 unsigned long flags)
{
	if (!mem_encrypt_active())
		return true;

	if (flags & MEMREMAP_ENC)
		return true;

	if (flags & MEMREMAP_DEC)
		return false;

	if (sme_active()) {
		if (memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			return false;
	}

	return !memremap_should_map_decrypted(phys_addr, size);
}
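
/*
 * Example (illustrative): a caller that knows a range was written
 * unencrypted can bypass the heuristics above with an explicit flag, as
 * this file itself does for the setup_data chain:
 *
 *	data = memremap(paddr, size, MEMREMAP_WB | MEMREMAP_DEC);
 *	...
 *	memunmap(data);
 */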

/*
 * Architecture override of __weak function to adjust the protection attributes
 * used when remapping memory. By default, early_memremap() will map the data
 * as encrypted. Determine if an encrypted mapping should not be done and set
 * the appropriate protection attributes.
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					     unsigned long size,
					     pgprot_t prot)
{
	bool encrypted_prot;

	if (!mem_encrypt_active())
		return prot;

	encrypted_prot = true;

	if (sme_active()) {
		if (early_memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			encrypted_prot = false;
	}

	if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
		encrypted_prot = false;

	return encrypted_prot ? pgprot_encrypted(prot)
			      : pgprot_decrypted(prot);
}

bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{
	return arch_memremap_can_ram_remap(phys_addr, size, 0);
}

#ifdef CONFIG_AMD_MEM_ENCRYPT
/* Remap memory with encryption */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
}

/*
 * Remap memory encrypted and write-protected - cannot be called before
 * pat_init() has run.
 */
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	/* Be sure the write-protect PAT entry is set for write-protect */
	if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
		return NULL;

	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
}

/* Remap memory without encryption */
void __init *early_memremap_decrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
}

/*
 * Remap memory decrypted and write-protected - cannot be called before
 * pat_init() has run.
 */
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	/* Be sure the write-protect PAT entry is set for write-protect */
	if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
		return NULL;

	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}
#endif	/* CONFIG_AMD_MEM_ENCRYPT */

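/*
 * Page table backing the boot-time fixmap slots (FIX_BTMAP_*) used by
 * early_ioremap() before the normal ioremap machinery is up.
 */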
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(addr)];
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	/* Sanitize 'flags' against any unsupported bits: */
	pgprot_val(flags) &= __default_kernel_pte_mask;

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one_kernel(addr);
}
836