/*
 * (c) Copyright 2006, 2007 Hewlett-Packard Development Company, L.P.
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/meminit.h>

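/*
 * Map phys_addr through the uncached identity region (region 6) by
 * OR-ing in __IA64_UNCACHED_OFFSET.  No page tables are involved, so
 * this works at any time, including before vmalloc is available.
 */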
static inline void __iomem *
__ioremap (unsigned long phys_addr)
{
	return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
}

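/*
 * early_ioremap() may be called before the vmalloc machinery is up,
 * so it always returns the uncached identity mapping and ignores size.
 */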
void __iomem *
early_ioremap (unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr);
}

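/*
 * Pick a mapping attribute that is consistent with the EFI memory map
 * so we don't alias the same memory with conflicting attributes (see
 * Documentation/ia64/aliasing.txt): WB memory goes through the cached
 * identity mapping, UC-only memory through the uncached one, and
 * anything else falls back to kernel page table mappings.
 */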
void __iomem *
ioremap (unsigned long phys_addr, unsigned long size)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset;
	pgprot_t prot;
	u64 attr;
	unsigned long gran_base, gran_size;
	unsigned long page_base;

	/*
	 * For things in kern_memmap, we must use the same attribute
	 * as the rest of the kernel.  For more details, see
	 * Documentation/ia64/aliasing.txt.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);
	else if (attr & EFI_MEMORY_UC)
		return __ioremap(phys_addr);

	/*
	 * Some chipsets don't support UC access to memory.  If
	 * WB is supported for the whole granule, we prefer that.
	 */
	gran_base = GRANULEROUNDDOWN(phys_addr);
	gran_size = GRANULEROUNDUP(phys_addr + size) - gran_base;
	if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * WB is not supported for the whole granule, so we can't use
	 * the region 7 identity mapping.  If we can safely cover the
	 * area with kernel page table mappings, we can use those
	 * instead.
	 */
	page_base = phys_addr & PAGE_MASK;
	size = PAGE_ALIGN(phys_addr + size) - page_base;
	if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB) {
		prot = PAGE_KERNEL;

		/*
		 * Mappings have to be page-aligned
		 */
		offset = phys_addr & ~PAGE_MASK;
		phys_addr &= PAGE_MASK;

		/*
		 * Ok, go for it..
		 */
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;

		area->phys_addr = phys_addr;
		addr = (void __iomem *) area->addr;
		if (ioremap_page_range((unsigned long) addr,
				(unsigned long) addr + size, phys_addr, prot)) {
			vunmap((void __force *) addr);
			return NULL;
		}

		return (void __iomem *) (offset + (char __iomem *)addr);
	}

	return __ioremap(phys_addr);
}
EXPORT_SYMBOL(ioremap);

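/*
 * Refuse to create an uncached mapping for memory the kernel already
 * maps write-back; mixing WB and UC attributes is the aliasing problem
 * described in Documentation/ia64/aliasing.txt.
 */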
void __iomem *
ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
		return NULL;

	return __ioremap(phys_addr);
}
EXPORT_SYMBOL(ioremap_nocache);

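/*
 * Nothing to undo: early_ioremap() only hands out identity mappings.
 */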
void
early_iounmap (volatile void __iomem *addr, unsigned long size)
{
}

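/*
 * Only mappings created via the vmalloc area (RGN_GATE) were set up by
 * ioremap_page_range() and need to be torn down; identity mappings in
 * the cached and uncached regions are left alone.
 */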
void
iounmap (volatile void __iomem *addr)
{
	if (REGION_NUMBER(addr) == RGN_GATE)
		vunmap((void *) ((unsigned long) addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);