/*
 * arch/parisc/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001 Helge Deller <deller@gmx.de>
 */

#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/pgalloc.h>

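/*
 * Fill one page of PTEs: map [address, address + size), clamped to the
 * current PMD entry, onto the given physical range.  Each page is marked
 * present, writable, dirty and accessed, plus any caller-supplied flags.
 * BUGs if a PTE in the range is already populated.
 */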
static inline void remap_area_pte(pte_t *pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	do {
		if (!pte_none(*pte)) {
			printk(KERN_ERR "remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW |
					_PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}

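/*
 * Fill one PMD's worth of mappings: allocate PTE pages as needed and
 * hand each PMD-sized chunk of [address, address + size), clamped to the
 * current PGD entry, to remap_area_pte().  Returns 0 on success or
 * -ENOMEM if a PTE page could not be allocated.
 */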
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

#if (USE_HPPA_IOREMAP)
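/*
 * Walk the kernel page tables for [address, address + size) and point
 * them at phys_addr, building the mapping one PGDIR_SIZE chunk at a
 * time.  Caches are flushed before the page tables are modified and
 * the TLB is flushed afterwards.
 */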
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
				 unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pmd_t *pmd;
		pmd = pmd_alloc(&init_mm, dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
					 phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}
#endif /* USE_HPPA_IOREMAP */

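/*
 * CONFIG_DEBUG_IOREMAP helpers: rate-limited (at most one report every
 * ten seconds) diagnostics for gsc_*() and __raw_*() accessors that
 * were handed a bad address, with a backtrace of the offender.
 */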
#ifdef CONFIG_DEBUG_IOREMAP
static unsigned long last;

void gsc_bad_addr(unsigned long addr)
{
	if (time_after(jiffies, last + HZ*10)) {
		printk("gsc_foo() called with bad address 0x%lx\n", addr);
		dump_stack();
		last = jiffies;
	}
}
EXPORT_SYMBOL(gsc_bad_addr);

void __raw_bad_addr(const volatile void __iomem *addr)
{
	if (time_after(jiffies, last + HZ*10)) {
		printk("__raw_foo() called with bad address 0x%p\n", addr);
		dump_stack();
		last = jiffies;
	}
}
EXPORT_SYMBOL(__raw_bad_addr);
#endif

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
#if !(USE_HPPA_IOREMAP)

	unsigned long end = phys_addr + size - 1;
	/* Support EISA addresses: fold the low ISA/EISA ranges into the
	 * EISA region at 0xfc000000 */
	if ((phys_addr >= 0x00080000 && end < 0x000fffff)
			|| (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
		phys_addr |= 0xfc000000;
	}

#ifdef CONFIG_DEBUG_IOREMAP
	return (void __iomem *)(phys_addr - (0x1UL << NYBBLE_SHIFT));
#else
	return (void __iomem *)phys_addr;
#endif

#else
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vfree(addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char *)addr);
#endif
}

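/*
 * Typical driver-side use of the mapping code above (an illustrative
 * sketch only, assuming the usual ioremap() wrapper from <asm/io.h>;
 * the physical base, length and register offset are made-up values):
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap(0xf4008000UL, 0x1000);
 *	if (!regs)
 *		return -ENOMEM;
 *	status = __raw_readl(regs + 0x10);
 *	__raw_writel(status | 0x1, regs + 0x10);
 *	iounmap(regs);
 */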
void iounmap(void __iomem *addr)
{
#if !(USE_HPPA_IOREMAP)
	/* __ioremap() did not allocate anything in this configuration,
	 * so there is nothing to free. */
	return;
#else
	if ((void __force *)addr > high_memory)
		vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
#endif
}