/*
 * arch/parisc/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001 Helge Deller <deller@gmx.de>
 */

#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <asm/io.h>
#include <asm/pgalloc.h>

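/*
 * remap_area_pte(), remap_area_pmd() and remap_area_pages() below walk
 * the kernel page tables top-down to install the mapping.  They are
 * only used when USE_HPPA_IOREMAP is enabled; otherwise __ioremap()
 * hands back an address derived directly from the physical address
 * (see below).
 */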
static inline void remap_area_pte(pte_t *pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	do {
		if (!pte_none(*pte)) {
			printk(KERN_ERR "remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW |
					_PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}

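/*
 * Both levels use the same pattern: mask the address down to the
 * offset within the enclosing table's span, then clamp end to that
 * span's size so the loop never walks off the table it was handed.
 */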
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t *pte = pte_alloc_kernel(&init_mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

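/*
 * The "phys_addr -= address" adjustment above (and again in
 * remap_area_pages() below) rebases phys_addr so that
 * address + phys_addr always yields the physical address that
 * corresponds to the current virtual address, letting each level pass
 * a single delta down instead of stepping two cursors in lock-step.
 */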
#if (USE_HPPA_IOREMAP)
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
				 unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd;
		pmd = pmd_alloc(&init_mm, dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
					 phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return error;
}
#endif /* USE_HPPA_IOREMAP */

#ifdef CONFIG_DEBUG_IOREMAP
static unsigned long last = 0;

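/*
 * Rate-limited diagnostics: report at most once every ten seconds
 * (HZ*10 jiffies) so a driver looping on a bad address cannot flood
 * the log.
 */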
void gsc_bad_addr(unsigned long addr)
{
	if (time_after(jiffies, last + HZ*10)) {
		printk(KERN_ERR "gsc_foo() called with bad address 0x%lx\n", addr);
		dump_stack();
		last = jiffies;
	}
}
EXPORT_SYMBOL(gsc_bad_addr);

void __raw_bad_addr(const volatile void __iomem *addr)
{
	if (time_after(jiffies, last + HZ*10)) {
		printk(KERN_ERR "__raw_foo() called with bad address 0x%p\n", addr);
		dump_stack();
		last = jiffies;
	}
}
EXPORT_SYMBOL(__raw_bad_addr);
#endif

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
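/*
 * For example (hypothetical values, assuming 4k pages): a request for
 * phys_addr 0xf0001234 and size 0x100 maps the single page at
 * 0xf0001000 and returns the new virtual base plus the 0x234 offset.
 */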
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
#if !(USE_HPPA_IOREMAP)

	unsigned long end = phys_addr + size - 1;
	/* Support EISA addresses */
	if ((phys_addr >= 0x00080000 && end < 0x000fffff)
			|| (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
		phys_addr |= 0xfc000000;
	}

#ifdef CONFIG_DEBUG_IOREMAP
	return (void __iomem *)(phys_addr - (0x1UL << NYBBLE_SHIFT));
#else
	return (void __iomem *)phys_addr;
#endif

#else
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long)addr, phys_addr, size, flags)) {
		vfree(addr);
		return NULL;
	}
	return (void __iomem *)(offset + (char *)addr);
#endif
}
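/*
 * Typical use, sketched (the device base address and register offset
 * below are made-up placeholders, not real parisc values):
 *
 *	void __iomem *base = __ioremap(DEV_HPA, PAGE_SIZE, 0);
 *	if (!base)
 *		return -ENOMEM;
 *	status = __raw_readl(base + STATUS_REG);
 *	...
 *	iounmap(base);
 */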

void iounmap(void __iomem *addr)
{
#if !(USE_HPPA_IOREMAP)
	return;
#else
	if ((void __force *)addr > high_memory)
		return vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
#endif
}