/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/module.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>

#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

/*
 * Fill a block of ptes with mappings to a physically contiguous range.
 * The offset and size are clamped so a single call never walks past the
 * end of the current pmd.
 */
static inline void remap_area_pte(pte_t * pte, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
	                           | __WRITEABLE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk(KERN_ERR "remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

/*
 * Fill one pgd entry's worth of the mapping, allocating pte pages as
 * needed and handing each pmd-sized chunk to remap_area_pte().
 */
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

/*
 * Create the kernel page tables mapping [address, address + size) to
 * phys_addr.  Takes init_mm's page_table_lock around the allocations
 * and flushes the TLB afterwards so no stale translations survive.
 */
static int remap_area_pages(unsigned long address, phys_t phys_addr,
	phys_t size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd;
		pmd = pmd_alloc(&init_mm, dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
					 phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return error;
}
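
/*
 * A sketch of the arithmetic, assuming a 32-bit kernel with 4KB pages
 * and 32-bit ptes (the pmd level is then folded and PGDIR_SHIFT is 22),
 * so a virtual address splits as:
 *
 *	pgd index = address >> 22		(each slot covers 4MB)
 *	pte index = (address >> 12) & 0x3ff	(1024 ptes per page table)
 *
 * remap_area_pages() advances one pgd slot per iteration, remap_area_pmd()
 * collapses to a single pass, and remap_area_pte() fills the ptes.
 */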

/*
 * Allow physical addresses to be fixed up to help 36-bit peripherals.
 */
phys_t __attribute__ ((weak))
fixup_bigphys_addr(phys_t phys_addr, phys_t size)
{
	return phys_addr;
}
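
/*
 * A platform with peripherals above the 32-bit physical range can
 * override the weak symbol above.  Illustrative sketch only; BOARD_ALIAS
 * and BOARD_PHYS are hypothetical board constants, not kernel symbols:
 *
 *	phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size)
 *	{
 *		if (phys_addr >= BOARD_ALIAS)
 *			return BOARD_PHYS + (phys_addr - BOARD_ALIAS);
 *		return phys_addr;
 *	}
 */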

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */

#define IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))

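/*
 * For example, IS_LOW512(0x1fc00000) is true while IS_LOW512(0x20000000)
 * is not, so a device at or above 0x20000000 always takes the page table
 * path in __ioremap() below.
 */
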
void * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
	struct vm_struct * area;
	unsigned long offset;
	phys_t last_addr;
	void * addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512MB of address space using KSEG1,
	 * otherwise map using page tables.  E.g. physical 0x1fc00000 maps
	 * straight to the KSEG1 virtual address 0xbfc00000.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void *) KSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it.
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void *) (offset + (char *)addr);
}
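
/*
 * Sketch of a typical caller, via the ioremap() wrapper from <asm/io.h>;
 * DEV_PHYS_BASE and DEV_CTRL are made-up names used for illustration:
 *
 *	void __iomem *regs = ioremap(DEV_PHYS_BASE, 0x1000);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + DEV_CTRL);
 *	...
 *	iounmap(regs);
 */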

#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == KSEG1)

void __iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p;

	/* KSEG1 addresses were never vmapped, so there is nothing to undo */
	if (IS_KSEG1(addr))
		return;

	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		return;
	}

	kfree(p);
}

EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__iounmap);