/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

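/*
 * Fill in one page table's worth of PTEs covering [address, address +
 * size).  The range is clamped to a single PMD entry; every PTE is
 * marked global, present, readable and writeable, with the caller's
 * cache flags ORed in.  BUG()s if an entry is already populated.
 */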
static inline void remap_area_pte(pte_t *pte, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
	                           | __WRITEABLE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	BUG_ON(address >= end);
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk(KERN_ERR "remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

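/*
 * Walk the PMD entries covering [address, address + size), allocating a
 * PTE table for each and handing the per-PMD subrange on to
 * remap_area_pte().  The range is clamped to a single PGD entry.
 */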
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	BUG_ON(address >= end);
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

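/*
 * Top-level walk: allocate PUD and PMD levels as needed for each PGD
 * entry in the range, map the physical pages, and finally flush the
 * TLB so no stale translations survive.
 */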
static int remap_area_pages(unsigned long address, phys_t phys_addr,
	phys_t size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
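	/*
	 * phys_addr is now biased by -address; adding the current
	 * virtual address back in below recovers the physical address
	 * to map for each subrange.
	 */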
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	BUG_ON(address >= end);
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
					 phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */

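/*
 * True iff the physical address lies in the low 512MB, i.e. the window
 * that the fixed KSEG1 segment can reach directly.
 */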
#define IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))

void __iomem *__ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
	struct vm_struct *area;
	unsigned long offset;
	phys_t last_addr;
	void *addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512MB of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
}

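/*
 * A minimal usage sketch, for illustration only: the base address, the
 * size and the use of the ioremap() wrapper from <asm/io.h> (which on
 * MIPS resolves to __ioremap() with _CACHE_UNCACHED) are assumptions,
 * not taken from this file:
 *
 *	void __iomem *regs = ioremap(0x1f000000, 0x1000);
 *	if (regs) {
 *		u32 id = readl(regs);		(MMIO read via the mapping)
 *		iounmap(regs);
 *	}
 */

/*
 * True iff the virtual address came from the KSEG1 fast path above
 * rather than from the page tables.
 */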
#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

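/*
 * Undo a mapping created by __ioremap().  KSEG1 addresses were never
 * entered into the page tables, so there is nothing to tear down for
 * them.
 */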
void __iounmap(const volatile void __iomem *addr)
{
	struct vm_struct *p;

	if (IS_KSEG1(addr))
		return;

	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p)
		printk(KERN_ERR "iounmap: bad address %p\n", addr);

	kfree(p);
}

EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__iounmap);