xref: /openbmc/linux/arch/mips/mm/ioremap.c (revision 0e17c50f)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995, 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/export.h>
#include <linux/ioport.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

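/*
 * Fill one PTE-level table: map the virtual range [address, address + size)
 * (clamped to the enclosing PMD entry) onto the physical range starting at
 * phys_addr, using the given protection flags.  A PTE that is already
 * populated triggers a BUG(), since an overlapping ioremap of the same
 * virtual range would indicate corruption.
 */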
static inline void remap_area_pte(pte_t *pte, unsigned long address,
	phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
{
	phys_addr_t end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
				   | __WRITEABLE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	BUG_ON(address >= end);
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk(KERN_ERR "remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

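/*
 * Fill one PMD-level table: allocate the PTE tables covering
 * [address, address + size) (clamped to the enclosing PGD entry) and hand
 * each PMD-sized chunk to remap_area_pte().  Returns 0 on success or
 * -ENOMEM if a PTE table cannot be allocated.
 */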
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
	phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
{
	phys_addr_t end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	BUG_ON(address >= end);
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);

		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

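/*
 * Top level of the page-table walk: for each PGD-sized chunk of
 * [address, address + size), allocate the intermediate p4d/pud/pmd
 * levels and populate the leaf PTEs via remap_area_pmd().  The TLB is
 * flushed once at the end so no stale translations survive.
 */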
static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
	phys_addr_t size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	BUG_ON(address >= end);
	do {
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		p4d = p4d_alloc(&init_mm, dir, address);
		if (!p4d)
			break;
		pud = pud_alloc(&init_mm, p4d, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}

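/*
 * walk_system_ram_range() callback: report whether any page in the
 * candidate range is ordinary, non-reserved RAM.  Returns 1 (which
 * aborts the walk) if so, 0 otherwise.
 */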
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++) {
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;
	}

	return 0;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
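/*
 * Typical use from a driver, as an illustrative sketch only (the base
 * address, size and REG_CTRL offset below are made-up values):
 *
 *	void __iomem *regs = ioremap(0x1f000000, 0x1000);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + REG_CTRL);	(* hypothetical register offset *)
 *	...
 *	iounmap(regs);
 *
 * The ioremap()/iounmap() wrappers in <asm/io.h> end up in the
 * __ioremap()/__iounmap() implementations below.
 */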

#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))

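/*
 * __ioremap - remap physical memory into the kernel address space
 * @phys_addr:	physical address to map, not necessarily page-aligned
 * @size:	size of the region in bytes
 * @flags:	MIPS cache mode bits (e.g. _CACHE_UNCACHED) for the mapping
 *
 * IS_LOW512() above tests whether an address fits in the low 512MB of
 * the physical address space, i.e. the window that the fixed KSEG1
 * segment can reach without any page tables.
 */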
void __iomem *__ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
{
	unsigned long offset, pfn, last_pfn;
	struct vm_struct *area;
	phys_addr_t last_addr;
	void *addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512MB of the physical address
	 * space using the unmapped, uncached KSEG1 segment; no page
	 * tables are needed there.  Everything else is mapped below
	 * through real page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap RAM that may be allocated by the page
	 * allocator, since that could lead to races & data clobbering.
	 */
	pfn = PFN_DOWN(phys_addr);
	last_pfn = PFN_DOWN(last_addr);
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
				  __ioremap_check_ram) == 1) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long)addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *)(offset + (char *)addr);
}

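/* True if addr lies in the fixed KSEG1 window rather than in vmalloc space. */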
#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

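/*
 * Undo an __ioremap(): KSEG1 addresses were never backed by page
 * tables, so there is nothing to tear down for them; everything else
 * has its vm_struct removed and freed.
 */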
void __iounmap(const volatile void __iomem *addr)
{
	struct vm_struct *p;

	if (IS_KSEG1(addr))
		return;

	p = remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr));
	if (!p)
		printk(KERN_ERR "iounmap: bad address %p\n", addr);

	kfree(p);
}

EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__iounmap);