xref: /openbmc/linux/arch/powerpc/mm/pgtable_32.c (revision e6dec923)
/*
 * This file contains the routines setting up the linux page tables.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/io.h>
#include <asm/setup.h>

#include "mmu_decl.h"

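/*
 * ioremap_bot tracks the lowest virtual address handed out by the
 * early ioremap allocator, which grows downward from IOREMAP_TOP
 * until the vmalloc system takes over.
 */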
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);	/* aka VMALLOC_END */

extern char etext[], _stext[], _sinittext[], _einittext[];

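/*
 * Allocate a zeroed page to hold kernel PTEs.  Once the slab
 * allocator is up we can use the page allocator directly; before
 * that we fall back to memblock and clear the page by hand.
 */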
__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;

	if (slab_is_available()) {
		pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	} else {
		pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
		if (pte)
			clear_page(pte);
	}
	return pte;
}

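/*
 * Allocate a PTE page for user mappings.  The page is zeroed,
 * charged to the caller (__GFP_ACCOUNT) and passed through
 * pgtable_page_ctor() so the core mm can treat it as a page table.
 */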
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *ptepage;
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT;

	ptepage = alloc_pages(flags, 0);
	if (!ptepage)
		return NULL;
	if (!pgtable_page_ctor(ptepage)) {
		__free_page(ptepage);
		return NULL;
	}
	return ptepage;
}

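/*
 * Map a physical range for MMIO.  Plain ioremap() gives an uncached,
 * guarded mapping, which is what device registers normally want.
 *
 * A minimal usage sketch (dev_phys, dev_size and REG_STATUS are
 * hypothetical placeholders, not symbols defined anywhere here):
 *
 *	void __iomem *regs = ioremap(dev_phys, dev_size);
 *
 *	if (regs) {
 *		u32 status = in_be32(regs + REG_STATUS);
 *		...
 *		iounmap(regs);
 *	}
 */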
void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap_caller(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

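/*
 * ioremap_wc() leaves out _PAGE_GUARDED, so the processor may gather
 * and combine writes to the mapping.
 */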
void __iomem *
ioremap_wc(phys_addr_t addr, unsigned long size)
{
	return __ioremap_caller(addr, size, _PAGE_NO_CACHE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

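/*
 * Like __ioremap(), but the caller-supplied flags are first sanitized:
 * kernel mappings are made dirty when writeable, and user/exec
 * permissions are stripped so they cannot leak into the kernel range.
 */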
void __iomem *
ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	/* writeable implies dirty for kernel addresses */
	if ((flags & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO)
		flags |= _PAGE_DIRTY | _PAGE_HWWRITE;

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	flags &= ~(_PAGE_USER | _PAGE_EXEC);

#ifdef _PAGE_BAP_SR
	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
	 * which means that we just cleared supervisor access... oops ;-) This
	 * restores it
	 */
	flags |= _PAGE_BAP_SR;
#endif

	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

void __iomem *
__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

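/*
 * Worker for all the ioremap variants: sanitize the flags, refuse to
 * remap RAM that the kernel is using, reuse an existing block (BAT)
 * mapping when one already covers the range, and otherwise pick a
 * virtual area (from vmalloc once it is up, else descending from
 * IOREMAP_TOP) and map it one page at a time.
 */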
void __iomem *
__ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
		 void *caller)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* Non-cacheable page cannot be coherent */
	if (flags & _PAGE_NO_CACHE)
		flags &= ~_PAGE_COHERENT;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from IOREMAP_TOP
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * If the address lies within the first 16 MB, assume it's in ISA
	 * memory space
	 */
	if (p < 16*1024*1024)
		p += _ISA_MEM_BASE;

#ifndef CONFIG_CRASH_DUMP
	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 */
	if (slab_is_available() && (p < virt_to_phys(high_memory)) &&
	    !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
		printk("__ioremap(): phys addr 0x%llx is RAM lr %ps\n",
		       (unsigned long long)p, __builtin_return_address(0));
		return NULL;
	}
#endif

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped?  Perhaps overlapped by a previous
	 * mapping.
	 */
	v = p_block_mapped(p);
	if (v)
		goto out;

	if (slab_is_available()) {
		struct vm_struct *area;
		area = get_vm_area_caller(size, VM_IOREMAP, caller);
		if (area == NULL)
			return NULL;
		area->phys_addr = p;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	/*
	 * Should check if it is a candidate for a BAT mapping
	 */

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_kernel_page(v+i, p+i, flags);
	if (err) {
		if (slab_is_available())
			vunmap((void *)v);
		return NULL;
	}

out:
	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
EXPORT_SYMBOL(__ioremap);

void iounmap(volatile void __iomem *addr)
{
	/*
	 * If mapped by BATs then there is nothing to do.
	 * Calling vfree() generates a benign warning.
	 */
	if (v_block_mapped((unsigned long)addr))
		return;

	if (addr > high_memory && (unsigned long) addr < ioremap_bot)
		vunmap((void *) (PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(iounmap);

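/*
 * Install a single kernel PTE mapping va -> pa with the given flags,
 * allocating the PTE page on demand.  Returns 0 on success or -ENOMEM
 * if the PTE page could not be allocated.
 */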
int map_kernel_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va);
	if (pg != NULL) {
		err = 0;
		/* The PTE should never be already set nor present in the
		 * hash table
		 */
		BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
		       flags);
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
						     __pgprot(flags)));
	}
	smp_wmb();
	return err;
}

/*
 * Map in a chunk of physical memory starting at offset.
 */
void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
	unsigned long v, s, f;
	phys_addr_t p;
	int ktext;

	s = offset;
	v = PAGE_OFFSET + s;
	p = memstart_addr + s;
	for (; s < top; s += PAGE_SIZE) {
		ktext = ((char *)v >= _stext && (char *)v < etext) ||
			((char *)v >= _sinittext && (char *)v < _einittext);
		f = ktext ? pgprot_val(PAGE_KERNEL_TEXT) : pgprot_val(PAGE_KERNEL);
		map_kernel_page(v, p, f);
#ifdef CONFIG_PPC_STD_MMU_32
		if (ktext)
			hash_preload(&init_mm, v, 0, 0x300);
#endif
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

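/*
 * Map all of lowmem into the kernel linear mapping.  On the Wii the
 * physical memory map has a hole (wii_hole_start/wii_hole_size), so
 * the RAM below the hole and the MEM2 bank above it are mapped as
 * separate chunks.
 */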
void __init mapin_ram(void)
{
	unsigned long s, top;

#ifndef CONFIG_WII
	top = total_lowmem;
	s = mmu_mapin_ram(top);
	__mapin_ram_chunk(s, top);
#else
	if (!wii_hole_size) {
		s = mmu_mapin_ram(total_lowmem);
		__mapin_ram_chunk(s, total_lowmem);
	} else {
		top = wii_hole_start;
		s = mmu_mapin_ram(top);
		__mapin_ram_chunk(s, top);

		top = memblock_end_of_DRAM();
		s = wii_mmu_mapin_mem2(top);
		__mapin_ram_chunk(s, top);
	}
#endif
}

/*
 * Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns 1 if the PTE was found, 0 otherwise.  The PTE pointer
 * at *ptep is left unmodified if no PTE is found.
 */
int
get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
{
	pgd_t	*pgd;
	pud_t	*pud;
	pmd_t	*pmd;
	pte_t	*pte;
	int	retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pud = pud_offset(pgd, addr & PAGE_MASK);
		if (pud && pud_present(*pud)) {
			pmd = pmd_offset(pud, addr & PAGE_MASK);
			if (pmd_present(*pmd)) {
				pte = pte_offset_map(pmd, addr & PAGE_MASK);
				if (pte) {
					retval = 1;
					*ptep = pte;
					if (pmdp)
						*pmdp = pmd;
					/* XXX caller needs to do pte_unmap, yuck */
				}
			}
		}
	}
	return retval;
}

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __change_page_attr(struct page *page, pgprot_t prot)
{
	pte_t *kpte;
	pmd_t *kpmd;
	unsigned long address;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	if (v_block_mapped(address))
		return 0;
	if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
		return -EINVAL;
	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
	wmb();
	flush_tlb_page(NULL, address);
	pte_unmap(kpte);

	return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * THIS CONFLICTS WITH BAT MAPPINGS, DEBUG USE ONLY
 */
static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int i, err = 0;
	unsigned long flags;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr(page, prot);
		if (err)
			break;
	}
	local_irq_restore(flags);
	return err;
}

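/*
 * DEBUG_PAGEALLOC hook: restore the linear mapping when pages are
 * allocated and tear it down (__pgprot(0) leaves the PTE non-present)
 * when they are freed, so stray accesses to free pages fault at once.
 */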
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;

	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

static int fixmaps;

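/*
 * Fixmap entries live at compile-time-fixed virtual addresses; this
 * routine wires one of them to a physical page at runtime.
 */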
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	map_kernel_page(address, phys, pgprot_val(flags));
	fixmaps++;
}
393