/*
 *  This file contains the routines setting up the linux page tables.
 *
 * Copyright (C) 2008 Michal Simek
 * Copyright (C) 2008 PetaLogix
 *
 *    Copyright (C) 2007 Xilinx, Inc.  All rights reserved.
 *
 *  Derived from arch/ppc/mm/pgtable.c:
 *    -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This file is subject to the terms and conditions of the GNU General
 *  Public License.  See the file COPYING in the main directory of this
 *  archive for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/io.h>
#include <asm/mmu.h>
#include <asm/sections.h>
#include <asm/fixmap.h>

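/*
 * MicroBlaze uses a software-managed TLB and has no hashed page table,
 * so the ppc-derived "flush the HPTE" hook below reduces to
 * invalidating the TLB entry for the virtual address via _tlbie.
 */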
#define flush_HPTE(X, va, pg)	_tlbie(va)

unsigned long ioremap_base;
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);

#ifndef CONFIG_SMP
struct pgtable_cache_struct quicklists;
#endif

static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
		unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 *
	 * However, allow remap of rootfs: TBD
	 */
	/*
	 * Note: both bounds of the exclusion below are __bss_stop, so
	 * the inner range is empty and the !(...) clause is always true
	 * as written; the check therefore rejects any address in
	 * [memory_start, high_memory).
	 */
	if (mem_init_done &&
		p >= memory_start && p < virt_to_phys(high_memory) &&
		!(p >= virt_to_phys((unsigned long)&__bss_stop) &&
		p < virt_to_phys((unsigned long)__bss_stop))) {
		printk(KERN_WARNING "__ioremap(): phys addr "PTE_FMT
			" is RAM lr %pf\n", (unsigned long)p,
			__builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped? If the whole area is mapped then we're
	 * done, otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 *  -- Cort
	 */

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area(size, VM_IOREMAP);
		if (area == NULL)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v + i, p + i, flags);
	if (err) {
		if (mem_init_done)
			vfree((void *)v);
		return NULL;
	}

	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
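/*
 * Worked example of the alignment math above (made-up numbers, not
 * from any particular board): for __ioremap(0x84000010, 0x20, ...)
 * with 4 KiB pages, p = 0x84000000 and
 * size = PAGE_ALIGN(0x84000030) - p = 0x1000, so exactly one page is
 * mapped and the returned cookie is v + 0x10, preserving the sub-page
 * offset of the original physical address.
 */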

void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);

void iounmap(void *addr)
{
	if (addr > high_memory && (unsigned long) addr < ioremap_bot)
		vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
EXPORT_SYMBOL(iounmap);
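
/*
 * Typical pairing of the two exports above, sketched for a made-up
 * device (the physical address and register offset are hypothetical;
 * only the calling convention is the point):
 *
 *	void __iomem *regs = ioremap(0x84000000, PAGE_SIZE);
 *
 *	if (regs) {
 *		u32 status = readl(regs + 0x10);
 *		iounmap((void *)regs);
 *	}
 */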


int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;
	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pgd_offset_k(va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */
	/* pg = pte_alloc_kernel(&init_mm, pd, va); */

	if (pg != NULL) {
		err = 0;
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
				__pgprot(flags)));
		if (unlikely(mem_init_done))
			flush_HPTE(0, va, pmd_val(*pd));
			/* flush_HPTE(0, va, pg); */
	}
	return err;
}
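/*
 * Worked example of the 10/10/12 split referenced above (illustrative
 * address, assuming 4 KiB pages and a 32-bit VA): for va = 0xc0001234,
 * bits 31..22 give first-level index 0x300, bits 21..12 give
 * second-level index 0x001, and bits 11..0 give page offset 0x234.
 */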

/*
 * Map in all of physical memory starting at CONFIG_KERNEL_START.
 */
void __init mapin_ram(void)
{
	unsigned long v, p, s, f;

	v = CONFIG_KERNEL_START;
	p = memory_start;
	for (s = 0; s < lowmem_size; s += PAGE_SIZE) {
		f = _PAGE_PRESENT | _PAGE_ACCESSED |
				_PAGE_SHARED | _PAGE_HWEXEC;
		if ((char *) v < _stext || (char *) v >= _etext)
			f |= _PAGE_WRENABLE;
		else
			/*
			 * On the MicroBlaze, no user access
			 * forces R/W kernel access
			 */
			f |= _PAGE_USER;
		map_page(v, p, f);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}
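/*
 * Illustration with made-up board values (not from any device tree):
 * with memory_start = 0x80000000, CONFIG_KERNEL_START = 0xc0000000
 * and lowmem_size = 64 MB, the loop above maps phys 0x80000000 + s to
 * virt 0xc0000000 + s one page at a time; pages outside the kernel
 * text range [_stext, _etext) get _PAGE_WRENABLE, while text pages
 * get _PAGE_USER instead.
 */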

/* is x a power of 2? */
#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))
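/*
 * Why the bit trick works: a power of two has exactly one bit set, and
 * subtracting 1 clears that bit while setting all lower bits, so the
 * AND is zero only for powers of two.  E.g. 8 & 7 = 0b1000 & 0b0111
 * = 0, but 12 & 11 = 0b1100 & 0b1011 = 0b1000 != 0.  The (x) != 0
 * guard keeps zero from qualifying.
 */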

/* Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns 1 if a PTE was found, 0 otherwise.  *ptep is left
 * unmodified if no PTE is found.
 */
static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
{
	pgd_t	*pgd;
	pmd_t	*pmd;
	pte_t	*pte;
	int     retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pmd = pmd_offset(pgd, addr & PAGE_MASK);
		if (pmd_present(*pmd)) {
			pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
			if (pte) {
				retval = 1;
				*ptep = pte;
			}
		}
	}
	return retval;
}

/* Find physical address for this virtual address.  Normally used by
 * I/O functions, but anyone can call it.
 */
unsigned long iopa(unsigned long addr)
{
	unsigned long pa;

	pte_t *pte;
	struct mm_struct *mm;

	/* Allow mapping of user addresses (within the thread)
	 * for DMA if necessary.
	 */
	if (addr < TASK_SIZE)
		mm = current->mm;
	else
		mm = &init_mm;

	pa = 0;
	if (get_pteptr(mm, addr, &pte))
		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);

	return pa;
}
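/*
 * Sketch of a typical caller (the buffer and the DMA-programming step
 * are hypothetical; only the lookup itself is real):
 *
 *	unsigned long phys = iopa((unsigned long)buf);
 *
 *	if (phys)
 *		... hand phys to a DMA engine ...
 *
 * A return of 0 means no PTE was found; note that a page genuinely
 * mapped at physical address 0 is indistinguishable from failure with
 * this interface.
 */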

/*
 * Allocate a zeroed page to hold kernel PTEs.  Before mem_init() the
 * page allocator is not available, so fall back to the boot-time
 * allocator and clear the page by hand.
 */
__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
		unsigned long address)
{
	pte_t *pte;
	if (mem_init_done) {
		pte = (pte_t *)__get_free_page(GFP_KERNEL |
					__GFP_REPEAT | __GFP_ZERO);
	} else {
		pte = (pte_t *)early_get_page();
		if (pte)
			clear_page(pte);
	}
	return pte;
}

void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses)
		BUG();

	map_page(address, phys, pgprot_val(flags));
}
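/*
 * Sketch of the calling convention (the index and page are
 * hypothetical, and FIX_KMAP_BEGIN exists only with CONFIG_HIGHMEM):
 *
 *	__set_fixmap(FIX_KMAP_BEGIN, page_to_phys(page),
 *			__pgprot(_PAGE_KERNEL));
 *
 * would map the page at the fixed virtual address
 * __fix_to_virt(FIX_KMAP_BEGIN).
 */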
263