/*
 *  This file contains the routines setting up the linux page tables.
 *
 * Copyright (C) 2008 Michal Simek
 * Copyright (C) 2008 PetaLogix
 *
 *    Copyright (C) 2007 Xilinx, Inc.  All rights reserved.
 *
 *  Derived from arch/ppc/mm/pgtable.c:
 *    -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This file is subject to the terms and conditions of the GNU General
 *  Public License.  See the file COPYING in the main directory of this
 *  archive for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/io.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>
#include <asm/sections.h>

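/*
 * MicroBlaze (like the ppc 4xx cores this code descends from) has no
 * hashed page table, so "flushing the HPTE" for a virtual address
 * reduces to invalidating its TLB entry with _tlbie(); the other two
 * arguments are kept only for interface compatibility.
 */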
#define flush_HPTE(X, va, pg)	_tlbie(va)

unsigned long ioremap_base;
unsigned long ioremap_bot;

/* The maximum lowmem defaults to 768MB, but this can be configured to
 * another value.
 */
#define MAX_LOW_MEM	CONFIG_LOWMEM_SIZE

#ifndef CONFIG_SMP
struct pgtable_cache_struct quicklists;
#endif

static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
		unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 *
	 * However, allow remap of rootfs: TBD
	 *
	 * As written, the exclusion window below is empty (both bounds
	 * are __bss_stop), so no RAM is actually exempted until the
	 * rootfs TBD above is resolved.
	 */
	if (mem_init_done &&
		p >= memory_start && p < virt_to_phys(high_memory) &&
		!(p >= virt_to_phys((unsigned long)&__bss_stop) &&
		p < virt_to_phys((unsigned long)__bss_stop))) {
		printk(KERN_WARNING "__ioremap(): phys addr "PTE_FMT
			" is RAM lr %p\n", (unsigned long)p,
			__builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped? If the whole area is mapped then we're
	 * done, otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 *  -- Cort
	 */

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area(size, VM_IOREMAP);
		if (area == NULL)
			return NULL;
		v = VMALLOC_VMADDR(area->addr);
	} else {
		v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v + i, p + i, flags);
	if (err) {
		if (mem_init_done)
			vfree((void *)v);
		return NULL;
	}

	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}

void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);

void iounmap(void *addr)
{
	if (addr > high_memory && (unsigned long) addr < ioremap_bot)
		vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
EXPORT_SYMBOL(iounmap);

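/*
 * Illustrative sketch: typical driver use of the pair above.  The
 * physical base 0x84000000 is a hypothetical example address, not a
 * real device on any particular board.
 *
 *	void __iomem *regs = ioremap(0x84000000, PAGE_SIZE);
 *
 *	if (regs) {
 *		u32 id = readl(regs);	(read a device register)
 *		iounmap(regs);
 *	}
 */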

int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* spin_lock(&init_mm.page_table_lock); */

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pgd_offset_k(va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */
	/* pg = pte_alloc_kernel(&init_mm, pd, va); */

	if (pg != NULL) {
		err = 0;
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
				__pgprot(flags)));
		if (mem_init_done)
			flush_HPTE(0, va, pmd_val(*pd));
			/* flush_HPTE(0, va, pg); */
	}
	/* spin_unlock(&init_mm.page_table_lock); */
	return err;
}
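
/*
 * Illustrative sketch: establishing one uncached kernel mapping with
 * map_page().  Both addresses are hypothetical and must be
 * page-aligned; a non-zero return means the PTE page could not be
 * allocated (-ENOMEM).
 *
 *	int err = map_page(0xf0000000, 0x84000000,
 *			   _PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED);
 */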

void __init adjust_total_lowmem(void)
{
/* TBD */
#if 0
	unsigned long max_low_mem = MAX_LOW_MEM;

	if (total_lowmem > max_low_mem) {
		total_lowmem = max_low_mem;
#ifndef CONFIG_HIGHMEM
		printk(KERN_INFO "Warning, memory limited to %ld MB, use "
				"CONFIG_HIGHMEM to reach %ld MB\n",
				max_low_mem >> 20, total_memory >> 20);
		total_memory = total_lowmem;
#endif /* CONFIG_HIGHMEM */
	}
#endif
}

static void show_tmem(unsigned long tmem)
{
	/*
	 * Apparently a placeholder so the memory size can be observed
	 * (e.g. from a debugger); initialise 'a' so the volatile access
	 * does not read an indeterminate value.
	 */
	volatile unsigned long a = 0;

	a = a + tmem;
}

/*
 * Map in all of physical memory starting at CONFIG_KERNEL_START.
 */
void __init mapin_ram(void)
{
	unsigned long v, p, s, f;

	v = CONFIG_KERNEL_START;
	p = memory_start;
	show_tmem(memory_size);
	for (s = 0; s < memory_size; s += PAGE_SIZE) {
		f = _PAGE_PRESENT | _PAGE_ACCESSED |
				_PAGE_SHARED | _PAGE_HWEXEC;
		if ((char *) v < _stext || (char *) v >= _etext)
			f |= _PAGE_WRENABLE;
		else
			/*
			 * On the MicroBlaze, "no user access"
			 * forces R/W kernel access.
			 */
			f |= _PAGE_USER;
		map_page(v, p, f);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

/* is x a power of 2? */
#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))
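/* e.g. is_power_of_2(64) is true; is_power_of_2(72) and is_power_of_2(0)
 * are false (the first test explicitly rejects zero).
 */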

/*
 * Set up a mapping for a block of I/O.
 * virt, phys, size must all be page-aligned.
 * This should only be called before ioremap is called.
 */
void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
			     unsigned int size, int flags)
{
	int i;

	if (virt > CONFIG_KERNEL_START && virt < ioremap_bot)
		ioremap_bot = ioremap_base = virt;

	/* Put it in the page tables. */
	for (i = 0; i < size; i += PAGE_SIZE)
		map_page(virt + i, phys + i, flags);
}
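
/*
 * Illustrative sketch: a board setup routine might use this to create
 * an early, fixed mapping for a debug UART before ioremap() works.
 * All three address/size values are hypothetical.
 *
 *	io_block_mapping(0xf0000000, 0x84000000, PAGE_SIZE,
 *			 _PAGE_KERNEL | _PAGE_NO_CACHE);
 */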

/* Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns 1 if a PTE was found, 0 otherwise; *ptep is left
 * unmodified when no PTE is found.
 */
static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
{
	pgd_t	*pgd;
	pmd_t	*pmd;
	pte_t	*pte;
	int	retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pmd = pmd_offset(pgd, addr & PAGE_MASK);
		if (pmd_present(*pmd)) {
			pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
			if (pte) {
				retval = 1;
				*ptep = pte;
			}
		}
	}
	return retval;
}
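/*
 * Note that get_pteptr() takes no page-table locks; iopa() below relies
 * on the mapping staying stable for the duration of the lookup.
 */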

/* Find physical address for this virtual address.  Normally used by
 * I/O functions, but anyone can call it.
 */
unsigned long iopa(unsigned long addr)
{
	unsigned long pa;
	pte_t *pte;
	struct mm_struct *mm;

	/* Allow mapping of user addresses (within the thread)
	 * for DMA if necessary.
	 */
	if (addr < TASK_SIZE)
		mm = current->mm;
	else
		mm = &init_mm;

	pa = 0;
	if (get_pteptr(mm, addr, &pte))
		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);

	return pa;
}
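
/*
 * Illustrative sketch: a caller that needs the physical address behind
 * a kernel virtual address, e.g. to program a DMA engine.  "buf" is a
 * hypothetical buffer pointer.
 *
 *	unsigned long phys = iopa((unsigned long)buf);
 *
 *	if (phys == 0)
 *		(no translation existed for buf)
 */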
287