/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_MICROBLAZE_PGTABLE_H
#define _ASM_MICROBLAZE_PGTABLE_H

#include <asm/setup.h>

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#ifndef __ASSEMBLY__
extern int mem_init_done;
#endif

#ifndef CONFIG_MMU

#define pgd_present(pgd)	(1) /* pages are always present on non MMU */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_clear(pgdp)
#define kern_addr_valid(addr)	(1)
#define	pmd_offset(a, b)	((void *) 0)

#define PAGE_NONE		__pgprot(0) /* these mean nothing to non MMU */
#define PAGE_SHARED		__pgprot(0) /* these mean nothing to non MMU */
#define PAGE_COPY		__pgprot(0) /* these mean nothing to non MMU */
#define PAGE_READONLY		__pgprot(0) /* these mean nothing to non MMU */
#define PAGE_KERNEL		__pgprot(0) /* these mean nothing to non MMU */

#define pgprot_noncached(x)	(x)

#define __swp_type(x)		(0)
#define __swp_offset(x)		(0)
#define __swp_entry(typ, off)	((swp_entry_t) { ((typ) | ((off) << 7)) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#ifndef __ASSEMBLY__
static inline int pte_file(pte_t pte) { return 0; }
#endif /* __ASSEMBLY__ */

#define ZERO_PAGE(vaddr)	({ BUG(); NULL; })

#define swapper_pg_dir ((pgd_t *) NULL)

#define pgtable_cache_init()	do {} while (0)

#define arch_enter_lazy_cpu_mode()	do {} while (0)

#define pgprot_noncached_wc(prot)	prot

/*
 * All 32-bit addresses are effectively valid for vmalloc...
 * Sort of meaningless for non-VM targets.
 */
#define	VMALLOC_START	0
#define	VMALLOC_END	0xffffffff

#else /* CONFIG_MMU */

#include <asm-generic/4level-fixup.h>

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

#define FIRST_USER_ADDRESS	0

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */

static inline int pte_special(pte_t pte)	{ return 0; }

static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/*
 * Start and end of the vmalloc area.  Make sure to map the vmalloc
 * area above the pinned kernel memory area of 32MB.
 */
#define VMALLOC_START	(CONFIG_KERNEL_START + CONFIG_LOWMEM_SIZE)
#define VMALLOC_END	ioremap_bot

#endif /* __ASSEMBLY__ */

/*
 * Macro to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_GUARDED | _PAGE_NO_CACHE | \
							_PAGE_WRITETHRU)

#define pgprot_noncached(prot) \
			(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					_PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) \
			 (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
							_PAGE_NO_CACHE))
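
/*
 * Illustration (a sketch, not an additional API): given the fallback
 * _PAGE_* values defined below, pgprot_noncached(PAGE_KERNEL) masks
 * off all of _PAGE_CACHE_CTL and then sets I and G, yielding the same
 * bits as PAGE_KERNEL_CI (_PAGE_IO); pgprot_noncached_wc() sets only
 * I, leaving the mapping unguarded so prefetch is still permitted.
 */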

/*
 * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash
 * table containing PTEs, together with a set of 16 segment registers, to
 * define the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings.  We maintain a two-level page table tree, much
 * like that used by the i386, for the sake of the Linux memory
 * management code.  Low-level assembler code in hashtable.S
 * (procedure hash_page) is responsible for extracting ptes from the
 * tree and putting them into the hash table when necessary, and
 * updating the accessed and modified bits in the page table tree.
 */

/*
 * The MicroBlaze processor has a TLB architecture identical to PPC-40x. The
 * instruction and data sides share a unified, 64-entry, semi-associative
 * TLB which is maintained totally under software control. In addition, the
 * instruction side has a hardware-managed, 2-, 4-, or 8-entry, fully-associative
 * TLB which serves as a first level to the shared TLB. These two TLBs are
 * known as the UTLB and ITLB, respectively (see "mmu.h" for definitions).
 */

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 */

/* PMD_SHIFT determines the size of the area mapped by the PTE pages */
#define PMD_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	PMD_SHIFT
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))
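
/*
 * Worked example (assuming 4k pages, PAGE_SHIFT = 12, and PTE_SHIFT =
 * 10, matching the 1024-entry tables described above):
 *
 *	PMD_SHIFT    = 12 + 10 = 22, so one PTE page maps 4MB
 *	PMD_SIZE     = 1UL << 22 = 0x00400000
 *	PTRS_PER_PTE = 1 << 10 = 1024 entries per PTE page
 *	PTRS_PER_PGD = 1 << (32 - 22) = 1024 entries in the pgdir
 */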

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define pte_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \
		__FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
		__FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
		__FILE__, __LINE__, pgd_val(e))

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PTE as closely as possible.
 */

/* There are several potential gotchas here.  The hardware TLBLO
 * field looks like this:
 *
 * 0  1  2  3  4  ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
 * RPN.....................  0  0 EX WR ZSEL.......  W  I  M  G
 *
 * Where possible we make the Linux PTE bits match up with this
 *
 * - bits 20 and 21 must be cleared, because we use 4k pages (4xx can
 * support down to 1k pages), this is done in the TLBMiss exception
 * handler.
 * - We use only zones 0 (for kernel pages) and 1 (for user pages)
 * of the 16 available.  Bits 24-26 of the TLB are cleared in the TLB
 * miss handler.  Bit 27 is PAGE_USER, thus selecting the correct
 * zone.
 * - PRESENT *must* be in the bottom two bits because swap cache
 * entries use the top 30 bits.  Because 4xx doesn't support SMP
 * anyway, M is irrelevant so we borrow it for PAGE_PRESENT.  Bit 30
 * is cleared in the TLB miss handler before the TLB entry is loaded.
 * - All other bits of the PTE are loaded into TLBLO without
 * modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
 * software PTE bits.  We actually use bits 21, 24, 25, and
 * 30 respectively for the software bits: ACCESSED, DIRTY, RW, and
 * PRESENT.
 */

/* Definitions for MicroBlaze. */
#define	_PAGE_GUARDED	0x001	/* G: page is guarded from prefetch */
#define _PAGE_FILE	0x001	/* when !present: nonlinear file mapping */
#define _PAGE_PRESENT	0x002	/* software: PTE contains a translation */
#define	_PAGE_NO_CACHE	0x004	/* I: caching is inhibited */
#define	_PAGE_WRITETHRU	0x008	/* W: caching is write-through */
#define	_PAGE_USER	0x010	/* matches one of the zone permission bits */
#define	_PAGE_RW	0x040	/* software: Writes permitted */
#define	_PAGE_DIRTY	0x080	/* software: dirty page */
#define _PAGE_HWWRITE	0x100	/* hardware: Dirty & RW, set in exception */
#define _PAGE_HWEXEC	0x200	/* hardware: EX permission */
#define _PAGE_ACCESSED	0x400	/* software: R: page referenced */
#define _PMD_PRESENT	PAGE_MASK
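
/*
 * For illustration only: a present, dirty, writable user PTE carries
 *
 *	_PAGE_PRESENT | _PAGE_USER | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED
 *	= 0x002 | 0x010 | 0x040 | 0x080 | 0x400 = 0x4d2
 *
 * The software bits (PRESENT, RW, DIRTY, ACCESSED) are cleared by the
 * TLB miss handler before the entry is loaded; the remaining bits go
 * into TLBLO as-is, as described above.
 */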

/*
 * Some bits are unused...
 */
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE	0
#endif
#ifndef _PTE_NONE_MASK
#define _PTE_NONE_MASK	0
#endif
#ifndef _PAGE_SHARED
#define _PAGE_SHARED	0
#endif
#ifndef _PAGE_EXEC
#define _PAGE_EXEC	0
#endif

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

/*
 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
 * to have it in the Linux PTE, and in fact the bit could be reused for
 * another purpose.  -- paulus.
 */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)

#define _PAGE_KERNEL \
	(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC)

#define _PAGE_IO	(_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)

#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X \
		__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_SHARED)
#define PAGE_KERNEL_CI	__pgprot(_PAGE_IO)
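
/*
 * Worked example (assuming the fallback _PAGE_SHARED = 0 and
 * _PAGE_EXEC = 0 above), _PAGE_KERNEL evaluates to
 *
 *	_PAGE_PRESENT | _PAGE_ACCESSED			(0x402)
 *	| _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE	(0x1c0)
 *	| _PAGE_HWEXEC					(0x200)
 *	= 0x7c2
 *
 * and PAGE_KERNEL_CI adds _PAGE_NO_CACHE | _PAGE_GUARDED, giving 0x7c7.
 */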

/*
 * We consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_X
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY_X
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_X
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED_X
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED_X

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* __ASSEMBLY__ */

#define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) \
	do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define	pmd_bad(pmd)		((pmd_val(pmd) & _PMD_PRESENT) == 0)
#define	pmd_present(pmd)	((pmd_val(pmd) & _PMD_PRESENT) != 0)
#define	pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

#define pte_page(x)		(mem_map + (unsigned long) \
				((pte_val(x) - memory_start) >> PAGE_SHIFT))
#define PFN_SHIFT_OFFSET	(PAGE_SHIFT)

#define pte_pfn(x)		(pte_val(x) >> PFN_SHIFT_OFFSET)

#define pfn_pte(pfn, prot) \
	__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot))
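
/*
 * Round-trip sketch (illustrative values only): with PAGE_SHIFT = 12,
 * pfn_pte(0x48000, PAGE_KERNEL) builds a PTE with the value
 * 0x48000000 | pgprot_val(PAGE_KERNEL), and pte_pfn() on that PTE
 * shifts the value back down to recover the frame number 0x48000.
 */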

#ifndef __ASSEMBLY__
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
#define pgd_clear(xp)				do { } while (0)
#define pgd_page(pgd) \
	((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)  { return pte_val(pte) & _PAGE_FILE; }
/* These must modify the pte in place, hence macros (by-value inline
 * functions would only update a local copy). */
#define pte_uncache(pte)	(pte_val(pte) |= _PAGE_NO_CACHE)
#define pte_cache(pte)		(pte_val(pte) &= ~_PAGE_NO_CACHE)

static inline pte_t pte_rdprotect(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)
	{ pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_exprotect(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_mkclean(pte_t pte)
	{ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkread(pte_t pte)
	{ pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte)
	{ pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)
	{ pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)
	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)
	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = physpage | pgprot_val(pgprot);
	return pte;
}

#define mk_pte(page, pgprot) \
({									   \
	pte_t pte;							   \
	pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) |  \
			pgprot_val(pgprot);				   \
	pte;								   \
})

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
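
/*
 * Sketch of what pte_modify() preserves: _PAGE_CHG_MASK keeps the
 * page frame number plus _PAGE_ACCESSED and _PAGE_DIRTY, so e.g.
 * pte_modify(pte, PAGE_READONLY) drops _PAGE_RW (and _PAGE_HWWRITE)
 * while leaving the frame and the referenced/dirty state intact.
 */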

/*
 * Atomic PTE updates.
 *
 * pte_update clears and sets bits atomically, and returns
 * the old pte value.
 * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant
 * 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits.
 */
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
				unsigned long set)
{
	unsigned long flags, old, tmp;

	raw_local_irq_save(flags);

	__asm__ __volatile__(	"lw	%0, %2, r0	\n"
				"andn	%1, %0, %3	\n"
				"or	%1, %1, %4	\n"
				"sw	%1, %2, r0	\n"
			: "=&r" (old), "=&r" (tmp)
			: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set)
			: "cc");

	raw_local_irq_restore(flags);

	return old;
}

/*
 * set_pte stores a linux PTE into the linux page table.
 */
static inline void set_pte(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
		unsigned long address, pte_t *ptep)
{
	return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
}

static inline int ptep_test_and_clear_dirty(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	return (pte_update(ptep,
		(_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

/*static inline void ptep_set_wrprotect(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}*/

static inline void ptep_mkdirty(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	pte_update(ptep, 0, _PAGE_DIRTY);
}

/*#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)*/

/*
 * Conversion between pmd entries and pages.  A pmd entry holds the
 * effective address of a pte table: pmd_page_kernel() returns that
 * effective address, and pmd_page() the corresponding struct page.
 */
#define pmd_page_kernel(pmd)	((unsigned long) (pmd_val(pmd) & PAGE_MASK))

#define pmd_page(pmd)	(pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))

#define pte_unmap(pte)		kunmap_atomic(pte)
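
/*
 * Putting the lookup helpers together -- an illustrative kernel-address
 * walk (the generic mm code does this for you; a sketch only):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);	   (folded: same slot)
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */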

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })
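
/*
 * Example encoding (illustrative): a file offset of 0x1000 pages is
 * stored as (0x1000 << 3) | _PAGE_FILE = 0x8001, keeping bit 1
 * (_PAGE_PRESENT) clear; 32 - 3 = 29 usable offset bits, hence
 * PTE_FILE_MAX_BITS.
 */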

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit
 * (if used).  -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x3f)
#define __swp_offset(entry)	((entry).val >> 6)
#define __swp_entry(type, offset) \
		((swp_entry_t) { (type) | ((offset) << 6) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> 2 })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val << 2 })
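
/*
 * Example (illustrative values): swap type 3 at offset 0x100 encodes
 * as __swp_entry(3, 0x100).val = 3 | (0x100 << 6) = 0x4003; the PTE
 * form is 0x4003 << 2 = 0x1000c, so the low two bits stay clear and
 * neither _PAGE_FILE nor _PAGE_PRESENT can be set by accident.
 */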

extern unsigned long iopa(unsigned long addr);

/*
 * Values for nocacheflag and cmode.  These are not used by the APUS
 * kernel_map, but are kept to prevent compilation errors.
 */
#define	IOMAP_FULL_CACHING	0
#define	IOMAP_NOCACHE_SER	1
#define	IOMAP_NOCACHE_NONSER	2
#define	IOMAP_NO_COPYBACK	3

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)	(1)

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

void do_page_fault(struct pt_regs *regs, unsigned long address,
		   unsigned long error_code);

void mapin_ram(void);
int map_page(unsigned long va, phys_addr_t pa, int flags);

extern int mem_init_done;

asmlinkage void __init mmu_init(void);

void __init *early_get_page(void);

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif /* CONFIG_MMU */

#ifndef __ASSEMBLY__
#include <asm-generic/pgtable.h>

extern unsigned long ioremap_bot, ioremap_base;

void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle);
void consistent_free(size_t size, void *vaddr);
void consistent_sync(void *vaddr, size_t size, int direction);
void consistent_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction);

void setup_memory(void);
#endif /* __ASSEMBLY__ */

#endif /* _ASM_MICROBLAZE_PGTABLE_H */