/*
 * include/asm-xtensa/page.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Copyright (C) 2001 - 2007 Tensilica Inc.
 */

#ifndef _XTENSA_PAGE_H
#define _XTENSA_PAGE_H

#include <asm/processor.h>
#include <asm/types.h>
#include <asm/cache.h>
#include <platform/hardware.h>
#include <asm/kmem_layout.h>

/*
 * PAGE_SHIFT determines the page size
 */

#define PAGE_SHIFT	12
#define PAGE_SIZE	(__XTENSA_UL_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))

#ifdef CONFIG_MMU
#define PAGE_OFFSET	XCHAL_KSEG_CACHED_VADDR
#define PHYS_OFFSET	XCHAL_KSEG_PADDR
#define MAX_LOW_PFN	(PHYS_PFN(XCHAL_KSEG_PADDR) + \
			 PHYS_PFN(XCHAL_KSEG_SIZE))
#else
#define PAGE_OFFSET	PLATFORM_DEFAULT_MEM_START
#define PHYS_OFFSET	PLATFORM_DEFAULT_MEM_START
#define MAX_LOW_PFN	PHYS_PFN(0xfffffffful)
#endif
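
/*
 * Worked example (illustration only; the real values come from
 * <asm/kmem_layout.h> and the core configuration): with the common
 * default MMUv2 layout, XCHAL_KSEG_CACHED_VADDR is 0xd0000000,
 * XCHAL_KSEG_PADDR is 0x00000000 and XCHAL_KSEG_SIZE is 0x08000000
 * (128 MiB), so PAGE_OFFSET = 0xd0000000, PHYS_OFFSET = 0 and
 * MAX_LOW_PFN = 0x8000, i.e. at most 128 MiB of low memory is
 * reachable through the KSEG mapping.
 */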

/*
 * Cache aliasing:
 *
 * If the cache size for one way is greater than the page size, we have to
 * deal with cache aliasing. The cache index is wider than the page size:
 *
 * |    |cache| cache index
 * | pfn  |off|	virtual address
 * |xxxx:X|zzz|
 * |    : |   |
 * | \  / |   |
 * |trans.|   |
 * | /  \ |   |
 * |yyyy:Y|zzz|	physical address
 *
 * When the page number is translated to the physical page address, the lowest
 * bit(s) (X) that are part of the cache index are also translated (Y).
 * If this translation changes bit(s) (X), the cache index is also affected,
 * thus resulting in a different cache line than before.
 * The kernel does not provide a mechanism to ensure that the page color
 * (represented by this bit) remains the same when allocated or when pages
 * are remapped. When user pages are mapped into kernel space, the color of
 * the page might also change.
 *
 * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
 * to temporarily map a page so we can match the color.
 */

#if DCACHE_WAY_SIZE > PAGE_SIZE
# define DCACHE_ALIAS_ORDER	(DCACHE_WAY_SHIFT - PAGE_SHIFT)
# define DCACHE_ALIAS_MASK	(PAGE_MASK & (DCACHE_WAY_SIZE - 1))
# define DCACHE_ALIAS(a)	(((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define DCACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
#else
# define DCACHE_ALIAS_ORDER	0
# define DCACHE_ALIAS(a)	((void)(a), 0)
#endif
#define DCACHE_N_COLORS		(1 << DCACHE_ALIAS_ORDER)
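
/*
 * Example (hypothetical cache geometry, for illustration only): with
 * 4 KiB pages and a 16 KiB per-way D-cache (DCACHE_WAY_SHIFT == 14),
 * DCACHE_ALIAS_ORDER is 2 and DCACHE_N_COLORS is 4.  The page color
 * DCACHE_ALIAS(addr) then comes from address bits [13:12], and two
 * mappings of the same page use the same cache lines only if
 * DCACHE_ALIAS_EQ() holds for their virtual addresses.
 */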

#if ICACHE_WAY_SIZE > PAGE_SIZE
# define ICACHE_ALIAS_ORDER	(ICACHE_WAY_SHIFT - PAGE_SHIFT)
# define ICACHE_ALIAS_MASK	(PAGE_MASK & (ICACHE_WAY_SIZE - 1))
# define ICACHE_ALIAS(a)	(((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define ICACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0)
#else
# define ICACHE_ALIAS_ORDER	0
#endif


#ifdef __ASSEMBLY__

#define __pgprot(x)	(x)

#else

/*
 * These are used to make use of C type-checking.
 */

typedef struct { unsigned long pte; } pte_t;		/* page table entry */
typedef struct { unsigned long pgd; } pgd_t;		/* PGD table entry */
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;

#define pte_val(x)	((x).pte)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

/*
 * Pure 2^n version of get_order
 * Use the 'nsau' instruction if supported by the processor, otherwise fall
 * back to the generic version.
 */

#if XCHAL_HAVE_NSA

static inline __attribute_const__ int get_order(unsigned long size)
{
	int lz;
	asm ("nsau %0, %1" : "=r" (lz) : "r" ((size - 1) >> PAGE_SHIFT));
	return 32 - lz;
}
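
/*
 * Worked example (illustrative): nsau returns the number of leading zero
 * bits of its operand (32 when the operand is 0).  For size == 5 * PAGE_SIZE,
 * (size - 1) >> PAGE_SHIFT == 4, nsau yields 29 and get_order() returns 3,
 * i.e. a 2^3 == 8 page allocation, the smallest power of two covering the
 * request.  For size <= PAGE_SIZE the shifted value is 0, nsau yields 32
 * and the order is 0.
 */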

#else

# include <asm-generic/getorder.h>

#endif

struct page;
struct vm_area_struct;
extern void clear_page(void *page);
extern void copy_page(void *to, void *from);

/*
 * If we have cache aliasing and writeback caches, we might have to do
 * some extra work
 */

#if defined(CONFIG_MMU) && DCACHE_WAY_SIZE > PAGE_SIZE
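/*
 * clear_page_alias() and copy_page_alias() take the physical address(es)
 * in addition to the virtual address, presumably so that the page can be
 * accessed through a temporary mapping whose cache color matches its
 * other mappings (see the aliasing discussion above).
 */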
extern void clear_page_alias(void *vaddr, unsigned long paddr);
extern void copy_page_alias(void *to, void *from,
			    unsigned long to_paddr, unsigned long from_paddr);

#define clear_user_highpage clear_user_highpage
void clear_user_highpage(struct page *page, unsigned long vaddr);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma);
#else
# define clear_user_page(page, vaddr, pg)	clear_page(page)
# define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
#endif

/*
 * This handles the memory map.  Pages are handled at
 * XCHAL_KSEG_CACHED_VADDR for kernels with a 32-bit address space.
 * These macros are for the conversion of kernel addresses, not user
 * addresses.
 */

#define ARCH_PFN_OFFSET		(PHYS_OFFSET >> PAGE_SHIFT)

#ifdef CONFIG_MMU
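/*
 * ___pa() converts a kernel (KSEG) virtual address to a physical address.
 * The XCHAL_KSEG_SIZE check appears to be there so that addresses in the
 * adjacent KSEG window (e.g. the uncached/bypass mapping of the same
 * physical range) translate correctly as well.
 */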
static inline unsigned long ___pa(unsigned long va)
{
	unsigned long off = va - PAGE_OFFSET;

	if (off >= XCHAL_KSEG_SIZE)
		off -= XCHAL_KSEG_SIZE;

	return off + PHYS_OFFSET;
}
#define __pa(x)	___pa((unsigned long)(x))
#else
#define __pa(x)	\
	((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
#endif
#define __va(x)	\
	((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
#define pfn_valid(pfn) \
	((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)

#ifdef CONFIG_DISCONTIGMEM
# error CONFIG_DISCONTIGMEM not supported
#endif

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
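
/*
 * Illustration (using the example layout above): a page frame at physical
 * address 0x00200000 has pfn 0x200, __va() maps it to 0xd0200000, and
 * virt_to_page()/page_to_virt() convert between that kernel virtual
 * address and its struct page via the pfn; pfn_valid() checks the pfn
 * against [ARCH_PFN_OFFSET, ARCH_PFN_OFFSET + max_mapnr).
 */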

#endif /* __ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#endif /* _XTENSA_PAGE_H */