xref: /openbmc/linux/arch/csky/include/asm/pgtable.h (revision a008a3004340887370aea38b5cd441b1db110041)
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_PGTABLE_H
#define __ASM_CSKY_PGTABLE_H

#include <asm/fixmap.h>
#include <asm/memory.h>
#include <asm/addrspace.h>
#include <abi/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>

#define PGDIR_SHIFT		22
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD	(PAGE_OFFSET/PGDIR_SIZE)

/*
 * C-SKY uses a two-level paging structure:
 */
#define PGD_ORDER	0

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#define PTRS_PER_PMD	1
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))
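
/*
 * Worked example (assuming the usual 4 KiB PAGE_SIZE and 32-bit table
 * entries): each pgd entry maps PGDIR_SIZE = 1 << 22 = 4 MiB, and
 * PTRS_PER_PGD = PTRS_PER_PTE = 4096 / 4 = 1024, so 1024 pgd entries
 * of 4 MiB each cover the full 32-bit (4 GiB) address space.
 */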

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

#define pmd_pfn(pmd)	(pmd_phys(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd)	(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
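
/*
 * Note: pte_clear() keeps _PAGE_GLOBAL set in cleared kernel-space
 * entries, presumably because the global bit must match in both halves
 * of a MIPS-style TLB entry pair; this is also why pte_none() masks
 * _PAGE_GLOBAL out before testing for an empty pte.
 */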
#define pte_clear(mm, addr, ptep)	set_pte((ptep), \
	(((unsigned int) addr >= PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0)))
#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_pfn(x)	((unsigned long)((x).pte_low >> PAGE_SHIFT))
#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
				| pgprot_val(prot))

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

#define pte_page(x)			pfn_to_page(pte_pfn(x))
#define __mk_pte(page_nr, pgprot)	__pte(((page_nr) << PAGE_SHIFT) | \
					pgprot_val(pgprot))

/*
 * C-SKY only has VALID and DIRTY bits in hardware, so we use these two
 * bits to emulate PRESENT, READ, WRITE, EXEC, MODIFIED and ACCESSED.
 */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
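
/*
 * The emulation defers the hardware bits: pte_mkyoung() only sets
 * _PAGE_VALID once _PAGE_READ is present, and pte_mkdirty() only sets
 * the hardware _PAGE_DIRTY once both _PAGE_WRITE and the software
 * _PAGE_MODIFIED are set, so the first access/write to a page faults
 * and lets the kernel record ACCESSED/MODIFIED (see the pte_mk*
 * helpers below).
 */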

#define PAGE_NONE	__pgprot(_PAGE_PROT_NONE)
#define PAGE_READ	__pgprot(_PAGE_BASE | _PAGE_READ | \
				_CACHE_CACHED)
#define PAGE_WRITE	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE | \
				_CACHE_CACHED)
#define PAGE_SHARED PAGE_WRITE

#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_VALID | \
				_PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED | \
				_PAGE_GLOBAL | \
				_CACHE_CACHED)

#define _PAGE_IOREMAP		(_PAGE_BASE | _PAGE_READ | _PAGE_VALID | \
				_PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED | \
				_PAGE_GLOBAL | \
				_CACHE_UNCACHED | _PAGE_SO)

#define _PAGE_CHG_MASK	(~(unsigned long) \
				(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				_CACHE_MASK | _PAGE_GLOBAL))

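/*
 * The swap entry format in <abi/pgtable-bits.h> reserves exactly five
 * bits for the swap type, hence this build-time check.
 */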
#define MAX_SWAPFILES_CHECK() \
		BUILD_BUG_ON(MAX_SWAPFILES_SHIFT != 5)

extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

extern void load_pgd(unsigned long pg_dir);
extern pte_t invalid_pte_table[PTRS_PER_PTE];

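/*
 * On CPUs that need an explicit TLB sync, the hardware TLB refill
 * appears to walk the page tables from memory, so the updated entry
 * must be written back out of the dcache before the barrier (an
 * assumption inferred from the CONFIG_CPU_NEED_TLBSYNC guard below).
 */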
static inline void set_pte(pte_t *p, pte_t pte)
{
	*p = pte;
#if defined(CONFIG_CPU_NEED_TLBSYNC)
	dcache_wb_line((u32)p);
#endif
	/* prevent out-of-order execution */
	smp_mb();
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

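/*
 * A pmd entry holds the physical address of its pte table;
 * pmd_page_vaddr() maps it back to a kernel virtual pointer.
 */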
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	unsigned long ptr;

	ptr = pmd_val(pmd);

	return __va(ptr);
}

#define pmd_phys(pmd) pmd_val(pmd)

static inline void set_pmd(pmd_t *p, pmd_t pmd)
{
	*p = pmd;
#if defined(CONFIG_CPU_NEED_TLBSYNC)
	dcache_wb_line((u32)p);
#endif
	/* prevent speculative execution */
	smp_mb();
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == __pa(invalid_pte_table);
}

#define pmd_bad(pmd)	(pmd_val(pmd) & ~PAGE_MASK)

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) != __pa(invalid_pte_table));
}

static inline void pmd_clear(pmd_t *p)
{
	pmd_val(*p) = (__pa(invalid_pte_table));
#if defined(CONFIG_CPU_NEED_TLBSYNC)
	dcache_wb_line((u32)p);
#endif
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_read(pte_t pte)
{
	return pte.pte_low & _PAGE_READ;
}

static inline int pte_write(pte_t pte)
{
	return pte.pte_low & _PAGE_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte.pte_low & _PAGE_MODIFIED;
}

static inline int pte_young(pte_t pte)
{
	return pte.pte_low & _PAGE_ACCESSED;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_VALID);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) |= _PAGE_VALID;
	return pte;
}
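
/*
 * Example of the emulation above: after a write fault, the generic MM
 * code marks the pte with pte_mkyoung(pte_mkdirty(pte)), which sets
 * _PAGE_VALID and _PAGE_DIRTY so the hardware can complete the access
 * without faulting again (a sketch of the usual fault path, not code
 * from this file).
 */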

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

/*
 * Macro to mark a page protection value as "uncacheable".  Note that
 * "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED | _PAGE_SO;

	return __pgprot(prot);
}

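/*
 * pgprot_writecombine() is also uncached but leaves out _PAGE_SO
 * (strong ordering), so writes may be merged and reordered for better
 * throughput.
 */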
#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)    pfn_pte(page_to_pfn(page), (pgprot))
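/*
 * pte_modify() keeps the pfn and the state bits outside _PAGE_CHG_MASK
 * (_PAGE_ACCESSED, _PAGE_MODIFIED, _PAGE_DIRTY, _PAGE_VALID) and
 * replaces the present/read/write, cache and global bits with those
 * from newprot.
 */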
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot)));
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *pte);

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#endif /* __ASM_CSKY_PGTABLE_H */