/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/*  asm/pgtable.h:  Defines and functions used to work
 *                        with Sparc page tables.
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#define PMD_SHIFT		18
#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PMD_ALIGN(__addr)	(((__addr) + ~PMD_MASK) & PMD_MASK)

#define PGDIR_SHIFT		24
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
#define PGDIR_ALIGN(__addr)	(((__addr) + ~PGDIR_MASK) & PGDIR_MASK)
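
/* Example values, following from the definitions above: PMD_SIZE is
 * 1 << 18 == 256KB and PGDIR_SIZE is 1 << 24 == 16MB, so e.g.
 * PMD_ALIGN(0x40001) rounds up to 0x80000 while PMD_ALIGN(0x40000)
 * is returned unchanged.
 */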

#ifndef __ASSEMBLY__
#include <asm-generic/pgtable-nopud.h>

#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>

struct vm_area_struct;
struct page;

void load_mmu(void);
unsigned long calc_highpages(void);
unsigned long __init bootmem_init(unsigned long *pages_avail);

#define pte_ERROR(e)   __builtin_trap()
#define pmd_ERROR(e)   __builtin_trap()
#define pgd_ERROR(e)   __builtin_trap()

#define PTRS_PER_PTE		64
#define PTRS_PER_PMD		64
#define PTRS_PER_PGD		256
#define USER_PTRS_PER_PGD	(PAGE_OFFSET / PGDIR_SIZE)
#define PTE_SIZE		(PTRS_PER_PTE*4)
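
/* Sanity check on the geometry above: 256 pgd entries x 64 pmd entries
 * x 64 pte entries x the 4KB pages used on sparc32 covers the full
 * 32-bit virtual address space (256 * 64 * 64 * 4096 == 2^32).
 */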

#define PAGE_NONE	SRMMU_PAGE_NONE
#define PAGE_SHARED	SRMMU_PAGE_SHARED
#define PAGE_COPY	SRMMU_PAGE_COPY
#define PAGE_READONLY	SRMMU_PAGE_RDONLY
#define PAGE_KERNEL	SRMMU_PAGE_KERNEL

/* Top-level page directory - dummy used by init_mm.
 * srmmu.c will assign the real one (which is dynamically sized).
 */
#define swapper_pg_dir NULL

void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/*         xwr */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED

/* The first physical page can be anywhere; the following is needed so
 * that va-->pa and vice versa conversions work properly without a
 * performance hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the MMU and the CPU are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" :
			"=&r" (value) : "0" (value), "r" (addr) : "memory");
	return value;
}
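
/* The "swap" instruction atomically exchanges register %0 with the word
 * at [%2].  The "0" constraint places the input value in the same
 * register as the output, so the caller gets the old memory contents
 * back while the new value is stored.
 */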

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}
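
/* A nonzero value in the top four bits marks an entry that points into
 * one of the I/O spaces rather than ordinary memory; the helpers below
 * use this to refuse or special-case struct page lookups.
 */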

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page(pmd_pfn(pmd));
}

static inline unsigned long __pmd_page(pmd_t pmd)
{
	unsigned long v;

	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();

	v = pmd_val(pmd) & SRMMU_PTD_PMASK;
	return (unsigned long)__nocache_va(v << 4);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	unsigned long v = pmd_val(pmd) & SRMMU_PTD_PMASK;
	return (unsigned long)__nocache_va(v << 4);
}
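
/* A PTD stores the physical pointer to the next-level table shifted
 * right by four bits, so decoding is "mask, then shift left by 4";
 * __nocache_va() then turns that physical address into the kernel
 * virtual alias of the (uncached) page table.
 */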

static inline pmd_t *pud_pgtable(pud_t pud)
{
	if (srmmu_device_memory(pud_val(pud))) {
		return (pmd_t *)~0;
	} else {
		unsigned long v = pud_val(pud) & SRMMU_PTD_PMASK;
		return (pmd_t *)__nocache_va(v << 4);
	}
}

static inline int pte_present(pte_t pte)
{
	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	__pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pte((pte_t *)&pmd_val(*pmdp), __pte(0));
}

static inline int pud_none(pud_t pud)
{
	return !(pud_val(pud) & 0xFFFFFFF);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_val(pud) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pud_present(pud_t pud)
{
	return ((pud_val(pud) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pte((pte_t *)pudp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & SRMMU_REF;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_REF);
}
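
/* These helpers compose; e.g. a fault path that wants to mark a PTE
 * both referenced and dirty can write:
 *
 *	pte = pte_mkdirty(pte_mkyoung(pte));
 *	set_pte_at(mm, addr, ptep, pte);
 */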

#define pfn_pte(pfn, prot)		mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false.  This makes
		 * copy_one_pte() just copy the PTE over
		 * directly.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
	return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}
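
/* The PTE's page-number field holds the physical address shifted right
 * by four, i.e. pfn << (PAGE_SHIFT - 4); with 4KB pages pfn 0x1234
 * encodes as 0x123400 before the protection bits are OR-ed in, and
 * mk_pte_io() additionally puts the 4-bit I/O space number in
 * bits 28-31.
 */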

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	pgprot_val(prot) &= ~pgprot_val(__pgprot(SRMMU_CACHE));
	return prot;
}
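
/* Typical use is a driver mapping device registers; a minimal sketch,
 * assuming the caller already has a suitable vma:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	io_remap_pfn_range(vma, vma->vm_start, pfn, size,
 *			   vma->vm_page_prot);
 */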

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
		pgprot_val(newprot));
}

/* only used by the huge vmap code, should never be called */
#define pud_page(pud)			NULL

struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT     0x1
#define FAULT_CODE_WRITE    0x2
#define FAULT_CODE_USER     0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
		      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/* Encode and de-code a swap entry */
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}
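
/* With the SRMMU_SWP_* constants from asm/pgtsrmmu.h this is a lossless
 * round trip: __swp_type(__swp_entry(type, off)) == type and
 * __swp_offset(__swp_entry(type, off)) == off for any values that fit
 * within the respective masks.
 */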

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

static inline unsigned long
__get_phys(unsigned long addr)
{
	switch (sparc_cpu_model) {
	case sun4m:
	case sun4d:
		return ((srmmu_get_pte(addr) & 0xffffff00) << 4);
	default:
		return 0;
	}
}

static inline int
__get_iospace(unsigned long addr)
{
	switch (sparc_cpu_model) {
	case sun4m:
	case sun4d:
		return (srmmu_get_pte(addr) >> 28);
	default:
		return -1;
	}
}

extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) \
	(test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
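
/* The valid-address bitmap keeps one bit per megabyte of physical
 * address space (hence the ">> 20"), which is enough granularity to
 * reject addresses that fall into holes in the physical memory map.
 */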

/*
 * For sparc32 and sparc64, the pfn in io_remap_pfn_range() carries
 * <iospace> in its high 4 bits.  These macros/functions put it there
 * or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffUL)
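
/* On sparc32 BITS_PER_LONG is 32, so the I/O space number sits in pfn
 * bits 28-31: MK_IOSPACE_PFN(2, 0x100) yields 0x20000100, from which
 * GET_IOSPACE() recovers 2 and GET_PFN() recovers 0x100.
 */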

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
		    unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long long offset, space, phys_base;

	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
	space = GET_IOSPACE(pfn);
	phys_base = offset | (space << 32ULL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
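
/* The 4-bit space number and the 32-bit byte offset are combined into a
 * 36-bit physical address; shifting back down by PAGE_SHIFT hands
 * remap_pfn_range() a pfn wide enough to reach I/O space.
 */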

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									  \
	int __changed = !pte_same(*(__ptep), __entry);			  \
	if (__changed) {						  \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address);			  \
	}								  \
	__changed;							  \
})
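
/* Only a genuinely changed entry forces a TLB flush here: the
 * pte_same() test filters out spurious faults that pass in an identical
 * PTE, and the returned value tells the caller whether anything was
 * updated.
 */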

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START           _AC(0xfe600000,UL)
#define VMALLOC_END             _AC(0xffc00000,UL)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#define pmd_pgtable(pmd)	((pgtable_t)__pmd_page(pmd))

#endif /* !(_SPARC_PGTABLE_H) */