/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/compiler.h>
#include <asm/addrspace.h>
#include <asm/pgtable-bits.h>

#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/pgtable-nop4d.h>
#endif

#define PGD_ORDER		0
#define PUD_ORDER		0
#define PMD_ORDER		0
#define PTE_ORDER		0
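
/*
 * All orders are 0: each pgd/pud/pmd/pte table occupies exactly one
 * page, so every level translates PAGE_SHIFT - 3 bits of the virtual
 * address (a page holds PAGE_SIZE / 8 eight-byte entries).
 */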

#if CONFIG_PGTABLE_LEVELS == 2
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#elif CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
#elif CONFIG_PGTABLE_LEVELS == 4
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SHIFT	(PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3))
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define VA_BITS		(PGDIR_SHIFT + (PAGE_SHIFT + PGD_ORDER - 3))
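
/*
 * Worked example, assuming the common 16KB-page 3-level configuration
 * (PAGE_SHIFT == 14, CONFIG_PGTABLE_LEVELS == 3, all orders 0):
 *
 *   PMD_SHIFT   = 14 + (14 - 3) = 25   ->  PMD_SIZE   = 32MB
 *   PGDIR_SHIFT = 25 + (14 - 3) = 36   ->  PGDIR_SIZE = 64GB
 *   VA_BITS     = 36 + (14 - 3) = 47
 */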

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) >> 3)
#if CONFIG_PGTABLE_LEVELS > 3
#define PTRS_PER_PUD	((PAGE_SIZE << PUD_ORDER) >> 3)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
#define PTRS_PER_PMD	((PAGE_SIZE << PMD_ORDER) >> 3)
#endif
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) >> 3)

#define USER_PTRS_PER_PGD	((TASK_SIZE64 / PGDIR_SIZE) ? (TASK_SIZE64 / PGDIR_SIZE) : 1)

#ifndef __ASSEMBLY__

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
#include <asm/io.h>

struct mm_struct;
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
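
/*
 * With coloured zero pages, zero_page_mask selects the copy of the
 * zero page whose cache colour matches the faulting virtual address;
 * when no colouring is needed the mask is 0 and a single page is used.
 */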

/*
 * TLB refill handlers may also map the vmalloc area into xkvrange.
 * Avoid the first couple of pages so NULL pointer dereferences will
 * still reliably trap.
 */
#define MODULES_VADDR	(vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END	(MODULES_VADDR + SZ_256M)

#define VMALLOC_START	MODULES_END
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE)
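
/*
 * Resulting layout (a sketch; vm_map_base, PCI_IOSIZE and cpu_vabits
 * are provided by other headers):
 *
 *   vm_map_base                  PCI I/O range, then a 2-page guard
 *   MODULES_VADDR..MODULES_END   256MB module area
 *   VMALLOC_START..VMALLOC_END   vmalloc, capped by whichever is
 *                                smaller: page-table reach or the
 *                                CPU's virtual address width
 */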

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PUD_FOLDED

typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })

extern pud_t invalid_pud_table[PTRS_PER_PUD];

/*
 * Empty pgd/p4d entries point to the invalid_pud_table.
 */
static inline int p4d_none(p4d_t p4d)
{
	return p4d_val(p4d) == (unsigned long)invalid_pud_table;
}

static inline int p4d_bad(p4d_t p4d)
{
	return p4d_val(p4d) & ~PAGE_MASK;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}

static inline void p4d_clear(p4d_t *p4dp)
{
	p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)p4d_val(p4d);
}

static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
	*p4d = p4dval;
}

#define p4d_phys(p4d)		virt_to_phys((void *)p4d_val(p4d))
#define p4d_page(p4d)		(pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))

#endif /* !__PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_PMD_FOLDED

typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long)invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long)invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = ((unsigned long)invalid_pmd_table);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)pud_val(pud);
}

#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)

#define pud_phys(pud)		virt_to_phys((void *)pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

#endif /* !__PAGETABLE_PMD_FOLDED */

/*
 * Empty pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long)invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & ~PAGE_MASK);
}

static inline int pmd_present(pmd_t pmd)
{
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE));

	return pmd_val(pmd) != (unsigned long)invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long)invalid_pte_table);
}

#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* !CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);

#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pte_pfn(x)		((unsigned long)(((x).pte & _PFN_MASK) >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pud_init(unsigned long page, unsigned long pagetable);
extern void pmd_init(unsigned long page, unsigned long pagetable);

/*
 * Non-present pages: high 40 bits are offset, next 8 bits type,
 * low 16 bits zero.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = (type << 16) | (offset << 24);

	return pte;
}

#define __swp_type(x)		(((x).val >> 16) & 0xff)
#define __swp_offset(x)		((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(x)	((pmd_t) { (x).val | _PAGE_HUGE })
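
/*
 * For example, __swp_entry(2, 0x1234) encodes to
 * (2UL << 16) | (0x1234UL << 24): type in bits 16-23, offset in bits
 * 24 and up, and the low 16 bits (which hold _PAGE_PRESENT and
 * _PAGE_PROTNONE) left clear so a swap pte never looks present.
 */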

extern void paging_init(void);

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

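/*
 * A LoongArch TLB entry covers an even/odd pair of adjacent virtual
 * pages, so a global mapping must be global in both halves of the
 * pair.  ptep_buddy() (defined outside this header) is assumed to
 * return the other pte of that pair; set_pte() below propagates
 * _PAGE_GLOBAL to a still-none buddy to keep the shared entry sane.
 */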
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
#ifdef CONFIG_SMP
		/*
		 * For SMP, multiple CPUs can race, so we need to do
		 * this atomically.
		 */
		unsigned long page_global = _PAGE_GLOBAL;
		unsigned long tmp;

		__asm__ __volatile__ (
		"1:"	__LL	"%[tmp], %[buddy]		\n"
		"	bnez	%[tmp], 2f			\n"
		"	 or	%[tmp], %[tmp], %[global]	\n"
			__SC	"%[tmp], %[buddy]		\n"
		"	beqz	%[tmp], 1b			\n"
		"	nop					\n"
		"2:						\n"
		__WEAK_LLSC_MB
		: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
		: [global] "r" (page_global));
#else /* !CONFIG_SMP */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
	}
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
		set_pte_at(mm, addr, ptep, __pte(0));
}

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
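
/*
 * Dirty tracking uses two bits: _PAGE_DIRTY is the hardware bit that
 * actually permits stores (a store to a page with it clear faults),
 * while _PAGE_MODIFIED is the software record that the page has been
 * written.  The helpers below keep the pair consistent: the hardware
 * bit is only set once a pte is both writable and modified.
 */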

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}

static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}

#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_mkspecial(pte_t pte)	{ pte_val(pte) |= _PAGE_SPECIAL; return pte; }
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

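/*
 * A pte may still be cached in a remote TLB even when not marked
 * present: a PROT_NONE pte whose invalidation is still pending must be
 * treated as live, which is what the tlb_flush_pending check captures.
 */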
#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_val(a) & _PAGE_PRESENT)
		return true;

	if ((pte_val(a) & _PAGE_PROTNONE) &&
	    atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}

extern void __update_tlb(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep);

static inline void update_mmu_cache(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep)
{
	__update_tlb(vma, address, ptep);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmdp)
{
	__update_tlb(vma, address, (pte_t *)pmdp);
}

#define kern_addr_valid(addr)	(1)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE) && pmd_present(pmd);
}

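/*
 * In a huge-page pmd the bit position normally holding _PAGE_GLOBAL is
 * reused as _PAGE_HUGE, so pmd_mkhuge() relocates an existing global
 * flag to the separate _PAGE_HGLOBAL position before setting HUGE.
 */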
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) = (pmd_val(pmd) & ~(_PAGE_GLOBAL)) |
		((pmd_val(pmd) & _PAGE_GLOBAL) << (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_PAGE_ACCESSED;
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;
	return pmd;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT;
}

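/*
 * A pmd is either a huge leaf (pfn encoded in the _PFN_MASK field) or
 * a pointer to a pte table (a kernel virtual address), so pmd_page()
 * must decode the two cases differently.
 */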
static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) |
				(pgprot_val(newprot) & ~_HPAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE);

	return pmd;
}

/*
 * The generic version of pmdp_huge_get_and_clear uses a version of
 * pmd_clear() with a different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_NUMA_BALANCING
static inline long pte_protnone(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PROTNONE);
}

static inline long pmd_protnone(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_PROTNONE);
}
#endif /* CONFIG_NUMA_BALANCING */

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PGTABLE_H */
566