/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/compiler.h>
#include <asm/addrspace.h>
#include <asm/pgtable-bits.h>

#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/pgtable-nop4d.h>
#endif

#if CONFIG_PGTABLE_LEVELS == 2
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 4
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SHIFT	(PUD_SHIFT + (PAGE_SHIFT - 3))
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define VA_BITS		(PGDIR_SHIFT + (PAGE_SHIFT - 3))

#define PTRS_PER_PGD	(PAGE_SIZE >> 3)
#if CONFIG_PGTABLE_LEVELS > 3
#define PTRS_PER_PUD	(PAGE_SIZE >> 3)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
#define PTRS_PER_PMD	(PAGE_SIZE >> 3)
#endif
#define PTRS_PER_PTE	(PAGE_SIZE >> 3)

#define USER_PTRS_PER_PGD	((TASK_SIZE64 / PGDIR_SIZE) ? (TASK_SIZE64 / PGDIR_SIZE) : 1)
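/*
 * Worked example (illustrative, not additional definitions): with the
 * default 16KB pages (PAGE_SHIFT == 14) each table holds 16KB / 8 == 2048
 * eight-byte entries, so every level translates PAGE_SHIFT - 3 == 11 bits.
 * For CONFIG_PGTABLE_LEVELS == 3 that gives PMD_SHIFT == 25 (32MB covered
 * per PMD entry), PGDIR_SHIFT == 36 (64GB per PGD entry) and VA_BITS == 47.
 */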

#ifndef __ASSEMBLY__

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>

struct mm_struct;
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero; it is used
 * for zero-mapped memory areas and the like.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
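/*
 * Usage sketch (illustrative; vma/address/ptep stand in for a generic
 * read fault on untouched anonymous memory): such a fault can be served
 * by mapping the shared zero page write-protected,
 *
 *	pte_t pte = mk_pte(ZERO_PAGE(address), vma->vm_page_prot);
 *	set_pte_at(vma->vm_mm, address, ptep, pte_wrprotect(pte));
 *
 * zero_page_mask lets ZERO_PAGE() return a differently colored copy of
 * the zero page per address when the cache requires it.
 */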

/*
 * TLB refill handlers may also map the vmalloc area into xkvrange.
 * Avoid the first couple of pages so NULL pointer dereferences will
 * still reliably trap.
 */
#define MODULES_VADDR	(vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END	(MODULES_VADDR + SZ_256M)

#define VMALLOC_START	MODULES_END
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE)
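/*
 * Resulting picture (illustrative): the first PCI_IOSIZE bytes above
 * vm_map_base hold the PCI I/O space, then a two-page guard hole keeps
 * small invalid pointers trapping, then 256MB of module space, then the
 * vmalloc area, whose end is capped by both the total page-table reach
 * and 1UL << cpu_vabits, minus one PMD_SIZE of guard.
 */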

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PUD_FOLDED

typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })

extern pud_t invalid_pud_table[PTRS_PER_PUD];

/*
 * Empty pgd/p4d entries point to the invalid_pud_table.
 */
static inline int p4d_none(p4d_t p4d)
{
	return p4d_val(p4d) == (unsigned long)invalid_pud_table;
}

static inline int p4d_bad(p4d_t p4d)
{
	return p4d_val(p4d) & ~PAGE_MASK;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}

static inline void p4d_clear(p4d_t *p4dp)
{
	p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)p4d_val(p4d);
}

static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
	*p4d = p4dval;
}

#define p4d_phys(p4d)		PHYSADDR(p4d_val(p4d))
#define p4d_page(p4d)		(pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))

#endif

#ifndef __PAGETABLE_PMD_FOLDED

typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long)invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long)invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = ((unsigned long)invalid_pmd_table);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)pud_val(pud);
}

#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)

#define pud_phys(pud)		PHYSADDR(pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

#endif

/*
 * Empty pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long)invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & ~PAGE_MASK);
}

static inline int pmd_present(pmd_t pmd)
{
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE));

	return pmd_val(pmd) != (unsigned long)invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long)invalid_pte_table);
}

#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#define pmd_phys(pmd)		PHYSADDR(pmd_val(pmd))

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);

#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pte_pfn(x)		((unsigned long)(((x).pte & _PFN_MASK) >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
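/*
 * Usage sketch (illustrative only; PAGE_KERNEL comes from pgtable-bits):
 * a pte is just a pfn shifted into the _PFN_MASK field plus protection
 * bits, so a kernel mapping could be composed as
 *
 *	pte_t pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
 *
 * mk_pte() further below is exactly this composition.
 */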

/*
 * Initialize a new pgd / pud / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pud_init(unsigned long page, unsigned long pagetable);
extern void pmd_init(unsigned long page, unsigned long pagetable);
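/*
 * Convention sketch (illustrative): "empty" entries are not zero but
 * pointers to the next level's all-invalid table, so a lookup that hits
 * an empty region still walks valid tables. pmd_init(page, pagetable) is
 * expected to fill every PTRS_PER_PMD slot of @page with @pagetable
 * (e.g. invalid_pte_table), roughly:
 *
 *	for (i = 0; i < PTRS_PER_PMD; i++)
 *		((unsigned long *)page)[i] = pagetable;
 */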

/*
 * Non-present pages: the low 16 bits are zero, the next 8 bits hold the
 * swap type and the high 40 bits hold the swap offset.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 16) | (offset << 24); return pte; }

#define __swp_type(x)		(((x).val >> 16) & 0xff)
#define __swp_offset(x)		((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(x)	((pmd_t) { (x).val | _PAGE_HUGE })
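/*
 * Worked example (illustrative): __swp_entry(3, 0x1234) encodes to
 * (3UL << 16) | (0x1234UL << 24) == 0x0000001234030000. The low 16 bits,
 * where the present/global status bits live, stay clear, so a swap entry
 * can never be mistaken for a present mapping.
 */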

extern void paging_init(void);

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
#ifdef CONFIG_SMP
		/*
		 * For SMP, multiple CPUs can race, so we need to do
		 * this atomically.
		 */
		unsigned long page_global = _PAGE_GLOBAL;
		unsigned long tmp;

		__asm__ __volatile__ (
		"1:"	__LL	"%[tmp], %[buddy]		\n"
		"	bnez	%[tmp], 2f			\n"
		"	 or	%[tmp], %[tmp], %[global]	\n"
			__SC	"%[tmp], %[buddy]		\n"
		"	beqz	%[tmp], 1b			\n"
		"	nop					\n"
		"2:						\n"
		__WEAK_LLSC_MB
		: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
		: [global] "r" (page_global));
#else /* !CONFIG_SMP */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
	}
}
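/*
 * Background for the buddy handling above (hedged; ptep_buddy() is not
 * defined in this header): each pte is paired with its even/odd
 * neighbour, conventionally
 *
 *	(pte_t *)((unsigned long)(ptep) ^ sizeof(pte_t))
 *
 * because one TLB entry maps such a pair of pages. A global mapping must
 * therefore mark both halves global, and on SMP the LL/SC loop above
 * updates the buddy atomically against CPUs populating it concurrently.
 */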

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
		set_pte_at(mm, addr, ptep, __pte(0));
}

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= (_PAGE_DIRTY | _PAGE_MODIFIED);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= (_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}
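/*
 * Note on the helpers above (hedged interpretation): _PAGE_DIRTY is the
 * hardware D bit, without which stores fault, while _PAGE_MODIFIED is a
 * software-only record that the page has been written. Write permission
 * and the hardware bit are therefore granted and revoked together, while
 * pte_dirty() reports the sticky software bit.
 */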

static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}

#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_mkspecial(pte_t pte)	{ pte_val(pte) |= _PAGE_SPECIAL; return pte; }
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_val(a) & _PAGE_PRESENT)
		return true;

	if ((pte_val(a) & _PAGE_PROTNONE) &&
			atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}
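/*
 * Usage sketch (illustrative): protection changes such as mprotect() end
 * up doing pte_modify(pte, vma->vm_page_prot), so the pfn and the bits
 * covered by _PAGE_CHG_MASK survive while the permission bits are
 * replaced.
 */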

extern void __update_tlb(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep);

static inline void update_mmu_cache(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep)
{
	__update_tlb(vma, address, ptep);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmdp)
{
	__update_tlb(vma, address, (pte_t *)pmdp);
}

#define kern_addr_valid(addr)	(1)

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE) && pmd_present(pmd);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) = (pmd_val(pmd) & ~(_PAGE_GLOBAL)) |
		((pmd_val(pmd) & _PAGE_GLOBAL) << (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}
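/*
 * Reading of pmd_mkhuge() above: huge entries keep their global flag in
 * _PAGE_HGLOBAL rather than _PAGE_GLOBAL, so an existing global bit is
 * relocated upward by (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) before
 * _PAGE_HUGE is set.
 */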

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= (_PAGE_WRITE | _PAGE_DIRTY);
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= (_PAGE_DIRTY | _PAGE_MODIFIED);
	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_PAGE_ACCESSED;
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;
	return pmd;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) |
				(pgprot_val(newprot) & ~_HPAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE);

	return pmd;
}

/*
 * The generic pmdp_huge_get_and_clear() assumes a pmd_clear() with a
 * different prototype, so provide our own version.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_NUMA_BALANCING
static inline long pte_protnone(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PROTNONE);
}

static inline long pmd_protnone(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_PROTNONE);
}
#endif /* CONFIG_NUMA_BALANCING */

/*
 * We provide our own get_unmapped_area() to cope with the virtual
 * aliasing constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PGTABLE_H */