/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/compiler.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>

#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/pgtable-nop4d.h>
#endif

#if CONFIG_PGTABLE_LEVELS == 2
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 4
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SHIFT	(PUD_SHIFT + (PAGE_SHIFT - 3))
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define VA_BITS		(PGDIR_SHIFT + (PAGE_SHIFT - 3))
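
/*
 * Each page-table level is a single page of 8-byte entries, so every
 * level resolves PAGE_SHIFT - 3 bits of virtual address.  Worked example,
 * assuming 16KB pages (PAGE_SHIFT == 14) and CONFIG_PGTABLE_LEVELS == 3:
 * PMD_SHIFT = 14 + 11 = 25, PGDIR_SHIFT = 25 + 11 = 36 and
 * VA_BITS = 36 + 11 = 47.
 */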

#define PTRS_PER_PGD	(PAGE_SIZE >> 3)
#if CONFIG_PGTABLE_LEVELS > 3
#define PTRS_PER_PUD	(PAGE_SIZE >> 3)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
#define PTRS_PER_PMD	(PAGE_SIZE >> 3)
#endif
#define PTRS_PER_PTE	(PAGE_SIZE >> 3)

#define USER_PTRS_PER_PGD	((TASK_SIZE64 / PGDIR_SIZE) ? (TASK_SIZE64 / PGDIR_SIZE) : 1)

#ifndef __ASSEMBLY__

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
#include <asm/sparsemem.h>

struct mm_struct;
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
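
/*
 * With __HAVE_COLOR_ZERO_PAGE there is not one zero page but a block of
 * them, and zero_page_mask selects the one whose cache colour matches the
 * mapping address, so zero mappings never alias in a virtually-indexed
 * cache (a scheme inherited from MIPS).
 */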

/*
 * TLB refill handlers may also map the vmalloc area into xkvrange.
 * Avoid the first couple of pages so NULL pointer dereferences will
 * still reliably trap.
 */
#define MODULES_VADDR	(vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END	(MODULES_VADDR + SZ_256M)

#define VMALLOC_START	MODULES_END
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE)

#define vmemmap		((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
#define VMEMMAP_END	((unsigned long)vmemmap + VMEMMAP_SIZE - 1)
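
/*
 * The resulting layout, from low to high: the PCI I/O window at
 * vm_map_base, a two-page guard gap, 256MB of module space, the vmalloc
 * area, and a PMD-aligned vmemmap array on top, with VMALLOC_END clamped
 * so that everything stays below both the span of the page tables and
 * 1UL << cpu_vabits.
 */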

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PUD_FOLDED

typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })

extern pud_t invalid_pud_table[PTRS_PER_PUD];
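
/*
 * An empty entry points at a dummy table of the next level rather than at
 * NULL, so the TLB refill handler can walk every level unconditionally
 * instead of testing for a missing entry at each step (a design inherited
 * from MIPS).
 */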

/*
 * Empty pgd/p4d entries point to the invalid_pud_table.
 */
static inline int p4d_none(p4d_t p4d)
{
	return p4d_val(p4d) == (unsigned long)invalid_pud_table;
}

static inline int p4d_bad(p4d_t p4d)
{
	return p4d_val(p4d) & ~PAGE_MASK;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}

static inline void p4d_clear(p4d_t *p4dp)
{
	p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)p4d_val(p4d);
}

static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
	*p4d = p4dval;
}

#define p4d_phys(p4d)		PHYSADDR(p4d_val(p4d))
#define p4d_page(p4d)		(pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))

#endif

#ifndef __PAGETABLE_PMD_FOLDED

typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long)invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long)invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = (unsigned long)invalid_pmd_table;
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)pud_val(pud);
}

#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)

#define pud_phys(pud)		PHYSADDR(pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

#endif

/*
 * Empty pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long)invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
	return pmd_val(pmd) & ~PAGE_MASK;
}

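/*
 * A huge pmd maps its memory directly instead of pointing at a pte table,
 * so presence is encoded in the entry's own PRESENT/PROTNONE bits rather
 * than by comparison against invalid_pte_table.
 */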
static inline int pmd_present(pmd_t pmd)
{
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE));

	return pmd_val(pmd) != (unsigned long)invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = (unsigned long)invalid_pte_table;
}

#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#define pmd_phys(pmd)		PHYSADDR(pmd_val(pmd))

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);

#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pte_pfn(x)		((unsigned long)(((x).pte & _PFN_MASK) >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))

/*
 * Initialize a new pgd / pud / pmd table with invalid pointers.
 */
extern void pgd_init(void *addr);
extern void pud_init(void *addr);
extern void pmd_init(void *addr);

/*
 * Non-present pages: high 40 bits are offset, next 8 bits type,
 * low 16 bits zero.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = (type << 16) | (offset << 24);

	return pte;
}

#define __swp_type(x)		(((x).val >> 16) & 0xff)
#define __swp_offset(x)		((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(x)	((pmd_t) { (x).val | _PAGE_HUGE })
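
/*
 * Keeping the low 16 bits of a swap entry zero guarantees that
 * _PAGE_PRESENT and _PAGE_PROTNONE (and the other low flag bits) are
 * clear, so pte_present() is false for it.  __swp_entry_to_pmd() ORs
 * _PAGE_HUGE back in so pmd_present() takes its bit-test path and reports
 * the entry as not present.  Worked example: __swp_entry(1, 0x10) is
 * 0x10010000, i.e. type 1 in bits 16-23 and offset 0x10 from bit 24.
 */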

extern void paging_init(void);

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

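/*
 * A TLB entry maps an adjacent even/odd pair of pages, so the kernel keeps
 * _PAGE_GLOBAL consistent across each pair (a constraint inherited from
 * the MIPS code this file derives from): set_pte() propagates the bit to
 * the buddy pte and pte_clear() preserves it there.
 */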
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
#ifdef CONFIG_SMP
		/*
		 * For SMP, multiple CPUs can race, so we need to do
		 * this atomically.
		 */
		unsigned long page_global = _PAGE_GLOBAL;
		unsigned long tmp;

		__asm__ __volatile__ (
		"1:"	__LL	"%[tmp], %[buddy]		\n"
		"	bnez	%[tmp], 2f			\n"
		"	 or	%[tmp], %[tmp], %[global]	\n"
			__SC	"%[tmp], %[buddy]		\n"
		"	beqz	%[tmp], 1b			\n"
		"	nop					\n"
		"2:						\n"
		__WEAK_LLSC_MB
		: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
		: [global] "r" (page_global));
#else /* !CONFIG_SMP */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
	}
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
		set_pte_at(mm, addr, ptep, __pte(0));
}

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}
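
/*
 * Note the split dirty tracking above: _PAGE_DIRTY is the hardware bit
 * that actually permits stores (a write to a page without it raises a
 * page-modify exception), while _PAGE_MODIFIED is the software dirty bit.
 * _PAGE_DIRTY is only set once a pte is both writable and modified, so
 * the first store to a clean writable page still faults and lets the
 * kernel record the modification.
 */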

static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}

#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_mkspecial(pte_t pte)	{ pte_val(pte) |= _PAGE_SPECIAL; return pte; }
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_val(a) & _PAGE_PRESENT)
		return true;

	if ((pte_val(a) & _PAGE_PROTNONE) &&
			atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}
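
/*
 * pte_modify() keeps the bits covered by _PAGE_CHG_MASK (the pfn plus the
 * sticky state that a protection change must not wipe, such as the
 * accessed/modified software bits; see pgtable-bits.h for the exact set)
 * and takes everything else from the new protection.
 */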

extern void __update_tlb(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep);

static inline void update_mmu_cache(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep)
{
	__update_tlb(vma, address, ptep);
}

#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb	update_mmu_cache

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmdp)
{
	__update_tlb(vma, address, (pte_t *)pmdp);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, so generic_pmdp_establish() is fine. */
#define pmdp_establish generic_pmdp_establish

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE) && pmd_present(pmd);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) = (pmd_val(pmd) & ~(_PAGE_GLOBAL)) |
		((pmd_val(pmd) & _PAGE_GLOBAL) << (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}
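
/*
 * In a huge pmd the bit position of the pte-level global flag is reused
 * as the huge-page marker, so the global flag moves to _PAGE_HGLOBAL:
 * pmd_mkhuge() relocates an existing _PAGE_GLOBAL accordingly before
 * setting _PAGE_HUGE.
 */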

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_PAGE_ACCESSED;
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;
	return pmd;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) |
				(pgprot_val(newprot) & ~_HPAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE);

	return pmd;
}
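
/*
 * pmd_mkinvalid() is used when a huge pmd is transiently taken down (e.g.
 * around a THP split): the valid/present bits are stripped so hardware
 * walks stop translating through the entry while it is being rewritten.
 */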

/*
 * The generic version of pmdp_huge_get_and_clear() relies on a pmd_clear()
 * with a different prototype, so we provide our own.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_NUMA_BALANCING
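/*
 * NUMA balancing marks pages it wants to catch accesses to with
 * _PAGE_PROTNONE: such entries still count as present (see pte_present())
 * but fault on access, and the fault path recognises them as NUMA hinting
 * faults rather than genuine protection faults.
 */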
static inline long pte_protnone(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PROTNONE);
}

static inline long pmd_protnone(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_PROTNONE);
}
#endif /* CONFIG_NUMA_BALANCING */

/*
 * We provide our own get_unmapped_area() to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PGTABLE_H */