/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGALLOC_H
#define _ASM_RISCV_PGALLOC_H

#include <linux/mm.h>
#include <asm/tlb.h>

#ifdef CONFIG_MMU
#define __HAVE_ARCH_PUD_ALLOC_ONE
#define __HAVE_ARCH_PUD_FREE
#include <asm-generic/pgalloc.h>

/*
 * Link a PTE table into a PMD entry: the table's PFN goes into the PPN
 * field, and _PAGE_TABLE marks the entry as a pointer to a next-level
 * table rather than a leaf mapping.
 */
static inline void pmd_populate_kernel(struct mm_struct *mm,
	pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = virt_to_pfn(pte);

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

static inline void pmd_populate(struct mm_struct *mm,
	pmd_t *pmd, pgtable_t pte)
{
	unsigned long pfn = virt_to_pfn(page_address(pte));

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

#ifndef __PAGETABLE_PMD_FOLDED
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	unsigned long pfn = virt_to_pfn(pmd);

	set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

/*
 * The P4D level is only meaningful when the kernel runs with four
 * page-table levels (pgtable_l4_enabled); with three levels the PUD is
 * folded and these helpers are no-ops.
 */
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	if (pgtable_l4_enabled) {
		unsigned long pfn = virt_to_pfn(pud);

		set_p4d(p4d, __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

/* As above, but warns if it would overwrite an already-present entry. */
static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d,
				     pud_t *pud)
{
	if (pgtable_l4_enabled) {
		unsigned long pfn = virt_to_pfn(pud);

		set_p4d_safe(p4d,
			     __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

/*
 * With three page-table levels the PUD is folded at runtime, so no
 * backing page is allocated (or freed) for it.
 */
#define pud_alloc_one pud_alloc_one
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	if (pgtable_l4_enabled)
		return __pud_alloc_one(mm, addr);

	return NULL;
}

#define pud_free pud_free
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	if (pgtable_l4_enabled)
		__pud_free(mm, pud);
}

#define __pud_free_tlb(tlb, pud, addr)	pud_free((tlb)->mm, pud)
#endif /* __PAGETABLE_PMD_FOLDED */

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
	if (likely(pgd != NULL)) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		/*
		 * Copy kernel mappings from init_mm so the kernel half
		 * of the address space is shared by every process.
		 */
		memcpy(pgd + USER_PTRS_PER_PGD,
		       init_mm.pgd + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return pgd;
}

#ifndef __PAGETABLE_PMD_FOLDED

#define __pmd_free_tlb(tlb, pmd, addr)	pmd_free((tlb)->mm, pmd)

#endif /* __PAGETABLE_PMD_FOLDED */

/*
 * Tear down the PTE page's state (e.g. its split page-table lock) and
 * queue the page on the mmu_gather so it is only freed after the TLB
 * has been flushed.
 */
#define __pte_free_tlb(tlb, pte, buf)			\
do {							\
	pgtable_pte_page_dtor(pte);			\
	tlb_remove_page((tlb), pte);			\
} while (0)
#endif /* CONFIG_MMU */

#endif /* _ASM_RISCV_PGALLOC_H */