/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 */

#ifndef _ASM_RISCV_PGALLOC_H
#define _ASM_RISCV_PGALLOC_H

#include <linux/mm.h>
#include <asm/tlb.h>

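/*
 * Point a pmd entry at a kernel pte table: convert the table's virtual
 * address to a PFN and install it with the _PAGE_TABLE (valid, non-leaf)
 * attributes.
 */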
static inline void pmd_populate_kernel(struct mm_struct *mm,
	pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = virt_to_pfn(pte);

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

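/*
 * Same as pmd_populate_kernel(), but the pte table is handed in as a
 * struct page (pgtable_t), as used for user address spaces.
 */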
static inline void pmd_populate(struct mm_struct *mm,
	pmd_t *pmd, pgtable_t pte)
{
	unsigned long pfn = virt_to_pfn(page_address(pte));

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

#ifndef __PAGETABLE_PMD_FOLDED
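/* Link a pmd table into a pud entry; only built when the pmd level exists. */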
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	unsigned long pfn = virt_to_pfn(pmd);

	set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}
#endif /* __PAGETABLE_PMD_FOLDED */

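/* Return the struct page of the pte table that this pmd entry points to. */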
#define pmd_pgtable(pmd)	pmd_page(pmd)

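/*
 * Allocate a page for a new top-level page table.  The user half is
 * cleared and the kernel mappings are copied from init_mm so that the
 * kernel address space is shared by every process.
 */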
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
	if (likely(pgd != NULL)) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		/* Copy kernel mappings */
		memcpy(pgd + USER_PTRS_PER_PGD,
			init_mm.pgd + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

#ifndef __PAGETABLE_PMD_FOLDED

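/*
 * Allocate one zeroed page for a pmd table.  __GFP_RETRY_MAYFAIL lets the
 * allocation fail under memory pressure instead of invoking the OOM killer.
 */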
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pmd_t *)__get_free_page(
		GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}

#define __pmd_free_tlb(tlb, pmd, addr)  pmd_free((tlb)->mm, pmd)

#endif /* __PAGETABLE_PMD_FOLDED */

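/* Allocate a zeroed pte table page for kernel mappings (no struct page ctor). */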
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
	unsigned long address)
{
	return (pte_t *)__get_free_page(
		GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
}

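/*
 * Allocate a pte table page for user mappings and run pgtable_page_ctor()
 * on it to set up the split page-table lock and page-table accounting.
 */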
static inline struct page *pte_alloc_one(struct mm_struct *mm,
	unsigned long address)
{
	struct page *pte;

	pte = alloc_page(GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		/* Ctor failure (e.g. split ptlock allocation): undo and bail. */
		__free_page(pte);
		return NULL;
	}
	return pte;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

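/* Release a user pte table page: tear down the ctor state, then free it. */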
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}

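/*
 * Free a user pte table via the mmu_gather: run the dtor now and let
 * tlb_remove_page() release the page after the TLB has been flushed.
 */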
#define __pte_free_tlb(tlb, pte, buf)   \
do {                                    \
	pgtable_page_dtor(pte);         \
	tlb_remove_page((tlb), pte);    \
} while (0)

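/* No page-table caches to shrink on RISC-V, so this hook is a no-op. */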
static inline void check_pgt_cache(void)
{
}

#endif /* _ASM_RISCV_PGALLOC_H */