/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * vineetg: June 2011
 *  -"/proc/meminfo | grep PageTables" kept on increasing
 *   Recently added pgtable dtor was not getting called.
 *
 * vineetg: May 2011
 *  -Variable pg-sz means that Page Tables could be variable sized themselves
 *    So calculate it based on addr traversal split [pgd-bits:pte-bits:xxx]
 *  -Page Table size capped to max 1 page to save memory - hence verified.
 *  -Since these deal with constants, gcc compile-time optimizes them.
 *
 * vineetg: Nov 2010
 *  -Added pgtable ctor/dtor used for pgtable mem accounting
 *
 * vineetg: April 2010
 *  -Switched pgtable_t from being struct page * to unsigned long
 *      =Needed so that Page Table allocator (pte_alloc_one) is not forced to
 *       deal with struct page. That way in future we can make it allocate
 *       multiple PG Tbls in one Page Frame
 *      =sweet side effect is avoiding calls to ugly page_address( ) from the
 *       pg-tlb allocator sub-sys (pte_alloc_one, pte_free, pmd_populate)
 *
 *  Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_PGALLOC_H
#define _ASM_ARC_PGALLOC_H

#include <linux/mm.h>
#include <linux/log2.h>

/* Point a pmd entry at a table of kernel ptes */
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
	pmd_set(pmd, pte);
}

/*
 * Ditto for a user pte table: pgtable_t is an unsigned long here
 * (see the April 2010 note above), hence the cast back to pte_t *
 */
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep)
{
	pmd_set(pmd, (pte_t *) ptep);
}

static inline int __get_order_pgd(void)
{
	return get_order(PTRS_PER_PGD * sizeof(pgd_t));
}
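
/*
 * Worked example, assuming the default 8K page with an [11:8:13] addr
 * split (both are config knobs, so treat the numbers as illustrative):
 * PTRS_PER_PGD = 2^11 = 2048 and sizeof(pgd_t) = 4, so the pgd spans
 * exactly 8K and get_order() folds to 0 at compile time - a single
 * page-frame allocation.
 */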

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	int num, num2;
	pgd_t *ret = (pgd_t *) __get_free_pages(GFP_KERNEL, __get_order_pgd());

	if (ret) {
		/* Zero the user space slots (and the user-kernel gutter) */
		num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
		memzero(ret, num * sizeof(pgd_t));

		/* Kernel vmalloc region: share the entries of the master pgd */
		num2 = VMALLOC_SIZE / PGDIR_SIZE;
		memcpy(ret + num, swapper_pg_dir + num, num2 * sizeof(pgd_t));

		/* And zero out whatever is left over */
		memzero(ret + num + num2,
			(PTRS_PER_PGD - num - num2) * sizeof(pgd_t));
	}
	return ret;
}
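
/*
 * Resulting layout of a freshly allocated pgd:
 *
 *   [0, num)            user slots + gutter : zeroed
 *   [num, num + num2)   kernel vmalloc      : copied from swapper_pg_dir
 *   [num + num2, end)   remainder           : zeroed
 */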

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_pages((unsigned long)pgd, __get_order_pgd());
}

/*
 * With software-only page-tables, the addr split for traversal is tweakable
 * and that directly governs how big the tables are at each level.
 * Further, the MMU page size is configurable.
 * Thus we need to programmatically assert the size constraint.
 * All of this is const math, allowing gcc to do constant folding/propagation.
 */

static inline int __get_order_pte(void)
{
	return get_order(PTRS_PER_PTE * sizeof(pte_t));
}
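
/*
 * Worked example, again assuming the 8K page / [11:8:13] split (an
 * assumption, not the only possible config): PTRS_PER_PTE = 2^8 = 256
 * and sizeof(pte_t) = 4, so a pte table is only 1K - get_order() still
 * returns 0, i.e. one 8K page frame could in principle hold several
 * such tables (the very point of the April 2010 note above).
 */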

/*
 * Kernel pte tables: plain zeroed pages, with no pgtable ctor accounting
 * (that is only done for user tables in pte_alloc_one() below)
 */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	pte_t *pte;

	pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 __get_order_pte());

	return pte;
}

static inline pgtable_t
pte_alloc_one(struct mm_struct *mm)
{
	pgtable_t pte_pg;
	struct page *page;

	pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte());
	if (!pte_pg)
		return 0;
	memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
	page = virt_to_page(pte_pg);
	/* ctor does the PageTables accounting (see Nov 2010 note above) */
	if (!pgtable_pte_page_ctor(page)) {
		/* free the same order we allocated, not just one page */
		__free_pages(page, __get_order_pte());
		return 0;
	}

	return pte_pg;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_pages((unsigned long)pte, __get_order_pte()); /* takes kernel vaddr */
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
{
	/* dtor undoes the ctor's PageTables accounting (June 2011 note) */
	pgtable_pte_page_dtor(virt_to_page(ptep));
	free_pages((unsigned long)ptep, __get_order_pte());
}

/* Called from the generic TLB-gather code when tearing down a pte table */
#define __pte_free_tlb(tlb, pte, addr)  pte_free((tlb)->mm, pte)

#endif /* _ASM_ARC_PGALLOC_H */