// SPDX-License-Identifier: GPL-2.0

/*
 * Handling page tables through page fragments
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

void pte_frag_destroy(void *pte_frag)
{
	int count;
	struct page *page;

	page = virt_to_page(pte_frag);
	/*
	 * The low bits of the cursor record how many of the page's
	 * PTE_FRAG_NR fragments were handed out; drop the pending
	 * references still held by the fragments that were never used.
	 */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
		pgtable_pte_page_dtor(page);
		__free_page(page);
	}
}

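/*
 * Worked example for the teardown arithmetic above (values are
 * illustrative, assuming 64K pages carved into 4K fragments, i.e.
 * PTE_FRAG_SIZE_SHIFT == 12 and PTE_FRAG_NR == 16): a cached cursor
 * at page offset 0x3000 means 0x3000 >> 12 == 3 fragments were
 * already handed out, so the remaining 16 - 3 == 13 references for
 * never-used fragments are dropped in a single atomic_sub_and_test();
 * the page itself is freed only once no handed-out fragment is live.
 */
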
static pte_t *get_pte_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	/* With a single fragment per page there is nothing to cache */
	if (PTE_FRAG_NR == 1)
		return NULL;

	spin_lock(&mm->page_table_lock);
	ret = pte_frag_get(&mm->context);
	if (ret) {
		/* Advance the cursor to the next free fragment */
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have handed out every fragment of this page,
		 * mark the cached PTE page NULL.
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		pte_frag_set(&mm->context, pte_frag);
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

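/*
 * Cursor sketch for get_pte_from_cache() (illustrative offsets, again
 * assuming 4K fragments in a 64K page): a cached value at page offset
 * 0xe000 is handed to the caller and the cursor advances to 0xf000;
 * on the next call 0xf000 is handed out and the advanced cursor's
 * in-page offset ((unsigned long)pte_frag & ~PAGE_MASK) wraps to 0,
 * so the cache is set to NULL and the following request falls back
 * to __alloc_for_ptecache().
 */
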
static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page;

	if (!kernel) {
		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
		if (!page)
			return NULL;
		if (!pgtable_pte_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
	} else {
		page = alloc_page(PGALLOC_GFP);
		if (!page)
			return NULL;
	}

	atomic_set(&page->pt_frag_refcount, 1);

	ret = page_address(page);
	/*
	 * If we support only one fragment just return the
	 * allocated page.
	 */
	if (PTE_FRAG_NR == 1)
		return ret;
	spin_lock(&mm->page_table_lock);
	/*
	 * If nobody repopulated the per-mm cache while we were
	 * allocating, publish the rest of this page for later
	 * requests; otherwise the page keeps its single-fragment
	 * reference count and serves only this allocation.
	 */
	if (likely(!pte_frag_get(&mm->context))) {
		atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
		pte_frag_set(&mm->context, ret + PTE_FRAG_SIZE);
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

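/*
 * Refcount sketch for the slow path above (illustrative): the new
 * page starts with pt_frag_refcount == 1, covering the fragment
 * returned to the caller.  If this thread wins the race to an empty
 * per-mm cache, the count is raised to PTE_FRAG_NR and the remaining
 * fragments are published through pte_frag_set(); if another thread
 * repopulated the cache first, the count stays at 1 and the page
 * serves this single allocation only.
 */
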
pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel)
{
	pte_t *pte;

	/* Fast path: reuse a fragment left over in the per-mm cache */
	pte = get_pte_from_cache(mm);
	if (pte)
		return pte;

	/* Slow path: allocate a fresh page and carve it into fragments */
	return __alloc_for_ptecache(mm, kernel);
}

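/*
 * Usage sketch (for orientation only; the thin wrappers live in
 * arch/powerpc/include/asm/pgalloc.h):
 *
 *	pte = pte_fragment_alloc(mm, 0);	user table, GFP-accounted
 *	pte = pte_fragment_alloc(mm, 1);	kernel table
 *
 * pte_alloc_one() and pte_alloc_one_kernel() are the usual entry
 * points, and pte_fragment_free() is the matching release path.
 */
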
/* Final teardown of a user PTE page, run directly or as an RCU callback */
static void pte_free_now(struct rcu_head *head)
{
	struct page *page;

	page = container_of(head, struct page, rcu_head);
	pgtable_pte_page_dtor(page);
	__free_page(page);
}

void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);

	/* Early-boot page tables come from reserved memory and are not refcounted */
	if (PageReserved(page))
		return free_reserved_page(page);

	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
		if (kernel)
			__free_page(page);
		else if (TestClearPageActive(page))
			call_rcu(&page->rcu_head, pte_free_now);
		else
			pte_free_now(&page->rcu_head);
	}
}

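/*
 * Reference lifetime sketch (illustrative): a fully published page
 * starts at pt_frag_refcount == PTE_FRAG_NR, and every
 * pte_fragment_free() call drops one reference.  Whoever drops the
 * last reference frees the page: immediately for kernel tables, and
 * through an RCU grace period for user tables whose page was tagged
 * PageActive by pte_free_defer() below.
 */
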
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
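/*
 * Defer the final free of a user PTE page until after an RCU grace
 * period.  A sketch of the intent (illustrative): khugepaged may
 * retract a PTE table while collapsing a range into a huge page;
 * tagging the page with SetPageActive() makes pte_fragment_free()
 * go through call_rcu(), so lockless walkers such as GUP-fast cannot
 * have the table freed out from under them.
 */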
void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
	struct page *page;

	page = virt_to_page(pgtable);
	SetPageActive(page);
	pte_fragment_free((unsigned long *)pgtable, 0);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */