/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Page table support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <asm/mem-layout.h>
#include <asm/atomic.h>

#define check_pgt_cache() do {} while (0)

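/*
 * Generation counter for the kernel portion of the page tables.  Each
 * mm snapshots this value when it copies the kernel mappings; see
 * pmd_populate_kernel() below for how and when it is bumped.
 */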
extern unsigned long long kmap_generation;

/*
 * Page table creation interface
 */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	/*
	 * There may be better ways to do this, but to ensure
	 * that new address spaces always contain the kernel
	 * base mapping, and to ensure that the user area is
	 * initially marked invalid, initialize the new map
	 * with a copy of the kernel's persistent map.
	 */

	memcpy(pgd, swapper_pg_dir, PTRS_PER_PGD*sizeof(pgd_t));
	mm->context.generation = kmap_generation;

	/* Physical version is what is passed to the virtual machine on switch */
	mm->context.ptbase = __pa(pgd);

	return pgd;
}
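
/*
 * Illustrative sketch (not part of this header): the generic fork path
 * is the usual caller, via mm_alloc_pgd() in kernel/fork.c, roughly:
 *
 *	mm->pgd = pgd_alloc(mm);
 *	if (unlikely(!mm->pgd))
 *		return -ENOMEM;
 *
 * Exact call chains vary between kernel versions.
 */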

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long) pgd);
}

static inline struct page *pte_alloc_one(struct mm_struct *mm)
{
	struct page *pte;

	pte = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}
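
/*
 * Illustrative sketch: generic mm code is the expected caller; in
 * __pte_alloc() (mm/memory.c) the flow is roughly:
 *
 *	pgtable_t new = pte_alloc_one(mm);
 *	if (!new)
 *		return -ENOMEM;
 *	...
 *	pmd_populate(mm, pmd, new);
 *
 * so the ctor/dtor pairing here must match pte_free() below.
 */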

/*
 * The _kernel variant skips pgtable_page_ctor()/pgtable_page_dtor():
 * kernel page tables never take the split page-table lock, so no
 * struct page bookkeeping is needed.
 */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO;

	return (pte_t *) __get_free_page(flags);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte)
{
	/*
	 * Conveniently, zero in the 3 LSBs means an indirect 4K page table.
	 * Not so convenient when you're trying to vary the page size.
	 */
	set_pmd(pmd, __pmd(((unsigned long)page_to_pfn(pte) << PAGE_SHIFT) |
		HEXAGON_L1_PTE_SIZE));
}
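
/*
 * Worked example (PFN value is illustrative, not from the VM spec):
 * with 4K pages (PAGE_SHIFT == 12), a pte page at PFN 0x12345 yields
 *
 *	(0x12345UL << PAGE_SHIFT) | HEXAGON_L1_PTE_SIZE
 *	 == 0x12345000 | HEXAGON_L1_PTE_SIZE
 *
 * i.e. the physical address of the L2 table, with the size/type
 * encoding confined to the low bits.
 */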

/*
 * Other architectures seem to have ways of making all processes
 * share the same pmds for their kernel mappings, but the v0.3
 * Hexagon VM spec has a "monolithic" L1 table for user and kernel
 * segments.  We track "generations" of the kernel map to minimize
 * overhead, and update the "slave" copies of the kernel mappings
 * as part of switch_mm.  However, we still need to update the
 * kernel map of the active thread that is calling pmd_populate_kernel().
 */
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	extern spinlock_t kmap_gen_lock;
	pmd_t *ppmd;
	int pmdindex;

	spin_lock(&kmap_gen_lock);
	kmap_generation++;
	mm->context.generation = kmap_generation;
	current->active_mm->context.generation = kmap_generation;
	spin_unlock(&kmap_gen_lock);

	set_pmd(pmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));

	/*
	 * Now the "slave" copy of the current thread.
	 * This is pointer arithmetic, not byte addresses!
	 */
	pmdindex = (pgd_t *)pmd - mm->pgd;
	ppmd = (pmd_t *)current->active_mm->pgd + pmdindex;
	set_pmd(ppmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));
	if (pmdindex > max_kernel_seg)
		max_kernel_seg = pmdindex;
}
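
/*
 * Illustrative sketch of the consumer side (see the comment above):
 * the arch switch_mm() compares generations and refreshes a stale
 * "slave" copy of the kernel segments, roughly:
 *
 *	if (next->context.generation < prev->context.generation) {
 *		for (l1 = MIN_KERNEL_SEG; l1 <= max_kernel_seg; l1++)
 *			next->pgd[l1] = init_mm.pgd[l1];
 *		next->context.generation = prev->context.generation;
 *	}
 *
 * MIN_KERNEL_SEG and the exact copy live in the mmu_context code, not
 * here; this only sketches the generation check.
 */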
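/*
 * Teardown counterpart of pte_alloc_one(): undo the ctor, then hand the
 * page to the generic mmu_gather code so the TLB is flushed before the
 * page can be reused.
 */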
#define __pte_free_tlb(tlb, pte, addr)		\
do {						\
	pgtable_page_dtor((pte));		\
	tlb_remove_page((tlb), (pte));		\
} while (0)

#endif	/* _ASM_PGALLOC_H */