/*
 * Page table support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <asm/mem-layout.h>
#include <asm/atomic.h>

#define check_pgt_cache() do {} while (0)

/* Bumped on every kernel-mapping update; see pmd_populate_kernel below. */
extern unsigned long long kmap_generation;

/*
 * Page table creation interface
 */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!pgd)
		return NULL;

	/*
	 * There may be better ways to do this, but to ensure
	 * that new address spaces always contain the kernel
	 * base mapping, and to ensure that the user area is
	 * initially marked invalid, initialize the new map
	 * with a copy of the kernel's persistent map.
	 */

	memcpy(pgd, swapper_pg_dir, PTRS_PER_PGD*sizeof(pgd_t));
	mm->context.generation = kmap_generation;

	/* Physical version is what is passed to virtual machine on switch */
	mm->context.ptbase = __pa(pgd);

	return pgd;
}
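
/*
 * For orientation only: a rough sketch of the generic call path that
 * lands here on fork() (names from core kernel code, not this file):
 *
 *	copy_mm() -> dup_mm() -> mm_init() -> mm_alloc_pgd()
 *	  -> mm->pgd = pgd_alloc(mm);
 *
 * By the time the new mm is first switched to, context.ptbase already
 * holds the physical page table address that the VM expects.
 */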

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long) pgd);
}

/*
 * Allocate a user PTE page.  pgtable_page_ctor() initializes the page
 * for page-table use (split ptlock setup and accounting); it can fail,
 * in which case the page must be given back.
 */
static inline struct page *pte_alloc_one(struct mm_struct *mm,
					 unsigned long address)
{
	struct page *pte;

	pte = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

/* _kernel variant gets to use a different allocator */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO;

	return (pte_t *) __get_free_page(flags);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte)
{
	/*
	 * Conveniently, zero in 3 LSB means indirect 4K page table.
	 * Not so convenient when you're trying to vary the page size.
	 */
	set_pmd(pmd, __pmd(((unsigned long)page_to_pfn(pte) << PAGE_SHIFT) |
		HEXAGON_L1_PTE_SIZE));
}
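
/*
 * Worked example of the encoding above, with illustrative numbers:
 * assuming PAGE_SHIFT == 12 and a PTE page at pfn 0x12345,
 *
 *	pmd_val == (0x12345 << 12) | HEXAGON_L1_PTE_SIZE
 *	        == 0x12345000 (plus size/type bits, zero for 4K)
 *
 * The physical address is page-aligned, so the low bits are free to
 * carry the L1 entry's size/type field.
 */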

/*
 * Other architectures seem to have ways of making all processes
 * share the same pmd's for their kernel mappings, but the v0.3
 * Hexagon VM spec has a "monolithic" L1 table for user and kernel
 * segments.  We track "generations" of the kernel map to minimize
 * overhead, and update the "slave" copies of the kernel mappings
 * as part of switch_mm.  However, we still need to update the
 * kernel map of the active thread who's calling pmd_populate_kernel...
 */
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	extern spinlock_t kmap_gen_lock;
	pmd_t *ppmd;
	int pmdindex;

	spin_lock(&kmap_gen_lock);
	kmap_generation++;
	mm->context.generation = kmap_generation;
	current->active_mm->context.generation = kmap_generation;
	spin_unlock(&kmap_gen_lock);

	set_pmd(pmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));

	/*
	 * Now the "slave" copy of the current thread.
	 * This is pointer arithmetic, not byte addresses!
	 */
	pmdindex = (pgd_t *)pmd - mm->pgd;
	ppmd = (pmd_t *)current->active_mm->pgd + pmdindex;
	set_pmd(ppmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));
	if (pmdindex > max_kernel_seg)
		max_kernel_seg = pmdindex;
}
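
/*
 * For orientation only: a rough sketch of the consumer side of the
 * generation scheme, along the lines of this arch's switch_mm()
 * (symbol names such as MIN_KERNEL_SEG are assumptions here, not
 * definitions from this header).  An mm with a stale generation
 * re-copies the kernel segments before its table base is handed to
 * the VM, roughly:
 *
 *	if (next->context.generation < prev->context.generation) {
 *		for (l1 = MIN_KERNEL_SEG; l1 <= max_kernel_seg; l1++)
 *			next->pgd[l1] = init_mm.pgd[l1];
 *		next->context.generation = prev->context.generation;
 *	}
 */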

/*
 * Undo pgtable_page_ctor() here, too, before handing the page to the
 * mmu_gather batch for freeing.
 */
#define __pte_free_tlb(tlb, pte, addr)		\
do {						\
	pgtable_page_dtor((pte));		\
	tlb_remove_page((tlb), (pte));		\
} while (0)

#endif