/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Page table support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <asm/mem-layout.h>
#include <asm/atomic.h>

#include <asm-generic/pgalloc.h>	/* for pte_{alloc,free}_one */

#define check_pgt_cache() do {} while (0)

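/*
 * Generation count of the kernel portion of the page tables.
 * pmd_populate_kernel() below bumps it whenever a kernel mapping is
 * added, and switch_mm compares it against a process's own copy to
 * decide when that process's "slave" kernel map needs refreshing.
 */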
extern unsigned long long kmap_generation;

/*
 * Page table creation interface
 */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!pgd)
		return NULL;

	/*
	 * There may be better ways to do this, but to ensure
	 * that new address spaces always contain the kernel
	 * base mapping, and to ensure that the user area is
	 * initially marked invalid, initialize the new map
	 * with a copy of the kernel's persistent map.
	 */

	memcpy(pgd, swapper_pg_dir, PTRS_PER_PGD * sizeof(pgd_t));
	mm->context.generation = kmap_generation;

	/* Physical version is what is passed to the virtual machine on switch */
	mm->context.ptbase = __pa(pgd);

	return pgd;
}
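
/*
 * For context: pgd_alloc() is not called from arch code; the generic
 * fork path reaches it through mm_alloc_pgd() in kernel/fork.c,
 * roughly as sketched below (simplified, not a verbatim copy):
 *
 *	static inline int mm_alloc_pgd(struct mm_struct *mm)
 *	{
 *		mm->pgd = pgd_alloc(mm);
 *		if (unlikely(!mm->pgd))
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 * which is why returning NULL on allocation failure is the expected
 * contract here.
 */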

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte)
{
	/*
	 * Conveniently, zero in the 3 LSBs means an indirect 4K page table.
	 * Not so convenient when you're trying to vary the page size.
	 */
	set_pmd(pmd, __pmd(((unsigned long)page_to_pfn(pte) << PAGE_SHIFT) |
		HEXAGON_L1_PTE_SIZE));
}
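
/*
 * A worked example of the encoding above, with a hypothetical PFN and
 * PAGE_SHIFT == 12 assumed: for a pte page at pfn 0x4321,
 *
 *	(0x4321UL << 12) | HEXAGON_L1_PTE_SIZE == 0x04321000 | size bits
 *
 * i.e. the L1 entry holds the physical address of the 4K table, with
 * the page-size code in the low bits (all zero == indirect 4K table,
 * per the comment above).
 */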

/*
 * Other architectures seem to have ways of making all processes
 * share the same PMDs for their kernel mappings, but the v0.3
 * Hexagon VM spec has a "monolithic" L1 table for user and kernel
 * segments.  We track "generations" of the kernel map to minimize
 * overhead, and update the "slave" copies of the kernel mappings
 * as part of switch_mm.  However, we still need to update the
 * kernel map of the active thread that is calling
 * pmd_populate_kernel...
 */
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	extern spinlock_t kmap_gen_lock;
	pmd_t *ppmd;
	int pmdindex;

	spin_lock(&kmap_gen_lock);
	kmap_generation++;
	mm->context.generation = kmap_generation;
	current->active_mm->context.generation = kmap_generation;
	spin_unlock(&kmap_gen_lock);

	set_pmd(pmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));

	/*
	 * Now the "slave" copy of the current thread.
	 * This is pointer arithmetic, not byte addresses!
	 */
	pmdindex = (pgd_t *)pmd - mm->pgd;
	ppmd = (pmd_t *)current->active_mm->pgd + pmdindex;
	set_pmd(ppmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));
	if (pmdindex > max_kernel_seg)
		max_kernel_seg = pmdindex;
}
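
/*
 * The consumer of the generation scheme above is switch_mm() (see
 * asm/mmu_context.h).  A simplified sketch of that side, assuming the
 * refresh loop looks roughly like the real one (MIN_KERNEL_SEG being
 * the first kernel L1 index; not a verbatim copy):
 *
 *	if (next->context.generation < prev->context.generation) {
 *		for (l1 = MIN_KERNEL_SEG; l1 <= max_kernel_seg; l1++)
 *			next->pgd[l1] = prev->pgd[l1];
 *		next->context.generation = prev->context.generation;
 *	}
 *
 * max_kernel_seg is maintained above precisely to bound that copy, and
 * next->context.ptbase (set in pgd_alloc()) is what finally gets handed
 * to the virtual machine on the switch.
 */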

#define __pte_free_tlb(tlb, pte, addr)		\
do {						\
	pgtable_page_dtor((pte));		\
	tlb_remove_page((tlb), (pte));		\
} while (0)
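
/*
 * Call-path note: generic mm code (free_pte_range() in mm/memory.c)
 * reaches this through pte_free_tlb(), so the pgtable destructor runs
 * before the page is queued on the mmu_gather batch and freed after
 * the TLB flush.
 */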

#endif	/* _ASM_PGALLOC_H */