/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Page table support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <asm/mem-layout.h>
#include <asm/atomic.h>

#include <asm-generic/pgalloc.h>

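/*
 * Generation counter for the kernel portion of the page tables.
 * It is bumped by pmd_populate_kernel() whenever a kernel mapping is
 * added; each mm records in mm->context.generation the generation it
 * was last synced against (see pgd_alloc() and the comment above
 * pmd_populate_kernel() below).
 */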
extern unsigned long long kmap_generation;

/*
 * Page table creation interface
 */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!pgd)
		return NULL;

	/*
	 * There may be better ways to do this, but to ensure
	 * that new address spaces always contain the kernel
	 * base mapping, and to ensure that the user area is
	 * initially marked invalid, initialize the new map
	 * with a copy of the kernel's persistent map.
	 */

	memcpy(pgd, swapper_pg_dir, PTRS_PER_PGD * sizeof(pgd_t));
	mm->context.generation = kmap_generation;

	/* Physical version is what is passed to virtual machine on switch */
	mm->context.ptbase = __pa(pgd);

	return pgd;
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte)
{
	/*
	 * Conveniently, zero in 3 LSB means indirect 4K page table.
	 * Not so convenient when you're trying to vary the page size.
	 */
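	/* pfn << PAGE_SHIFT gives the physical address of the PTE page. */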
	set_pmd(pmd, __pmd(((unsigned long)page_to_pfn(pte) << PAGE_SHIFT) |
		HEXAGON_L1_PTE_SIZE));
}

/*
 * Other architectures seem to have ways of making all processes
 * share the same pmds for their kernel mappings, but the v0.3
 * Hexagon VM spec has a "monolithic" L1 table for user and kernel
 * segments. We track "generations" of the kernel map to minimize
 * overhead, and update the "slave" copies of the kernel mappings
 * as part of switch_mm. However, we still need to update the
 * kernel map of the active thread that is calling pmd_populate_kernel...
 */
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	extern spinlock_t kmap_gen_lock;
	pmd_t *ppmd;
	int pmdindex;

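	/*
	 * Bump the global kernel-map generation under the lock and
	 * record it in both the mm being populated and the currently
	 * active mm; switch_mm uses the generation to decide when a
	 * task's copy of the kernel mappings needs refreshing.
	 */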
	spin_lock(&kmap_gen_lock);
	kmap_generation++;
	mm->context.generation = kmap_generation;
	current->active_mm->context.generation = kmap_generation;
	spin_unlock(&kmap_gen_lock);

	set_pmd(pmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));

	/*
	 * Now the "slave" copy of the current thread.
	 * This is pointer arithmetic, not byte addresses!
	 */
	pmdindex = (pgd_t *)pmd - mm->pgd;
	ppmd = (pmd_t *)current->active_mm->pgd + pmdindex;
	set_pmd(ppmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));
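	/* Track the highest L1 index that holds a kernel mapping. */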
	if (pmdindex > max_kernel_seg)
		max_kernel_seg = pmdindex;
}

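/*
 * Free a user PTE page through the mmu_gather: run the page-table
 * destructor, then queue the page so it is released only after the
 * TLB has been flushed.
 */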
#define __pte_free_tlb(tlb, pte, addr)				\
do {								\
	pagetable_pte_dtor((page_ptdesc(pte)));			\
	tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte)));	\
} while (0)

#endif