xref: /openbmc/linux/arch/csky/include/asm/pgalloc.h (revision 75020f2d)
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_PGALLOC_H
#define __ASM_CSKY_PGALLOC_H

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/sched.h>

#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
#include <asm-generic/pgalloc.h>

/* Install the pte table's physical address into a pmd entry (kernel table). */
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
					pte_t *pte)
{
	set_pmd(pmd, __pmd(__pa(pte)));
}

/* Same as above, but the pte table is handed in as a struct page. */
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
					pgtable_t pte)
{
	set_pmd(pmd, __pmd(__pa(page_address(pte))));
}

#define pmd_pgtable(pmd) pmd_page(pmd)

extern void pgd_init(unsigned long *p);

/*
 * Allocate one page of kernel ptes and pre-set every entry with
 * _PAGE_GLOBAL, so kernel translations are treated as global rather
 * than being tagged with a per-process ASID.
 */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	pte_t *pte;
	unsigned long i;

	pte = (pte_t *) __get_free_page(GFP_KERNEL);
	if (!pte)
		return NULL;

	for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++)
		(pte + i)->pte_low = _PAGE_GLOBAL;

	return pte;
}

/*
 * Allocate a new pgd: initialize the user entries via pgd_init() and
 * copy the kernel entries from init_mm's pgd.
 */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret;
	pgd_t *init;

	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
	if (ret) {
		init = pgd_offset(&init_mm, 0UL);
		pgd_init((unsigned long *)ret);
		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
		/* prevent out-of-order execution */
		smp_mb();
#ifdef CONFIG_CPU_NEED_TLBSYNC
		dcache_wb_range((unsigned int)ret,
				(unsigned int)(ret + PTRS_PER_PGD));
#endif
	}

	return ret;
}

#define __pte_free_tlb(tlb, pte, address)		\
do {							\
	pgtable_pte_page_dtor(pte);			\
	tlb_remove_page(tlb, pte);			\
} while (0)

extern void pagetable_init(void);
extern void mmu_init(unsigned long min_pfn, unsigned long max_pfn);
extern void pre_trap_init(void);

#endif /* __ASM_CSKY_PGALLOC_H */
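
/*
 * Illustrative sketch, not part of the header above: roughly how these
 * helpers combine when a kernel mapping needs a new pte table. The function
 * name example_kernel_pte_populate() and the use of init_mm are assumptions
 * for illustration only. The walk uses the generic pgd/p4d/pud/pmd
 * accessors, which collapse onto the pgd entry on csky's folded upper
 * levels.
 */
static void example_kernel_pte_populate(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);	/* kernel pgd slot for addr */
	p4d_t *p4d = p4d_offset(pgd, addr);	/* folded level on csky */
	pud_t *pud = pud_offset(p4d, addr);	/* folded level on csky */
	pmd_t *pmd = pmd_offset(pud, addr);

	if (pmd_none(*pmd)) {
		/* One page of ptes, every entry pre-set with _PAGE_GLOBAL. */
		pte_t *pte = pte_alloc_one_kernel(&init_mm);

		if (pte)
			/* Point the pmd at the new table's physical address. */
			pmd_populate_kernel(&init_mm, pmd, pte);
	}
}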