/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_PGALLOC_H
#define __ASM_CSKY_PGALLOC_H

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/sched.h>

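/*
 * Wire a pmd entry to a pte table: the kernel variant takes the table's
 * virtual address, the user variant the backing struct page. Both store
 * the table's physical address in the pmd.
 */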
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
					pte_t *pte)
{
	set_pmd(pmd, __pmd(__pa(pte)));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
					pgtable_t pte)
{
	set_pmd(pmd, __pmd(__pa(page_address(pte))));
}

#define pmd_pgtable(pmd) pmd_page(pmd)

extern void pgd_init(unsigned long *p);

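/*
 * Allocate and initialize a pte table. Tables covering kernel space
 * (bit 31 of the address set) are pre-filled with the invalid pattern
 * 0x1 instead of zero, which appears to keep the hardware global bit
 * set in empty kernel entries, matching what pte_clear() does for
 * kernel addresses; user tables are simply cleared.
 */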
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					unsigned long address)
{
	pte_t *pte;
	unsigned long *kaddr, i;

	pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL,
					 PTE_ORDER);
	if (!pte)
		return NULL;
	kaddr = (unsigned long *)pte;
	if (address & 0x80000000)
		for (i = 0; i < (PAGE_SIZE/4); i++)
			*(kaddr + i) = 0x1;
	else
		clear_page(kaddr);

	return pte;
}

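/*
 * Page-backed variant of the above for user page tables. kmap_atomic()
 * is used because the page may live in highmem, and pgtable_page_ctor()
 * registers it as a page-table page (split ptlock accounting).
 */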
static inline struct page *pte_alloc_one(struct mm_struct *mm,
						unsigned long address)
{
	struct page *pte;
	unsigned long *kaddr, i;

	pte = alloc_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, PTE_ORDER);
	if (pte) {
		kaddr = kmap_atomic(pte);
		if (address & 0x80000000) {
			for (i = 0; i < (PAGE_SIZE/4); i++)
				*(kaddr + i) = 0x1;
		} else {
			clear_page(kaddr);
		}
		kunmap_atomic(kaddr);
		if (!pgtable_page_ctor(pte)) {
			__free_pages(pte, PTE_ORDER);
			return NULL;
		}
	}
	return pte;
}

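/*
 * Free routines mirror the allocators: the kernel variant frees raw
 * pages, the user variant runs the pgtable destructor first.
 */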
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_pages((unsigned long)pte, PTE_ORDER);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_pages(pte, PTE_ORDER);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ORDER);
}

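/*
 * Allocate a fresh pgd: user entries are initialized via pgd_init() and
 * the kernel portion is copied from init_mm, so every process shares the
 * kernel mappings. The barrier (and, on CPUs that need it, the dcache
 * writeback) makes sure the table has reached memory before the TLB
 * refill path reads it.
 */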
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret;
	pgd_t *init;

	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
	if (ret) {
		init = pgd_offset(&init_mm, 0UL);
		pgd_init((unsigned long *)ret);
		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
		/* prevent out-of-order execution */
		smp_mb();
#ifdef CONFIG_CPU_NEED_TLBSYNC
		dcache_wb_range((unsigned int)ret,
				(unsigned int)(ret + PTRS_PER_PGD));
#endif
	}

	return ret;
}

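/*
 * Tear down a pte page under an mmu_gather: run the pgtable destructor
 * before handing the page to tlb_remove_page() for deferred freeing.
 */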
#define __pte_free_tlb(tlb, pte, address)		\
do {							\
	pgtable_page_dtor(pte);				\
	tlb_remove_page(tlb, pte);			\
} while (0)

#define check_pgt_cache()	do {} while (0)

extern void pagetable_init(void);
extern void pre_mmu_init(void);
extern void pre_trap_init(void);

#endif /* __ASM_CSKY_PGALLOC_H */