12874c5fdSThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-or-later */
275a9b8a6SAneesh Kumar K.V #ifndef _ASM_POWERPC_PGALLOC_64_H
375a9b8a6SAneesh Kumar K.V #define _ASM_POWERPC_PGALLOC_64_H
/*
 * Page-table allocation helpers for 64-bit powerpc.
 */
675a9b8a6SAneesh Kumar K.V 
775a9b8a6SAneesh Kumar K.V #include <linux/slab.h>
875a9b8a6SAneesh Kumar K.V #include <linux/cpumask.h>
975a9b8a6SAneesh Kumar K.V #include <linux/percpu.h>
1075a9b8a6SAneesh Kumar K.V 
/*
 * Bookkeeping entry for a page backing the virtual memory map (vmemmap),
 * kept on a singly linked list headed by vmemmap_list.
 */
struct vmemmap_backing {
	struct vmemmap_backing *list;	/* next entry in the list */
	unsigned long phys;		/* physical address of the backing page */
	unsigned long virt_addr;	/* vmemmap virtual address it backs */
};
extern struct vmemmap_backing *vmemmap_list;
1775a9b8a6SAneesh Kumar K.V 
/* Install a PUD table into a P4D entry. */
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	p4d_set(p4d, (unsigned long)pud);
}
2275a9b8a6SAneesh Kumar K.V 
/*
 * Allocate a PUD page table from its dedicated kmem cache.
 * GFP flags are derived from @mm via pgtable_gfp_flags().
 */
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
			pgtable_gfp_flags(mm, GFP_KERNEL));
}
2875a9b8a6SAneesh Kumar K.V 
/* Return a PUD page table to the cache it was allocated from. */
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
}
3375a9b8a6SAneesh Kumar K.V 
/* Install a PMD table into a PUD entry. */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, (unsigned long)pmd);
}
3875a9b8a6SAneesh Kumar K.V 
/* Install a kernel PTE page into a PMD entry (takes a bare pte_t *). */
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, (unsigned long)pte);
}
4475a9b8a6SAneesh Kumar K.V 
/* Install a user PTE page into a PMD entry (takes a pgtable_t). */
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte_page)
{
	pmd_set(pmd, (unsigned long)pte_page);
}
5075a9b8a6SAneesh Kumar K.V 
/*
 * Allocate a PMD page table from its dedicated kmem cache.
 * GFP flags are derived from @mm via pgtable_gfp_flags().
 */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
			pgtable_gfp_flags(mm, GFP_KERNEL));
}
5670234676SAneesh Kumar K.V 
/* Return a PMD page table to the cache it was allocated from. */
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
}
6170234676SAneesh Kumar K.V 
/*
 * Free PMD/PUD tables through the TLB gather machinery; the cache index
 * tells pgtable_free_tlb() which kmem cache the table came from.
 * (@addr is unused here but required by the core mm interface.)
 */
#define __pmd_free_tlb(tlb, pmd, addr)		      \
	pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX)
#define __pud_free_tlb(tlb, pud, addr)		      \
	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)
6675a9b8a6SAneesh Kumar K.V 
6775a9b8a6SAneesh Kumar K.V #endif /* _ASM_POWERPC_PGALLOC_64_H */
68