// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/slab.h>

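/* Slab caches backing pgd (and, on three-level configurations, pmd) pages. */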
static struct kmem_cache *pgd_cachep;
#if PAGETABLE_LEVELS > 2
static struct kmem_cache *pmd_cachep;
#endif

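/*
 * Slab constructor: zero the user half of a freshly allocated pgd and
 * copy the kernel mappings in from swapper_pg_dir.
 */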
void pgd_ctor(void *x)
{
	pgd_t *pgd = x;

	memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
	memcpy(pgd + USER_PTRS_PER_PGD,
	       swapper_pg_dir + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}

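/* Create the slab caches that the page table allocators below draw from. */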
void pgtable_cache_init(void)
{
	pgd_cachep = kmem_cache_create("pgd_cache",
				       PTRS_PER_PGD * (1 << PTE_MAGNITUDE),
				       PAGE_SIZE, SLAB_PANIC, pgd_ctor);
#if PAGETABLE_LEVELS > 2
	pmd_cachep = kmem_cache_create("pmd_cache",
				       PTRS_PER_PMD * (1 << PTE_MAGNITUDE),
				       PAGE_SIZE, SLAB_PANIC, NULL);
#endif
}

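/* pgds come from pgd_cachep, already initialized by pgd_ctor() above. */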
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(pgd_cachep, GFP_KERNEL);
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(pgd_cachep, pgd);
}

#if PAGETABLE_LEVELS > 2
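/* Point a pud entry at the given pmd page. */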
void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud((unsigned long)pmd));
}

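/* Hand out a zeroed pmd page from the cache. */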
pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	return kmem_cache_alloc(pmd_cachep, GFP_KERNEL | __GFP_ZERO);
}

void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(pmd_cachep, pmd);
}
#endif /* PAGETABLE_LEVELS > 2 */