xref: /openbmc/linux/arch/sh/mm/pgtable.c (revision b24413180f5600bcb3bb70fbed5cf186b60864bd)
1*b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
22a5eaccaSMatt Fleming #include <linux/mm.h>
35a0e3ad6STejun Heo #include <linux/slab.h>
42a5eaccaSMatt Fleming 
/*
 * GFP flags for page table allocations: zeroed pages, allocation may
 * sleep.  Parenthesized so the macro expands safely inside any
 * expression (the unparenthesized form breaks next to higher-precedence
 * operators, e.g. `PGALLOC_GFP & mask`).
 */
#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
62a5eaccaSMatt Fleming 
72a5eaccaSMatt Fleming static struct kmem_cache *pgd_cachep;
8782bb5a5SPaul Mundt #if PAGETABLE_LEVELS > 2
92a5eaccaSMatt Fleming static struct kmem_cache *pmd_cachep;
102a5eaccaSMatt Fleming #endif
112a5eaccaSMatt Fleming 
122a5eaccaSMatt Fleming void pgd_ctor(void *x)
132a5eaccaSMatt Fleming {
142a5eaccaSMatt Fleming 	pgd_t *pgd = x;
152a5eaccaSMatt Fleming 
162a5eaccaSMatt Fleming 	memcpy(pgd + USER_PTRS_PER_PGD,
172a5eaccaSMatt Fleming 	       swapper_pg_dir + USER_PTRS_PER_PGD,
182a5eaccaSMatt Fleming 	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
192a5eaccaSMatt Fleming }
202a5eaccaSMatt Fleming 
212a5eaccaSMatt Fleming void pgtable_cache_init(void)
222a5eaccaSMatt Fleming {
232a5eaccaSMatt Fleming 	pgd_cachep = kmem_cache_create("pgd_cache",
242a5eaccaSMatt Fleming 				       PTRS_PER_PGD * (1<<PTE_MAGNITUDE),
252a5eaccaSMatt Fleming 				       PAGE_SIZE, SLAB_PANIC, pgd_ctor);
26782bb5a5SPaul Mundt #if PAGETABLE_LEVELS > 2
272a5eaccaSMatt Fleming 	pmd_cachep = kmem_cache_create("pmd_cache",
282a5eaccaSMatt Fleming 				       PTRS_PER_PMD * (1<<PTE_MAGNITUDE),
292a5eaccaSMatt Fleming 				       PAGE_SIZE, SLAB_PANIC, NULL);
302a5eaccaSMatt Fleming #endif
312a5eaccaSMatt Fleming }
322a5eaccaSMatt Fleming 
332a5eaccaSMatt Fleming pgd_t *pgd_alloc(struct mm_struct *mm)
342a5eaccaSMatt Fleming {
352a5eaccaSMatt Fleming 	return kmem_cache_alloc(pgd_cachep, PGALLOC_GFP);
362a5eaccaSMatt Fleming }
372a5eaccaSMatt Fleming 
382a5eaccaSMatt Fleming void pgd_free(struct mm_struct *mm, pgd_t *pgd)
392a5eaccaSMatt Fleming {
402a5eaccaSMatt Fleming 	kmem_cache_free(pgd_cachep, pgd);
412a5eaccaSMatt Fleming }
422a5eaccaSMatt Fleming 
43782bb5a5SPaul Mundt #if PAGETABLE_LEVELS > 2
442a5eaccaSMatt Fleming void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
452a5eaccaSMatt Fleming {
462a5eaccaSMatt Fleming 	set_pud(pud, __pud((unsigned long)pmd));
472a5eaccaSMatt Fleming }
482a5eaccaSMatt Fleming 
492a5eaccaSMatt Fleming pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
502a5eaccaSMatt Fleming {
512a5eaccaSMatt Fleming 	return kmem_cache_alloc(pmd_cachep, PGALLOC_GFP);
522a5eaccaSMatt Fleming }
532a5eaccaSMatt Fleming 
542a5eaccaSMatt Fleming void pmd_free(struct mm_struct *mm, pmd_t *pmd)
552a5eaccaSMatt Fleming {
562a5eaccaSMatt Fleming 	kmem_cache_free(pmd_cachep, pmd);
572a5eaccaSMatt Fleming }
58782bb5a5SPaul Mundt #endif /* PAGETABLE_LEVELS > 2 */
59