xref: /openbmc/linux/arch/arc/include/asm/pgalloc.h (revision 8747ff70)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * vineetg: June 2011
 *  -"/proc/meminfo | grep PageTables" kept on increasing
 *   Recently added pgtable dtor was not getting called.
 *
 * vineetg: May 2011
 *  -Variable pg-sz means that Page Tables could be variable sized themselves
 *    So calculate it based on addr traversal split [pgd-bits:pte-bits:xxx]
 *  -Page Table size capped to max 1 page to save memory - hence verified.
 *  -Since these deal with constants, gcc compile-time optimizes them.
 *
 * vineetg: Nov 2010
 *  -Added pgtable ctor/dtor used for pgtable mem accounting
 *
 * vineetg: April 2010
 *  -Switched pgtable_t from being struct page * to unsigned long
 *      =Needed so that Page Table allocator (pte_alloc_one) is not forced to
 *       deal with struct page. That way in future we can make it allocate
 *       multiple PG Tbls in one Page Frame
 *      =sweet side effect is avoiding calls to ugly page_address() from the
 *       pg-tlb allocator sub-sys (pte_alloc_one, pte_free, pmd_populate)
 *
 *  Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_PGALLOC_H
#define _ASM_ARC_PGALLOC_H

#include <linux/mm.h>
#include <linux/log2.h>
#include <asm-generic/pgalloc.h>

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
	/*
	 * The cast to long below is OK in 32-bit PAE40 regime with long long pte
	 * Despite "wider" pte, the pte table needs to be in non-PAE low memory
	 * as all higher levels can only hold long pointers.
	 *
	 * The cast itself is needed given simplistic definition of set_pmd()
	 */
	set_pmd(pmd, __pmd((unsigned long)pte));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte_page)
{
	set_pmd(pmd, __pmd((unsigned long)page_address(pte_page)));
}
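
/*
 * Note the split above: pmd_populate_kernel() is handed the pte table as a
 * kernel virtual address (pte_t *), whereas pmd_populate() is handed a
 * pgtable_t - a struct page * in this revision - hence the page_address()
 * conversion. Either way, what actually lands in the pmd is the plain
 * virtual address of the pte table.
 */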
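/*
 * pgd_alloc() is called by the generic mm code whenever a new address space
 * is created. It cannot simply return a zeroed page: the PGD slots covering
 * the kernel/vmalloc region must be pre-populated from swapper_pg_dir so
 * that kernel mappings are visible in every mm.
 */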
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret = (pgd_t *) __get_free_page(GFP_KERNEL);

	if (ret) {
		int num, num2;

		/* slots covering user space plus the user<->kernel gutter: start empty */
		num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
		memzero(ret, num * sizeof(pgd_t));

		/* slots covering the vmalloc region: copied so kernel mappings are shared */
		num2 = VMALLOC_SIZE / PGDIR_SIZE;
		memcpy(ret + num, swapper_pg_dir + num, num2 * sizeof(pgd_t));

		/* whatever remains: also starts empty */
		memzero(ret + num + num2,
			(PTRS_PER_PGD - num - num2) * sizeof(pgd_t));
	}
	return ret;
}

#if CONFIG_PGTABLE_LEVELS > 3

static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4dp, pud_t *pudp)
{
	set_p4d(p4dp, __p4d((unsigned long)pudp));
}

#define __pud_free_tlb(tlb, pudp, addr)  pud_free((tlb)->mm, pudp)

#endif

#if CONFIG_PGTABLE_LEVELS > 2

static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmdp)
{
	set_pud(pudp, __pud((unsigned long)pmdp));
}

#define __pmd_free_tlb(tlb, pmd, addr)  pmd_free((tlb)->mm, pmd)

#endif
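
/*
 * For 3- and 4-level builds the upper-level populate helpers above follow
 * the same pattern as pmd_populate_kernel(): they store the plain virtual
 * address of the next-level table. The __pud_free_tlb()/__pmd_free_tlb()
 * hooks simply forward to pud_free()/pmd_free() when tables are torn down
 * via the mmu_gather (tlb) path.
 */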
#define __pte_free_tlb(tlb, pte, addr)  pte_free((tlb)->mm, pte)
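
/*
 * The basic table allocators themselves (pte_alloc_one(), pte_free(),
 * pmd_alloc_one(), pud_alloc_one(), pgd_free(), ...) are not defined in
 * this file; they are presumably picked up from <asm-generic/pgalloc.h>,
 * included above. Only pgd_alloc(), the populate helpers and the
 * *_free_tlb() hooks are provided here.
 */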

#endif /* _ASM_ARC_PGALLOC_H */