xref: /openbmc/linux/arch/arc/include/asm/pgalloc.h (revision b4ed71f5)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * vineetg: June 2011
 *  -"/proc/meminfo | grep PageTables" kept on increasing
 *   Recently added pgtable dtor was not getting called.
 *
 * vineetg: May 2011
 *  -Variable pg-sz means that Page Tables could be variable sized themselves
 *    So calculate it based on addr traversal split [pgd-bits:pte-bits:xxx]
 *  -Page Table size capped to max 1 page to save memory - hence verified.
 *  -Since these deal with constants, gcc compile-time optimizes them.
 *
 * vineetg: Nov 2010
 *  -Added pgtable ctor/dtor used for pgtable mem accounting
 *
 * vineetg: April 2010
 *  -Switched pgtable_t from being struct page * to unsigned long
 *      =Needed so that Page Table allocator (pte_alloc_one) is not forced
 *       to deal with struct page. That way in future we can make it allocate
 *       multiple PG Tbls in one Page Frame
 *      =sweet side effect is avoiding calls to ugly page_address() from the
 *       pg-tlb allocator sub-sys (pte_alloc_one, pte_free, pmd_populate)
 *
 *  Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_PGALLOC_H
#define _ASM_ARC_PGALLOC_H

#include <linux/mm.h>
#include <linux/log2.h>

/* Point a PMD slot at a kernel PTE table */
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
	pmd_set(pmd, pte);
}

/* Point a PMD slot at a user PTE table (pgtable_t is a kernel vaddr here) */
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep)
{
	pmd_set(pmd, (pte_t *) ptep);
}
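
/*
 * Rough sketch of how these hook together; the real caller is generic mm
 * code (e.g. __pte_alloc()), not anything in this header:
 *
 *	pgtable_t ptab = pte_alloc_one(mm);
 *	pmd_populate(mm, pmd, ptab);
 *
 * i.e. a zeroed PTE table is allocated first, then wired into its PMD slot.
 */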

static inline int __get_order_pgd(void)
{
	return get_order(PTRS_PER_PGD * sizeof(pgd_t));
}

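/*
 * A fresh PGD is stitched together from three regions (illustrative picture
 * only - the actual split is governed by the Kconfig'd user/kernel boundary,
 * gutter and vmalloc size):
 *
 *	[0 .. user slots + gutter)	zeroed (per-process mappings)
 *	[.. + VMALLOC_SIZE/PGDIR_SIZE)	copied from swapper_pg_dir
 *	[.. remainder)			zeroed
 *
 * Copying the vmalloc slots from the master pgdir makes the kernel's
 * vmalloc translations reachable in every new address space.
 */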
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	int num, num2;
	pgd_t *ret = (pgd_t *) __get_free_pages(GFP_KERNEL, __get_order_pgd());

	if (ret) {
		/* zero out the user slots (plus the user/kernel gutter) */
		num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
		memzero(ret, num * sizeof(pgd_t));

		/* share the kernel vmalloc mappings from the master pgdir */
		num2 = VMALLOC_SIZE / PGDIR_SIZE;
		memcpy(ret + num, swapper_pg_dir + num, num2 * sizeof(pgd_t));

		/* zero whatever remains past the vmalloc region */
		memzero(ret + num + num2,
			(PTRS_PER_PGD - num - num2) * sizeof(pgd_t));
	}
	return ret;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_pages((unsigned long)pgd, __get_order_pgd());
}

/*
 * With software-only page-tables, the addr-split for traversal is tweakable
 * and that directly governs how big the tables are at each level.
 * Further, the MMU page size is configurable.
 * Thus we need to programmatically assert the size constraint.
 * All of this is const math, allowing gcc to do constant folding/propagation.
 */
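
/*
 * Worked example of the const math above (numbers illustrative only, since
 * both the MMU page size and the addr split are Kconfig choices): with an
 * 8K page and an 11-bit PTE index, PTRS_PER_PTE * sizeof(pte_t) is
 * 2048 * 4 = 8192 bytes == exactly one page, so __get_order_pte() below
 * folds to get_order(8192) == 0 at compile time.
 */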

static inline int __get_order_pte(void)
{
	return get_order(PTRS_PER_PTE * sizeof(pte_t));
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	pte_t *pte;

	pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 __get_order_pte());

	return pte;
}
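
/*
 * Note the asymmetry with pte_alloc_one() below: kernel PTE tables skip
 * the pgtable_pte_page_ctor()/dtor() pair, which exists for user page
 * tables only (split page-table locks, "PageTables" accounting - the very
 * accounting the June 2011 note at the top refers to).
 */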

static inline pgtable_t
pte_alloc_one(struct mm_struct *mm)
{
	pgtable_t pte_pg;
	struct page *page;

	pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte());
	if (!pte_pg)
		return 0;
	memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
	page = virt_to_page(pte_pg);
	/* ctor inits the split ptlock and accounts the page as a pgtable */
	if (!pgtable_pte_page_ctor(page)) {
		__free_page(page);
		return 0;
	}

	return pte_pg;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	/* free_pages() takes the kernel vaddr (as unsigned long) */
	free_pages((unsigned long)pte, __get_order_pte());
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
{
	/* undo the ctor: release the ptlock, drop the pgtable accounting */
	pgtable_pte_page_dtor(virt_to_page(ptep));
	free_pages((unsigned long)ptep, __get_order_pte());
}

/* batched PTE table free, called from the generic mmu_gather teardown path */
#define __pte_free_tlb(tlb, pte, addr)  pte_free((tlb)->mm, pte)

#define pmd_pgtable(pmd)	((pgtable_t) pmd_page_vaddr(pmd))

#endif /* _ASM_ARC_PGALLOC_H */