xref: /openbmc/linux/arch/arc/include/asm/pgalloc.h (revision d9820ff7)
1d2912cb1SThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-only */
25dda4dc5SVineet Gupta /*
35dda4dc5SVineet Gupta  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
45dda4dc5SVineet Gupta  *
55dda4dc5SVineet Gupta  * vineetg: June 2011
65dda4dc5SVineet Gupta  *  -"/proc/meminfo | grep PageTables" kept on increasing
75dda4dc5SVineet Gupta  *   Recently added pgtable dtor was not getting called.
85dda4dc5SVineet Gupta  *
95dda4dc5SVineet Gupta  * vineetg: May 2011
105dda4dc5SVineet Gupta  *  -Variable pg-sz means that Page Tables could be variable sized themselves
115dda4dc5SVineet Gupta  *    So calculate it based on addr traversal split [pgd-bits:pte-bits:xxx]
125dda4dc5SVineet Gupta  *  -Page Table size capped to max 1 to save memory - hence verified.
135dda4dc5SVineet Gupta  *  -Since these deal with constants, gcc compile-time optimizes them.
145dda4dc5SVineet Gupta  *
155dda4dc5SVineet Gupta  * vineetg: Nov 2010
165dda4dc5SVineet Gupta  *  -Added pgtable ctor/dtor used for pgtable mem accounting
175dda4dc5SVineet Gupta  *
185dda4dc5SVineet Gupta  * vineetg: April 2010
195dda4dc5SVineet Gupta  *  -Switched pgtable_t from being struct page * to unsigned long
205dda4dc5SVineet Gupta  *      =Needed so that Page Table allocator (pte_alloc_one) is not forced to
21e5ed6069SRandy Dunlap  *       deal with struct page. That way in future we can make it allocate
225dda4dc5SVineet Gupta  *       multiple PG Tbls in one Page Frame
235dda4dc5SVineet Gupta  *      =sweet side effect is avoiding calls to ugly page_address( ) from the
 24e5ed6069SRandy Dunlap  *       pg-tlb allocator sub-sys (pte_alloc_one, pte_free, pmd_populate)
255dda4dc5SVineet Gupta  *
265dda4dc5SVineet Gupta  *  Amit Bhor, Sameer Dhavale: Codito Technologies 2004
275dda4dc5SVineet Gupta  */
285dda4dc5SVineet Gupta 
295dda4dc5SVineet Gupta #ifndef _ASM_ARC_PGALLOC_H
305dda4dc5SVineet Gupta #define _ASM_ARC_PGALLOC_H
315dda4dc5SVineet Gupta 
325dda4dc5SVineet Gupta #include <linux/mm.h>
335dda4dc5SVineet Gupta #include <linux/log2.h>
345dda4dc5SVineet Gupta 
355dda4dc5SVineet Gupta static inline void
365dda4dc5SVineet Gupta pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
375dda4dc5SVineet Gupta {
38e93e59acSVineet Gupta 	/*
39e93e59acSVineet Gupta 	 * The cast to long below is OK in 32-bit PAE40 regime with long long pte
40e93e59acSVineet Gupta 	 * Despite "wider" pte, the pte table needs to be in non-PAE low memory
41e93e59acSVineet Gupta 	 * as all higher levels can only hold long pointers.
42e93e59acSVineet Gupta 	 *
43e93e59acSVineet Gupta 	 * The cast itself is needed given simplistic definition of set_pmd()
44e93e59acSVineet Gupta 	 */
45e93e59acSVineet Gupta 	set_pmd(pmd, __pmd((unsigned long)pte));
465dda4dc5SVineet Gupta }
475dda4dc5SVineet Gupta 
48*d9820ff7SVineet Gupta static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte_page)
495dda4dc5SVineet Gupta {
50*d9820ff7SVineet Gupta 	set_pmd(pmd, __pmd((unsigned long)page_address(pte_page)));
515dda4dc5SVineet Gupta }
525dda4dc5SVineet Gupta 
535dda4dc5SVineet Gupta static inline pgd_t *pgd_alloc(struct mm_struct *mm)
545dda4dc5SVineet Gupta {
55*d9820ff7SVineet Gupta 	pgd_t *ret = (pgd_t *) __get_free_page(GFP_KERNEL);
565dda4dc5SVineet Gupta 
575dda4dc5SVineet Gupta 	if (ret) {
58*d9820ff7SVineet Gupta 		int num, num2;
595dda4dc5SVineet Gupta 		num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
605dda4dc5SVineet Gupta 		memzero(ret, num * sizeof(pgd_t));
615dda4dc5SVineet Gupta 
625dda4dc5SVineet Gupta 		num2 = VMALLOC_SIZE / PGDIR_SIZE;
635dda4dc5SVineet Gupta 		memcpy(ret + num, swapper_pg_dir + num, num2 * sizeof(pgd_t));
645dda4dc5SVineet Gupta 
655dda4dc5SVineet Gupta 		memzero(ret + num + num2,
665dda4dc5SVineet Gupta 			       (PTRS_PER_PGD - num - num2) * sizeof(pgd_t));
675dda4dc5SVineet Gupta 
685dda4dc5SVineet Gupta 	}
695dda4dc5SVineet Gupta 	return ret;
705dda4dc5SVineet Gupta }
715dda4dc5SVineet Gupta 
725dda4dc5SVineet Gupta static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
735dda4dc5SVineet Gupta {
74*d9820ff7SVineet Gupta 	free_page((unsigned long)pgd);
755dda4dc5SVineet Gupta }
765dda4dc5SVineet Gupta 
774cf58924SJoel Fernandes (Google) static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
785dda4dc5SVineet Gupta {
795dda4dc5SVineet Gupta 	pte_t *pte;
805dda4dc5SVineet Gupta 
81*d9820ff7SVineet Gupta 	pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_ZERO);
825dda4dc5SVineet Gupta 
835dda4dc5SVineet Gupta 	return pte;
845dda4dc5SVineet Gupta }
855dda4dc5SVineet Gupta 
86*d9820ff7SVineet Gupta static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
875dda4dc5SVineet Gupta {
88ca6ec3bbSKirill A. Shutemov 	struct page *page;
895dda4dc5SVineet Gupta 
90*d9820ff7SVineet Gupta 	page = (pgtable_t)alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
91*d9820ff7SVineet Gupta 	if (!page)
92*d9820ff7SVineet Gupta 		return NULL;
93*d9820ff7SVineet Gupta 
94b4ed71f5SMark Rutland 	if (!pgtable_pte_page_ctor(page)) {
95ca6ec3bbSKirill A. Shutemov 		__free_page(page);
96*d9820ff7SVineet Gupta 		return NULL;
975dda4dc5SVineet Gupta 	}
985dda4dc5SVineet Gupta 
99*d9820ff7SVineet Gupta 	return page;
1005dda4dc5SVineet Gupta }
1015dda4dc5SVineet Gupta 
1025dda4dc5SVineet Gupta static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
1035dda4dc5SVineet Gupta {
104*d9820ff7SVineet Gupta 	free_page((unsigned long)pte);
1055dda4dc5SVineet Gupta }
1065dda4dc5SVineet Gupta 
107*d9820ff7SVineet Gupta static inline void pte_free(struct mm_struct *mm, pgtable_t pte_page)
1085dda4dc5SVineet Gupta {
109*d9820ff7SVineet Gupta 	pgtable_pte_page_dtor(pte_page);
110*d9820ff7SVineet Gupta 	__free_page(pte_page);
1115dda4dc5SVineet Gupta }
1125dda4dc5SVineet Gupta 
/* Free a pte table in mmu_gather (TLB shootdown) context; addr is unused. */
 1135dda4dc5SVineet Gupta #define __pte_free_tlb(tlb, pte, addr)  pte_free((tlb)->mm, pte)
1145dda4dc5SVineet Gupta 
1155dda4dc5SVineet Gupta #endif /* _ASM_ARC_PGALLOC_H */
116