175a9b8a6SAneesh Kumar K.V #ifndef _ASM_POWERPC_PGALLOC_64_H
275a9b8a6SAneesh Kumar K.V #define _ASM_POWERPC_PGALLOC_64_H
375a9b8a6SAneesh Kumar K.V /*
475a9b8a6SAneesh Kumar K.V  * This program is free software; you can redistribute it and/or
575a9b8a6SAneesh Kumar K.V  * modify it under the terms of the GNU General Public License
675a9b8a6SAneesh Kumar K.V  * as published by the Free Software Foundation; either version
775a9b8a6SAneesh Kumar K.V  * 2 of the License, or (at your option) any later version.
875a9b8a6SAneesh Kumar K.V  */
975a9b8a6SAneesh Kumar K.V 
1075a9b8a6SAneesh Kumar K.V #include <linux/slab.h>
1175a9b8a6SAneesh Kumar K.V #include <linux/cpumask.h>
1275a9b8a6SAneesh Kumar K.V #include <linux/percpu.h>
1375a9b8a6SAneesh Kumar K.V 
/*
 * One node per page used to back the virtual memmap; nodes are chained
 * through ->list, with vmemmap_list as the head of the chain.
 */
struct vmemmap_backing {
	struct vmemmap_backing *list;	/* next node in the chain */
	unsigned long phys;		/* physical address of the backing page */
	unsigned long virt_addr;	/* virtual address the page backs */
};
extern struct vmemmap_backing *vmemmap_list;
2075a9b8a6SAneesh Kumar K.V 
/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation.  For PTE pages (which are linked to a struct
 * page for now, and drawn from the main get_free_pages() pool), the
 * allocation size will be (2^index_size * sizeof(pointer)) and
 * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer.  In other words all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value.  This value is also used as a mask, so it must
 * be one less than a power of two.
 */
#define MAX_PGTABLE_INDEX_SIZE	0xf

/* Per-index-size caches; slot (shift - 1) holds the cache for "shift". */
extern struct kmem_cache *pgtable_cache[];
/*
 * Look up the cache for a given index size.  Shift 0 is invalid here:
 * PTE pages come from the page allocator, not a kmem_cache (see
 * pgtable_free()).
 */
#define PGT_CACHE(shift) ({				\
			BUG_ON(!(shift));		\
			pgtable_cache[(shift) - 1];	\
		})
4375a9b8a6SAneesh Kumar K.V 
4475a9b8a6SAneesh Kumar K.V static inline pgd_t *pgd_alloc(struct mm_struct *mm)
4575a9b8a6SAneesh Kumar K.V {
4675a9b8a6SAneesh Kumar K.V 	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
4775a9b8a6SAneesh Kumar K.V }
4875a9b8a6SAneesh Kumar K.V 
4975a9b8a6SAneesh Kumar K.V static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
5075a9b8a6SAneesh Kumar K.V {
5175a9b8a6SAneesh Kumar K.V 	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
5275a9b8a6SAneesh Kumar K.V }
5375a9b8a6SAneesh Kumar K.V 
5475a9b8a6SAneesh Kumar K.V #ifndef CONFIG_PPC_64K_PAGES
5575a9b8a6SAneesh Kumar K.V 
5675a9b8a6SAneesh Kumar K.V #define pgd_populate(MM, PGD, PUD)	pgd_set(PGD, __pgtable_ptr_val(PUD))
5775a9b8a6SAneesh Kumar K.V 
5875a9b8a6SAneesh Kumar K.V static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5975a9b8a6SAneesh Kumar K.V {
6075a9b8a6SAneesh Kumar K.V 	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
6175a9b8a6SAneesh Kumar K.V 				GFP_KERNEL|__GFP_REPEAT);
6275a9b8a6SAneesh Kumar K.V }
6375a9b8a6SAneesh Kumar K.V 
6475a9b8a6SAneesh Kumar K.V static inline void pud_free(struct mm_struct *mm, pud_t *pud)
6575a9b8a6SAneesh Kumar K.V {
6675a9b8a6SAneesh Kumar K.V 	kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
6775a9b8a6SAneesh Kumar K.V }
6875a9b8a6SAneesh Kumar K.V 
/* Install a newly allocated PMD table into a PUD entry. */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, __pgtable_ptr_val(pmd));
}
7375a9b8a6SAneesh Kumar K.V 
/* Install a kernel PTE table (given as a virtual pointer) into a PMD entry. */
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, __pgtable_ptr_val(pte));
}
7975a9b8a6SAneesh Kumar K.V 
/*
 * Install a user PTE table into a PMD entry.  With 4K pages pgtable_t
 * is a struct page, so convert it to its virtual address first.
 */
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte_page)
{
	pmd_set(pmd, __pgtable_ptr_val(page_address(pte_page)));
}
8575a9b8a6SAneesh Kumar K.V 
/* Recover the struct page of the PTE table a PMD entry points at. */
#define pmd_pgtable(pmd) pmd_page(pmd)
8775a9b8a6SAneesh Kumar K.V 
8875a9b8a6SAneesh Kumar K.V static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
8975a9b8a6SAneesh Kumar K.V 					  unsigned long address)
9075a9b8a6SAneesh Kumar K.V {
9175a9b8a6SAneesh Kumar K.V 	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
9275a9b8a6SAneesh Kumar K.V }
9375a9b8a6SAneesh Kumar K.V 
9475a9b8a6SAneesh Kumar K.V static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
9575a9b8a6SAneesh Kumar K.V 				      unsigned long address)
9675a9b8a6SAneesh Kumar K.V {
9775a9b8a6SAneesh Kumar K.V 	struct page *page;
9875a9b8a6SAneesh Kumar K.V 	pte_t *pte;
9975a9b8a6SAneesh Kumar K.V 
10075a9b8a6SAneesh Kumar K.V 	pte = pte_alloc_one_kernel(mm, address);
10175a9b8a6SAneesh Kumar K.V 	if (!pte)
10275a9b8a6SAneesh Kumar K.V 		return NULL;
10375a9b8a6SAneesh Kumar K.V 	page = virt_to_page(pte);
10475a9b8a6SAneesh Kumar K.V 	if (!pgtable_page_ctor(page)) {
10575a9b8a6SAneesh Kumar K.V 		__free_page(page);
10675a9b8a6SAneesh Kumar K.V 		return NULL;
10775a9b8a6SAneesh Kumar K.V 	}
10875a9b8a6SAneesh Kumar K.V 	return page;
10975a9b8a6SAneesh Kumar K.V }
11075a9b8a6SAneesh Kumar K.V 
11175a9b8a6SAneesh Kumar K.V static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
11275a9b8a6SAneesh Kumar K.V {
11375a9b8a6SAneesh Kumar K.V 	free_page((unsigned long)pte);
11475a9b8a6SAneesh Kumar K.V }
11575a9b8a6SAneesh Kumar K.V 
/* Free a user PTE table; the dtor must run before the page is freed. */
static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pgtable_page_dtor(ptepage);
	__free_page(ptepage);
}
12175a9b8a6SAneesh Kumar K.V 
12275a9b8a6SAneesh Kumar K.V static inline void pgtable_free(void *table, unsigned index_size)
12375a9b8a6SAneesh Kumar K.V {
12475a9b8a6SAneesh Kumar K.V 	if (!index_size)
12575a9b8a6SAneesh Kumar K.V 		free_page((unsigned long)table);
12675a9b8a6SAneesh Kumar K.V 	else {
12775a9b8a6SAneesh Kumar K.V 		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
12875a9b8a6SAneesh Kumar K.V 		kmem_cache_free(PGT_CACHE(index_size), table);
12975a9b8a6SAneesh Kumar K.V 	}
13075a9b8a6SAneesh Kumar K.V }
13175a9b8a6SAneesh Kumar K.V 
#ifdef CONFIG_SMP
/*
 * Queue a page table for freeing after the TLB flush.  The table's
 * index_size is stashed in the low bits of the pointer: all tables are
 * aligned enough to keep those bits zero (see the comment above
 * MAX_PGTABLE_INDEX_SIZE).
 */
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
				    void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;
	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}

/* mmu_gather callback: split the pointer and shift back apart and free. */
static inline void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	pgtable_free(table, shift);
}
#else /* !CONFIG_SMP */
/* Without SMP there is no deferral: free the table immediately. */
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
				    void *table, int shift)
{
	pgtable_free(table, shift);
}
#endif /* CONFIG_SMP */
15675a9b8a6SAneesh Kumar K.V 
/* Tear down a user PTE table during a TLB gather (4K-page variant). */
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	tlb_flush_pgtable(tlb, address);
	/* Run the dtor now; pgtable_free_tlb() only sees the bare page. */
	pgtable_page_dtor(table);
	pgtable_free_tlb(tlb, page_address(table), 0);
}
16475a9b8a6SAneesh Kumar K.V 
16575a9b8a6SAneesh Kumar K.V #else /* if CONFIG_PPC_64K_PAGES */
16675a9b8a6SAneesh Kumar K.V 
/*
 * With 64K pages, PTE tables come from page_table_alloc() (defined
 * elsewhere).  The final int argument is 1 for kernel tables and 0 for
 * user tables, judging by the pte_alloc_one{,_kernel}() callers below
 * -- confirm against the definition.
 */
extern pte_t *page_table_alloc(struct mm_struct *, unsigned long, int);
extern void page_table_free(struct mm_struct *, unsigned long *, int);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
#ifdef CONFIG_SMP
extern void __tlb_remove_table(void *_table);
#endif
17375a9b8a6SAneesh Kumar K.V 
17475a9b8a6SAneesh Kumar K.V #ifndef __PAGETABLE_PUD_FOLDED
/* book3s 64 is 4 level page table */
/* Install a newly allocated PUD table into a PGD entry. */
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	pgd_set(pgd, __pgtable_ptr_val(pud));
}
18075a9b8a6SAneesh Kumar K.V 
18175a9b8a6SAneesh Kumar K.V static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
18275a9b8a6SAneesh Kumar K.V {
18375a9b8a6SAneesh Kumar K.V 	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
18475a9b8a6SAneesh Kumar K.V 				GFP_KERNEL|__GFP_REPEAT);
18575a9b8a6SAneesh Kumar K.V }
18675a9b8a6SAneesh Kumar K.V 
18775a9b8a6SAneesh Kumar K.V static inline void pud_free(struct mm_struct *mm, pud_t *pud)
18875a9b8a6SAneesh Kumar K.V {
18975a9b8a6SAneesh Kumar K.V 	kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
19075a9b8a6SAneesh Kumar K.V }
19175a9b8a6SAneesh Kumar K.V #endif
19275a9b8a6SAneesh Kumar K.V 
/* Install a newly allocated PMD table into a PUD entry. */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, __pgtable_ptr_val(pmd));
}
19775a9b8a6SAneesh Kumar K.V 
/* Install a kernel PTE table (given as a virtual pointer) into a PMD entry. */
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, __pgtable_ptr_val(pte));
}
20375a9b8a6SAneesh Kumar K.V 
/*
 * Install a user PTE table into a PMD entry.  With 64K pages pgtable_t
 * is already a table pointer (see pmd_pgtable() below), so no
 * page_address() conversion is needed.
 */
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte_page)
{
	pmd_set(pmd, __pgtable_ptr_val(pte_page));
}
20975a9b8a6SAneesh Kumar K.V 
/*
 * Recover the PTE table from a PMD entry.  Here pgtable_t is a virtual
 * table pointer, not a struct page as in the 4K-page variant.
 */
static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}
21475a9b8a6SAneesh Kumar K.V 
21575a9b8a6SAneesh Kumar K.V static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
21675a9b8a6SAneesh Kumar K.V 					  unsigned long address)
21775a9b8a6SAneesh Kumar K.V {
21875a9b8a6SAneesh Kumar K.V 	return (pte_t *)page_table_alloc(mm, address, 1);
21975a9b8a6SAneesh Kumar K.V }
22075a9b8a6SAneesh Kumar K.V 
22175a9b8a6SAneesh Kumar K.V static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
22275a9b8a6SAneesh Kumar K.V 					unsigned long address)
22375a9b8a6SAneesh Kumar K.V {
22475a9b8a6SAneesh Kumar K.V 	return (pgtable_t)page_table_alloc(mm, address, 0);
22575a9b8a6SAneesh Kumar K.V }
22675a9b8a6SAneesh Kumar K.V 
22775a9b8a6SAneesh Kumar K.V static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
22875a9b8a6SAneesh Kumar K.V {
22975a9b8a6SAneesh Kumar K.V 	page_table_free(mm, (unsigned long *)pte, 1);
23075a9b8a6SAneesh Kumar K.V }
23175a9b8a6SAneesh Kumar K.V 
23275a9b8a6SAneesh Kumar K.V static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
23375a9b8a6SAneesh Kumar K.V {
23475a9b8a6SAneesh Kumar K.V 	page_table_free(mm, (unsigned long *)ptepage, 0);
23575a9b8a6SAneesh Kumar K.V }
23675a9b8a6SAneesh Kumar K.V 
/* Tear down a user PTE table during a TLB gather (64K-page variant). */
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	/* Flush before queueing the table for freeing. */
	tlb_flush_pgtable(tlb, address);
	pgtable_free_tlb(tlb, table, 0);
}
24375a9b8a6SAneesh Kumar K.V #endif /* CONFIG_PPC_64K_PAGES */
24475a9b8a6SAneesh Kumar K.V 
24575a9b8a6SAneesh Kumar K.V static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
24675a9b8a6SAneesh Kumar K.V {
24775a9b8a6SAneesh Kumar K.V 	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
24875a9b8a6SAneesh Kumar K.V 				GFP_KERNEL|__GFP_REPEAT);
24975a9b8a6SAneesh Kumar K.V }
25075a9b8a6SAneesh Kumar K.V 
25175a9b8a6SAneesh Kumar K.V static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
25275a9b8a6SAneesh Kumar K.V {
25375a9b8a6SAneesh Kumar K.V 	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
25475a9b8a6SAneesh Kumar K.V }
25575a9b8a6SAneesh Kumar K.V 
/*
 * PMDs and PUDs always come from kmem_caches, so pass their cache
 * index down so pgtable_free() can find the right cache again.
 */
#define __pmd_free_tlb(tlb, pmd, addr)		      \
	pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX)
#ifndef __PAGETABLE_PUD_FOLDED
#define __pud_free_tlb(tlb, pud, addr)		      \
	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)

#endif /* __PAGETABLE_PUD_FOLDED */
26375a9b8a6SAneesh Kumar K.V 
/* No periodic page-table cache maintenance is needed here: no-op. */
#define check_pgt_cache()	do { } while (0)
26575a9b8a6SAneesh Kumar K.V 
26675a9b8a6SAneesh Kumar K.V #endif /* _ASM_POWERPC_PGALLOC_64_H */
267