/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_MICROBLAZE_PGALLOC_H
#define _ASM_MICROBLAZE_PGALLOC_H

#ifdef CONFIG_MMU

#include <linux/kernel.h>	/* For min/max macros */
#include <linux/highmem.h>
#include <asm/setup.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/cache.h>

#define PGDIR_ORDER	0

/*
 * This is handled very differently on MicroBlaze since our page tables
 * are all 0's and I want to be able to use these zero'd pages elsewhere
 * as well - it gives us quite a speedup.
 * -- Cort
 */
extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;
	unsigned long *pte_cache;
	unsigned long pgtable_cache_sz;
} quicklists;

#define pgd_quicklist		(quicklists.pgd_cache)
#define pmd_quicklist		((unsigned long *)0)
#define pte_quicklist		(quicklists.pte_cache)
#define pgtable_cache_size	(quicklists.pgtable_cache_sz)

extern unsigned long *zero_cache;	/* head of linked list of pre-zero'd pages */
extern atomic_t zero_sz;		/* # of currently pre-zero'd pages */
extern atomic_t zeropage_hits;		/* # of zero'd page requests we've satisfied */
extern atomic_t zeropage_calls;		/* # of zero'd page requests that've been made */
extern atomic_t zerototal;		/* # of pages zero'd over time */

#define zero_quicklist		(zero_cache)
#define zero_cache_sz		(zero_sz)
#define zero_cache_calls	(zeropage_calls)
#define zero_cache_hits		(zeropage_hits)
#define zero_cache_total	(zerototal)

/*
 * return a pre-zero'd page from the list,
 * return NULL if none available -- Cort
 */
extern unsigned long get_zero_page_fast(void);

extern void __bad_pte(pmd_t *pmd);

static inline pgd_t *get_pgd_slow(void)
{
	pgd_t *ret;

	ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGDIR_ORDER);
	if (ret != NULL)
		clear_page(ret);
	return ret;
}

/* Pop a cached pgd page off the quicklist, falling back to a fresh page. */
static inline pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	ret = pgd_quicklist;
	if (ret != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;	/* scrub the link word before handing out */
		pgtable_cache_size--;
	} else
		ret = (unsigned long *)get_pgd_slow();
	return (pgd_t *)ret;
}

/* Push a pgd page onto the quicklist; its first word links to the old head. */
static inline void free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long **)pgd = pgd_quicklist;
	pgd_quicklist = (unsigned long *)pgd;
	pgtable_cache_size++;
}

static inline void free_pgd_slow(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

#define pgd_free(mm, pgd)	free_pgd_fast(pgd)
#define pgd_alloc(mm)		get_pgd_fast()
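/*
 * Illustrative sketch only, not part of this header: the quicklists
 * above are intrusive LIFO free lists -- the first word of each cached
 * page doubles as the link to the next free page, so no separate list
 * node is ever allocated. The same push/pop scheme in isolation, with
 * hypothetical names:
 *
 *	static unsigned long *head;		// analogue of pgd_quicklist
 *
 *	static void push(unsigned long *page)
 *	{
 *		*page = (unsigned long)head;	// link word = old head
 *		head = page;
 *	}
 *
 *	static unsigned long *pop(void)
 *	{
 *		unsigned long *ret = head;
 *		if (ret) {
 *			head = (unsigned long *)*ret;	// follow the link
 *			ret[0] = 0;	// scrub the link word before reuse
 *		}
 *		return ret;
 *	}
 */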
#define pmd_pgtable(pmd)	pmd_page(pmd)

/*
 * We don't have any real pmd's, and this code never triggers because
 * the pgd will always be present..
 */
#define pmd_alloc_one_fast(mm, address)	({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, address)	({ BUG(); ((pmd_t *)2); })

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
		unsigned long address)
{
	pte_t *pte;
	extern int mem_init_done;
	extern void *early_get_page(void);

	if (mem_init_done) {
		/* The page allocator is up; take a zeroed page from it. */
		pte = (pte_t *)__get_free_page(GFP_KERNEL |
				__GFP_REPEAT | __GFP_ZERO);
	} else {
		/* Too early for the page allocator; use the boot-time pool. */
		pte = (pte_t *)early_get_page();
		if (pte)
			clear_page(pte);
	}
	return pte;
}

static inline struct page *pte_alloc_one(struct mm_struct *mm,
		unsigned long address)
{
	struct page *ptepage;

#ifdef CONFIG_HIGHPTE
	int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
#else
	int flags = GFP_KERNEL | __GFP_REPEAT;
#endif

	ptepage = alloc_pages(flags, 0);
	if (ptepage)
		clear_highpage(ptepage);
	return ptepage;
}

/* Pop a cached pte page off the quicklist; NULL when the list is empty. */
static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm,
		unsigned long address)
{
	unsigned long *ret;

	ret = pte_quicklist;
	if (ret != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

static inline void pte_free_fast(pte_t *pte)
{
	*(unsigned long **)pte = pte_quicklist;
	pte_quicklist = (unsigned long *)pte;
	pgtable_cache_size++;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free_slow(struct page *ptepage)
{
	__free_page(ptepage);
}

static inline void pte_free(struct mm_struct *mm, struct page *ptepage)
{
	__free_page(ptepage);
}

#define __pte_free_tlb(tlb, pte, addr)	pte_free((tlb)->mm, (pte))

#define pmd_populate(mm, pmd, pte) \
		(pmd_val(*(pmd)) = (unsigned long)page_address(pte))

#define pmd_populate_kernel(mm, pmd, pte) \
		(pmd_val(*(pmd)) = (unsigned long)(pte))

#define pmd_free(mm, x)			do { } while (0)
#define __pmd_free_tlb(tlb, x, addr)	pmd_free((tlb)->mm, x)
#define pgd_populate(mm, pmd, pte)	BUG()

extern int do_check_pgt_cache(int, int);

#endif /* CONFIG_MMU */

#define check_pgt_cache()	do { } while (0)

#endif /* _ASM_MICROBLAZE_PGALLOC_H */