/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);

unsigned long *page_table_alloc(struct mm_struct *);
struct page *page_table_alloc_pgste(struct mm_struct *mm);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
void page_table_free_pgste(struct page *page);
extern int page_table_allocate_pgste;

/*
 * Fill n bytes at s with the 8-byte value val: store val once per
 * 256-byte block and let the overlapping MVC replicate it across the
 * remainder of the block.
 */
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
	struct addrtype { char _[256]; };
	int i;

	for (i = 0; i < n; i += 256) {
		*s = val;
		asm volatile(
			"mvc	8(248,%[s]),0(%[s])\n"
			: "+m" (*(struct addrtype *) s)
			: [s] "a" (s));
		s += 256 / sizeof(long);
	}
}

/* Initialize all 2048 entries of a region/segment table to "entry". */
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	clear_table(crst, entry, sizeof(unsigned long)*2048);
}

/* Return the empty entry type of the top-level table for this mm. */
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	if (mm->context.asce_limit <= (1UL << 31))
		return _SEGMENT_ENTRY_EMPTY;
	if (mm->context.asce_limit <= (1UL << 42))
		return _REGION3_ENTRY_EMPTY;
	if (mm->context.asce_limit <= (1UL << 53))
		return _REGION2_ENTRY_EMPTY;
	return _REGION1_ENTRY_EMPTY;
}

int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
void crst_table_downgrade(struct mm_struct *);

static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);

	if (table)
		crst_table_init(table, _REGION2_ENTRY_EMPTY);
	return (p4d_t *) table;
}
#define p4d_free(mm, p4d) crst_table_free(mm, (unsigned long *) p4d)

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);

	if (table)
		crst_table_init(table, _REGION3_ENTRY_EMPTY);
	return (pud_t *) table;
}
#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
	if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
		crst_table_free(mm, table);
		return NULL;
	}
	return (pmd_t *) table;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	crst_table_free(mm, (unsigned long *) pmd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	pgd_val(*pgd) = _REGION1_ENTRY | __pa(p4d);
}

static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	p4d_val(*p4d) = _REGION2_ENTRY | __pa(pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	if (mm->context.asce_limit == (1UL << 31)) {
		/* Forking a compat process with 2 page table levels */
		if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
			crst_table_free(mm, table);
			return NULL;
		}
	}
	return (pgd_t *) table;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (mm->context.asce_limit == (1UL << 31))
		pgtable_pmd_page_dtor(virt_to_page(pgd));
	crst_table_free(mm, (unsigned long *) pgd);
}

static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}

#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)

#define pmd_pgtable(pmd) \
	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)

/*
 * page table entry allocation/free routines.
 */
#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)

extern void rcu_table_freelist_finish(void);

#endif /* _S390_PGALLOC_H */