/*
 *  include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define check_pgt_cache()	do {} while (0)

unsigned long *crst_table_alloc(struct mm_struct *, int);
void crst_table_free(struct mm_struct *, unsigned long *);

unsigned long *page_table_alloc(struct mm_struct *);
void page_table_free(struct mm_struct *, unsigned long *);
void disable_noexec(struct mm_struct *, struct task_struct *);

/*
 * Fill a table with n bytes of 'val': the first entry is stored directly,
 * then the mvc/brct loop propagates it through the rest of the table
 * 256 bytes at a time.
 */
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
	typedef struct { char _[n]; } addrtype;

	*s = val;
	n = (n / 256) - 1;
	asm volatile(
#ifdef CONFIG_64BIT
		"	mvc	8(248,%0),0(%0)\n"
#else
		"	mvc	4(252,%0),0(%0)\n"
#endif
		"0:	mvc	256(256,%0),0(%0)\n"
		"	la	%0,256(%0)\n"
		"	brct	%1,0b\n"
		: "+a" (s), "+d" (n), "=m" (*(addrtype *) s)
		: "m" (*(addrtype *) s));
}

/*
 * Initialize a crst (region/segment) table with empty entries.  If the
 * table has a shadow (no-exec emulation), initialize that one as well.
 */
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	clear_table(crst, entry, sizeof(unsigned long)*2048);
	crst = get_shadow_table(crst);
	if (crst)
		clear_table(crst, entry, sizeof(unsigned long)*2048);
}

#ifndef __s390x__

static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	return _SEGMENT_ENTRY_EMPTY;
}

#define pud_alloc_one(mm,address)		({ BUG(); ((pud_t *)2); })
#define pud_free(mm, x)				do { } while (0)

#define pmd_alloc_one(mm,address)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x)				do { } while (0)

#define pgd_populate(mm, pgd, pud)		BUG()
#define pgd_populate_kernel(mm, pgd, pud)	BUG()

#define pud_populate(mm, pud, pmd)		BUG()
#define pud_populate_kernel(mm, pud, pmd)	BUG()

#else /* __s390x__ */

static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	if (mm->context.asce_limit <= (1UL << 31))
		return _SEGMENT_ENTRY_EMPTY;
	if (mm->context.asce_limit <= (1UL << 42))
		return _REGION3_ENTRY_EMPTY;
	return _REGION2_ENTRY_EMPTY;
}

int crst_table_upgrade(struct mm_struct *, unsigned long limit);
void crst_table_downgrade(struct mm_struct *, unsigned long limit);

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
	if (table)
		crst_table_init(table, _REGION3_ENTRY_EMPTY);
	return (pud_t *) table;
}
#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
	if (table)
		crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
	return (pmd_t *) table;
}
#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)

static inline void pgd_populate_kernel(struct mm_struct *mm,
				       pgd_t *pgd, pud_t *pud)
{
	pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	pgd_populate_kernel(mm, pgd, pud);
	if (mm->context.noexec) {
		pgd = get_shadow_table(pgd);
		pud = get_shadow_table(pud);
		pgd_populate_kernel(mm, pgd, pud);
	}
}

static inline void pud_populate_kernel(struct mm_struct *mm,
				       pud_t *pud, pmd_t *pmd)
{
	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_populate_kernel(mm, pud, pmd);
	if (mm->context.noexec) {
		pud = get_shadow_table(pud);
		pmd = get_shadow_table(pmd);
		pud_populate_kernel(mm, pud, pmd);
	}
}

#endif /* __s390x__ */

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	INIT_LIST_HEAD(&mm->context.crst_list);
	INIT_LIST_HEAD(&mm->context.pgtable_list);
	return (pgd_t *) crst_table_alloc(mm, s390_noexec);
}
#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)

static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}

static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	pmd_populate_kernel(mm, pmd, pte);
	if (mm->context.noexec) {
		pmd = get_shadow_table(pmd);
		pmd_populate_kernel(mm, pmd, pte + PTRS_PER_PTE);
	}
}

#define pmd_pgtable(pmd) \
	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)

/*
 * page table entry allocation/free routines.
 */
#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)

#endif /* _S390_PGALLOC_H */
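/*
 * Illustrative sketch (not part of the original header): on 64-bit kernels
 * the hooks above compose roughly as shown below when a new table hierarchy
 * is built.  The function name is hypothetical, the pgd entry lookup via
 * pgd_offset() and error unwinding are omitted for brevity, and real callers
 * live in the generic mm code.
 *
 *	static pmd_t *example_build_tables(struct mm_struct *mm,
 *					   unsigned long addr)
 *	{
 *		pgd_t *pgd = pgd_alloc(mm);		// top-level crst table
 *		pud_t *pud = pud_alloc_one(mm, addr);	// region-third table
 *		pmd_t *pmd = pmd_alloc_one(mm, addr);	// segment table
 *
 *		if (!pgd || !pud || !pmd)
 *			return NULL;
 *		pgd_populate(mm, pgd, pud);	// also updates the shadow
 *		pud_populate(mm, pud, pmd);	// tables if mm->context.noexec
 *		return pmd;
 *	}
 */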