/*
 * IBM System z Huge TLB Page Support for Kernel.
 *
 * Copyright IBM Corp. 2007
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>

static inline pmd_t __pte_to_pmd(pte_t pte)
{
	pmd_t pmd;

	/*
	 * Convert encoding               pte bits        pmd bits
	 *                              lIR.uswrdy.p    dy..R...I...wr
	 * empty                        010.000000.0 -> 00..0...1...00
	 * prot-none, clean, old        111.000000.1 -> 00..1...1...00
	 * prot-none, clean, young      111.000001.1 -> 01..1...1...00
	 * prot-none, dirty, old        111.000010.1 -> 10..1...1...00
	 * prot-none, dirty, young      111.000011.1 -> 11..1...1...00
	 * read-only, clean, old        111.000100.1 -> 00..1...1...01
	 * read-only, clean, young      101.000101.1 -> 01..1...0...01
	 * read-only, dirty, old        111.000110.1 -> 10..1...1...01
	 * read-only, dirty, young      101.000111.1 -> 11..1...0...01
	 * read-write, clean, old       111.001100.1 -> 00..1...1...11
	 * read-write, clean, young     101.001101.1 -> 01..1...0...11
	 * read-write, dirty, old       110.001110.1 -> 10..0...1...11
	 * read-write, dirty, young     100.001111.1 -> 11..0...0...11
	 * HW-bits: R read-only, I invalid
	 * SW-bits: p present, y young, d dirty, r read, w write, s special,
	 *	    u unused, l large
	 */
	if (pte_present(pte)) {
		pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_READ) >> 4;
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_WRITE) >> 4;
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_INVALID) >> 5;
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_PROTECT);
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10;
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10;
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_SOFT_DIRTY) << 13;
	} else
		pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pte_t __pmd_to_pte(pmd_t pmd)
{
	pte_t pte;

	/*
	 * Convert encoding                pmd bits        pte bits
	 *                              dy..R...I...wr    lIR.uswrdy.p
	 * empty                        00..0...1...00 -> 010.000000.0
	 * prot-none, clean, old        00..1...1...00 -> 111.000000.1
	 * prot-none, clean, young      01..1...1...00 -> 111.000001.1
	 * prot-none, dirty, old        10..1...1...00 -> 111.000010.1
	 * prot-none, dirty, young      11..1...1...00 -> 111.000011.1
	 * read-only, clean, old        00..1...1...01 -> 111.000100.1
	 * read-only, clean, young      01..1...0...01 -> 101.000101.1
	 * read-only, dirty, old        10..1...1...01 -> 111.000110.1
	 * read-only, dirty, young      11..1...0...01 -> 101.000111.1
	 * read-write, clean, old       00..1...1...11 -> 111.001100.1
	 * read-write, clean, young     01..1...0...11 -> 101.001101.1
	 * read-write, dirty, old       10..0...1...11 -> 110.001110.1
	 * read-write, dirty, young     11..0...0...11 -> 100.001111.1
	 * HW-bits: R read-only, I invalid
	 * SW-bits: p present, y young, d dirty, r read, w write, s special,
	 *	    u unused, l large
	 */
	if (pmd_present(pmd)) {
		pte_val(pte) = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN_LARGE;
		pte_val(pte) |= _PAGE_LARGE | _PAGE_PRESENT;
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_READ) << 4;
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) << 4;
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) << 5;
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT);
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) >> 10;
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) >> 10;
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY) >> 13;
	} else
		pte_val(pte) = _PAGE_INVALID;
	return pte;
}

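/*
 * The hugetlb interface below is implemented in terms of segment table
 * entries: huge pages are mapped at the pmd (segment) level, so the
 * generic pte_t based API is serviced by converting between the pte and
 * segment entry encodings with the helpers above.
 */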
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	pmd_t pmd = __pte_to_pmd(pte);

	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	*(pmd_t *) ptep = pmd;
}

pte_t huge_ptep_get(pte_t *ptep)
{
	pmd_t pmd = *(pmd_t *) ptep;

	return __pmd_to_pte(pmd);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	pmd_t *pmdp = (pmd_t *) ptep;
	pte_t pte = huge_ptep_get(ptep);

	pmdp_flush_direct(mm, addr, pmdp);
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
	return pte;
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	pudp = pud_alloc(mm, pgdp, addr);
	if (pudp)
		pmdp = pmd_alloc(mm, pudp, addr);
	return (pte_t *) pmdp;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	if (pgd_present(*pgdp)) {
		pudp = pud_offset(pgdp, addr);
		if (pud_present(*pudp))
			pmdp = pmd_offset(pudp, addr);
	}
	return (pte_t *) pmdp;
}

int pmd_huge(pmd_t pmd)
{
	return pmd_large(pmd);
}

int pud_huge(pud_t pud)
{
	return 0;
}