--- hugetlbpage.c (f26e8817b235d8764363bffcc9cbfc61867371f2)
+++ hugetlbpage.c (c7d9f77d33a779ad582d8b2284ba007931ebd894)
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
[... 14 unchanged lines hidden ...]
 */

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
                                                        unsigned long addr,
                                                        unsigned long len,
                                                        unsigned long pgoff,
                                                        unsigned long flags)
{
+        struct hstate *h = hstate_file(filp);
        unsigned long task_size = TASK_SIZE;
        struct vm_unmapped_area_info info;

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = min(task_size, VA_EXCLUDE_START);
-        info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
                VM_BUG_ON(addr != -ENOMEM);
                info.low_limit = VA_EXCLUDE_END;
                info.high_limit = task_size;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                                  const unsigned long len,
                                  const unsigned long pgoff,
                                  const unsigned long flags)
{
+        struct hstate *h = hstate_file(filp);
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;

        /* This should only ever run for 32-bit processes. */
        BUG_ON(!test_thread_flag(TIF_32BIT));

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = mm->mmap_base;
-        info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
[... 8 unchanged lines hidden ...]

        return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
+        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long task_size = TASK_SIZE;

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;

-        if (len & ~HPAGE_MASK)
+        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > task_size)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
-                addr = ALIGN(addr, HPAGE_SIZE);
+                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}

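The hunks above replace the fixed HPAGE_SIZE/HPAGE_MASK constants with the per-file hstate, so the same unmapped-area paths can serve more than one huge page size. The align_mask expression being parameterized selects the address bits between the base page and the huge page; a standalone sketch of that arithmetic is below (it assumes the usual sparc64 values, PAGE_SHIFT = 13 with 8 MB and 256 MB huge pages, and is not code from either revision).

```c
/*
 * Illustrative userspace arithmetic only; not kernel code. PAGE_SHIFT and the
 * huge-page shifts are assumed sparc64 values, restated locally.
 */
#include <stdio.h>

#define PAGE_SHIFT 13
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Equivalent of PAGE_MASK & ~huge_page_mask(h) for a huge page of 1UL << shift. */
static unsigned long hugetlb_align_mask(unsigned int shift)
{
        unsigned long huge_mask = ~((1UL << shift) - 1);

        return PAGE_MASK & ~huge_mask;
}

int main(void)
{
        printf("8 MB   huge page: align_mask = 0x%lx\n", hugetlb_align_mask(23));
        printf("256 MB huge page: align_mask = 0x%lx\n", hugetlb_align_mask(28));
        return 0;
}
```

For an 8 MB page this yields 0x7fe000 (bits 13..22), which vm_unmapped_area() uses to keep the returned address huge-page aligned.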
+static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
+{
+        return entry;
+}
+
+static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
+{
+        unsigned long hugepage_size = _PAGE_SZ4MB_4V;
+
+        pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;
+
+        switch (shift) {
+        case HPAGE_256MB_SHIFT:
+                hugepage_size = _PAGE_SZ256MB_4V;
+                pte_val(entry) |= _PAGE_PMD_HUGE;
+                break;
+        case HPAGE_SHIFT:
+                pte_val(entry) |= _PAGE_PMD_HUGE;
+                break;
+        default:
+                WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
+        }
+
+        pte_val(entry) = pte_val(entry) | hugepage_size;
+        return entry;
+}
+
+static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
+{
+        if (tlb_type == hypervisor)
+                return sun4v_hugepage_shift_to_tte(entry, shift);
+        else
+                return sun4u_hugepage_shift_to_tte(entry, shift);
+}
+
+pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+                         struct page *page, int writeable)
+{
+        unsigned int shift = huge_page_shift(hstate_vma(vma));
+
+        return hugepage_shift_to_tte(entry, shift);
+}
+
+static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
+{
+        unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V;
+        unsigned int shift;
+
+        switch (tte_szbits) {
+        case _PAGE_SZ256MB_4V:
+                shift = HPAGE_256MB_SHIFT;
+                break;
+        case _PAGE_SZ4MB_4V:
+                shift = REAL_HPAGE_SHIFT;
+                break;
+        default:
+                shift = PAGE_SHIFT;
+                break;
+        }
+        return shift;
+}
+
+static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
+{
+        unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
+        unsigned int shift;
+
+        switch (tte_szbits) {
+        case _PAGE_SZ256MB_4U:
+                shift = HPAGE_256MB_SHIFT;
+                break;
+        case _PAGE_SZ4MB_4U:
+                shift = REAL_HPAGE_SHIFT;
+                break;
+        default:
+                shift = PAGE_SHIFT;
+                break;
+        }
+        return shift;
+}
+
+static unsigned int huge_tte_to_shift(pte_t entry)
+{
+        unsigned long shift;
+
+        if (tlb_type == hypervisor)
+                shift = sun4v_huge_tte_to_shift(entry);
+        else
+                shift = sun4u_huge_tte_to_shift(entry);
+
+        if (shift == PAGE_SHIFT)
+                WARN_ONCE(1, "tto_to_shift: invalid hugepage tte=0x%lx\n",
+                          pte_val(entry));
+
+        return shift;
+}
+
+static unsigned long huge_tte_to_size(pte_t pte)
+{
+        unsigned long size = 1UL << huge_tte_to_shift(pte);
+
+        if (size == REAL_HPAGE_SIZE)
+                size = HPAGE_SIZE;
+        return size;
+}
+
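To make the new helpers concrete: huge_tte_to_shift() recovers a shift from the TTE size bits and huge_tte_to_size() turns it into bytes, reporting a 4 MB (REAL_HPAGE_SIZE) TTE as the 8 MB HPAGE_SIZE, since the default huge page is built from two 4 MB hardware pages. A standalone sketch of that round trip follows (the shift constants are the usual sparc64 values, restated as assumptions; this is not code from either revision).

```c
/*
 * Illustrative userspace arithmetic only; not kernel code. The constants
 * below are assumed sparc64 values (REAL_HPAGE_SHIFT = 22, HPAGE_SHIFT = 23,
 * HPAGE_256MB_SHIFT = 28), restated so the example is self-contained.
 */
#include <stdio.h>

#define REAL_HPAGE_SHIFT  22
#define HPAGE_SHIFT       23
#define HPAGE_256MB_SHIFT 28
#define REAL_HPAGE_SIZE   (1UL << REAL_HPAGE_SHIFT)
#define HPAGE_SIZE        (1UL << HPAGE_SHIFT)

/* Mirrors huge_tte_to_size(): a 4 MB hardware TTE is reported as the
 * 8 MB software huge page; a 256 MB TTE is reported as 256 MB. */
static unsigned long shift_to_reported_size(unsigned int shift)
{
        unsigned long size = 1UL << shift;

        if (size == REAL_HPAGE_SIZE)
                size = HPAGE_SIZE;
        return size;
}

int main(void)
{
        printf("4 MB TTE   -> %lu MB huge page\n",
               shift_to_reported_size(REAL_HPAGE_SHIFT) >> 20);
        printf("256 MB TTE -> %lu MB huge page\n",
               shift_to_reported_size(HPAGE_256MB_SHIFT) >> 20);
        return 0;
}
```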
|
pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
[... 17 unchanged lines hidden ...]
                pte = (pte_t *)pmd_offset(pud, addr);
        }
        return pte;
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
{
+        unsigned int i, nptes, hugepage_shift;
+        unsigned long size;
        pte_t orig;

+        size = huge_tte_to_size(entry);
+        nptes = size >> PMD_SHIFT;
+
        if (!pte_present(*ptep) && pte_present(entry))
-                mm->context.hugetlb_pte_count++;
+                mm->context.hugetlb_pte_count += nptes;

-        addr &= HPAGE_MASK;
+        addr &= ~(size - 1);
        orig = *ptep;
-        *ptep = entry;
+        hugepage_shift = pte_none(orig) ? PAGE_SIZE : huge_tte_to_shift(orig);

-        /* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
-        maybe_tlb_batch_add(mm, addr, ptep, orig, 0);
-        maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0);
+        for (i = 0; i < nptes; i++)
+                ptep[i] = __pte(pte_val(entry) + (i << PMD_SHIFT));
+
+        maybe_tlb_batch_add(mm, addr, ptep, orig, 0, hugepage_shift);
+        /* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
+        if (size == HPAGE_SIZE)
+                maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
+                                    hugepage_shift);
}
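The loop sizing above is easiest to see with numbers (PMD_SHIFT = 23 and the huge-page shifts below are assumed sparc64 values; the sketch is illustrative, not code from the commit): an 8 MB huge page maps to a single PMD entry, while a 256 MB page occupies (1 << 28) >> 23 = 32 consecutive PMD entries, each programmed with the base TTE plus an i << PMD_SHIFT (8 MB) offset. The extra REAL_HPAGE_SIZE flush is only issued in the 8 MB case, where one software huge page spans two 4 MB hardware TTEs.

```c
/*
 * Illustrative userspace arithmetic only; not kernel code. PMD_SHIFT and the
 * huge-page shifts are assumed sparc64 values, restated as local constants.
 */
#include <stdio.h>

#define PMD_SHIFT         23   /* one PMD entry maps 8 MB */
#define HPAGE_SHIFT       23   /* default 8 MB huge page */
#define HPAGE_256MB_SHIFT 28   /* optional 256 MB huge page */

static void show(unsigned int shift)
{
        unsigned long size = 1UL << shift;
        unsigned long nptes = size >> PMD_SHIFT;   /* entries written by the loop */
        unsigned long i;

        printf("%3lu MB huge page -> %2lu PMD entr%s\n",
               size >> 20, nptes, nptes == 1 ? "y" : "ies");
        for (i = 0; i < nptes && i < 3; i++)
                printf("    ptep[%lu] offset = %2lu MB\n", i, (i << PMD_SHIFT) >> 20);
}

int main(void)
{
        show(HPAGE_SHIFT);        /* 8 MB   -> nptes == 1  */
        show(HPAGE_256MB_SHIFT);  /* 256 MB -> nptes == 32 */
        return 0;
}
```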

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
+        unsigned int i, nptes, hugepage_shift;
+        unsigned long size;
        pte_t entry;

        entry = *ptep;
+        size = huge_tte_to_size(entry);
+        nptes = size >> PMD_SHIFT;
+        hugepage_shift = pte_none(entry) ? PAGE_SIZE : huge_tte_to_shift(entry);
+
        if (pte_present(entry))
-                mm->context.hugetlb_pte_count--;
+                mm->context.hugetlb_pte_count -= nptes;

-        addr &= HPAGE_MASK;
-        *ptep = __pte(0UL);
+        addr &= ~(size - 1);
+        for (i = 0; i < nptes; i++)
+                ptep[i] = __pte(0UL);

-        /* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
-        maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
-        maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0);
+        maybe_tlb_batch_add(mm, addr, ptep, entry, 0, hugepage_shift);
+        /* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
+        if (size == HPAGE_SIZE)
+                maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
+                                    hugepage_shift);

        return entry;
}

int pmd_huge(pmd_t pmd)
{
        return !pmd_none(pmd) &&
                (pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
[... 103 unchanged lines hidden ...]