// SPDX-License-Identifier: GPL-2.0
/*
 * IBM System z Huge TLB Page Support for Kernel.
 *
 * Copyright IBM Corp. 2007,2020
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#define KMSG_COMPONENT "hugetlb"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/security.h>

/*
 * If the bit selected by single-bit bitmask "a" is set within "x", move
 * it to the position indicated by single-bit bitmask "b".
 */
#define move_set_bit(x, a, b)	(((x) & (a)) >> ilog2(a) << ilog2(b))
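/*
 * Worked example for illustration: with x = 0x002, a = 0x002 and b = 0x400,
 * (x & a) = 0x002, shifted right by ilog2(0x002) = 1 gives 0x001, shifted
 * left by ilog2(0x400) = 10 gives 0x400. If the bit selected by "a" is
 * clear in "x", the result is 0. Both "a" and "b" must be single-bit
 * masks, since ilog2() only yields the position of the most significant
 * set bit.
 */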
static inline unsigned long __pte_to_rste(pte_t pte)
{
	unsigned long rste;

	/*
	 * Convert encoding		pte bits	pmd / pud bits
	 *				lIR.uswrdy.p	dy..R...I...wr
	 * empty			010.000000.0 -> 00..0...1...00
	 * prot-none, clean, old	111.000000.1 -> 00..1...1...00
	 * prot-none, clean, young	111.000001.1 -> 01..1...1...00
	 * prot-none, dirty, old	111.000010.1 -> 10..1...1...00
	 * prot-none, dirty, young	111.000011.1 -> 11..1...1...00
	 * read-only, clean, old	111.000100.1 -> 00..1...1...01
	 * read-only, clean, young	101.000101.1 -> 01..1...0...01
	 * read-only, dirty, old	111.000110.1 -> 10..1...1...01
	 * read-only, dirty, young	101.000111.1 -> 11..1...0...01
	 * read-write, clean, old	111.001100.1 -> 00..1...1...11
	 * read-write, clean, young	101.001101.1 -> 01..1...0...11
	 * read-write, dirty, old	110.001110.1 -> 10..0...1...11
	 * read-write, dirty, young	100.001111.1 -> 11..0...0...11
	 * HW-bits: R read-only, I invalid
	 * SW-bits: p present, y young, d dirty, r read, w write, s special,
	 *	    u unused, l large
	 */
	if (pte_present(pte)) {
		rste = pte_val(pte) & PAGE_MASK;
		rste |= move_set_bit(pte_val(pte), _PAGE_READ,
				     _SEGMENT_ENTRY_READ);
		rste |= move_set_bit(pte_val(pte), _PAGE_WRITE,
				     _SEGMENT_ENTRY_WRITE);
		rste |= move_set_bit(pte_val(pte), _PAGE_INVALID,
				     _SEGMENT_ENTRY_INVALID);
		rste |= move_set_bit(pte_val(pte), _PAGE_PROTECT,
				     _SEGMENT_ENTRY_PROTECT);
		rste |= move_set_bit(pte_val(pte), _PAGE_DIRTY,
				     _SEGMENT_ENTRY_DIRTY);
		rste |= move_set_bit(pte_val(pte), _PAGE_YOUNG,
				     _SEGMENT_ENTRY_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
		rste |= move_set_bit(pte_val(pte), _PAGE_SOFT_DIRTY,
				     _SEGMENT_ENTRY_SOFT_DIRTY);
#endif
		rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC,
				     _SEGMENT_ENTRY_NOEXEC);
	} else
		rste = _SEGMENT_ENTRY_EMPTY;
	return rste;
}

static inline pte_t __rste_to_pte(unsigned long rste)
{
	int present;
	pte_t pte;

	if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		present = pud_present(__pud(rste));
	else
		present = pmd_present(__pmd(rste));

	/*
	 * Convert encoding		pmd / pud bits	pte bits
	 *				dy..R...I...wr	lIR.uswrdy.p
	 * empty			00..0...1...00 -> 010.000000.0
	 * prot-none, clean, old	00..1...1...00 -> 111.000000.1
	 * prot-none, clean, young	01..1...1...00 -> 111.000001.1
	 * prot-none, dirty, old	10..1...1...00 -> 111.000010.1
	 * prot-none, dirty, young	11..1...1...00 -> 111.000011.1
	 * read-only, clean, old	00..1...1...01 -> 111.000100.1
	 * read-only, clean, young	01..1...0...01 -> 101.000101.1
	 * read-only, dirty, old	10..1...1...01 -> 111.000110.1
	 * read-only, dirty, young	11..1...0...01 -> 101.000111.1
	 * read-write, clean, old	00..1...1...11 -> 111.001100.1
	 * read-write, clean, young	01..1...0...11 -> 101.001101.1
	 * read-write, dirty, old	10..0...1...11 -> 110.001110.1
	 * read-write, dirty, young	11..0...0...11 -> 100.001111.1
	 * HW-bits: R read-only, I invalid
	 * SW-bits: p present, y young, d dirty, r read, w write, s special,
	 *	    u unused, l large
	 */
	if (present) {
		pte_val(pte) = rste & _SEGMENT_ENTRY_ORIGIN_LARGE;
		pte_val(pte) |= _PAGE_LARGE | _PAGE_PRESENT;
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_READ,
					     _PAGE_READ);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_WRITE,
					     _PAGE_WRITE);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_INVALID,
					     _PAGE_INVALID);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_PROTECT,
					     _PAGE_PROTECT);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_DIRTY,
					     _PAGE_DIRTY);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_YOUNG,
					     _PAGE_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY,
					     _PAGE_SOFT_DIRTY);
#endif
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC,
					     _PAGE_NOEXEC);
	} else
		pte_val(pte) = _PAGE_INVALID;
	return pte;
}
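/*
 * The two conversions above are exact inverses of each other for the
 * encodings listed in their tables: for example, the "read-write, dirty,
 * young" pte 100.001111.1 maps to 11..0...0...11 and back again.
 * Non-present entries are collapsed to _SEGMENT_ENTRY_EMPTY and
 * _PAGE_INVALID, respectively (the "empty" rows).
 */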
static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
{
	struct page *page;
	unsigned long size, paddr;

	if (!mm_uses_skeys(mm) ||
	    rste & _SEGMENT_ENTRY_INVALID)
		return;

	if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
		page = pud_page(__pud(rste));
		size = PUD_SIZE;
		paddr = rste & PUD_MASK;
	} else {
		page = pmd_page(__pmd(rste));
		size = PMD_SIZE;
		paddr = rste & PMD_MASK;
	}

	if (!test_and_set_bit(PG_arch_1, &page->flags))
		__storage_key_init_range(paddr, paddr + size - 1);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	unsigned long rste;

	rste = __pte_to_rste(pte);
	if (!MACHINE_HAS_NX)
		rste &= ~_SEGMENT_ENTRY_NOEXEC;

	/* Set correct table type for 2G hugepages */
	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
		if (likely(pte_present(pte)))
			rste |= _REGION3_ENTRY_LARGE;
		rste |= _REGION_ENTRY_TYPE_R3;
	} else if (likely(pte_present(pte)))
		rste |= _SEGMENT_ENTRY_LARGE;

	clear_huge_pte_skeys(mm, rste);
	pte_val(*ptep) = rste;
}

pte_t huge_ptep_get(pte_t *ptep)
{
	return __rste_to_pte(pte_val(*ptep));
}
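/*
 * huge_ptep_get_and_clear() reads the old entry through huge_ptep_get()
 * first and then replaces it with an empty one; the pudp/pmdp_xchg_direct()
 * helpers perform the exchange including the required TLB invalidation.
 */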
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get(ptep);
	pmd_t *pmdp = (pmd_t *) ptep;
	pud_t *pudp = (pud_t *) ptep;

	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pudp_xchg_direct(mm, addr, pudp, __pud(_REGION3_ENTRY_EMPTY));
	else
		pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
	return pte;
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (p4dp) {
		pudp = pud_alloc(mm, p4dp, addr);
		if (pudp) {
			if (sz == PUD_SIZE)
				return (pte_t *) pudp;
			else if (sz == PMD_SIZE)
				pmdp = pmd_alloc(mm, pudp, addr);
		}
	}
	return (pte_t *) pmdp;
}

pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	if (pgd_present(*pgdp)) {
		p4dp = p4d_offset(pgdp, addr);
		if (p4d_present(*p4dp)) {
			pudp = pud_offset(p4dp, addr);
			if (pud_present(*pudp)) {
				if (pud_large(*pudp))
					return (pte_t *) pudp;
				pmdp = pmd_offset(pudp, addr);
			}
		}
	}
	return (pte_t *) pmdp;
}

int pmd_huge(pmd_t pmd)
{
	return pmd_large(pmd);
}

int pud_huge(pud_t pud)
{
	return pud_large(pud);
}

struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int flags)
{
	if (flags & FOLL_GET)
		return NULL;

	return pud_page(*pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
}
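/*
 * On s390, PMD_SIZE huge pages are 1 MB segment pages, available with the
 * EDAT1 facility, and PUD_SIZE huge pages are 2 GB region third pages,
 * available with the EDAT2 facility.
 */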
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	if (MACHINE_HAS_EDAT1 && size == PMD_SIZE)
		return true;
	else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE)
		return true;
	else
		return false;
}

static __init int setup_hugepagesz(char *opt)
{
	unsigned long size;
	char *string = opt;

	size = memparse(opt, &opt);
	if (arch_hugetlb_valid_size(size)) {
		hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
	} else {
		hugetlb_bad_size();
		pr_err("hugepagesz= specifies an unsupported page size %s\n",
			string);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
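/*
 * For example, booting with "hugepagesz=1M hugepages=64" on a machine with
 * EDAT1 registers the 1 MB hstate and preallocates 64 such pages, while an
 * unsupported size such as "hugepagesz=2M" is rejected with the pr_err()
 * above.
 */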
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		goto check_asce_limit;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	if (mm->get_unmapped_area == arch_get_unmapped_area)
		addr = hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		addr = hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
	if (offset_in_page(addr))
		return addr;

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}
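/*
 * For reference, hugetlb_get_unmapped_area() above is what ultimately
 * serves an anonymous huge page mapping requested from user space, e.g.
 * (illustrative only; 1 MB assumes an EDAT1 machine with the default
 * 1 MB huge page size):
 *
 *	mmap(NULL, 1UL << 20, PROT_READ | PROT_WRITE,
 *	     MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 */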