// SPDX-License-Identifier: GPL-2.0
/*
 * IBM System z Huge TLB Page Support for Kernel.
 *
 * Copyright IBM Corp. 2007,2020
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#define KMSG_COMPONENT "hugetlb"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <asm/pgalloc.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/security.h>

/*
 * If the bit selected by single-bit bitmask "a" is set within "x", move
 * it to the position indicated by single-bit bitmask "b".
 */
#define move_set_bit(x, a, b)	(((x) & (a)) >> ilog2(a) << ilog2(b))

/*
 * Convert a huge pte into the corresponding segment or region-3 table
 * entry ("rste") encoding, relocating each software/hardware pte bit to
 * its pmd/pud position per the table below.  A non-present pte maps to
 * _SEGMENT_ENTRY_EMPTY.
 */
static inline unsigned long __pte_to_rste(pte_t pte)
{
	unsigned long rste;

	/*
	 * Convert encoding		pte bits	pmd / pud bits
	 *				lIR.uswrdy.p	dy..R...I...wr
	 * empty			010.000000.0 -> 00..0...1...00
	 * prot-none, clean, old	111.000000.1 -> 00..1...1...00
	 * prot-none, clean, young	111.000001.1 -> 01..1...1...00
	 * prot-none, dirty, old	111.000010.1 -> 10..1...1...00
	 * prot-none, dirty, young	111.000011.1 -> 11..1...1...00
	 * read-only, clean, old	111.000100.1 -> 00..1...1...01
	 * read-only, clean, young	101.000101.1 -> 01..1...0...01
	 * read-only, dirty, old	111.000110.1 -> 10..1...1...01
	 * read-only, dirty, young	101.000111.1 -> 11..1...0...01
	 * read-write, clean, old	111.001100.1 -> 00..1...1...11
	 * read-write, clean, young	101.001101.1 -> 01..1...0...11
	 * read-write, dirty, old	110.001110.1 -> 10..0...1...11
	 * read-write, dirty, young	100.001111.1 -> 11..0...0...11
	 * HW-bits: R read-only, I invalid
	 * SW-bits: p present, y young, d dirty, r read, w write, s special,
	 *	    u unused, l large
	 */
	if (pte_present(pte)) {
		rste = pte_val(pte) & PAGE_MASK;
		rste |= move_set_bit(pte_val(pte), _PAGE_READ,
				     _SEGMENT_ENTRY_READ);
		rste |= move_set_bit(pte_val(pte), _PAGE_WRITE,
				     _SEGMENT_ENTRY_WRITE);
		rste |= move_set_bit(pte_val(pte), _PAGE_INVALID,
				     _SEGMENT_ENTRY_INVALID);
		rste |= move_set_bit(pte_val(pte), _PAGE_PROTECT,
				     _SEGMENT_ENTRY_PROTECT);
		rste |= move_set_bit(pte_val(pte), _PAGE_DIRTY,
				     _SEGMENT_ENTRY_DIRTY);
		rste |= move_set_bit(pte_val(pte), _PAGE_YOUNG,
				     _SEGMENT_ENTRY_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
		rste |= move_set_bit(pte_val(pte), _PAGE_SOFT_DIRTY,
				     _SEGMENT_ENTRY_SOFT_DIRTY);
#endif
		rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC,
				     _SEGMENT_ENTRY_NOEXEC);
	} else
		rste = _SEGMENT_ENTRY_EMPTY;
	return rste;
}

/*
 * Convert a segment or region-3 table entry ("rste") back into the huge
 * pte encoding; inverse of __pte_to_rste().  The entry type (pud vs. pmd)
 * is derived from the region-entry type bits, and a non-present entry
 * maps to _PAGE_INVALID.
 */
static inline pte_t __rste_to_pte(unsigned long rste)
{
	int present;
	pte_t pte;

	if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		present = pud_present(__pud(rste));
	else
		present = pmd_present(__pmd(rste));

	/*
	 * Convert encoding		pmd / pud bits	pte bits
	 *				dy..R...I...wr	lIR.uswrdy.p
	 * empty			00..0...1...00 -> 010.000000.0
	 * prot-none, clean, old	00..1...1...00 -> 111.000000.1
	 * prot-none, clean, young	01..1...1...00 -> 111.000001.1
	 * prot-none, dirty, old	10..1...1...00 -> 111.000010.1
	 * prot-none, dirty, young	11..1...1...00 -> 111.000011.1
	 * read-only, clean, old	00..1...1...01 -> 111.000100.1
	 * read-only, clean, young	01..1...0...01 -> 101.000101.1
	 * read-only, dirty, old	10..1...1...01 -> 111.000110.1
	 * read-only, dirty, young	11..1...0...01 -> 101.000111.1
	 * read-write, clean, old	00..1...1...11 -> 111.001100.1
	 * read-write, clean, young	01..1...0...11 -> 101.001101.1
	 * read-write, dirty, old	10..0...1...11 -> 110.001110.1
	 * read-write, dirty, young	11..0...0...11 -> 100.001111.1
	 * HW-bits: R read-only, I invalid
	 * SW-bits: p present, y young, d dirty, r read, w write, s special,
	 *	    u unused, l large
	 */
	if (present) {
		pte_val(pte) = rste & _SEGMENT_ENTRY_ORIGIN_LARGE;
		pte_val(pte) |= _PAGE_LARGE | _PAGE_PRESENT;
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_READ,
					     _PAGE_READ);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_WRITE,
					     _PAGE_WRITE);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_INVALID,
					     _PAGE_INVALID);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_PROTECT,
					     _PAGE_PROTECT);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_DIRTY,
					     _PAGE_DIRTY);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_YOUNG,
					     _PAGE_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY,
					     _PAGE_SOFT_DIRTY);
#endif
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC,
					     _PAGE_NOEXEC);
	} else
		pte_val(pte) = _PAGE_INVALID;
	return pte;
}

/*
 * Initialize the storage keys for the whole huge page mapped by "rste".
 * Done only if the mm uses storage keys and the entry is valid, and only
 * once per page: the PG_arch_1 flag on the head page guards against
 * repeated initialization.
 */
static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
{
	struct page *page;
	unsigned long size, paddr;

	if (!mm_uses_skeys(mm) ||
	    rste & _SEGMENT_ENTRY_INVALID)
		return;

	/* Region-3 entries map 2G (PUD) pages, segment entries map 1M (PMD) */
	if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
		page = pud_page(__pud(rste));
		size = PUD_SIZE;
		paddr = rste & PUD_MASK;
	} else {
		page = pmd_page(__pmd(rste));
		size = PMD_SIZE;
		paddr = rste & PMD_MASK;
	}

	if (!test_and_set_bit(PG_arch_1, &page->flags))
		__storage_key_init_range(paddr, paddr + size - 1);
}

/*
 * Install a huge pte: convert it to rste format, drop the no-exec bit if
 * the machine lacks the NX facility, set the correct table type / large
 * bit for the level the entry lives at, and initialize storage keys.
 */
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	unsigned long rste;

	rste = __pte_to_rste(pte);
	if (!MACHINE_HAS_NX)
		rste &= ~_SEGMENT_ENTRY_NOEXEC;

	/* Set correct table type for 2G hugepages */
	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
		if (likely(pte_present(pte)))
			rste |= _REGION3_ENTRY_LARGE;
		rste |= _REGION_ENTRY_TYPE_R3;
	} else if (likely(pte_present(pte)))
		rste |= _SEGMENT_ENTRY_LARGE;

	clear_huge_pte_skeys(mm, rste);
	set_pte(ptep, __pte(rste));
}

/* Read a huge pte, converting the stored rste back to pte format. */
pte_t huge_ptep_get(pte_t *ptep)
{
	return __rste_to_pte(pte_val(*ptep));
}

/*
 * Atomically clear a huge pte and return its previous value, using the
 * pud or pmd exchange primitive matching the entry's table level.
 */
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get(ptep);
	pmd_t *pmdp = (pmd_t *) ptep;
	pud_t *pudp = (pud_t *) ptep;

	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pudp_xchg_direct(mm, addr, pudp, __pud(_REGION3_ENTRY_EMPTY));
	else
		pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
	return pte;
}

/*
 * Allocate page table levels for addr down to the level matching the
 * hugepage size sz (PUD_SIZE or PMD_SIZE) and return that table entry
 * cast to a pte pointer; NULL if allocation fails.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (p4dp) {
		pudp = pud_alloc(mm, p4dp, addr);
		if (pudp) {
			if (sz == PUD_SIZE)
				return (pte_t *) pudp;
			else if (sz == PMD_SIZE)
				pmdp = pmd_alloc(mm, pudp, addr);
		}
	}
	return (pte_t *) pmdp;
}

/*
 * Walk the page table for addr and return the pud entry (for a large
 * pud) or the pmd entry, cast to a pte pointer; NULL if an upper level
 * is not present.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	if (pgd_present(*pgdp)) {
		p4dp = p4d_offset(pgdp, addr);
		if (p4d_present(*p4dp)) {
			pudp = pud_offset(p4dp, addr);
			if (pud_present(*pudp)) {
				if (pud_large(*pudp))
					return (pte_t *) pudp;
				pmdp = pmd_offset(pudp, addr);
			}
		}
	}
	return (pte_t *) pmdp;
}

int pmd_huge(pmd_t pmd)
{
	return pmd_large(pmd);
}

int pud_huge(pud_t pud)
{
	return pud_large(pud);
}

struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int flags)
{
	if (flags & FOLL_GET)
		return NULL;

	return pud_page(*pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
}

/*
 * 1M (PMD) hugepages need the EDAT1 facility, 2G (PUD) hugepages need
 * EDAT2; everything else is rejected.
 */
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	if (MACHINE_HAS_EDAT1 && size == PMD_SIZE)
		return true;
	else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE)
		return true;
	else
		return false;
}

/*
 * Bottom-up search for a free, hugepage-aligned area between mmap_base
 * and TASK_SIZE.
 */
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

/*
 * Top-down search for a free, hugepage-aligned area below mmap_base,
 * falling back to a bottom-up search on failure.
 */
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

/*
 * arch hook for hugetlb mmap(): validate len/addr, honor MAP_FIXED and
 * address hints, then delegate to the bottom-up or top-down search
 * matching the mm's layout, and finally check the ASCE limit.
 */
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		goto check_asce_limit;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	if (mm->get_unmapped_area == arch_get_unmapped_area)
		addr = hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		addr = hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
	if (offset_in_page(addr))
		return addr;

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}