1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0 253492b1dSGerald Schaefer /* 353492b1dSGerald Schaefer * IBM System z Huge TLB Page Support for Kernel. 453492b1dSGerald Schaefer * 55f490a52SGerald Schaefer * Copyright IBM Corp. 2007,2020 653492b1dSGerald Schaefer * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com> 753492b1dSGerald Schaefer */ 853492b1dSGerald Schaefer 9d08de8e2SGerald Schaefer #define KMSG_COMPONENT "hugetlb" 10d08de8e2SGerald Schaefer #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 11d08de8e2SGerald Schaefer 12f7ea534aSJakub Kicinski #include <asm/pgalloc.h> 1353492b1dSGerald Schaefer #include <linux/mm.h> 1453492b1dSGerald Schaefer #include <linux/hugetlb.h> 155f490a52SGerald Schaefer #include <linux/mman.h> 165f490a52SGerald Schaefer #include <linux/sched/mm.h> 175f490a52SGerald Schaefer #include <linux/security.h> 1853492b1dSGerald Schaefer 19bc29b7acSGerald Schaefer /* 20bc29b7acSGerald Schaefer * If the bit selected by single-bit bitmask "a" is set within "x", move 21bc29b7acSGerald Schaefer * it to the position indicated by single-bit bitmask "b". 
 */
#define move_set_bit(x, a, b)	(((x) & (a)) >> ilog2(a) << ilog2(b))

/*
 * Translate a huge pte into the bit layout of a segment (pmd) or
 * region-third (pud) table entry ("rste").  The page-frame origin is kept
 * via PAGE_MASK; each software/hardware bit is moved to its pmd/pud
 * position individually.  Non-present ptes map to an empty segment entry.
 */
static inline unsigned long __pte_to_rste(pte_t pte)
{
	unsigned long rste;

	/*
	 * Convert encoding		pte bits	pmd / pud bits
	 *				lIR.uswrdy.p	dy..R...I...wr
	 * empty			010.000000.0 -> 00..0...1...00
	 * prot-none, clean, old	111.000000.1 -> 00..1...1...00
	 * prot-none, clean, young	111.000001.1 -> 01..1...1...00
	 * prot-none, dirty, old	111.000010.1 -> 10..1...1...00
	 * prot-none, dirty, young	111.000011.1 -> 11..1...1...00
	 * read-only, clean, old	111.000100.1 -> 00..1...1...01
	 * read-only, clean, young	101.000101.1 -> 01..1...0...01
	 * read-only, dirty, old	111.000110.1 -> 10..1...1...01
	 * read-only, dirty, young	101.000111.1 -> 11..1...0...01
	 * read-write, clean, old	111.001100.1 -> 00..1...1...11
	 * read-write, clean, young	101.001101.1 -> 01..1...0...11
	 * read-write, dirty, old	110.001110.1 -> 10..0...1...11
	 * read-write, dirty, young	100.001111.1 -> 11..0...0...11
	 * HW-bits: R read-only, I invalid
	 * SW-bits: p present, y young, d dirty, r read, w write, s special,
	 *	    u unused, l large
	 */
	if (pte_present(pte)) {
		rste = pte_val(pte) & PAGE_MASK;
		rste |= move_set_bit(pte_val(pte), _PAGE_READ,
				     _SEGMENT_ENTRY_READ);
		rste |= move_set_bit(pte_val(pte), _PAGE_WRITE,
				     _SEGMENT_ENTRY_WRITE);
		rste |= move_set_bit(pte_val(pte), _PAGE_INVALID,
				     _SEGMENT_ENTRY_INVALID);
		rste |= move_set_bit(pte_val(pte), _PAGE_PROTECT,
				     _SEGMENT_ENTRY_PROTECT);
		rste |= move_set_bit(pte_val(pte), _PAGE_DIRTY,
				     _SEGMENT_ENTRY_DIRTY);
		rste |= move_set_bit(pte_val(pte), _PAGE_YOUNG,
				     _SEGMENT_ENTRY_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
		rste |= move_set_bit(pte_val(pte), _PAGE_SOFT_DIRTY,
				     _SEGMENT_ENTRY_SOFT_DIRTY);
#endif
		rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC,
				     _SEGMENT_ENTRY_NOEXEC);
	} else
		rste = _SEGMENT_ENTRY_EMPTY;
	return rste;
}

/*
 * Inverse of __pte_to_rste(): translate a segment or region-third table
 * entry back into pte bit layout.  Whether the entry is a pud or a pmd is
 * decided from its region-entry type field, so the matching present check
 * can be used.  A non-present rste yields an invalid pte.
 */
static inline pte_t __rste_to_pte(unsigned long rste)
{
	unsigned long pteval;
	int present;

	if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		present = pud_present(__pud(rste));
	else
		present = pmd_present(__pmd(rste));

	/*
	 * Convert encoding		pmd / pud bits	pte bits
	 *				dy..R...I...wr	lIR.uswrdy.p
	 * empty			00..0...1...00 -> 010.000000.0
	 * prot-none, clean, old	00..1...1...00 -> 111.000000.1
	 * prot-none, clean, young	01..1...1...00 -> 111.000001.1
	 * prot-none, dirty, old	10..1...1...00 -> 111.000010.1
	 * prot-none, dirty, young	11..1...1...00 -> 111.000011.1
	 * read-only, clean, old	00..1...1...01 -> 111.000100.1
	 * read-only, clean, young	01..1...0...01 -> 101.000101.1
	 * read-only, dirty, old	10..1...1...01 -> 111.000110.1
	 * read-only, dirty, young	11..1...0...01 -> 101.000111.1
	 * read-write, clean, old	00..1...1...11 -> 111.001100.1
	 * read-write, clean, young	01..1...0...11 -> 101.001101.1
	 * read-write, dirty, old	10..0...1...11 -> 110.001110.1
	 * read-write, dirty, young	11..0...0...11 -> 100.001111.1
	 * HW-bits: R read-only, I invalid
	 * SW-bits: p present, y young, d dirty, r read, w write, s special,
	 *	    u unused, l large
	 */
	if (present) {
		pteval = rste & _SEGMENT_ENTRY_ORIGIN_LARGE;
		pteval |= _PAGE_LARGE | _PAGE_PRESENT;
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_READ, _PAGE_READ);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_WRITE, _PAGE_WRITE);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_INVALID, _PAGE_INVALID);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_PROTECT, _PAGE_PROTECT);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_DIRTY, _PAGE_DIRTY);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_YOUNG, _PAGE_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY, _PAGE_SOFT_DIRTY);
#endif
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC, _PAGE_NOEXEC);
	} else
		pteval = _PAGE_INVALID;
	return __pte(pteval);
}

/*
 * Initialize the storage keys of the huge page mapped by a valid rste.
 * Only needed when the mm actually uses storage keys; the PG_arch_1 page
 * flag ensures the (expensive) key initialization runs at most once per
 * backing page.  PUD- vs PMD-sized ranges are told apart by the
 * region-entry type field.
 */
static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
{
	struct page *page;
	unsigned long size, paddr;

	if (!mm_uses_skeys(mm) ||
	    rste & _SEGMENT_ENTRY_INVALID)
		return;

	if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
		page = pud_page(__pud(rste));
		size = PUD_SIZE;
		paddr = rste & PUD_MASK;
	} else {
		page = pmd_page(__pmd(rste));
		size = PMD_SIZE;
		paddr = rste & PMD_MASK;
	}

	if (!test_and_set_bit(PG_arch_1, &page->flags))
		__storage_key_init_range(paddr, paddr + size - 1);
}

/*
 * Install a huge pte.  The pte is converted to rste format; the NOEXEC
 * bit is stripped on machines without the NX facility.  Whether the
 * target is a region-third (2G) or segment (1M) table entry is derived
 * from the current entry at ptep, and the matching LARGE/type bits are
 * set for present ptes before storing.
 */
void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t pte)
{
	unsigned long rste;

	rste = __pte_to_rste(pte);
	if (!MACHINE_HAS_NX)
		rste &= ~_SEGMENT_ENTRY_NOEXEC;

	/* Set correct table type for 2G hugepages */
	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
		if (likely(pte_present(pte)))
			rste |= _REGION3_ENTRY_LARGE;
		rste |= _REGION_ENTRY_TYPE_R3;
	} else if (likely(pte_present(pte)))
		rste |= _SEGMENT_ENTRY_LARGE;

	clear_huge_pte_skeys(mm, rste);
	set_pte(ptep, __pte(rste));
}

/* Generic hugetlb entry point; the page size (sz) is not needed here. */
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte, unsigned long sz)
{
	__set_huge_pte_at(mm, addr, ptep, pte);
}

/* Read a huge pte, translating the stored rste back to pte format. */
pte_t huge_ptep_get(pte_t *ptep)
{
	return __rste_to_pte(pte_val(*ptep));
}

/*
 * Atomically replace a huge pte with an empty entry and return the old
 * value.  Uses the direct pudp/pmdp exchange primitives depending on
 * whether ptep actually points to a region-third or a segment entry.
 */
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get(ptep);
	pmd_t *pmdp = (pmd_t *) ptep;
	pud_t *pudp = (pud_t *) ptep;

	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pudp_xchg_direct(mm, addr, pudp, __pud(_REGION3_ENTRY_EMPTY));
	else
		pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
	return pte;
}

/*
 * Allocate page tables down to the level matching the huge page size:
 * the pud entry itself for PUD_SIZE pages, the pmd entry for PMD_SIZE
 * pages.  Returns NULL if an intermediate allocation fails or sz is
 * neither PUD_SIZE nor PMD_SIZE.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (p4dp) {
		pudp = pud_alloc(mm, p4dp, addr);
		if (pudp) {
			if (sz == PUD_SIZE)
				return (pte_t *) pudp;
			else if (sz == PMD_SIZE)
				pmdp = pmd_alloc(mm, pudp, addr);
		}
	}
	return (pte_t *) pmdp;
}

/*
 * Walk the page table for addr and return a pointer to the huge pte
 * entry: the pud entry if it is a leaf (2G page), otherwise the pmd
 * entry.  Returns NULL if any intermediate level is not present.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	if (pgd_present(*pgdp)) {
		p4dp = p4d_offset(pgdp, addr);
		if (p4d_present(*p4dp)) {
			pudp = pud_offset(p4dp, addr);
			if (pud_present(*pudp)) {
				if (pud_leaf(*pudp))
					return (pte_t *) pudp;
				pmdp = pmd_offset(pudp, addr);
			}
		}
	}
	return (pte_t *) pmdp;
}

/* Huge page predicate for pmd entries (segment-size pages). */
int pmd_huge(pmd_t pmd)
{
	return pmd_large(pmd);
}

/* Huge page predicate for pud entries (region-third-size pages). */
int pud_huge(pud_t pud)
{
	return pud_leaf(pud);
}

/*
 * A huge page size is valid only if the corresponding hardware facility
 * is installed: EDAT1 for PMD_SIZE pages, EDAT2 for PUD_SIZE pages.
 */
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	if (MACHINE_HAS_EDAT1 && size == PMD_SIZE)
		return true;
	else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE)
		return true;
	else
		return false;
}

/*
 * Bottom-up search for a free, huge-page-aligned mapping between
 * mmap_base and TASK_SIZE.
 */
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

/*
 * Top-down search for a free, huge-page-aligned mapping below mmap_base,
 * falling back to a bottom-up search on failure.
 */
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

/*
 * arch hook for hugetlb mmap(): validate length and any fixed/hinted
 * address, then pick a bottom-up or top-down search to match the mm's
 * normal get_unmapped_area policy.  The chosen address is finally
 * checked against (and may upgrade) the address space (ASCE) limit.
 */
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		goto check_asce_limit;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	if (mm->get_unmapped_area == arch_get_unmapped_area)
		addr = hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		addr = hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
	if (offset_in_page(addr))
		return addr;

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}