// SPDX-License-Identifier: GPL-2.0
/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright IBM Corp. 2007,2020
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#define KMSG_COMPONENT "hugetlb"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/security.h>

/*
 * If the bit selected by single-bit bitmask "a" is set within "x", move
 * it to the position indicated by single-bit bitmask "b".
 */
#define move_set_bit(x, a, b)	(((x) & (a)) >> ilog2(a) << ilog2(b))
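/*
 * Example: with x = 0x500, a = 0x100 and b = 0x002, bit 8 of x is set,
 * so ((x) & (a)) >> ilog2(a) yields 1, and shifting left by ilog2(b) = 1
 * produces 0x002; if bit 8 of x were clear, the result would be 0.
 */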

static inline unsigned long __pte_to_rste(pte_t pte)
{
	unsigned long rste;

	/*
	 * Convert encoding		pte bits	pmd / pud bits
	 *				lIR.uswrdy.p	dy..R...I...wr
	 * empty			010.000000.0 -> 00..0...1...00
	 * prot-none, clean, old	111.000000.1 -> 00..1...1...00
	 * prot-none, clean, young	111.000001.1 -> 01..1...1...00
	 * prot-none, dirty, old	111.000010.1 -> 10..1...1...00
	 * prot-none, dirty, young	111.000011.1 -> 11..1...1...00
	 * read-only, clean, old	111.000100.1 -> 00..1...1...01
	 * read-only, clean, young	101.000101.1 -> 01..1...0...01
	 * read-only, dirty, old	111.000110.1 -> 10..1...1...01
	 * read-only, dirty, young	101.000111.1 -> 11..1...0...01
	 * read-write, clean, old	111.001100.1 -> 00..1...1...11
	 * read-write, clean, young	101.001101.1 -> 01..1...0...11
	 * read-write, dirty, old	110.001110.1 -> 10..0...1...11
	 * read-write, dirty, young	100.001111.1 -> 11..0...0...11
	 * HW-bits: R read-only, I invalid
	 * SW-bits: p present, y young, d dirty, r read, w write, s special,
	 *	    u unused, l large
	 */
	if (pte_present(pte)) {
		rste = pte_val(pte) & PAGE_MASK;
		rste |= move_set_bit(pte_val(pte), _PAGE_READ,
				     _SEGMENT_ENTRY_READ);
		rste |= move_set_bit(pte_val(pte), _PAGE_WRITE,
				     _SEGMENT_ENTRY_WRITE);
		rste |= move_set_bit(pte_val(pte), _PAGE_INVALID,
				     _SEGMENT_ENTRY_INVALID);
		rste |= move_set_bit(pte_val(pte), _PAGE_PROTECT,
				     _SEGMENT_ENTRY_PROTECT);
		rste |= move_set_bit(pte_val(pte), _PAGE_DIRTY,
				     _SEGMENT_ENTRY_DIRTY);
		rste |= move_set_bit(pte_val(pte), _PAGE_YOUNG,
				     _SEGMENT_ENTRY_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
		rste |= move_set_bit(pte_val(pte), _PAGE_SOFT_DIRTY,
				     _SEGMENT_ENTRY_SOFT_DIRTY);
#endif
		rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC,
				     _SEGMENT_ENTRY_NOEXEC);
	} else
		rste = _SEGMENT_ENTRY_EMPTY;
	return rste;
}

static inline pte_t __rste_to_pte(unsigned long rste)
{
	int present;
	pte_t pte;

	if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		present = pud_present(__pud(rste));
	else
		present = pmd_present(__pmd(rste));

	/*
	 * Convert encoding		pmd / pud bits	pte bits
	 *				dy..R...I...wr	lIR.uswrdy.p
	 * empty			00..0...1...00 -> 010.000000.0
	 * prot-none, clean, old	00..1...1...00 -> 111.000000.1
	 * prot-none, clean, young	01..1...1...00 -> 111.000001.1
	 * prot-none, dirty, old	10..1...1...00 -> 111.000010.1
	 * prot-none, dirty, young	11..1...1...00 -> 111.000011.1
	 * read-only, clean, old	00..1...1...01 -> 111.000100.1
	 * read-only, clean, young	01..1...0...01 -> 101.000101.1
	 * read-only, dirty, old	10..1...1...01 -> 111.000110.1
	 * read-only, dirty, young	11..1...0...01 -> 101.000111.1
	 * read-write, clean, old	00..1...1...11 -> 111.001100.1
	 * read-write, clean, young	01..1...0...11 -> 101.001101.1
	 * read-write, dirty, old	10..0...1...11 -> 110.001110.1
	 * read-write, dirty, young	11..0...0...11 -> 100.001111.1
	 * HW-bits: R read-only, I invalid
	 * SW-bits: p present, y young, d dirty, r read, w write, s special,
	 *	    u unused, l large
	 */
	if (present) {
		pte_val(pte) = rste & _SEGMENT_ENTRY_ORIGIN_LARGE;
		pte_val(pte) |= _PAGE_LARGE | _PAGE_PRESENT;
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_READ,
					     _PAGE_READ);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_WRITE,
					     _PAGE_WRITE);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_INVALID,
					     _PAGE_INVALID);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_PROTECT,
					     _PAGE_PROTECT);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_DIRTY,
					     _PAGE_DIRTY);
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_YOUNG,
					     _PAGE_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY,
					     _PAGE_SOFT_DIRTY);
#endif
		pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC,
					     _PAGE_NOEXEC);
	} else
		pte_val(pte) = _PAGE_INVALID;
	return pte;
}

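/*
 * Make sure the storage keys of a huge page are initialized exactly
 * once: PG_arch_1 on the head page records that the key for the whole
 * PMD- or PUD-sized range has already been set up. This only matters
 * when the mm actually uses storage keys, i.e. for KVM guests.
 */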
static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
{
	struct page *page;
	unsigned long size, paddr;

	if (!mm_uses_skeys(mm) ||
	    rste & _SEGMENT_ENTRY_INVALID)
		return;

	if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
		page = pud_page(__pud(rste));
		size = PUD_SIZE;
		paddr = rste & PUD_MASK;
	} else {
		page = pmd_page(__pmd(rste));
		size = PMD_SIZE;
		paddr = rste & PMD_MASK;
	}

	if (!test_and_set_bit(PG_arch_1, &page->flags))
		__storage_key_init_range(paddr, paddr + size - 1);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	unsigned long rste;

	rste = __pte_to_rste(pte);
	if (!MACHINE_HAS_NX)
		rste &= ~_SEGMENT_ENTRY_NOEXEC;

	/* Set correct table type for 2G hugepages */
	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		rste |= _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE;
	else
		rste |= _SEGMENT_ENTRY_LARGE;
	clear_huge_pte_skeys(mm, rste);
	pte_val(*ptep) = rste;
}

pte_t huge_ptep_get(pte_t *ptep)
{
	return __rste_to_pte(pte_val(*ptep));
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get(ptep);
	pmd_t *pmdp = (pmd_t *) ptep;
	pud_t *pudp = (pud_t *) ptep;

	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pudp_xchg_direct(mm, addr, pudp, __pud(_REGION3_ENTRY_EMPTY));
	else
		pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
	return pte;
}

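/*
 * A "huge pte" on s390 is really a segment (PMD) or region-third (PUD)
 * table entry, so the table walk stops at the level that matches the
 * page size: PUD_SIZE (2 GB) returns the pud entry itself, while
 * PMD_SIZE (1 MB) descends one more level.
 */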
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (p4dp) {
		pudp = pud_alloc(mm, p4dp, addr);
		if (pudp) {
			if (sz == PUD_SIZE)
				return (pte_t *) pudp;
			else if (sz == PMD_SIZE)
				pmdp = pmd_alloc(mm, pudp, addr);
		}
	}
	return (pte_t *) pmdp;
}

pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	if (pgd_present(*pgdp)) {
		p4dp = p4d_offset(pgdp, addr);
		if (p4d_present(*p4dp)) {
			pudp = pud_offset(p4dp, addr);
			if (pud_present(*pudp)) {
				if (pud_large(*pudp))
					return (pte_t *) pudp;
				pmdp = pmd_offset(pudp, addr);
			}
		}
	}
	return (pte_t *) pmdp;
}

int pmd_huge(pmd_t pmd)
{
	return pmd_large(pmd);
}

int pud_huge(pud_t pud)
{
	return pud_large(pud);
}

struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int flags)
{
	if (flags & FOLL_GET)
		return NULL;

	return pud_page(*pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
}

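/*
 * Parse the "hugepagesz=" kernel command line option. The supported
 * sizes depend on hardware facilities: EDAT1 provides 1 MB (segment)
 * pages, EDAT2 provides 2 GB (region-third) pages.
 */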
static __init int setup_hugepagesz(char *opt)
{
	unsigned long size;
	char *string = opt;

	size = memparse(opt, &opt);
	if (MACHINE_HAS_EDAT1 && size == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		hugetlb_bad_size();
		pr_err("hugepagesz= specifies an unsupported page size %s\n",
		       string);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

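/*
 * Arch-specific get_unmapped_area() for hugetlb mappings: pick the
 * bottom-up or top-down search to match the mm's layout, then let
 * check_asce_limit() upgrade the page table hierarchy if the resulting
 * address range exceeds the current ASCE limit.
 */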
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		goto check_asce_limit;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	if (mm->get_unmapped_area == arch_get_unmapped_area)
		addr = hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		addr = hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
	if (offset_in_page(addr))
		return addr;

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}