// SPDX-License-Identifier: GPL-2.0
/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/log2.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
EXPORT_SYMBOL(hpage_shift);

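/*
 * Allocate the page-table path for a huge-page address.  The address
 * is first scaled down with htlbpage_to_page() so that each huge page
 * takes a single PTE slot in ordinary page tables (see the comment in
 * hugetlb_free_pgd_range() below).
 */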
pte_t *
huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
	       unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	p4d = p4d_offset(pgd, taddr);
	pud = pud_alloc(mm, p4d, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_huge(mm, pmd, taddr);
	}
	return pte;
}

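/*
 * Look up the (scaled) PTE for a huge-page address; returns NULL if
 * any intermediate page-table level is not present.
 */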
pte_t *
huge_pte_offset (struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		p4d = p4d_offset(pgd, taddr);
		if (p4d_present(*p4d)) {
			pud = pud_offset(p4d, taddr);
			if (pud_present(*pud)) {
				pmd = pmd_offset(pud, taddr);
				if (pmd_present(*pmd))
					pte = pte_offset_huge(pmd, taddr);
			}
		}
	}

	return pte;
}

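/* _PAGE_P is the ia64 "present" bit: mark a huge PTE present. */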
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * No actual preparation is needed; just check that the length and
 * address are huge-page aligned and that the address falls in the
 * hugetlb region.
 */
int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return -EINVAL;

	return 0;
}

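/*
 * ia64 never maps huge pages with large PMD or PUD entries; they are
 * tracked as ordinary PTEs in the scaled hugetlb page tables, so no
 * PMD or PUD is ever "huge" here.
 */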
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	/*
	 * This is called to free hugetlb page tables.
	 *
	 * The offset of these addresses from the base of the hugetlb
	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
	 * the standard free_pgd_range will free the right page tables.
	 *
	 * If floor and ceiling are also in the hugetlb region, they
	 * must likewise be scaled down; but if outside, left unchanged.
	 */
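	/*
	 * For example, assuming the ia64 defaults of 16KB base pages
	 * (PAGE_SHIFT 14) and 256MB huge pages (HPAGE_SHIFT 28), an
	 * offset of one HPAGE_SIZE within the region scales down to an
	 * offset of one PAGE_SIZE, so each huge page occupies exactly
	 * one PTE slot in the scaled tables.
	 */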

	addr = htlbpage_to_page(addr);
	end  = htlbpage_to_page(end);
	if (REGION_NUMBER(floor) == RGN_HPAGE)
		floor = htlbpage_to_page(floor);
	if (REGION_NUMBER(ceiling) == RGN_HPAGE)
		ceiling = htlbpage_to_page(ceiling);

	free_pgd_range(tlb, addr, end, floor, ceiling);
}

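/*
 * Find an unmapped, HPAGE_SIZE-aligned area inside the dedicated
 * hugetlb region (RGN_HPAGE).  A hint address outside that region,
 * or one that is not huge-page aligned, falls back to the region
 * base.
 */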
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_unmapped_area_info info;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	/* Handle MAP_FIXED */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	/* This code assumes that RGN_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;

	info.flags = 0;
	info.length = len;
	info.low_limit = addr;
	info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
	info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

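/*
 * Parse the "hugepagesz=" early boot parameter, e.g. "hugepagesz=256M".
 * The size must be a power of two that the CPU can insert into the TLB
 * (as reported by PAL), larger than the base page size, and no larger
 * than the maximum buddy-allocator order.  On success, the hugetlb
 * region register is reprogrammed with the new page size.
 */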
static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * shouldn't happen, but just in case.
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
		size <= PAGE_SIZE ||
		size > (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * The boot CPU has already executed ia64_mmu_init() with
	 * HPAGE_SHIFT_DEFAULT; override the region register here with
	 * the new page shift.
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 0;
}
early_param("hugepagesz", hugetlb_setup_sz);