xref: /openbmc/linux/arch/ia64/mm/hugetlbpage.c (revision 8ec90bfd)
// SPDX-License-Identifier: GPL-2.0
/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/log2.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

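/*
 * Shift for the currently selected huge page size.  It starts out as
 * HPAGE_SHIFT_DEFAULT and may be overridden at boot with the
 * "hugepagesz=" parameter handled by hugetlb_setup_sz() below.
 */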
unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
EXPORT_SYMBOL(hpage_shift);

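/*
 * Allocate the page-table levels backing a huge page at @addr.
 *
 * The faulting address is first rescaled with htlbpage_to_page(): huge
 * pages live in their own region, and the page tables for that region
 * are indexed by the region offset scaled down by HPAGE_SIZE/PAGE_SIZE,
 * so that each PTE covers one huge page.
 *
 * Rough example (assuming the default 256MB huge pages and 16KB base
 * pages, i.e. hpage_shift == 28 and PAGE_SHIFT == 14): the huge page at
 * region offset 0x10000000 is looked up at scaled offset
 * 0x10000000 >> (28 - 14) == 0x4000, the second 16KB slot of the
 * region's page tables.
 */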
pte_t *
huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	p4d = p4d_offset(pgd, taddr);
	pud = pud_alloc(mm, p4d, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, taddr);
	}
	return pte;
}

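/*
 * Look up, without allocating, the PTE for a huge page at @addr.
 *
 * This mirrors huge_pte_alloc(): the address is rescaled with
 * htlbpage_to_page() and the page tables are walked level by level,
 * returning NULL as soon as a level is not present.
 */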
pte_t *
huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		p4d = p4d_offset(pgd, taddr);
		if (p4d_present(*p4d)) {
			pud = pud_offset(p4d, taddr);
			if (pud_present(*pud)) {
				pmd = pmd_offset(pud, taddr);
				if (pmd_present(*pmd))
					pte = pte_offset_map(pmd, taddr);
			}
		}
	}

	return pte;
}

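/*
 * Mark a huge PTE as present (_PAGE_P is the present bit in the ia64
 * PTE format).  No separate "huge" PTE flag is used here: huge mappings
 * are identified by the region an address lives in (RGN_HPAGE).
 */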
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * Don't actually need to do any preparation, but need to make sure
 * the address and length are huge-page aligned and the address is in
 * the huge-page region.
 */
int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return -EINVAL;

	return 0;
}

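/*
 * Resolve a huge-page virtual address to its struct page for
 * follow_page()-style lookups.  Only addresses in the huge-page region
 * are accepted; the result is adjusted to the base page within the
 * huge page that @addr actually falls in.
 */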
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
	struct page *page;
	pte_t *ptep;

	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, addr, HPAGE_SIZE);
	if (!ptep || pte_none(*ptep))
		return NULL;
	page = pte_page(*ptep);
	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}

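/*
 * ia64 never maps huge pages with huge PMD or PUD entries: huge
 * mappings sit in the dedicated RGN_HPAGE region and are backed by
 * ordinary-looking page tables, so these predicates always say "no".
 */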
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

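/*
 * Free the page tables backing a huge-page VMA.  As the comment in the
 * body explains, all addresses are first rescaled by
 * HPAGE_SIZE/PAGE_SIZE before handing off to the generic
 * free_pgd_range().
 */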
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	/*
	 * This is called to free hugetlb page tables.
	 *
	 * The offset of these addresses from the base of the hugetlb
	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
	 * the standard free_pgd_range will free the right page tables.
	 *
	 * If floor and ceiling are also in the hugetlb region, they
	 * must likewise be scaled down; but if outside, left unchanged.
	 */

	addr = htlbpage_to_page(addr);
	end  = htlbpage_to_page(end);
	if (REGION_NUMBER(floor) == RGN_HPAGE)
		floor = htlbpage_to_page(floor);
	if (REGION_NUMBER(ceiling) == RGN_HPAGE)
		ceiling = htlbpage_to_page(ceiling);

	free_pgd_range(tlb, addr, end, floor, ceiling);
}

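/*
 * Arch hook for mmap() of hugetlbfs mappings: find a free, huge-page
 * aligned range inside the huge-page region.  MAP_FIXED requests are
 * only validated; otherwise the search starts at the requested address
 * (if it is usable) or at HPAGE_REGION_BASE, and is bounded by
 * RGN_MAP_LIMIT.
 */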
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_unmapped_area_info info;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	/* Handle MAP_FIXED */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	/* This code assumes that RGN_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;

	info.flags = 0;
	info.length = len;
	info.low_limit = addr;
	info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
	info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

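/*
 * Parse the early "hugepagesz=" boot parameter, e.g. "hugepagesz=1G"
 * (assuming the CPU supports that translation size).  The size must be
 * a power of two, larger than the base page size, below
 * PAGE_SIZE << MAX_ORDER, and one of the TLB-insertable page sizes
 * reported by PAL; on success the huge-page region is reprogrammed to
 * use the new page size.
 */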
static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * shouldn't happen, but just in case.
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
		size <= PAGE_SIZE ||
		size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * The boot CPU has already executed ia64_mmu_init() with
	 * HPAGE_SHIFT_DEFAULT; override the huge-page region register
	 * here with the new page shift.
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 0;
}
early_param("hugepagesz", hugetlb_setup_sz);