xref: /openbmc/linux/arch/ia64/mm/hugetlbpage.c (revision 8a10bc9d)
/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/log2.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
EXPORT_SYMBOL(hpage_shift);

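/*
 * Allocate the page table levels needed to map a huge page at @addr.
 * htlbpage_to_page() rescales the hugetlb-region offset so that each
 * huge page occupies a single pte slot in an ordinary page table.
 */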
pte_t *
huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	pud = pud_alloc(mm, pgd, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_map(mm, NULL, pmd, taddr);
	}
	return pte;
}

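/*
 * Look up (but never allocate) the pte for a huge page at @addr,
 * returning NULL if any intermediate level is not present.
 */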
pte_t *
huge_pte_offset (struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, taddr);
		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, taddr);
			if (pmd_present(*pmd))
				pte = pte_offset_map(pmd, taddr);
		}
	}

	return pte;
}

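/*
 * ia64 never shares huge page pmds between processes, so there is
 * nothing to unshare; returning 0 tells the generic code so.
 */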
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

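/*
 * A huge pte only needs the present bit set: the page size for the
 * hugetlb region comes from the region register (see hugetlb_setup_sz()),
 * not from dedicated bits in the pte.
 */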
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * Don't actually need to do any preparation, but need to make sure
 * the address is in the right region.
 */
int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return -EINVAL;

	return 0;
}

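/*
 * Resolve a user address in the hugetlb region to its struct page,
 * adjusting for the base-page offset within the huge page.
 */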
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
	struct page *page;
	pte_t *ptep;

	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, addr);
	if (!ptep || pte_none(*ptep))
		return NULL;
	page = pte_page(*ptep);
	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
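
/*
 * Huge pages on ia64 live in their own region and are mapped through
 * ordinary-format page tables at a scaled address, so pmd and pud
 * entries are never "huge" and the generic huge-pmd paths are unused.
 */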
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

int pmd_huge_support(void)
{
	return 0;
}

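/*
 * Never reached, since pmd_huge() is always false here; provided only
 * to satisfy the generic hugetlb interface.
 */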
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
	return NULL;
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	/*
	 * This is called to free hugetlb page tables.
	 *
	 * The offset of these addresses from the base of the hugetlb
	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
	 * the standard free_pgd_range will free the right page tables.
	 *
	 * If floor and ceiling are also in the hugetlb region, they
	 * must likewise be scaled down; but if outside, left unchanged.
	 */

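	/*
	 * For example, with the ia64 defaults of 256 MB huge pages and
	 * 16 KB base pages the scale factor is 16384, so an offset of
	 * 512 MB into the hugetlb region becomes an offset of 32 KB.
	 * (Both sizes are configurable; the ratio is what matters.)
	 */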
	addr = htlbpage_to_page(addr);
	end  = htlbpage_to_page(end);
	if (REGION_NUMBER(floor) == RGN_HPAGE)
		floor = htlbpage_to_page(floor);
	if (REGION_NUMBER(ceiling) == RGN_HPAGE)
		ceiling = htlbpage_to_page(ceiling);

	free_pgd_range(tlb, addr, end, floor, ceiling);
}

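/*
 * Pick an unmapped range for a new hugetlb mapping.  All hugetlb
 * mappings live in RGN_HPAGE, so the search is confined to that region
 * and aligned to the huge page size.
 */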
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_unmapped_area_info info;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	/* Handle MAP_FIXED */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	/* This code assumes that RGN_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;

	info.flags = 0;
	info.length = len;
	info.low_limit = addr;
	info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
	info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

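/*
 * Parse the "hugepagesz=" boot parameter.  The requested size must be
 * a power of two, larger than the base page size, small enough for the
 * page allocator, and one of the TLB page sizes reported by PAL.  For
 * example, "hugepagesz=256M" selects 256 MB huge pages.
 */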
static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * shouldn't happen, but just in case.
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
		size <= PAGE_SIZE ||
		size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * The boot cpu already executed ia64_mmu_init() and programmed
	 * the region register using HPAGE_SHIFT_DEFAULT; override it
	 * here with the new page shift.
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 0;
}
early_param("hugepagesz", hugetlb_setup_sz);