xref: /openbmc/linux/arch/ia64/mm/hugetlbpage.c (revision 5d0e4d78)
/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/log2.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
EXPORT_SYMBOL(hpage_shift);
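
/*
 * hpage_shift selects the single system-wide huge page size:
 * HPAGE_SIZE is 1UL << hpage_shift.  It starts at
 * HPAGE_SHIFT_DEFAULT (28 on ia64, i.e. 256MB pages) and may be
 * overridden at boot via the "hugepagesz=" parameter parsed by
 * hugetlb_setup_sz() below.
 */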
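/*
 * Allocate (or find) the pte mapping the huge page that contains
 * @addr.  The trick used throughout this file: htlbpage_to_page()
 * (asm/page.h) keeps the region bits of @addr but scales the region
 * offset down by HPAGE_SHIFT - PAGE_SHIFT, so one base-size pte in
 * the scaled walk stands for one whole huge page.
 *
 * A sketch of the scaling, assuming 16KB base pages (PAGE_SHIFT 14)
 * and the default 256MB huge pages (hpage_shift 28):
 *
 *	addr  = HPAGE_REGION_BASE + 2 * HPAGE_SIZE;
 *	taddr = htlbpage_to_page(addr);
 *	// taddr == HPAGE_REGION_BASE + 2 * PAGE_SIZE: the third huge
 *	// page maps to the third base page of the scaled region.
 */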
pte_t *
huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	pud = pud_alloc(mm, pgd, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, taddr);
	}
	return pte;
}
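/*
 * Lookup-only counterpart of huge_pte_alloc(): walk the scaled
 * address through pgd/pud/pmd and return the pte, or NULL if any
 * intermediate level is not present.
 */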
pte_t *
huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, taddr);
		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, taddr);
			if (pmd_present(*pmd))
				pte = pte_offset_map(pmd, taddr);
		}
	}

	return pte;
}

#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * Don't actually need to do any preparation, but need to make sure
 * the address is in the right region.
 */
int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return -EINVAL;

	return 0;
}
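/*
 * Resolve @addr in the huge page region to its struct page.  The
 * pte names the head (first base page) of the huge page; the offset
 * within the huge page is then added in units of base pages, so the
 * caller gets the exact constituent page.
 */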
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
	struct page *page;
	pte_t *ptep;

	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, addr, HPAGE_SIZE);
	if (!ptep || pte_none(*ptep))
		return NULL;
	page = pte_page(*ptep);
	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
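
/*
 * ia64 maps huge pages through its dedicated region with
 * normal-format page tables (scaled as above), never with huge
 * pmd/pud entries, so both predicates always report false to the
 * generic code.
 */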
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	/*
	 * This is called to free hugetlb page tables.
	 *
	 * The offset of these addresses from the base of the hugetlb
	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
	 * the standard free_pgd_range will free the right page tables.
	 *
	 * If floor and ceiling are also in the hugetlb region, they
	 * must likewise be scaled down; but if outside, left unchanged.
	 */

	addr = htlbpage_to_page(addr);
	end  = htlbpage_to_page(end);
	if (REGION_NUMBER(floor) == RGN_HPAGE)
		floor = htlbpage_to_page(floor);
	if (REGION_NUMBER(ceiling) == RGN_HPAGE)
		ceiling = htlbpage_to_page(ceiling);

	free_pgd_range(tlb, addr, end, floor, ceiling);
}
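/*
 * Pick an unmapped range for a huge page mapping: a bottom-up
 * vm_unmapped_area() search (info.flags == 0) confined to the huge
 * page region and aligned to HPAGE_SIZE via info.align_mask.
 */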
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_unmapped_area_info info;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	/* Handle MAP_FIXED */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	/* This code assumes that RGN_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;

	info.flags = 0;
	info.length = len;
	info.low_limit = addr;
	info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
	info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
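/*
 * Parse the "hugepagesz=" boot parameter.  The requested size must
 * be a power of two that the CPU's TLB can insert (tr_pages is the
 * PAL-reported bitmask of supported page sizes), larger than the
 * base page size, and small enough that a huge page is still a
 * valid buddy allocation (below 1UL << (PAGE_SHIFT + MAX_ORDER)).
 */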
static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * Shouldn't happen, but fall back to the architected
		 * page size mask just in case.
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
		size <= PAGE_SIZE ||
		size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * The boot cpu already executed ia64_mmu_init() and programmed
	 * the huge page region for HPAGE_SHIFT_DEFAULT; override the
	 * region register here with the new page shift.
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 0;
}
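
/*
 * Example (values are illustrative): booting with "hugepagesz=1G"
 * selects 1GB huge pages, provided the PAL mask reports 1GB as an
 * insertable page size; otherwise the warning above fires and the
 * default size is kept.
 */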
early_param("hugepagesz", hugetlb_setup_sz);