xref: /openbmc/linux/arch/ia64/mm/hugetlbpage.c (revision 643d1f7f)
/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/log2.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;

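/*
 * Allocate (or find) the page table entry mapping a huge page at the
 * given user address.  The address is first rescaled into the
 * normal-page address space with htlbpage_to_page() so the generic
 * pgd/pud/pmd/pte allocators can be used unchanged.
 */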
pte_t *
huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	pud = pud_alloc(mm, pgd, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, taddr);
	}
	return pte;
}

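/*
 * Look up the page table entry for a huge page at the given address
 * without allocating any intermediate page-table levels.  Returns NULL
 * if any level of the walk is not present.
 */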
pte_t *
huge_pte_offset (struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, taddr);
		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, taddr);
			if (pmd_present(*pmd))
				pte = pte_offset_map(pmd, taddr);
		}
	}

	return pte;
}

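/*
 * Huge pmds are never shared on ia64, so there is nothing to unshare.
 */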
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

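/* Mark a huge pte present by setting the _PAGE_P bit. */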
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * Don't actually need to do any preparation, but need to make sure
 * the address is in the right region.
 */
int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return -EINVAL;

	return 0;
}

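/*
 * Return the struct page backing a huge-page address, offset to the
 * normal-sized page within the huge page that contains @addr.
 * Addresses outside the dedicated hugetlb region are rejected.
 */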
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
	struct page *page;
	pte_t *ptep;

	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, addr);
	if (!ptep || pte_none(*ptep))
		return NULL;
	page = pte_page(*ptep);
	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}

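/*
 * ia64 maps huge pages through a dedicated address region rather than
 * through huge pmd entries, so a pmd never refers to a huge page and
 * these two helpers are stubs.
 */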
int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
	return NULL;
}

void hugetlb_free_pgd_range(struct mmu_gather **tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	/*
	 * This is called to free hugetlb page tables.
	 *
	 * The offset of these addresses from the base of the hugetlb
	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
	 * the standard free_pgd_range will free the right page tables.
	 *
	 * If floor and ceiling are also in the hugetlb region, they
	 * must likewise be scaled down; but if outside, left unchanged.
	 */

	addr = htlbpage_to_page(addr);
	end  = htlbpage_to_page(end);
	if (REGION_NUMBER(floor) == RGN_HPAGE)
		floor = htlbpage_to_page(floor);
	if (REGION_NUMBER(ceiling) == RGN_HPAGE)
		ceiling = htlbpage_to_page(ceiling);

	free_pgd_range(tlb, addr, end, floor, ceiling);
}

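/*
 * Find a free, HPAGE_SIZE-aligned range of @len bytes inside the
 * hugetlb region.  With MAP_FIXED the caller's address is only
 * validated; otherwise the search starts from the supplied hint (or
 * the base of the region) and walks the VMA list looking for a gap.
 */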
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	/* Handle MAP_FIXED */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(addr, len))
			return -EINVAL;
		return addr;
	}

	/* This code assumes that RGN_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;
	else
		addr = ALIGN(addr, HPAGE_SIZE);
	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point:  (!vmm || addr < vmm->vm_end). */
		if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
			return -ENOMEM;
		if (!vmm || (addr + len) <= vmm->vm_start)
			return addr;
		addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
	}
}

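/*
 * Parse the "hugepagesz=" boot parameter.  The requested size must be
 * a power of two, larger than the base page size, smaller than the
 * largest buddy-allocator order, and one of the page sizes supported
 * by the processor (as reported by PAL); the region register for the
 * hugetlb region is then reprogrammed with the new page shift.
 */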
static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * shouldn't happen, but just in case.
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
		size <= PAGE_SIZE ||
		size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * The boot CPU has already run ia64_mmu_init() using
	 * HPAGE_SHIFT_DEFAULT; override that here with the new page shift.
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 0;
}

early_param("hugepagesz", hugetlb_setup_sz);