xref: /openbmc/linux/arch/s390/mm/hugetlbpage.c (revision ee89bd6b)
/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright IBM Corp. 2007
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>

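/*
 * Install a huge-page entry.  On s390 a huge page maps at segment (pmd)
 * level, so @pteptr really points at a pmd slot and the value is written
 * with pmd_val().  Without hardware large pages (MACHINE_HAS_HPAGE unset,
 * presumably pre-EDAT machines) the huge page is emulated: the page table
 * pre-built by arch_prepare_hugepage() and stashed in page[1].index is
 * hooked into the segment entry instead, carrying over the invalid and
 * read-only bits of the original pte value.
 */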
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *pteptr, pte_t pteval)
{
	pmd_t *pmdp = (pmd_t *) pteptr;
	unsigned long mask;

	if (!MACHINE_HAS_HPAGE) {
		pteptr = (pte_t *) pte_page(pteval)[1].index;
		mask = pte_val(pteval) &
				(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
		pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
	}

	pmd_val(*pmdp) = pte_val(pteval);
}

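/*
 * Prepare a huge page on machines without hardware large-page support.
 * A full page table is allocated and its PTRS_PER_PTE entries are pointed
 * at the consecutive 4K pages of the huge page; the page table address is
 * remembered in page[1].index so that set_huge_pte_at() can wire it into
 * the segment entry later.  Returns -ENOMEM if the page table cannot be
 * allocated, 0 otherwise (including the no-op hardware case).
 *
 * Rough sketch of the resulting emulation (illustrative, not literal code):
 *
 *	ptep[i] == __pte(page_to_phys(page) + i * PAGE_SIZE)
 *	for i = 0 .. PTRS_PER_PTE - 1
 */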
int arch_prepare_hugepage(struct page *page)
{
	unsigned long addr = page_to_phys(page);
	pte_t pte;
	pte_t *ptep;
	int i;

	if (MACHINE_HAS_HPAGE)
		return 0;

	ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
	if (!ptep)
		return -ENOMEM;

	pte_val(pte) = addr;
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
		pte_val(pte) += PAGE_SIZE;
	}
	page[1].index = (unsigned long) ptep;
	return 0;
}

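/*
 * Undo arch_prepare_hugepage(): clear and free the emulation page table
 * referenced by page[1].index, if one was set up.  Nothing to do when the
 * machine has real large-page support.
 */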
void arch_release_hugepage(struct page *page)
{
	pte_t *ptep;

	if (MACHINE_HAS_HPAGE)
		return;

	ptep = (pte_t *) page[1].index;
	if (!ptep)
		return;
	clear_table((unsigned long *) ptep, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	page_table_free(&init_mm, (unsigned long *) ptep);
	page[1].index = 0;
}

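/*
 * Allocate the page-table slot for a huge page at @addr.  The walk stops
 * at segment (pmd) level and the pmd slot is handed back cast to pte_t *,
 * which is what the generic hugetlb code expects.  A sketch of the typical
 * caller in mm/hugetlb.c (assumed, not part of this file):
 *
 *	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
 *	if (!ptep)
 *		return VM_FAULT_OOM;
 */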
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	pudp = pud_alloc(mm, pgdp, addr);
	if (pudp)
		pmdp = pmd_alloc(mm, pudp, addr);
	return (pte_t *) pmdp;
}

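/*
 * Lookup-only counterpart of huge_pte_alloc(): walk pgd and pud and return
 * the pmd slot for @addr as a pte_t *, or NULL if no intermediate table is
 * present.  Nothing is allocated on this path.
 */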
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	if (pgd_present(*pgdp)) {
		pudp = pud_offset(pgdp, addr);
		if (pud_present(*pudp))
			pmdp = pmd_offset(pudp, addr);
	}
	return (pte_t *) pmdp;
}

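/*
 * s390 does not share page tables between hugetlb mappings, so there is
 * never anything to unshare; always report 0 to the generic code.
 */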
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

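/*
 * Not used on s390: huge-page lookups go through the normal follow_page()
 * walk and end up in follow_huge_pmd() below, so this hook just reports
 * -EINVAL.
 */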
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	return ERR_PTR(-EINVAL);
}

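/*
 * A pmd maps a huge page when the large-page bit (_SEGMENT_ENTRY_LARGE) is
 * set in the segment entry.  Without hardware large pages the emulated case
 * looks like an ordinary segment entry pointing at a page table, so always
 * answer 0 there.
 */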
int pmd_huge(pmd_t pmd)
{
	if (!MACHINE_HAS_HPAGE)
		return 0;

	return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
}

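/*
 * No huge pages at pud (region-third) level in this kernel, so a pud can
 * never be huge.
 */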
int pud_huge(pud_t pud)
{
	return 0;
}

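/*
 * Resolve the struct page backing @address inside a huge pmd mapping: take
 * the head page of the segment and add the 4K-page offset within the huge
 * page.  Only meaningful with hardware large pages; the emulated case never
 * reaches this because pmd_huge() returns 0 there.
 */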
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmdp, int write)
{
	struct page *page;

	if (!MACHINE_HAS_HPAGE)
		return NULL;

	page = pmd_page(*pmdp);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}