xref: /openbmc/linux/mm/pgtable-generic.c (revision 6b0b50b0617fad5f2af3b928596a25f7de8dbf50)
1e2cda322SAndrea Arcangeli /*
2e2cda322SAndrea Arcangeli  *  mm/pgtable-generic.c
3e2cda322SAndrea Arcangeli  *
4e2cda322SAndrea Arcangeli  *  Generic pgtable methods declared in asm-generic/pgtable.h
5e2cda322SAndrea Arcangeli  *
6e2cda322SAndrea Arcangeli  *  Copyright (C) 2010  Linus Torvalds
7e2cda322SAndrea Arcangeli  */
8e2cda322SAndrea Arcangeli 
9f95ba941SAndrew Morton #include <linux/pagemap.h>
10e2cda322SAndrea Arcangeli #include <asm/tlb.h>
11e2cda322SAndrea Arcangeli #include <asm-generic/pgtable.h>
12e2cda322SAndrea Arcangeli 
13e2cda322SAndrea Arcangeli #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
14e2cda322SAndrea Arcangeli /*
15cef23d9dSRik van Riel  * Only sets the access flags (dirty, accessed), as well as write
16cef23d9dSRik van Riel  * permission. Furthermore, we know it always gets set to a "more
17e2cda322SAndrea Arcangeli  * permissive" setting, which allows most architectures to optimize
18e2cda322SAndrea Arcangeli  * this. We return whether the PTE actually changed, which in turn
19e2cda322SAndrea Arcangeli  * instructs the caller to do things like update_mmu_cache.  This
20e2cda322SAndrea Arcangeli  * used to be done in the caller, but sparc needs minor faults to
21e2cda322SAndrea Arcangeli  * force that call on sun4c so we changed this macro slightly
22e2cda322SAndrea Arcangeli  */
23e2cda322SAndrea Arcangeli int ptep_set_access_flags(struct vm_area_struct *vma,
24e2cda322SAndrea Arcangeli 			  unsigned long address, pte_t *ptep,
25e2cda322SAndrea Arcangeli 			  pte_t entry, int dirty)
26e2cda322SAndrea Arcangeli {
	/* Skip the store and any TLB maintenance when the PTE is unchanged. */
27e2cda322SAndrea Arcangeli 	int changed = !pte_same(*ptep, entry);
28e2cda322SAndrea Arcangeli 	if (changed) {
29e2cda322SAndrea Arcangeli 		set_pte_at(vma->vm_mm, address, ptep, entry);
		/*
		 * Let the arch drop any stale TLB entry that could otherwise
		 * keep causing spurious faults on this address.
		 */
30cef23d9dSRik van Riel 		flush_tlb_fix_spurious_fault(vma, address);
31e2cda322SAndrea Arcangeli 	}
	/* Non-zero tells the caller the PTE really was updated. */
32e2cda322SAndrea Arcangeli 	return changed;
33e2cda322SAndrea Arcangeli }
34e2cda322SAndrea Arcangeli #endif
35e2cda322SAndrea Arcangeli 
36e2cda322SAndrea Arcangeli #ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
/*
 * pmd-level counterpart of ptep_set_access_flags(): update the access
 * flags of a huge pmd and flush its whole mapping range on change.
 * Only meaningful with transparent hugepages enabled.
 */
37e2cda322SAndrea Arcangeli int pmdp_set_access_flags(struct vm_area_struct *vma,
38e2cda322SAndrea Arcangeli 			  unsigned long address, pmd_t *pmdp,
39e2cda322SAndrea Arcangeli 			  pmd_t entry, int dirty)
40e2cda322SAndrea Arcangeli {
41e2cda322SAndrea Arcangeli #ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/* Only write (and flush) when the pmd actually differs. */
42e2cda322SAndrea Arcangeli 	int changed = !pmd_same(*pmdp, entry);
	/* A huge-pmd mapping must be PMD-size aligned. */
43e2cda322SAndrea Arcangeli 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
44e2cda322SAndrea Arcangeli 	if (changed) {
45e2cda322SAndrea Arcangeli 		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		/* Flush the entire huge-page range covered by this pmd. */
46e2cda322SAndrea Arcangeli 		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
47e2cda322SAndrea Arcangeli 	}
48e2cda322SAndrea Arcangeli 	return changed;
49e2cda322SAndrea Arcangeli #else /* CONFIG_TRANSPARENT_HUGEPAGE */
	/* No huge pmds without THP: reaching here is a caller bug. */
50e2cda322SAndrea Arcangeli 	BUG();
51e2cda322SAndrea Arcangeli 	return 0;
52e2cda322SAndrea Arcangeli #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
53e2cda322SAndrea Arcangeli }
54e2cda322SAndrea Arcangeli #endif
55e2cda322SAndrea Arcangeli 
56e2cda322SAndrea Arcangeli #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
/*
 * Clear the young/accessed bit of a PTE and flush the TLB entry if the
 * bit was set.  Returns non-zero when the PTE had been referenced.
 */
57e2cda322SAndrea Arcangeli int ptep_clear_flush_young(struct vm_area_struct *vma,
58e2cda322SAndrea Arcangeli 			   unsigned long address, pte_t *ptep)
59e2cda322SAndrea Arcangeli {
60e2cda322SAndrea Arcangeli 	int young;
61e2cda322SAndrea Arcangeli 	young = ptep_test_and_clear_young(vma, address, ptep);
	/* Only flush when the accessed bit was actually cleared. */
62e2cda322SAndrea Arcangeli 	if (young)
63e2cda322SAndrea Arcangeli 		flush_tlb_page(vma, address);
64e2cda322SAndrea Arcangeli 	return young;
65e2cda322SAndrea Arcangeli }
66e2cda322SAndrea Arcangeli #endif
67e2cda322SAndrea Arcangeli 
68e2cda322SAndrea Arcangeli #ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
/*
 * pmd-level counterpart of ptep_clear_flush_young(): clear the accessed
 * bit of a huge pmd and flush the covered range if it was set.
 */
69e2cda322SAndrea Arcangeli int pmdp_clear_flush_young(struct vm_area_struct *vma,
70e2cda322SAndrea Arcangeli 			   unsigned long address, pmd_t *pmdp)
71e2cda322SAndrea Arcangeli {
72e2cda322SAndrea Arcangeli 	int young;
73d8c37c48SNaoya Horiguchi #ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/* Huge-pmd mappings are always PMD-size aligned. */
74d8c37c48SNaoya Horiguchi 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
75d8c37c48SNaoya Horiguchi #else
	/* Without THP there are no huge pmds; this path is unreachable. */
76e2cda322SAndrea Arcangeli 	BUG();
77e2cda322SAndrea Arcangeli #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
78e2cda322SAndrea Arcangeli 	young = pmdp_test_and_clear_young(vma, address, pmdp);
	/* Flush the whole huge-page range only when the bit was set. */
79e2cda322SAndrea Arcangeli 	if (young)
80e2cda322SAndrea Arcangeli 		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
81e2cda322SAndrea Arcangeli 	return young;
82e2cda322SAndrea Arcangeli }
83e2cda322SAndrea Arcangeli #endif
84e2cda322SAndrea Arcangeli 
85e2cda322SAndrea Arcangeli #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
/*
 * Atomically clear a PTE and return its old value, flushing the TLB
 * entry when the old PTE could still be cached by hardware.
 */
86e2cda322SAndrea Arcangeli pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
87e2cda322SAndrea Arcangeli 		       pte_t *ptep)
88e2cda322SAndrea Arcangeli {
89e2cda322SAndrea Arcangeli 	pte_t pte;
90e2cda322SAndrea Arcangeli 	pte = ptep_get_and_clear((vma)->vm_mm, address, ptep);
	/* A non-accessible PTE can have no TLB entry, so skip the flush. */
918d1acce4SRik van Riel 	if (pte_accessible(pte))
92e2cda322SAndrea Arcangeli 		flush_tlb_page(vma, address);
	/* Hand the old PTE contents back to the caller. */
93e2cda322SAndrea Arcangeli 	return pte;
94e2cda322SAndrea Arcangeli }
95e2cda322SAndrea Arcangeli #endif
96e2cda322SAndrea Arcangeli 
97e2cda322SAndrea Arcangeli #ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
98b3697c02SAndrea Arcangeli #ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Atomically clear a huge pmd and return its old value, unconditionally
 * flushing the whole range it mapped.
 */
99e2cda322SAndrea Arcangeli pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
100e2cda322SAndrea Arcangeli 		       pmd_t *pmdp)
101e2cda322SAndrea Arcangeli {
102e2cda322SAndrea Arcangeli 	pmd_t pmd;
	/* Huge-pmd mappings are always PMD-size aligned. */
103e2cda322SAndrea Arcangeli 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
104e2cda322SAndrea Arcangeli 	pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
105e2cda322SAndrea Arcangeli 	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
106e2cda322SAndrea Arcangeli 	return pmd;
107e2cda322SAndrea Arcangeli }
108b3697c02SAndrea Arcangeli #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
109e2cda322SAndrea Arcangeli #endif
110e2cda322SAndrea Arcangeli 
111e2cda322SAndrea Arcangeli #ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
112b3697c02SAndrea Arcangeli #ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Mark a huge pmd as being split by setting its splitting bit in place.
 * The trailing range flush exists purely to serialize against
 * lockless get_user_pages_fast walkers (see comment below).
 */
11373636b1aSChris Metcalf void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
114e2cda322SAndrea Arcangeli 			  pmd_t *pmdp)
115e2cda322SAndrea Arcangeli {
116e2cda322SAndrea Arcangeli 	pmd_t pmd = pmd_mksplitting(*pmdp);
	/* Huge-pmd mappings are always PMD-size aligned. */
117e2cda322SAndrea Arcangeli 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
118e2cda322SAndrea Arcangeli 	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
119e2cda322SAndrea Arcangeli 	/* tlb flush only to serialize against gup-fast */
120e2cda322SAndrea Arcangeli 	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
121e2cda322SAndrea Arcangeli }
122b3697c02SAndrea Arcangeli #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
123e2cda322SAndrea Arcangeli #endif
124e3ebcf64SGerald Schaefer 
125e3ebcf64SGerald Schaefer #ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
126e3ebcf64SGerald Schaefer #ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Stash a preallocated pte page table on mm->pmd_huge_pte for a later
 * pgtable_trans_huge_withdraw() (e.g. when a huge pmd is split).
 * Tables are chained through page->lru.  Caller must hold
 * mm->page_table_lock.
 */
127*6b0b50b0SAneesh Kumar K.V void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
128*6b0b50b0SAneesh Kumar K.V 				pgtable_t pgtable)
129e3ebcf64SGerald Schaefer {
130e3ebcf64SGerald Schaefer 	assert_spin_locked(&mm->page_table_lock);
131e3ebcf64SGerald Schaefer 
132e3ebcf64SGerald Schaefer 	/* FIFO */
	/* First deposit: this page becomes the list head. */
133e3ebcf64SGerald Schaefer 	if (!mm->pmd_huge_pte)
134e3ebcf64SGerald Schaefer 		INIT_LIST_HEAD(&pgtable->lru);
135e3ebcf64SGerald Schaefer 	else
		/* Otherwise chain onto the current head's lru list. */
136e3ebcf64SGerald Schaefer 		list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
137e3ebcf64SGerald Schaefer 	mm->pmd_huge_pte = pgtable;
138e3ebcf64SGerald Schaefer }
139e3ebcf64SGerald Schaefer #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
140e3ebcf64SGerald Schaefer #endif
141e3ebcf64SGerald Schaefer 
142e3ebcf64SGerald Schaefer #ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
143e3ebcf64SGerald Schaefer #ifdef CONFIG_TRANSPARENT_HUGEPAGE
144e3ebcf64SGerald Schaefer /* no "address" argument so destroys page coloring of some arch */
145*6b0b50b0SAneesh Kumar K.V pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
146e3ebcf64SGerald Schaefer {
147e3ebcf64SGerald Schaefer 	pgtable_t pgtable;
148e3ebcf64SGerald Schaefer 
149e3ebcf64SGerald Schaefer 	assert_spin_locked(&mm->page_table_lock);
150e3ebcf64SGerald Schaefer 
151e3ebcf64SGerald Schaefer 	/* FIFO */
152e3ebcf64SGerald Schaefer 	pgtable = mm->pmd_huge_pte;
153e3ebcf64SGerald Schaefer 	if (list_empty(&pgtable->lru))
154e3ebcf64SGerald Schaefer 		mm->pmd_huge_pte = NULL;
155e3ebcf64SGerald Schaefer 	else {
156e3ebcf64SGerald Schaefer 		mm->pmd_huge_pte = list_entry(pgtable->lru.next,
157e3ebcf64SGerald Schaefer 					      struct page, lru);
158e3ebcf64SGerald Schaefer 		list_del(&pgtable->lru);
159e3ebcf64SGerald Schaefer 	}
160e3ebcf64SGerald Schaefer 	return pgtable;
161e3ebcf64SGerald Schaefer }
162e3ebcf64SGerald Schaefer #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
163e3ebcf64SGerald Schaefer #endif
16446dcde73SGerald Schaefer 
16546dcde73SGerald Schaefer #ifndef __HAVE_ARCH_PMDP_INVALIDATE
16646dcde73SGerald Schaefer #ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Clear the present bit of a huge pmd in place and flush the range it
 * mapped, so the old translation can no longer be used while the
 * caller rewrites the entry.
 */
16746dcde73SGerald Schaefer void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
16846dcde73SGerald Schaefer 		     pmd_t *pmdp)
16946dcde73SGerald Schaefer {
17046dcde73SGerald Schaefer 	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
17146dcde73SGerald Schaefer 	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
17246dcde73SGerald Schaefer }
17346dcde73SGerald Schaefer #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
17446dcde73SGerald Schaefer #endif
175