--- tsb.c (858a0d7eb5300b5f620d98ab3c4b96c9d5f19131)
+++ tsb.c (c7d9f77d33a779ad582d8b2284ba007931ebd894)
 /* arch/sparc64/mm/tsb.c
  *
  * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
  */

 #include <linux/kernel.h>
 #include <linux/preempt.h>
 #include <linux/slab.h>

--- 72 unchanged lines hidden ---

 			    unsigned long tsb, unsigned long nentries)
 {
 	unsigned long i;

 	for (i = 0; i < tb->tlb_nr; i++)
 		__flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
 }

+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static void __flush_huge_tsb_one_entry(unsigned long tsb, unsigned long v,
+				       unsigned long hash_shift,
+				       unsigned long nentries,
+				       unsigned int hugepage_shift)
+{
+	unsigned int hpage_entries;
+	unsigned int i;
+
+	hpage_entries = 1 << (hugepage_shift - hash_shift);
+	for (i = 0; i < hpage_entries; i++)
+		__flush_tsb_one_entry(tsb, v + (i << hash_shift), hash_shift,
+				      nentries);
+}
+
+static void __flush_huge_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+				 unsigned long tsb, unsigned long nentries,
+				 unsigned int hugepage_shift)
+{
+	unsigned long i;
+
+	for (i = 0; i < tb->tlb_nr; i++)
+		__flush_huge_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift,
+					   nentries, hugepage_shift);
+}
+#endif
+
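The new __flush_huge_tsb_one_entry() turns one huge-page flush into 1 << (hugepage_shift - hash_shift) probes, one per hash_shift-sized piece of the page. Below is a minimal standalone sketch of that arithmetic, assuming the usual sparc64 values (REAL_HPAGE_SHIFT = 22 for the 4MB hardware TLB entry, HPAGE_SHIFT = 23 for an 8MB huge page); it is illustrative user-space C, not kernel code.

	/* Standalone sketch, not kernel code. Assumes sparc64's
	 * REAL_HPAGE_SHIFT = 22 (4MB) and HPAGE_SHIFT = 23 (8MB). */
	#include <stdio.h>

	#define REAL_HPAGE_SHIFT 22UL	/* hash_shift used for the huge TSB */
	#define HPAGE_SHIFT	 23UL	/* an 8MB huge page */

	int main(void)
	{
		unsigned long hash_shift = REAL_HPAGE_SHIFT;
		unsigned long hugepage_shift = HPAGE_SHIFT;
		unsigned long v = 0x800000UL;	/* example 8MB-aligned vaddr */
		unsigned long hpage_entries = 1UL << (hugepage_shift - hash_shift);
		unsigned long i;

		/* Mirrors the loop in __flush_huge_tsb_one_entry(): one TSB
		 * probe per hash_shift-sized piece of the huge page. */
		for (i = 0; i < hpage_entries; i++)
			printf("probe slot for vaddr 0x%lx\n",
			       v + (i << hash_shift));
		return 0;
	}

With these values it prints two probes (0x800000 and 0xc00000), matching the two 4MB hardware entries that back one 8MB huge page.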
 void flush_tsb_user(struct tlb_batch *tb)
 {
 	struct mm_struct *mm = tb->mm;
 	unsigned long nentries, base, flags;

 	spin_lock_irqsave(&mm->context.lock, flags);

-	if (!tb->huge) {
+	if (tb->hugepage_shift == PAGE_SHIFT) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
 		__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
 	}
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (tb->huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+	else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries);
+		__flush_huge_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries,
+				     tb->hugepage_shift);
 	}
 #endif
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }

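flush_tsb_user() reads a single tb->hugepage_shift for the whole batch, which only works if every vaddr queued in a tlb_batch shares one page size. The enforcement of that invariant lives in hunks hidden from this view (the batching side in arch/sparc/mm/tlb.c); the sketch below is only a guess at the shape of that rule, with every name, value, and message illustrative rather than taken from the kernel.

	/* Standalone sketch of the implied batching rule, not kernel code:
	 * a batch holds one page size, so queueing a different size must
	 * first flush what is pending. */
	#include <stdio.h>

	struct tlb_batch_sketch {
		unsigned int tlb_nr;		/* number of pending vaddrs */
		unsigned int hugepage_shift;	/* size of every pending vaddr */
	};

	static void add_to_batch(struct tlb_batch_sketch *tb, unsigned long vaddr,
				 unsigned int hugepage_shift)
	{
		if (tb->tlb_nr && tb->hugepage_shift != hugepage_shift) {
			printf("size change: flushing %u pending entries\n",
			       tb->tlb_nr);
			tb->tlb_nr = 0;
		}
		tb->hugepage_shift = hugepage_shift;
		tb->tlb_nr++;
	}

	int main(void)
	{
		struct tlb_batch_sketch tb = { 0, 0 };

		add_to_batch(&tb, 0x2000, 13);	 /* 8K page */
		add_to_batch(&tb, 0x4000, 13);	 /* same size: batched */
		add_to_batch(&tb, 0x800000, 23); /* 8MB page: forces a flush */
		printf("pending entries: %u (shift %u)\n",
		       tb.tlb_nr, tb.hugepage_shift);
		return 0;
	}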
-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge)
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
+			 unsigned int hugepage_shift)
 {
 	unsigned long nentries, base, flags;

 	spin_lock_irqsave(&mm->context.lock, flags);

-	if (!huge) {
+	if (hugepage_shift == PAGE_SHIFT) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
 		__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
 	}
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+	else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT, nentries);
+		__flush_huge_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT,
+					   nentries, hugepage_shift);
 	}
 #endif
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }

 #define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K
 #define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K

--- 423 unchanged lines hidden ---
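Callers of flush_tsb_user_page() previously passed a bool; now they pass the page-size shift itself, with hugepage_shift == PAGE_SHIFT selecting the base TSB and anything larger selecting the huge TSB. A standalone sketch of the new dispatch, assuming sparc64's 8K base pages (PAGE_SHIFT = 13) and REAL_HPAGE_SHIFT = 22; the helper name and output are illustrative, not kernel code:

	/* Standalone sketch, not kernel code. Assumes sparc64's
	 * PAGE_SHIFT = 13 (8K) and REAL_HPAGE_SHIFT = 22 (4MB). */
	#include <stdio.h>

	#define PAGE_SHIFT	 13U
	#define REAL_HPAGE_SHIFT 22U

	static void flush_page(unsigned int hugepage_shift)
	{
		if (hugepage_shift == PAGE_SHIFT)
			printf("one probe into the base (8K) TSB\n");
		else
			printf("%u probe(s) into the huge TSB\n",
			       1U << (hugepage_shift - REAL_HPAGE_SHIFT));
	}

	int main(void)
	{
		flush_page(PAGE_SHIFT);	/* base page: single probe */
		flush_page(23U);	/* 8MB huge page: two 4MB probes */
		return 0;
	}

Encoding the size as a shift rather than a flag is what lets the huge path above scale the probe count per page size instead of hard-coding one entry.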