#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>

#include <asm/book3s/32/hash.h>

/* And here we include common definitions */
#include <asm/pte-common.h>

/*
 * The normal case is that PTEs are 32 bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two-level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index into the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))
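
/*
 * Worked example (illustrative; assumes the common configuration of 4KB
 * pages and 32-bit PTEs, i.e. PAGE_SHIFT = 12 and PTE_SHIFT = 10):
 * PGDIR_SHIFT is then 22, so a 32-bit virtual address decomposes as
 *
 *	pgd index = addr >> 22			(PTRS_PER_PGD = 1024)
 *	pte index = (addr >> 12) & 1023		(PTRS_PER_PTE = 1024)
 *	offset    = addr & 0xfff		(byte within the 4KB page)
 *
 * which matches the 1-page, 1024-entry tables described above.
 */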

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
/*
 * This is the bottom of the PKMAP area with HIGHMEM, or an arbitrary
 * value (for now) on others; it is the address from which we can start
 * laying out the kernel virtual space that goes below PKMAP and FIXMAP.
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP	PKMAP_BASE
#else
#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
#endif

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init(), at which point it becomes the top of the vmalloc
 * and ioremap space.
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP	KVIRT_TOP
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be at least a 16MB "hole"
 * after the physical memory until the kernel virtual memory starts.  That
 * means that any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about our early calls to ioremap() (growing down from IOREMAP_TOP)
 * clashing with the VM area allocations (growing upwards from
 * VMALLOC_START).  For this reason we have ioremap_bot to check when
 * the VM system actually runs into the mappings we set up in early boot.
 * This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot
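
/*
 * Putting the pieces together, the top of the address space looks
 * roughly like this (illustrative sketch, highest addresses first):
 *
 *	FIXMAP / PKMAP (with HIGHMEM)
 *	KVIRT_TOP
 *	consistent (DMA) space, if CONFIG_NOT_COHERENT_CACHE
 *	IOREMAP_TOP		<- early ioremaps grow down from here
 *	ioremap_bot (= VMALLOC_END after mem_init())
 *	vmalloc/ioremap area	<- vmalloc grows up from VMALLOC_START
 *	VMALLOC_START		<- at least VMALLOC_OFFSET above high_memory
 *	high_memory (top of lowmem)
 */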

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>			/* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long ioremap_bot;

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_SHIFT)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << (32 - PGDIR_SHIFT))

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bits wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}
#endif /* CONFIG_PTE_64BIT */
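
/*
 * Illustrative sketch of the pte_update() contract (the helper below is
 * hypothetical and not used anywhere): the return value is the PTE as it
 * was *before* the update, which is what allows atomic test-and-clear
 * style operations. A real user would also have to flush any hash table
 * entry, as __ptep_test_and_clear_young() below does.
 */
static inline int example_test_and_clear_dirty(pte_t *ptep)
{
	return (pte_update(ptep, _PAGE_DIRTY, 0) & _PAGE_DIRTY) != 0;
}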
/*
 * 2.6 calls this without flushing the TLB entry; that is wrong for
 * our hash-based implementation, so we fix it up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}

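/*
 * Note: this path only ever makes a PTE more permissive. "set" is
 * restricted to the dirty/accessed/RW/exec bits of the new entry, and
 * _PAGE_RO is cleared whenever the new entry does not carry it, so the
 * access rules can only be relaxed here, never tightened.
 */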
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	unsigned long set = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	unsigned long clr = ~pte_val(entry) & _PAGE_RO;

	pte_update(ptep, clr, set);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte)
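
/*
 * Illustrative sketch (the helper is hypothetical): a full software walk
 * from an mm and address down to the PTE using the accessors above. The
 * pud/pmd levels are folded onto the pgd by <asm-generic/pgtable-nopmd.h>,
 * so pud_offset()/pmd_offset() just cast the same entry.
 */
static inline pte_t *example_walk_to_pte(struct mm_struct *mm,
					 unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud = pud_offset(pgd, addr);	/* folded level */
	pmd_t *pmd = pmd_offset(pud, addr);	/* folded level */

	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}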

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 *   -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
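
/*
 * Worked example (illustrative): swap type 2 with offset 0x1234 becomes
 * __swp_entry(2, 0x1234).val == 0x24682, stored in the PTE as
 * 0x24682 << 3 == 0x123410. The final shift by 3 keeps the low PTE bits
 * clear, which is how the encoding avoids _PAGE_PRESENT and
 * _PAGE_HASHPTE as required above.
 */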

#ifndef CONFIG_PPC_4K_PAGES
void pgtable_cache_init(void);
#else
/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)
#endif

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
		      pmd_t **pmdp);

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW); }
static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return pte_val(pte) >> PTE_RPN_SHIFT;
}
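
/*
 * Illustrative sketch (hypothetical helper): pfn_pte() and pte_pfn() are
 * inverses with respect to the PFN, so the round trip below preserves it.
 * PAGE_KERNEL is assumed to come from the common definitions included at
 * the top of this file.
 */
static inline pte_t example_kernel_pte(unsigned long pfn)
{
	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);

	/* pte_pfn(pte) == pfn holds here. */
	return pte;
}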

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
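
/*
 * Illustrative sketch (hypothetical helper): downgrade a PTE to
 * read-only protections. pte_modify() keeps the page frame number and
 * the bits in _PAGE_CHG_MASK while replacing the protection bits;
 * PAGE_READONLY is assumed to come from the common definitions.
 */
static inline pte_t example_make_readonly(pte_t pte)
{
	return pte_modify(pte, PAGE_READONLY);
}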
/* This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spreading it around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
	 * helper pte_update() which does an atomic update. We need to do that
	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
	 * the hash bits instead (ie, same as the non-SMP case)
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with 64-bit PTE.  In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update,
	 * preserving the hash bits.
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
	/* Third case is 32-bit hash table in UP mode. We need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx()),
	 * so we need to keep track that this PTE needs invalidating.
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
#error "Not supported"
#endif
}
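
/*
 * Illustrative sketch (hypothetical helper): the generic set_pte_at()
 * wrapper is expected to reach __set_pte_at() with percpu == 0 for
 * ordinary mappings; kmap_atomic-style per-CPU mappings pass percpu == 1
 * to take the simple non-atomic path above.
 */
static inline void example_set_pte(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	__set_pte_at(mm, addr, ptep, pte, 0);
}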

/*
 * Helpers to set the cache attributes of a page protection value,
 * e.g. to mark it "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE | _PAGE_GUARDED);
}

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT);
}

#define pgprot_cached_wthru pgprot_cached_wthru
static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT | _PAGE_WRITETHRU);
}

#define pgprot_cached_noncoherent pgprot_cached_noncoherent
static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return pgprot_noncached_wc(prot);
}
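
/*
 * Illustrative sketch (hypothetical helper): a typical MMIO-style
 * protection built from the accessors above, non-cached and guarded,
 * starting from PAGE_KERNEL (assumed from the common definitions).
 */
static inline pgprot_t example_mmio_prot(void)
{
	return pgprot_noncached(PAGE_KERNEL);
}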

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_32_PGTABLE_H */