xref: /openbmc/linux/arch/mips/include/asm/pgtable.h (revision 206a81c1)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/io.h>
#include <asm/pgtable-bits.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 (cpu_has_rixi ? _PAGE_NO_EXEC : 0) | _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
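
/*
 * On CPUs with the RI/XI extension (cpu_has_rixi) a page is readable by
 * default and read permission is revoked with _PAGE_NO_READ, so the
 * protections above omit _PAGE_READ and add _PAGE_NO_EXEC where execute
 * permission must be denied.  For example, PAGE_COPY evaluates to roughly
 *
 *	_PAGE_PRESENT | _PAGE_NO_EXEC | _page_cachable_default
 *
 * on a RIXI CPU and to
 *
 *	_PAGE_PRESENT | _PAGE_READ | _page_cachable_default
 *
 * on a pre-RIXI CPU.
 */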

/*
 * If _PAGE_NO_EXEC is not defined, we can't enforce page protection for
 * execute and treat it the same as read.  Also, write permissions imply
 * read permissions.  This is the closest we can get by reasonable means.
 */

/*
 * Dummy values to fill the table in mmap.c.
 * The real values are generated at runtime.
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)
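
/*
 * protection_map[] in mm/mmap.c is indexed by the VM_EXEC, VM_WRITE and
 * VM_READ bits: the __P{xwr} entries are used for private (copy-on-write)
 * mappings and the __S{xwr} entries for shared mappings.  On MIPS the real
 * values depend on _page_cachable_default and cpu_has_rixi, which are only
 * known at boot, so the table is overwritten at runtime (see
 * setup_protection_map() in arch/mips/mm/cache.c) and the entries above
 * are mere placeholders.
 */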

extern unsigned long _page_cachable_default;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
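
/*
 * MIPS caches can be virtually aliased, so instead of a single zero page
 * there is a small run of zero pages starting at empty_zero_page, and
 * zero_page_mask selects the page whose cache colour matches the faulting
 * user address.  A read fault at address addr therefore ends up with
 *
 *	struct page *zp = ZERO_PAGE(addr);
 *
 * which picks the zero page (addr & zero_page_mask) bytes into that run.
 */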

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)

#define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	/*
	 * Write the high half first: the present bit lives in pte_low, so
	 * the entry must not look valid until both halves are in place.
	 */
	smp_wmb();
	ptep->pte_low = pte.pte_low;

	if (pte.pte_low & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			buddy->pte_low	|= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	/* Preserve global status for the pair */
	if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
		null.pte_low = null.pte_high = _PAGE_GLOBAL;

	set_pte_at(mm, addr, ptep, null);
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
	}
#endif
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
}
#endif
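
/*
 * Both set_pte() flavours above keep the _PAGE_GLOBAL bit consistent
 * across a pair of PTEs: a MIPS TLB entry maps an even/odd pair of
 * virtual pages and the hardware treats the entry as global only when
 * both halves have the G bit set.  ptep_buddy() returns the other PTE of
 * the pair, so marking a none entry's buddy global (and preserving the
 * bit in pte_clear()) prevents a global kernel mapping from being
 * silently demoted by its empty neighbour.
 */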

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with its size, but gcc 3.3 and older
 * cannot see that the size expression is a constant, so the size is
 * dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
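
/*
 * _PAGE_SILENT_READ and _PAGE_SILENT_WRITE are the bits the TLB refill
 * handler actually exposes to the hardware (the valid and dirty bits of
 * EntryLo).  They are only set once _PAGE_ACCESSED / _PAGE_MODIFIED has
 * been recorded, so the first read or write of a page still faults and
 * the kernel can maintain young/dirty state in software; the pte_mk*()
 * helpers below keep these pairs of bits in sync.
 */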
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte.pte_low & _PAGE_FILE; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		pte.pte_low  |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		pte.pte_low  |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (pte.pte_low & _PAGE_READ) {
		pte.pte_low  |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (cpu_has_rixi) {
		if (!(pte_val(pte) & _PAGE_NO_READ))
			pte_val(pte) |= _PAGE_SILENT_READ;
	} else {
		if (pte_val(pte) & _PAGE_READ)
			pte_val(pte) |= _PAGE_SILENT_READ;
	}
	return pte;
}

#ifdef _PAGE_HUGE
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* _PAGE_HUGE */
#endif
static inline int pte_special(pte_t pte)	{ return 0; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}
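
/*
 * Typical use: a driver's mmap() handler that wants an uncached user
 * mapping of device memory clears the cache attribute before calling
 * remap_pfn_range().  A minimal sketch (the function name and the pfn it
 * maps are illustrative only, not part of this header):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long pfn = foo_device_base_pfn;	// hypothetical
 *
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return remap_pfn_range(vma, vma->vm_start, pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */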

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= ~0x3f;
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & 0x3f;
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
#endif
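
/*
 * pte_modify() preserves the bits covered by _PAGE_CHG_MASK (chiefly the
 * physical frame number and software state such as accessed/modified)
 * and takes the remaining protection bits from newprot.  The generic
 * mprotect() path relies on this when it rewrites an existing mapping;
 * conceptually it does something like
 *
 *	set_pte_at(mm, addr, ptep, pte_modify(*ptep, newprot));
 *
 * where newprot is derived from the VMA's updated vm_page_prot.
 */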

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);
extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	__update_tlb(vma, address, pte);
	__update_cache(vma, address, pte);
}
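
/*
 * MIPS has a software-managed TLB, so the generic MM code cannot assume
 * the hardware will notice a PTE change on its own: update_mmu_cache()
 * is called after a fault has installed or updated a PTE and pre-loads
 * the new translation into the TLB (__update_tlb) and, on virtually
 * aliased caches, brings the cache into line as well (__update_cache).
 */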

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}

#define kern_addr_valid(addr)	(1)

#ifdef CONFIG_64BIT_PHYS_ADDR
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
		unsigned long vaddr,
		unsigned long pfn,
		unsigned long size,
		pgprot_t prot)
{
	phys_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}
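
/*
 * On MIPS a transparent huge page is represented by a leaf PMD: the
 * entry carries the PFN and protection bits directly and is tagged with
 * _PAGE_HUGE so the TLB refill path can recognise it.  The pmd_* helpers
 * below therefore mirror the pte_* accessors above almost one-for-one.
 */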

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_SPLITTING);
}

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SPLITTING;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
/* Extern to avoid header file madness */
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

	if (cpu_has_rixi) {
		if (!(pmd_val(pmd) & _PAGE_NO_READ))
			pmd_val(pmd) |= _PAGE_SILENT_READ;
	} else {
		if (pmd_val(pmd) & _PAGE_READ)
			pmd_val(pmd) |= _PAGE_SILENT_READ;
	}

	return pmd;
}

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic pmdp_get_and_clear() uses a version of pmd_clear() with a
 * different prototype, so we provide our own.
 */
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#include <asm-generic/pgtable.h>

/*
 * Uncached accelerated TLB map for video memory access.
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t *vma_prot);
#endif

/*
 * We provide our own get_unmapped_area() to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise.
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _ASM_PGTABLE_H */