/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/io.h>
#include <asm/pgtable-bits.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 (cpu_has_rixi ? _PAGE_NO_EXEC : 0) | _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If the CPU does not support RIXI, we can't enforce execute
 * permission separately, so execute is treated the same as read.
 * Also, write permission implies read permission.  This is the
 * closest we can get by reasonable means.
 */
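
/*
 * For illustration: on a core with RIXI (the Read Inhibit / eXecute
 * Inhibit extension), cpu_has_rixi is true and PAGE_COPY above works
 * out to roughly _PAGE_PRESENT | _PAGE_NO_EXEC plus the cache bits,
 * i.e. readable but neither writable nor executable.  On a pre-RIXI
 * core it works out to _PAGE_PRESENT | _PAGE_READ plus the cache
 * bits, and execute permission silently follows read permission.
 */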

/*
 * Dummy values to fill the table in mmap.c
 * The real values will be generated at runtime
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)
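
/*
 * The real values are computed once CPU features are known; in this
 * tree that happens in setup_protection_map() (arch/mips/mm/cache.c),
 * which rewrites protection_map[] in mm/mmap.c using cpu_has_rixi and
 * _page_cachable_default, roughly along the lines of:
 *
 *	protection_map[0] = __pgprot(_page_cachable_default |
 *				     _PAGE_PRESENT | _PAGE_NO_READ);
 */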

extern unsigned long _page_cachable_default;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
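
/*
 * The "colored" zero page avoids virtual-aliasing trouble in the data
 * cache: empty_zero_page is a block of zeroed pages covering every
 * cache color, and zero_page_mask selects the copy whose color matches
 * the faulting virtual address.  Hypothetical worked example, assuming
 * a 16KB virtually indexed cache way and 4KB pages: four colors,
 * zero_page_mask == 0x3000, so a read fault at user address 0x2000
 * resolves to the page at empty_zero_page + 0x2000.
 */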

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)

#define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

	if (pte.pte_low & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it had better already be global).
		 */
		if (pte_none(*buddy)) {
			buddy->pte_low  |= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}
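
/*
 * Background on the buddy fix-ups in set_pte()/pte_clear(): a MIPS TLB
 * entry maps an even/odd pair of virtual pages through EntryLo0 and
 * EntryLo1, and on a TLB write the hardware ANDs the two global bits,
 * so both PTEs of a pair must agree on _PAGE_GLOBAL.  ptep_buddy()
 * (from the width-specific pgtable-*.h headers) flips the low bit of
 * the PTE address to reach the partner entry.  The smp_wmb() above
 * orders the pte_high store before the pte_low store; _PAGE_PRESENT
 * lives in pte_low, so a reader that sees a present PTE also sees a
 * valid high half.
 */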
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	/* Preserve global status for the pair */
	if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
		null.pte_low = null.pte_high = _PAGE_GLOBAL;

	set_pte_at(mm, addr, ptep, null);
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it had better already be global).
		 */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
	}
#endif
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
}
#endif

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)
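
/*
 * These log2 values let the TLB-refill code index page tables with
 * shifts rather than multiplies.  Worked example: with 8-byte PTEs,
 * sizeof(pte_t) == 8, __builtin_ffs(8) == 4, so PTE_T_LOG2 == 3 and
 * entry i of a PTE page lives at base + (i << PTE_T_LOG2).
 */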

/*
 * We used to declare this array with its size, but gcc 3.3 and older
 * cannot determine that the expression is constant, so the size is
 * dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte.pte_low & _PAGE_FILE; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		pte.pte_low  |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		pte.pte_low  |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (pte.pte_low & _PAGE_READ) {
		pte.pte_low  |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (cpu_has_rixi) {
		if (!(pte_val(pte) & _PAGE_NO_READ))
			pte_val(pte) |= _PAGE_SILENT_READ;
	} else {
		if (pte_val(pte) & _PAGE_READ)
			pte_val(pte) |= _PAGE_SILENT_READ;
	}
	return pte;
}

#ifdef _PAGE_HUGE
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* _PAGE_HUGE */
#endif
static inline int pte_special(pte_t pte)	{ return 0; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/*
 * Macro to mark a page protection value as "uncacheable".  Note that
 * "protection" is really a misnomer here, as the value contains the
 * memory attribute bits, dirty bits, and various other bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}
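
/*
 * The usual consumer is a driver mmap() handler mapping device
 * registers or framebuffer memory.  A minimal sketch (hypothetical
 * driver, error handling omitted):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 *					  vma->vm_end - vma->vm_start,
 *					  vma->vm_page_prot);
 *	}
 */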

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= ~0x3f;
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & 0x3f;
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
#endif
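
/*
 * pte_modify() is what protection changes (e.g. mprotect()) ultimately
 * use on an existing mapping: _PAGE_CHG_MASK preserves what must
 * survive the change (the PFN and the accessed/dirty software state),
 * while the new pgprot supplies the protection bits.  In the 64-bit
 * physical address layout for MIPS32, the low six bits of the
 * protection value are kept in pte_high next to the PFN, hence the
 * extra 0x3f masking above.
 */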

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);
extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;

	__update_tlb(vma, address, pte);
	__update_cache(vma, address, pte);
}
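
/*
 * The generic MM code calls update_mmu_cache() after a fault installs
 * or updates a PTE.  MIPS refills its TLB in software, so __update_tlb()
 * preloads the new translation (sparing an immediate second fault),
 * while __update_cache() deals with D-cache aliasing for the page.
 */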

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}

#define kern_addr_valid(addr)	(1)

#ifdef CONFIG_64BIT_PHYS_ADDR
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
	unsigned long pfn, unsigned long size, pgprot_t prot);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
		unsigned long vaddr,
		unsigned long pfn,
		unsigned long size,
		pgprot_t prot)
{
	phys_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#else
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_SPLITTING);
}

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SPLITTING;

	return pmd;
}
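
/*
 * _PAGE_SPLITTING marks a huge PMD that is being split back into
 * normal PTEs: the splitter sets it, then flushes via
 * pmdp_splitting_flush() below, so that lockless walkers such as
 * get_user_pages_fast() see the PMD as unstable and retry until the
 * split has finished.
 */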

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
/* Extern to avoid header file madness */
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long address,
				 pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

	if (cpu_has_rixi) {
		if (!(pmd_val(pmd) & _PAGE_NO_READ))
			pmd_val(pmd) |= _PAGE_SILENT_READ;
	} else {
		if (pmd_val(pmd) & _PAGE_READ)
			pmd_val(pmd) |= _PAGE_SILENT_READ;
	}

	return pmd;
}

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}
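
/*
 * Note that a PMD entry encodes two different things here: a huge PMD
 * carries a PFN shifted by _PFN_SHIFT plus protection bits, while a
 * normal PMD holds the kernel virtual address of a PTE page.  That is
 * why pmd_page() must test pmd_trans_huge() before converting.
 */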

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic version of pmdp_get_and_clear uses a version of
 * pmd_clear() with a different prototype.
 */
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#include <asm-generic/pgtable.h>

/*
 * Uncached accelerated TLB map for video memory access.
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t *vma_prot);
#endif

/*
 * We provide our own get_unmapped_area() to cope with the virtual
 * aliasing constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise.
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _ASM_PGTABLE_H */