/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_PGTABLE_H
#define _M68K_PGTABLE_H

#if defined(CONFIG_SUN3) || defined(CONFIG_COLDFIRE)
#include <asm-generic/pgtable-nopmd.h>
#else
#include <asm-generic/pgtable-nopud.h>
#endif

#include <asm/setup.h>

#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <linux/sched.h>
#include <linux/threads.h>

/*
 * This file contains the functions and defines necessary to modify and use
 * the m68k page table tree.
 */

#include <asm/virtconvert.h>

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval)					\
	do {							\
		*(pteptr) = (pteval);				\
	} while (0)
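
/*
 * Illustrative use of the hook above (not part of this header's API):
 * code that installs a page table entry simply stores the new value,
 * e.g.
 *
 *	pte_t pte = mk_pte(page, PAGE_KERNEL);
 *	set_pte(ptep, pte);
 *
 * As the macro shows, no TLB or cache maintenance is hidden in set_pte()
 * here; callers flush explicitly where required.
 */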

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#if CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT	18
#endif
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
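
/*
 * For illustration, in the three-level (Motorola MMU) configuration:
 * PMD_SIZE is 1UL << 18 = 256 KiB, so "addr & PMD_MASK" rounds an address
 * down to the start of the 256 KiB region covered by one second-level table.
 */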

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef CONFIG_SUN3
#define PGDIR_SHIFT     17
#elif defined(CONFIG_COLDFIRE)
#define PGDIR_SHIFT     22
#else
#define PGDIR_SHIFT	25
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
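
/*
 * Illustrative sizes implied by the shifts above: one top-level entry
 * covers 128 KiB on Sun3 (1 << 17), 4 MiB on ColdFire (1 << 22) and
 * 32 MiB with the Motorola MMU (1 << 25).
 */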

/*
 * Entries per page table level: the classic (Motorola MMU) m68k
 * configuration is three-level, so it has a real PMD level; Sun3 and
 * ColdFire fold the PMD into the PGD.
 */
#ifdef CONFIG_SUN3
#define PTRS_PER_PTE   16
#define __PAGETABLE_PMD_FOLDED 1
#define PTRS_PER_PMD   1
#define PTRS_PER_PGD   2048
#elif defined(CONFIG_COLDFIRE)
#define PTRS_PER_PTE	512
#define __PAGETABLE_PMD_FOLDED 1
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	1024
#else
#define PTRS_PER_PTE	64
#define PTRS_PER_PMD	128
#define PTRS_PER_PGD	128
#endif
#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
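
/*
 * Sanity check on the numbers above (illustrative only, assuming the usual
 * 4 KiB pages for the Motorola MMU and 8 KiB pages for ColdFire):
 * Motorola: 64 PTEs cover 256 KiB (= PMD_SIZE), 128 PMD entries cover
 * 32 MiB (= PGDIR_SIZE), and 128 PGD entries cover the full 4 GiB.
 * ColdFire: 512 PTEs cover 4 MiB (= PGDIR_SIZE) and 1024 PGD entries
 * again cover 4 GiB.
 */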

/* Virtual address region for use by kernel_map() */
#ifdef CONFIG_SUN3
#define KMAP_START	0x0dc00000
#define KMAP_END	0x0e000000
#elif defined(CONFIG_COLDFIRE)
#define KMAP_START	0xe0000000
#define KMAP_END	0xf0000000
#elif defined(CONFIG_VIRT)
#define	KMAP_START	0xdf000000
#define	KMAP_END	0xff000000
#else
#define	KMAP_START	0xd0000000
#define	KMAP_END	0xf0000000
#endif

#ifdef CONFIG_SUN3
extern unsigned long m68k_vmalloc_end;
#define VMALLOC_START 0x0f800000
#define VMALLOC_END m68k_vmalloc_end
#elif defined(CONFIG_COLDFIRE)
#define VMALLOC_START	0xd0000000
#define VMALLOC_END	0xe0000000
#elif defined(CONFIG_VIRT)
#define VMALLOC_OFFSET	PAGE_SIZE
#define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END     KMAP_START
#else
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET	(8*1024*1024)
#define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END KMAP_START
#endif
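
/*
 * Worked example for the default (Motorola MMU) case above, purely for
 * illustration and assuming RAM is mapped from virtual address 0: with
 * 32 MiB of RAM, high_memory is 0x02000000, so
 * VMALLOC_START = (0x02000000 + 0x00800000) & ~0x007fffff = 0x02800000,
 * leaving the intended gap below the vmalloc area, which then runs up to
 * KMAP_START (0xd0000000).
 */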

/* zero page used for uninitialized stuff */
extern void *empty_zero_page;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
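
/*
 * Illustration: a read fault on a never-written anonymous mapping can be
 * satisfied by mapping this single zero page read-only; as the macro shows,
 * ZERO_PAGE(vaddr) returns the struct page behind empty_zero_page
 * regardless of vaddr.
 */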

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR			(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK			(~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware!  SRB. */
#define SIZEOF_PTR_LOG2			       2

extern void kernel_set_cachemode(void *addr, unsigned long size, int cmode);

/*
 * The m68k doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.  The Sun3 does have
 * external MMU info, but it is updated on demand.
 */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
}

#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
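
/*
 * Illustrative call site (defined in generic code, not here): memory
 * management code invokes update_mmu_cache(vma, addr, ptep) after
 * installing a PTE, e.g. following set_pte().  On m68k this expands to
 * the empty function above since, as noted in the comment, the kernel
 * page tables already hold everything the MMU needs and Sun3's external
 * MMU state is filled in on demand.
 */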

#endif /* !__ASSEMBLY__ */

/* MMU-specific headers */

#ifdef CONFIG_SUN3
#include <asm/sun3_pgtable.h>
#elif defined(CONFIG_COLDFIRE)
#include <asm/mcf_pgtable.h>
#else
#include <asm/motorola_pgtable.h>
#endif

#ifndef __ASSEMBLY__
/*
 * Macro to mark a page protection value as "uncacheable".
 */
#ifdef CONFIG_COLDFIRE
# define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | CF_PAGE_NOCACHE))
#else
#ifdef SUN3_PAGE_NOCACHE
# define __SUN3_PAGE_NOCACHE	SUN3_PAGE_NOCACHE
#else
# define __SUN3_PAGE_NOCACHE	0
#endif
#define pgprot_noncached(prot)							\
	(MMU_IS_SUN3								\
	 ? (__pgprot(pgprot_val(prot) | __SUN3_PAGE_NOCACHE))			\
	 : ((MMU_IS_851 || MMU_IS_030)						\
	    ? (__pgprot(pgprot_val(prot) | _PAGE_NOCACHE030))			\
	    : (MMU_IS_040 || MMU_IS_060)					\
	    ? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S))	\
	    : (prot)))
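
/*
 * Example (illustrative only): code mapping a device register page
 * uncached would pass something like pgprot_noncached(PAGE_KERNEL).
 * Per the macro above, on a 68851/'030 MMU this ORs in _PAGE_NOCACHE030,
 * on an '040/'060 it clears the cache-mode bits via _CACHEMASK040 and
 * selects _PAGE_NOCACHE_S, and on Sun3 it ORs in SUN3_PAGE_NOCACHE when
 * that is defined.
 */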
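/*
 * pgprot_dmacoherent() is implemented out of line; the #define of the same
 * name below lets generic code see that this architecture provides its own
 * version rather than the fallback.
 */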
pgprot_t pgprot_dmacoherent(pgprot_t prot);
#define pgprot_dmacoherent(prot)	pgprot_dmacoherent(prot)

#endif /* CONFIG_COLDFIRE */
#endif /* !__ASSEMBLY__ */

#endif /* _M68K_PGTABLE_H */