/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 */

#ifndef _S390_PAGE_H
#define _S390_PAGE_H

#include <linux/const.h>
#include <asm/types.h>

#define _PAGE_SHIFT	12
#define _PAGE_SIZE	(_AC(1, UL) << _PAGE_SHIFT)
#define _PAGE_MASK	(~(_PAGE_SIZE - 1))

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	_PAGE_SHIFT
#define PAGE_SIZE	_PAGE_SIZE
#define PAGE_MASK	_PAGE_MASK
#define PAGE_DEFAULT_ACC	_AC(0, UL)
/* storage-protection override */
#define PAGE_SPO_ACC		9
#define PAGE_DEFAULT_KEY	(PAGE_DEFAULT_ACC << 4)
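
/*
 * Illustrative note (not part of the original header): a storage key byte
 * keeps the access-control bits in its high nibble (see _PAGE_ACC_BITS
 * further below), which is why the ACC value is shifted left by 4. With
 * the default ACC of 0:
 *
 *	PAGE_DEFAULT_KEY == (0UL << 4) == 0
 *
 * An ACC of 9 (PAGE_SPO_ACC) placed the same way would give a key byte
 * of 0x90.
 */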

#define HPAGE_SHIFT	20
#define HPAGE_SIZE	(1UL << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE		2
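
/*
 * Illustrative arithmetic (not part of the original header): with
 * HPAGE_SHIFT == 20 a huge page is 1 MiB, so HUGETLB_PAGE_ORDER is
 * 20 - 12 == 8 and one huge page covers 1UL << 8 == 256 base pages.
 */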

#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define ARCH_HAS_HUGE_PTE_TYPE
#define ARCH_HAS_PREPARE_HUGEPAGE
#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH

#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

#include <asm/setup.h>
#ifndef __ASSEMBLY__

void __storage_key_init_range(unsigned long start, unsigned long end);

/* Initialize the storage keys of a memory range; a no-op when the default key is zero */
static inline void storage_key_init_range(unsigned long start, unsigned long end)
{
	if (PAGE_DEFAULT_KEY != 0)
		__storage_key_init_range(start, end);
}

#define clear_page(page)	memset((page), 0, PAGE_SIZE)

/*
 * copy_page uses the mvcl instruction with a 0xb0 padding byte in order
 * to bypass caches when copying a page. Especially when copying huge
 * pages this keeps L1 and L2 data caches alive.
 */
static inline void copy_page(void *to, void *from)
{
	union register_pair dst, src;

	dst.even = (unsigned long) to;
	dst.odd  = 0x1000;		/* destination length: 4096 bytes */
	src.even = (unsigned long) from;
	src.odd  = 0xb0001000;		/* pad byte 0xb0 + source length 4096 */

	asm volatile(
		"	mvcl	%[dst],%[src]"
		: [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
		: : "memory", "cc");
}

#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)

/*
 * These are used to make use of C type-checking.
 */

typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgste; } pgste_t;
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long p4d; } p4d_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef pte_t *pgtable_t;

#define pgprot_val(x)	((x).pgprot)
#define pgste_val(x)	((x).pgste)

static inline unsigned long pte_val(pte_t pte)
{
	return pte.pte;
}

static inline unsigned long pmd_val(pmd_t pmd)
{
	return pmd.pmd;
}

static inline unsigned long pud_val(pud_t pud)
{
	return pud.pud;
}

static inline unsigned long p4d_val(p4d_t p4d)
{
	return p4d.p4d;
}

static inline unsigned long pgd_val(pgd_t pgd)
{
	return pgd.pgd;
}

#define __pgste(x)	((pgste_t) { (x) } )
#define __pte(x)        ((pte_t) { (x) } )
#define __pmd(x)        ((pmd_t) { (x) } )
#define __pud(x)	((pud_t) { (x) } )
#define __p4d(x)	((p4d_t) { (x) } )
#define __pgd(x)        ((pgd_t) { (x) } )
#define __pgprot(x)     ((pgprot_t) { (x) } )
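
/*
 * Example (illustrative sketch, not part of the original header): because
 * every table-entry type is wrapped in its own struct, the compiler
 * rejects accidental mixing of page-table levels, while the constructor
 * and accessor pairs convert explicitly:
 *
 *	pte_t pte = __pte(raw_bits);	- explicit construction
 *	unsigned long v = pte_val(pte);	- explicit unwrapping
 *	pmd_t pmd = pte;		- rejected by the compiler
 */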

static inline void page_set_storage_key(unsigned long addr,
					unsigned char skey, int mapped)
{
	if (!mapped)
		/* SSKE with the nonquiescing control (m3 bit NQ) set */
		asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
			     : : "d" (skey), "a" (addr));
	else
		asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
}

static inline unsigned char page_get_storage_key(unsigned long addr)
{
	unsigned char skey;

	/* ISKE: insert the storage key of the addressed page frame */
	asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
	return skey;
}

static inline int page_reset_referenced(unsigned long addr)
{
	int cc;

	/*
	 * RRBE resets the reference bit; the condition code reports the
	 * reference and change bit state prior to the reset.
	 */
	asm volatile(
		"	rrbe	0,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc) : "a" (addr) : "cc");
	return cc;
}

/* Bits in the storage key */
#define _PAGE_CHANGED		0x02	/* HW changed bit		*/
#define _PAGE_REFERENCED	0x04	/* HW referenced bit		*/
#define _PAGE_FP_BIT		0x08	/* HW fetch protection bit	*/
#define _PAGE_ACC_BITS		0xf0	/* HW access control bits	*/
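
/*
 * Usage sketch (illustrative, not part of the original header): together
 * with the helpers above these bits allow inspecting and updating the
 * key of a page frame:
 *
 *	unsigned char skey = page_get_storage_key(addr);
 *	if (skey & _PAGE_CHANGED)
 *		...			- hardware saw a store to the page
 *	page_set_storage_key(addr, PAGE_DEFAULT_KEY, 1);
 */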

struct page;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);
void arch_set_page_dat(struct page *page, int order);

/* /dev/mem access to physical memory is never allowed on s390 */
static inline int devmem_is_allowed(unsigned long pfn)
{
	return 0;
}

#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE

#if IS_ENABLED(CONFIG_PGSTE)
int arch_make_page_accessible(struct page *page);
#define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
#endif

#define __PAGE_OFFSET		0x0UL
#define PAGE_OFFSET		0x0UL

#define __pa(x)			((unsigned long)(x))
#define __va(x)			((void *)(unsigned long)(x))
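
/*
 * Illustrative consequence (not part of the original header): with
 * PAGE_OFFSET defined as 0 the kernel address space is identity mapped,
 * so __pa() and __va() reduce to plain casts:
 *
 *	__pa(__va(0x2000UL)) == 0x2000UL
 *	phys_to_pfn(0x2000UL) == 2	- see the macros below
 */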

#define phys_to_pfn(phys)	((phys) >> PAGE_SHIFT)
#define pfn_to_phys(pfn)	((pfn) << PAGE_SHIFT)

#define phys_to_page(phys)	pfn_to_page(phys_to_pfn(phys))
#define page_to_phys(page)	pfn_to_phys(page_to_pfn(page))

static inline void *pfn_to_virt(unsigned long pfn)
{
	return __va(pfn_to_phys(pfn));
}

static inline unsigned long virt_to_pfn(const void *kaddr)
{
	return phys_to_pfn(__pa(kaddr));
}

#define pfn_to_kaddr(pfn)	pfn_to_virt(pfn)

#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define page_to_virt(page)	pfn_to_virt(page_to_pfn(page))

#define virt_addr_valid(kaddr)	pfn_valid(virt_to_pfn(kaddr))

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_NON_EXEC

#endif /* !__ASSEMBLY__ */

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* _S390_PAGE_H */