/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 */

#ifndef _S390_PAGE_H
#define _S390_PAGE_H

#include <linux/const.h>
#include <asm/types.h>

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))
#define PAGE_DEFAULT_ACC	0
#define PAGE_DEFAULT_KEY	(PAGE_DEFAULT_ACC << 4)

#define HPAGE_SHIFT	20
#define HPAGE_SIZE	(1UL << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE		2

#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define ARCH_HAS_HUGE_PTE_TYPE
#define ARCH_HAS_PREPARE_HUGEPAGE
#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH

#include <asm/setup.h>
#ifndef __ASSEMBLY__

void __storage_key_init_range(unsigned long start, unsigned long end);

static inline void storage_key_init_range(unsigned long start, unsigned long end)
{
	if (PAGE_DEFAULT_KEY)
		__storage_key_init_range(start, end);
}

#define clear_page(page)	memset((page), 0, PAGE_SIZE)

/*
 * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
 * bypass caches when copying a page. Especially when copying huge pages
 * this keeps L1 and L2 data caches alive.
 */
static inline void copy_page(void *to, void *from)
{
	register void *reg2 asm ("2") = to;		/* destination address */
	register unsigned long reg3 asm ("3") = 0x1000;	/* destination length (PAGE_SIZE) */
	register void *reg4 asm ("4") = from;		/* source address */
	register unsigned long reg5 asm ("5") = 0xb0001000; /* pad byte 0xb0 + source length */
	asm volatile(
		"	mvcl	2,4"
		: "+d" (reg2), "+d" (reg3), "+d" (reg4), "+d" (reg5)
		: : "memory", "cc");
}

#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE

/*
 * These are used to make use of C type-checking..
 */

typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgste; } pgste_t;
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef pte_t *pgtable_t;

#define pgprot_val(x)	((x).pgprot)
#define pgste_val(x)	((x).pgste)
#define pte_val(x)	((x).pte)
#define pmd_val(x)	((x).pmd)
#define pud_val(x)	((x).pud)
#define pgd_val(x)	((x).pgd)

#define __pgste(x)	((pgste_t) { (x) } )
#define __pte(x)	((pte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pud(x)	((pud_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

/*
 * Set the storage key of the page at addr. For a page without any
 * mapping the nonquiescing variant of the sske instruction is used,
 * which avoids quiescing all other CPUs.
 */
static inline void page_set_storage_key(unsigned long addr,
					unsigned char skey, int mapped)
{
	if (!mapped)
		asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
			     : : "d" (skey), "a" (addr));
	else
		asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
}

/* Read the storage key of the page at addr. */
static inline unsigned char page_get_storage_key(unsigned long addr)
{
	unsigned char skey;

	asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
	return skey;
}

/*
 * Reset the referenced bit in the storage key of the page at addr and
 * return the condition code, which reflects the previous state of the
 * referenced and changed bits.
 */
static inline int page_reset_referenced(unsigned long addr)
{
	int cc;

	asm volatile(
		"	rrbe	0,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc) : "a" (addr) : "cc");
	return cc;
}

/* Bits in the storage key */
#define _PAGE_CHANGED		0x02	/* HW changed bit */
#define _PAGE_REFERENCED	0x04	/* HW referenced bit */
#define _PAGE_FP_BIT		0x08	/* HW fetch protection bit */
#define _PAGE_ACC_BITS		0xf0	/* HW access control bits */

struct page;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);
void arch_set_page_states(int make_stable);

static inline int devmem_is_allowed(unsigned long pfn)
{
	return 0;
}

#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE

#endif /* !__ASSEMBLY__ */

#define __PAGE_OFFSET		0x0UL
#define PAGE_OFFSET		0x0UL
#define __pa(x)			(unsigned long)(x)
#define __va(x)			(void *)(unsigned long)(x)
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)
#define page_to_virt(page)	pfn_to_virt(page_to_pfn(page))

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* _S390_PAGE_H */