/*
 *  linux/arch/arm/mm/copypage-v6.c
 *
 *  Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>

#include "mm.h"

#if SHMLBA > 16384
#error FIX ME
#endif

#define from_address	(0xffff8000)
#define to_address	(0xffffc000)

static DEFINE_SPINLOCK(v6_lock);

/*
 * Copy the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of these pages.
 */
static void v6_copy_user_highpage_nonaliasing(struct page *to,
	struct page *from, unsigned long vaddr)
{
	void *kto, *kfrom;

	kfrom = kmap_atomic(from, KM_USER0);
	kto = kmap_atomic(to, KM_USER1);
	copy_page(kto, kfrom);
#ifdef CONFIG_HIGHMEM
	/*
	 * kmap_atomic() doesn't set the page virtual address, and
	 * kunmap_atomic() takes care of cache flushing already.
	 */
	if (page_address(to) != NULL)
#endif
		__cpuc_flush_dcache_area(kto, PAGE_SIZE);
	kunmap_atomic(kto, KM_USER1);
	kunmap_atomic(kfrom, KM_USER0);
}

/*
 * Clear the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of this page.
 */
static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

/*
 * Discard data in the kernel mapping for the new page.
 * FIXME: needs this MCRR to be supported.
 */
static void discard_old_kernel_data(void *kto)
{
	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
	   :
	   : "r" (kto),
	     "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
	   : "cc");
}

/*
 * Copy the page, taking account of the cache colour.
 */
static void v6_copy_user_highpage_aliasing(struct page *to,
	struct page *from, unsigned long vaddr)
{
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long kfrom, kto;

	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(to));

	/*
	 * Now copy the page using the same cache colour as the
	 * page's ultimate destination.
	 */
	spin_lock(&v6_lock);

	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);

	kfrom = from_address + (offset << PAGE_SHIFT);
	kto   = to_address + (offset << PAGE_SHIFT);

	flush_tlb_kernel_page(kfrom);
	flush_tlb_kernel_page(kto);

	copy_page((void *)kto, (void *)kfrom);

	spin_unlock(&v6_lock);
}
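/*
 * The two aliasing variants rely on from_address and to_address being
 * SHMLBA-aligned: adding CACHE_COLOUR(vaddr) << PAGE_SHIFT selects a
 * temporary kernel mapping with the same D-cache colour as the user
 * mapping of the page, so data written through it is seen by userspace
 * without a further cache flush.
 */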
/*
 * Clear the user page.  We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the
 * user page.
 */
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
{
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long to = to_address + (offset << PAGE_SHIFT);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(page));

	/*
	 * Now clear the page using the same cache colour as
	 * the page's ultimate destination.
	 */
	spin_lock(&v6_lock);

	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);
	clear_page((void *)to);

	spin_unlock(&v6_lock);
}

struct cpu_user_fns v6_user_fns __initdata = {
	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
	.cpu_copy_user_highpage	= v6_copy_user_highpage_nonaliasing,
};

static int __init v6_userpage_init(void)
{
	if (cache_is_vipt_aliasing()) {
		cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
	}

	return 0;
}

core_initcall(v6_userpage_init);