/*
 *  linux/arch/arm/mm/copypage-xscale.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors. When we copy a user page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache. This prevents us thrashing
 * the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

/*
 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
 * specific hacks for copying pages efficiently.
 */
#define COPYPAGE_MINICACHE	0xffff8000

#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)

static DEFINE_SPINLOCK(minicache_lock);

/*
 * XScale mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address. Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue. The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 */
static void __naked
mc_copy_user_page(void *from, void *to)
{
	/*
	 * Strangely enough, best performance is achieved
	 * when prefetching destination as well. (NP)
	 *
	 * Each trip through the loop copies 64 bytes; lr counts the
	 * remaining 64-byte blocks. The "1:" path prefetches ahead;
	 * the last block re-enters at "2:", skipping the prefetches,
	 * so we never prefetch past the end of the page.
	 */
	asm volatile(
	"stmfd	sp!, {r4, r5, lr}		\n\
	mov	lr, %2				@ lr = PAGE_SIZE / 64 - 1\n\
	pld	[r0, #0]			\n\
	pld	[r0, #32]			\n\
	pld	[r1, #0]			\n\
	pld	[r1, #32]			\n\
1:	pld	[r0, #64]			\n\
	pld	[r0, #96]			\n\
	pld	[r1, #64]			\n\
	pld	[r1, #96]			\n\
2:	ldrd	r2, [r0], #8			\n\
	ldrd	r4, [r0], #8			\n\
	mov	ip, r1				@ remember dst cache line\n\
	strd	r2, [r1], #8			\n\
	ldrd	r2, [r0], #8			\n\
	strd	r4, [r1], #8			\n\
	ldrd	r4, [r0], #8			\n\
	strd	r2, [r1], #8			\n\
	strd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	ldrd	r2, [r0], #8			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	ldrd	r4, [r0], #8			\n\
	mov	ip, r1				@ remember dst cache line\n\
	strd	r2, [r1], #8			\n\
	ldrd	r2, [r0], #8			\n\
	strd	r4, [r1], #8			\n\
	ldrd	r4, [r0], #8			\n\
	strd	r2, [r1], #8			\n\
	strd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	lr, lr, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bgt	1b				@ next block, with prefetch\n\
	beq	2b				@ final block, no prefetch\n\
	ldmfd	sp!, {r4, r5, pc}		"
	:
	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
}

void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr)
{
	void *kto = kmap_atomic(to, KM_USER1);

	/* Write back any dirty cache lines covering the source page. */
	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

	/* There is only one minicache alias; serialise its users. */
	spin_lock(&minicache_lock);

	/*
	 * Alias the source page at COPYPAGE_MINICACHE so that reads
	 * are allocated into the mini data cache, not the main Dcache.
	 */
	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE),
		    pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
	flush_tlb_kernel_page(COPYPAGE_MINICACHE);

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	spin_unlock(&minicache_lock);

	kunmap_atomic(kto, KM_USER1);
}
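/*
 * A minimal sketch of how generic code reaches the hooks in this file
 * (assuming the MULTI_USER cpu_user_fns dispatch in <asm/page.h> of
 * this era; the struct fields match the initialiser at the bottom of
 * this file, but the exact macro spellings may differ):
 *
 *	struct cpu_user_fns {
 *		void (*cpu_clear_user_highpage)(struct page *page,
 *						unsigned long vaddr);
 *		void (*cpu_copy_user_highpage)(struct page *to,
 *						struct page *from,
 *						unsigned long vaddr);
 *	};
 *
 *	extern struct cpu_user_fns cpu_user;
 *
 *	#define clear_user_highpage(page, vaddr) \
 *		cpu_user.cpu_clear_user_highpage(page, vaddr)
 *	#define copy_user_highpage(to, from, vaddr, vma) \
 *		cpu_user.cpu_copy_user_highpage(to, from, vaddr)
 */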
/*
 * XScale optimised clear_user_highpage
 *
 * Each loop iteration zeroes one 32-byte cache line, then cleans and
 * invalidates it, so the zeroed data reaches memory without leaving
 * the page resident in the main data cache.
 */
void
xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	/* ptr is a dummy output so the asm may advance %0 freely. */
	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
	asm volatile(
	"mov	r1, %2				@ r1 = PAGE_SIZE / 32\n\
	mov	r2, #0				\n\
	mov	r3, #0				@ strd stores the r2/r3 pair\n\
1:	mov	ip, %0				@ remember dst cache line\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	r1, r1, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bne	1b"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 32)
	: "r1", "r2", "r3", "ip");
	kunmap_atomic(kaddr, KM_USER0);
}

struct cpu_user_fns xscale_mc_user_fns __initdata = {
	.cpu_clear_user_highpage	= xscale_mc_clear_user_highpage,
	.cpu_copy_user_highpage		= xscale_mc_copy_user_highpage,
};
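/*
 * A minimal sketch of how this table is selected at boot (assuming the
 * usual proc_info_list hookup; names below are illustrative): the
 * XScale entry in proc-xscale.S points its user-functions slot at
 * xscale_mc_user_fns, and setup_processor() copies it into the global
 * cpu_user dispatch table, roughly:
 *
 *	cpu_user = *list->user;	(list == the matched proc_info_list)
 */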