/*
 *  linux/arch/arm/mm/copypage-v4wb.c
 *
 *  Copyright (C) 1995-1999 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/highmem.h>

/*
 * ARMv4 optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 *
 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
 * instruction.  If your processor does not supply this, you have to write your
 * own copy_user_highpage that does the right thing.
 */
static void v4wb_copy_user_page(void *kto, const void *kfrom)
{
	int tmp;

	asm volatile ("\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4+1\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	%1!, {r3, r4, ip, lr}		@ 4\n\
	subs	%2, %2, #1			@ 1\n\
	stmia	%0!, {r3, r4, ip, lr}		@ 4\n\
	ldmneia	%1!, {r3, r4, ip, lr}		@ 4\n\
	bne	1b				@ 1\n\
	mcr	p15, 0, %1, c7, c10, 4		@ 1   drain WB"
	: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64)
	: "r3", "r4", "ip", "lr");
}

void v4wb_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to);
	kfrom = kmap_atomic(from);
	flush_cache_page(vma, vaddr, page_to_pfn(from));
	v4wb_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom);
	kunmap_atomic(kto);
}

/*
 * ARMv4 optimised clear_user_page
 *
 * Same story as above.
 */
void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);

	asm volatile("\
	mov	r1, %2				@ 1\n\
	mov	r2, #0				@ 1\n\
	mov	r3, #0				@ 1\n\
	mov	ip, #0				@ 1\n\
	mov	lr, #0				@ 1\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r1, r1, #1			@ 1\n\
	bne	1b				@ 1\n\
	mcr	p15, 0, r1, c7, c10, 4		@ 1   drain WB"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 64)
	: "r1", "r2", "r3", "ip", "lr");
	kunmap_atomic(kaddr);
}

struct cpu_user_fns v4wb_user_fns __initdata = {
	.cpu_clear_user_highpage = v4wb_clear_user_highpage,
	.cpu_copy_user_highpage	= v4wb_copy_user_highpage,
};
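
/*
 * Illustrative sketch, not part of the original file: in plain C, the copy
 * loop in v4wb_copy_user_page() behaves roughly as below, assuming a 32-byte
 * D-cache line and two hypothetical helpers: invalidate_dcache_line(), a
 * wrapper around the "mcr p15, 0, <addr>, c7, c6, 1" instruction, and
 * drain_write_buffer(), a wrapper around the c7, c10, 4 drain-WB MCR.
 * Each destination line is invalidated immediately before it is overwritten,
 * so the read-allocate write-back Dcache never holds a stale alias of the
 * destination page; the stores are absorbed by the write buffer.  The real
 * routine unrolls this to 64 bytes (two D lines) per iteration using
 * ldmia/stmia.  Guarded with #if 0 so the file still builds as-is.
 */
#if 0
static void v4wb_copy_user_page_sketch(void *kto, const void *kfrom)
{
	u32 *to = kto;
	const u32 *from = kfrom;
	int lines;

	/* One pass per 32-byte destination cache line. */
	for (lines = PAGE_SIZE / 32; lines; lines--) {
		invalidate_dcache_line(to);	/* hypothetical helper */
		memcpy(to, from, 32);		/* asm uses ldmia/stmia */
		to += 8;			/* 8 words = 32 bytes */
		from += 8;
	}
	drain_write_buffer();			/* hypothetical helper */
}
#endif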