/*
 *  linux/arch/arm/mm/copypage-xscale.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us thrashing
 * the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

/*
 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
 * specific hacks for copying pages efficiently.
 */
#define COPYPAGE_MINICACHE	0xffff8000

#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)

static DEFINE_SPINLOCK(minicache_lock);

/*
38 * XScale mini-dcache optimised copy_user_page
38 * XScale mini-dcache optimised copy_user_highpage
39 *
40 * We flush the destination cache lines just before we write the data into the
41 * corresponding address. Since the Dcache is read-allocate, this removes the
42 * Dcache aliasing issue. The writes will be forwarded to the write buffer,
43 * and merged as appropriate.
44 */
45static void __attribute__((naked))
46mc_copy_user_page(void *from, void *to)

--- 38 unchanged lines hidden (view full) ---

85 mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\
86 bgt 1b \n\
87 beq 2b \n\
88 ldmfd sp!, {r4, r5, pc} "
89 :
90 : "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
91}
92
39 *
40 * We flush the destination cache lines just before we write the data into the
41 * corresponding address. Since the Dcache is read-allocate, this removes the
42 * Dcache aliasing issue. The writes will be forwarded to the write buffer,
43 * and merged as appropriate.
44 */
45static void __attribute__((naked))
46mc_copy_user_page(void *from, void *to)

--- 38 unchanged lines hidden (view full) ---

85 mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\
86 bgt 1b \n\
87 beq 2b \n\
88 ldmfd sp!, {r4, r5, pc} "
89 :
90 : "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
91}
92
93void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
93void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
94 unsigned long vaddr)
94{
95{
95 struct page *page = virt_to_page(kfrom);
96 void *kto = kmap_atomic(to, KM_USER1);
96
97
97 if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
98 __flush_dcache_page(page_mapping(page), page);
98 if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
99 __flush_dcache_page(page_mapping(from), from);
99
100 spin_lock(&minicache_lock);
101
100
101 spin_lock(&minicache_lock);
102
102 set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);
103 set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
103 flush_tlb_kernel_page(COPYPAGE_MINICACHE);
104
105 mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
106
107 spin_unlock(&minicache_lock);
104 flush_tlb_kernel_page(COPYPAGE_MINICACHE);
105
106 mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
107
108 spin_unlock(&minicache_lock);
109
110 kunmap_atomic(kto, KM_USER1);
108}
109
110/*
111 * XScale optimised clear_user_page
112 */
113void __attribute__((naked))
114xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr)
115{

--- 12 unchanged lines hidden (view full) ---

128 bne 1b \n\
129 mov pc, lr"
130 :
131 : "I" (PAGE_SIZE / 32));
132}
133
134struct cpu_user_fns xscale_mc_user_fns __initdata = {
135 .cpu_clear_user_page = xscale_mc_clear_user_page,
111}
112
113/*
114 * XScale optimised clear_user_page
115 */
116void __attribute__((naked))
117xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr)
118{

--- 12 unchanged lines hidden (view full) ---

131 bne 1b \n\
132 mov pc, lr"
133 :
134 : "I" (PAGE_SIZE / 32));
135}
136
137struct cpu_user_fns xscale_mc_user_fns __initdata = {
138 .cpu_clear_user_page = xscale_mc_clear_user_page,
136 .cpu_copy_user_page = xscale_mc_copy_user_page,
139 .cpu_copy_user_highpage = xscale_mc_copy_user_highpage,
137};
140};