xref: /openbmc/linux/arch/sh/mm/nommu.c (revision 12eb4683)
/*
 * arch/sh/mm/nommu.c
 *
 * Various helper routines and stubs for MMUless SH.
 *
 * Copyright (C) 2002 - 2009 Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/uaccess.h>

/*
 * Nothing too terribly exciting here ..
 */
void copy_page(void *to, void *from)
{
	memcpy(to, from, PAGE_SIZE);
}

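/*
 * Without an MMU there is no separate user address space, so the
 * "user" copy/clear helpers below reduce to plain memory operations.
 * Returning 0 indicates that no bytes were left uncopied/uncleared.
 */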
__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n)
{
	memcpy(to, from, n);
	return 0;
}

__kernel_size_t __clear_user(void *to, __kernel_size_t n)
{
	memset(to, 0, n);
	return 0;
}

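/*
 * There is no TLB to manage without an MMU, so none of these flush
 * routines should ever be reached; they BUG() to catch any
 * unexpected callers.
 */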
void local_flush_tlb_all(void)
{
	BUG();
}

void local_flush_tlb_mm(struct mm_struct *mm)
{
	BUG();
}

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	BUG();
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	BUG();
}

void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
	BUG();
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	BUG();
}

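/*
 * Unlike the flush routines above, these are plain no-ops rather than
 * BUG()s: there is no TLB state to flush or update without an MMU.
 */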
void __flush_tlb_global(void)
{
}

void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
}

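/*
 * kmap_coherent() is only needed for dealing with cache aliasing on
 * MMU parts; it should never be called on nommu, so any use is a bug.
 */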
void __init kmap_coherent_init(void)
{
}

void *kmap_coherent(struct page *page, unsigned long addr)
{
	BUG();
	return NULL;
}

void kunmap_coherent(void *kvaddr)
{
	BUG();
}

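/*
 * No page tables or fixmaps to set up without an MMU; these
 * initialization hooks have nothing to do.
 */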
void __init page_table_range_init(unsigned long start, unsigned long end,
				  pgd_t *pgd_base)
{
}

void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
}

void pgtable_cache_init(void)
{
}
105