// SPDX-License-Identifier: GPL-2.0
/*
 * Based upon linux/arch/m68k/mm/sun3mmu.c
 * Based upon linux/arch/ppc/mm/mmu_context.c
 *
 * Implementations of mm routines specific to the Coldfire MMU.
 *
 * Copyright (c) 2008 Freescale Semiconductor, Inc.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/mcf_pgalloc.h>
#include <asm/tlbflush.h>

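/*
 * True when an address falls in the kernel vmalloc/kmap window; such
 * addresses are resolved against init_mm rather than the current mm.
 */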
#define KMAPAREA(x)	((x >= VMALLOC_START) && (x < KMAP_END))

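/*
 * Context (ASID) bookkeeping, mirroring the old ppc scheme:
 * next_mmu_context is the next context number to try handing out,
 * context_map is a bitmap of contexts currently in use,
 * nr_free_contexts counts the contexts still available, context_mm
 * maps a context number back to the mm that owns it, and num_pages
 * is the total number of physical pages in the system.
 */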
mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT+1];
unsigned long num_pages;

/*
 * ColdFire paging_init derived from sun3.
 */
void __init paging_init(void)
{
	pgd_t *pg_dir;
	pte_t *pg_table;
	unsigned long address, size;
	unsigned long next_pgtable, bootmem_end;
	unsigned long zones_size[MAX_NR_ZONES];
	enum zone_type zone;
	int i;

	empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
	memset((void *) empty_zero_page, 0, PAGE_SIZE);

	pg_dir = swapper_pg_dir;
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

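	/*
	 * Allocate one contiguous block of page tables big enough to map
	 * every physical page (one pte_t per page), rounded up to whole
	 * pages.
	 */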
	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
	next_pgtable = (unsigned long) alloc_bootmem_pages(size);

	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
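	/* Start filling in pgd entries at the slot covering PAGE_OFFSET. */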
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

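	/*
	 * Map all of physical RAM at PAGE_OFFSET, one page table at a
	 * time; slots beyond high_memory are left as invalid (zero) PTEs.
	 */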
	address = PAGE_OFFSET;
	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) next_pgtable;
		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
			if (address >= (unsigned long) high_memory)
				pte_val(pte) = 0;

			set_pte(pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	current->mm = NULL;

	for (zone = 0; zone < MAX_NR_ZONES; zone++)
		zones_size[zone] = 0x0;
	zones_size[ZONE_DMA] = num_pages;
	free_area_init(zones_size);
}

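/*
 * Handle an I- or D-TLB miss: walk the page tables for the faulting
 * address and, if a valid mapping exists, load it into the hardware
 * TLB by hand.  Returns 0 once the entry is loaded, or -1 so the
 * caller can fall back to the normal page-fault path.  The ColdFire
 * MMU does not maintain referenced/dirty bits in hardware, so they
 * are emulated here.
 */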
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
	unsigned long flags, mmuar, mmutr;
	struct mm_struct *mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int asid;

	local_irq_save(flags);

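	/*
	 * D-TLB misses latch the faulting address in the MMUAR register;
	 * for I-TLB misses it is reconstructed from the faulting PC plus
	 * the instruction's extension words.
	 */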
	mmuar = (dtlb) ? mmu_read(MMUAR) :
		regs->pc + (extension_word * sizeof(long));

	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
	if (!mm) {
		local_irq_restore(flags);
		return -1;
	}

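	/* Walk the page tables; any missing level means no mapping here. */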
	pgd = pgd_offset(mm, mmuar);
	if (pgd_none(*pgd)) {
		local_irq_restore(flags);
		return -1;
	}

	pmd = pmd_offset(pgd, mmuar);
	if (pmd_none(*pmd)) {
		local_irq_restore(flags);
		return -1;
	}

	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
				: pte_offset_map(pmd, mmuar);
	if (pte_none(*pte) || !pte_present(*pte)) {
		local_irq_restore(flags);
		return -1;
	}

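	/*
	 * Emulate dirty/referenced bits: refuse a write through a
	 * read-only PTE (the generic fault path will sort it out), mark
	 * written pages dirty and every touched page young, and
	 * write-protect clean user pages so that their first write
	 * faults back in here to set the dirty bit.
	 */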
	if (write) {
		if (!pte_write(*pte)) {
			local_irq_restore(flags);
			return -1;
		}
		set_pte(pte, pte_mkdirty(*pte));
	}

	set_pte(pte, pte_mkyoung(*pte));
	asid = mm->context & 0xff;
	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
		set_pte(pte, pte_wrprotect(*pte));

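	/*
	 * Program the TLB entry: MMUTR gets the virtual tag (page number,
	 * ASID and valid bit), MMUDR the physical side (page frame,
	 * access attributes, 8K page size), and the MMUOR write commits
	 * the pair to the I- or D-TLB.
	 */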
	mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
	if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
		mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
	mmu_write(MMUTR, mmutr);

	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

	if (dtlb)
		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
	else
		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);

	local_irq_restore(flags);
	return 0;
}

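/*
 * Register the single contiguous RAM bank with memblock and reserve
 * everything from _rambase up to _ramstart (the kernel image itself)
 * before the allocator is first used.
 */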
void __init cf_bootmem_alloc(void)
{
	unsigned long memstart;

	/* _rambase and _ramend will be naturally page aligned */
	m68k_memory[0].addr = _rambase;
	m68k_memory[0].size = _ramend - _rambase;

	memblock_add(m68k_memory[0].addr, m68k_memory[0].size);

	/* compute total pages in system */
	num_pages = PFN_DOWN(_ramend - _rambase);

	/* page numbers */
	memstart = PAGE_ALIGN(_ramstart);
	min_low_pfn = PFN_DOWN(_rambase);
	max_pfn = max_low_pfn = PFN_DOWN(_ramend);
	high_memory = (void *)_ramend;

	/* Reserve kernel text/data/bss */
	memblock_reserve(_rambase, memstart - _rambase);

	m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
	module_fixup(NULL, __start_fixup, __stop_fixup);

	/* setup node data */
	m68k_setup_node(0);
}

/*
 * Initialize the context management stuff.
 * The following was taken from arch/ppc/mmu_context.c
 */
void __init cf_mmu_context_init(void)
{
	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}

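/*
 * The allocator that consumes these globals lives in
 * <asm/mmu_context.h>.  As a rough sketch only (simplified; the exact
 * helpers differ between kernel revisions), it behaves like:
 *
 *	static inline void get_mmu_context(struct mm_struct *mm)
 *	{
 *		mm_context_t ctx;
 *
 *		if (mm->context != NO_CONTEXT)
 *			return;
 *		// steal a context if none are free
 *		while (no free contexts)
 *			steal_context();
 *		// scan the bitmap for a free context number
 *		ctx = next_mmu_context;
 *		while (test_and_set_bit(ctx, context_map)) {
 *			ctx = find_next_zero_bit(context_map,
 *						 LAST_CONTEXT+1, ctx);
 *			if (ctx > LAST_CONTEXT)
 *				ctx = 0;
 *		}
 *		next_mmu_context = (ctx + 1) & LAST_CONTEXT;
 *		mm->context = ctx;
 *		context_mm[ctx] = mm;
 *	}
 */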
/*
 * Steal a context from a task that has one at the moment.
 * This is only used on 8xx and 4xx and we presently assume that
 * they don't do SMP.  If they do then this will have to check
 * whether the MM we steal is in use.
 * We also assume that this is only used on systems that don't
 * use an MMU hash table - this is true for 8xx and 4xx.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 */
void steal_context(void)
{
	struct mm_struct *mm;
	/*
	 * free up context `next_mmu_context'
	 * if we shouldn't free context 0, don't...
	 */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
	flush_tlb_mm(mm);
	destroy_context(mm);
}