/*
 *  linux/arch/alpha/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

/* 2.3.x zone allocator, 1999 Andrea Arcangeli <andrea@suse.de> */

#include <linux/pagemap.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h> /* max_low_pfn */
#include <linux/vmalloc.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/hwrpb.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/console.h>
#include <asm/tlb.h>

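/* Per-CPU scratch state for the generic TLB gather code used when
   tearing down page tables.  */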
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern void die_if_kernel(char *,struct pt_regs *,long);

static struct pcb_struct original_pcb;

pgd_t *
pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret, *init;

	ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	init = pgd_offset(&init_mm, 0UL);
	if (ret) {
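		/* Share the kernel's upper-region (vmalloc) mappings with
		   the new address space by copying the relevant entries
		   from init_mm's page directory.  */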
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
		memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD - 1)*sizeof(pgd_t));
#else
		pgd_val(ret[PTRS_PER_PGD-2]) = pgd_val(init[PTRS_PER_PGD-2]);
#endif

		/* The last PGD entry is the VPTB self-map.  */
		pgd_val(ret[PTRS_PER_PGD-1])
		  = pte_val(mk_pte(virt_to_page(ret), PAGE_KERNEL));
	}
	return ret;
}

pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
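	/* One zeroed page of kernel PTEs; __GFP_REPEAT asks the allocator
	   to try harder before giving up on the allocation.  */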
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
	return pte;
}


/*
 * BAD_PAGE is the page used for page faults when Linux is out of
 * memory.  Older versions of Linux just did a do_exit(), but using
 * this page instead means there is less risk of a process dying in
 * kernel mode, possibly leaving an inode unused, etc.
 *
 * BAD_PAGETABLE is the accompanying page table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
pmd_t *
__bad_pagetable(void)
{
	memset((void *) EMPTY_PGT, 0, PAGE_SIZE);
	return (pmd_t *) EMPTY_PGT;
}

pte_t
__bad_page(void)
{
	memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
	return pte_mkdirty(mk_pte(virt_to_page(EMPTY_PGE), PAGE_SHARED));
}

static inline unsigned long
load_PCB(struct pcb_struct *pcb)
{
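	/* $30 is the Alpha stack pointer.  Record it in the new PCB, then
	   switch to that PCB via __reload_thread(); the return value is
	   the address of the PCB that was in use before the switch.  */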
	register unsigned long sp __asm__("$30");
	pcb->ksp = sp;
	return __reload_thread(pcb);
}

/* Set up the initial PCB, VPTB, and other such niceties.  */

static inline void
switch_to_system_map(void)
{
	unsigned long newptbr;
	unsigned long original_pcb_ptr;

	/* Initialize the kernel's page tables.  Linux puts the vptb in
	   the last slot of the L1 page table.  */
	memset(swapper_pg_dir, 0, PAGE_SIZE);
	newptbr = ((unsigned long) swapper_pg_dir - PAGE_OFFSET) >> PAGE_SHIFT;
	pgd_val(swapper_pg_dir[1023]) =
		(newptbr << 32) | pgprot_val(PAGE_KERNEL);

	/* Set the vptb.  This is often done by the bootloader, but
	   shouldn't be required.  */
	if (hwrpb->vptb != 0xfffffffe00000000UL) {
		wrvptptr(0xfffffffe00000000UL);
		hwrpb->vptb = 0xfffffffe00000000UL;
		hwrpb_update_checksum(hwrpb);
	}

	/* Also set up the real kernel PCB while we're at it.  */
	init_thread_info.pcb.ptbr = newptbr;
	init_thread_info.pcb.flags = 1;	/* set FEN, clear everything else */
	original_pcb_ptr = load_PCB(&init_thread_info.pcb);
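	/* Flush the entire TLB now that the new page tables are in use.  */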
	tbia();

	/* Save off the contents of the original PCB so that we can
	   restore the original console's page tables for a clean reboot.

	   Note that the PCB is supposed to be a physical address, but
	   since KSEG values also happen to work, folks get confused.
	   Check this here.  */

	if (original_pcb_ptr < PAGE_OFFSET) {
		original_pcb_ptr = (unsigned long)
			phys_to_virt(original_pcb_ptr);
	}
	original_pcb = *(struct pcb_struct *) original_pcb_ptr;
}

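/* Set once callback_init() below has completed.  */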
int callback_init_done;

void * __init
callback_init(void * kernel_end)
{
	struct crb_struct * crb;
	pgd_t *pgd;
	pmd_t *pmd;
	void *two_pages;

	/* Starting at the HWRPB, locate the CRB. */
	crb = (struct crb_struct *)((char *)hwrpb + hwrpb->crb_offset);

	if (alpha_using_srm) {
		/* Tell the console whither it is to be remapped. */
		if (srm_fixup(VMALLOC_START, (unsigned long)hwrpb))
			__halt();		/* "We're boned."  --Bender */

		/* Edit the procedure descriptors for DISPATCH and FIXUP. */
		crb->dispatch_va = (struct procdesc_struct *)
			(VMALLOC_START + (unsigned long)crb->dispatch_va
			 - crb->map[0].va);
		crb->fixup_va = (struct procdesc_struct *)
			(VMALLOC_START + (unsigned long)crb->fixup_va
			 - crb->map[0].va);
	}

	switch_to_system_map();

	/* Allocate one PGD and one PMD.  In the case of SRM, we'll need
	   these to actually remap the console.  There is an assumption
	   here that only one of each is needed, and this allows for 8MB.
	   On systems with larger consoles, additional pages will be
	   allocated as needed during the mapping process.

	   Even when not using SRM, if CONFIG_ALPHA_LARGE_VMALLOC is not
	   enabled we still need to allocate the PGD we use for vmalloc
	   before we start forking other tasks.  */

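	/* Round kernel_end up to a page boundary and carve out two zeroed
	   pages: the first becomes the PMD page installed in the vmalloc
	   PGD slot, the second the first PTE page (used under SRM to map
	   the console).  */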
	two_pages = (void *)
	  (((unsigned long)kernel_end + ~PAGE_MASK) & PAGE_MASK);
	kernel_end = two_pages + 2*PAGE_SIZE;
	memset(two_pages, 0, 2*PAGE_SIZE);

	pgd = pgd_offset_k(VMALLOC_START);
	pgd_set(pgd, (pmd_t *)two_pages);
	pmd = pmd_offset(pgd, VMALLOC_START);
	pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));

	if (alpha_using_srm) {
		static struct vm_struct console_remap_vm;
		unsigned long vaddr = VMALLOC_START;
		unsigned long i, j;

		/* Set up the third level PTEs and update the virtual
		   addresses of the CRB entries.  */
		for (i = 0; i < crb->map_entries; ++i) {
			unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT;
			crb->map[i].va = vaddr;
			for (j = 0; j < crb->map[i].count; ++j) {
				/* Newer consoles (especially on larger
				   systems) may require more pages of
				   PTEs. Grab additional pages as needed. */
				if (pmd != pmd_offset(pgd, vaddr)) {
					memset(kernel_end, 0, PAGE_SIZE);
					pmd = pmd_offset(pgd, vaddr);
					pmd_set(pmd, (pte_t *)kernel_end);
					kernel_end += PAGE_SIZE;
				}
				set_pte(pte_offset_kernel(pmd, vaddr),
					pfn_pte(pfn, PAGE_KERNEL));
				pfn++;
				vaddr += PAGE_SIZE;
			}
		}

		/* Let vmalloc know that we've allocated some space.  */
		console_remap_vm.flags = VM_ALLOC;
		console_remap_vm.addr = (void *) VMALLOC_START;
		console_remap_vm.size = vaddr - VMALLOC_START;
		vmlist = &console_remap_vm;
	}

	callback_init_done = 1;
	return kernel_end;
}


#ifndef CONFIG_DISCONTIGMEM
/*
 * paging_init() sets up the memory map.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0, };
	unsigned long dma_pfn, high_pfn;

	dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	high_pfn = max_pfn = max_low_pfn;

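	/* Pages below the DMA limit go into ZONE_DMA; any memory above it
	   goes into ZONE_NORMAL.  */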
	if (dma_pfn >= high_pfn)
		zones_size[ZONE_DMA] = high_pfn;
	else {
		zones_size[ZONE_DMA] = dma_pfn;
		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
	}

	/* Initialize mem_map[].  */
	free_area_init(zones_size);

	/* Initialize the kernel's ZERO_PGE. */
	memset((void *)ZERO_PGE, 0, PAGE_SIZE);
}
#endif /* CONFIG_DISCONTIGMEM */

#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
void
srm_paging_stop (void)
{
	/* Move the vptb back to where the SRM console expects it.  */
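	/* SRM expects the virtual page table at 0x200000000, which is L1
	   slot 1 rather than the last slot that Linux uses.  */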
	swapper_pg_dir[1] = swapper_pg_dir[1023];
	tbia();
	wrvptptr(0x200000000UL);
	hwrpb->vptb = 0x200000000UL;
	hwrpb_update_checksum(hwrpb);

	/* Reload the page tables that the console had in use.  */
	load_PCB(&original_pcb);
	tbia();
}
#endif

#ifndef CONFIG_DISCONTIGMEM
static void __init
printk_memory_info(void)
{
	unsigned long codesize, reservedpages, datasize, initsize, tmp;
	extern int page_is_ram(unsigned long) __init;
	extern char _text, _etext, _data, _edata;
	extern char __init_begin, __init_end;

	/* printk all the information */
	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (page_is_ram(tmp) && PageReserved(mem_map+tmp))
			reservedpages++;

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_data;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, %luk data, %luk init)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
	       max_mapnr << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10);
}

void __init
mem_init(void)
{
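	/* Record the high-water marks and hand the remaining bootmem pages
	   over to the buddy allocator; free_all_bootmem() returns the
	   number of pages it released.  */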
	max_mapnr = num_physpages = max_low_pfn;
	totalram_pages += free_all_bootmem();
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	printk_memory_info();
}
#endif /* CONFIG_DISCONTIGMEM */

void
free_reserved_mem(void *start, void *end)
{
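	/* Walk the range a page at a time: clear the reserved bit, reset
	   the reference count, and release each page to the allocator.  */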
	void *__start = start;
	for (; __start < end; __start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(__start));
		init_page_count(virt_to_page(__start));
		free_page((long)__start);
		totalram_pages++;
	}
}

void
free_initmem(void)
{
	extern char __init_begin, __init_end;

	free_reserved_mem(&__init_begin, &__init_end);
	printk ("Freeing unused kernel memory: %ldk freed\n",
		(&__init_end - &__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void
free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_mem((void *)start, (void *)end);
	printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif