/*
 * arch/s390/mm/init.c
 *
 * S390 version
 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1995 Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));

char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
EXPORT_SYMBOL(empty_zero_page);

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	static const int ssm_mask = 0x04000000L;
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type;

	init_mm.pgd = swapper_pg_dir;
	S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
#ifdef CONFIG_64BIT
	/* A three level page table (4TB) is enough for the kernel space. */
	S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	pgd_type = _REGION3_ENTRY_EMPTY;
#else
	S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
	pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
	clear_table((unsigned long *) init_mm.pgd, pgd_type,
		    sizeof(unsigned long) * 2048);
	vmem_map_init();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	__raw_local_irq_ssm(ssm_mask);

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}

void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;

	max_mapnr = num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       max_mapnr << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10);
	printk("Write protected kernel read-only data: %#lx - %#lx\n",
	       (unsigned long)&_stext,
	       PFN_ALIGN((unsigned long)&_eshared) - 1);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long address;
	int i;

	for (i = 0; i < numpages; i++) {
		address = page_to_phys(page + i);
		pgd = pgd_offset_k(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		pte = pte_offset_kernel(pmd, address);
		if (!enable) {
			ptep_invalidate(&init_mm, address, pte);
			continue;
		}
		*pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
		/* Flush cpu write queue. */
		mb();
	}
}
#endif

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk("Freeing initrd memory: %ldk freed\n",
		       (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int rc;

	pgdat = NODE_DATA(nid);
	zone = pgdat->node_zones + ZONE_MOVABLE;
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;
	rc = __add_pages(nid, zone, PFN_DOWN(start), PFN_DOWN(size));
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}
#endif /* CONFIG_MEMORY_HOTPLUG */