#ifdef CONFIG_PPC64
#define PROVIDE32(x)	PROVIDE(__unused__##x)
#else
#define PROVIDE32(x)	PROVIDE(x)
#endif
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>

ENTRY(_stext)

#ifdef CONFIG_PPC64
OUTPUT_ARCH(powerpc:common64)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(powerpc:common)
jiffies = jiffies_64 + 4;
#endif
SECTIONS
{
	/* Sections to be discarded. */
	/DISCARD/ : {
		*(.exitcall.exit)
		EXIT_DATA
	}

	. = KERNELBASE;

/*
 * Text, read only data and other permanent read-only sections
 */

	/* Text and gots */
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		ALIGN_FUNCTION();
		*(.text.head)
		_text = .;
		*(.text .fixup .text.init.refok .exit.text.refok)
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT

#ifdef CONFIG_PPC32
		*(.got1)
		__got2_start = .;
		*(.got2)
		__got2_end = .;
#endif /* CONFIG_PPC32 */

		. = ALIGN(PAGE_SIZE);
		_etext = .;
		PROVIDE32 (etext = .);
	}

	/* Read-only data */
	RODATA

	/* Exception & bug tables */
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
		*(__ex_table)
		__stop___ex_table = .;
	}

	NOTES

	BUG_TABLE

/*
 * Init sections discarded at runtime
 */
	. = ALIGN(PAGE_SIZE);
	__init_begin = .;

	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
		_sinittext = .;
		INIT_TEXT
		_einittext = .;
	}

	/* .exit.text is discarded at runtime, not link time,
	 * to deal with references from __bug_table
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
		INIT_DATA
		__vtop_table_begin = .;
		*(.vtop_fixup);
		__vtop_table_end = .;
		__ptov_table_begin = .;
		*(.ptov_fixup);
		__ptov_table_end = .;
#ifdef CONFIG_PPC_ISERIES
		__dt_strings_start = .;
		*(.dt_strings);
		__dt_strings_end = .;
#endif
	}

	. = ALIGN(16);
	.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
		__setup_start = .;
		*(.init.setup)
		__setup_end = .;
	}

	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
		__initcall_start = .;
		INITCALLS
		__initcall_end = .;
	}

	.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
		__con_initcall_start = .;
		*(.con_initcall.init)
		__con_initcall_end = .;
	}

	SECURITY_INIT

	. = ALIGN(8);
	__ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
		__start___ftr_fixup = .;
		*(__ftr_fixup)
		__stop___ftr_fixup = .;
	}
#ifdef CONFIG_PPC64
	. = ALIGN(8);
	__fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
		__start___fw_ftr_fixup = .;
		*(__fw_ftr_fixup)
		__stop___fw_ftr_fixup = .;
	}
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	. = ALIGN(PAGE_SIZE);
	.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
		__initramfs_start = .;
		*(.init.ramfs)
		__initramfs_end = .;
	}
#endif
	. = ALIGN(PAGE_SIZE);
	.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
		__per_cpu_start = .;
		*(.data.percpu)
		*(.data.percpu.shared_aligned)
		__per_cpu_end = .;
	}

	. = ALIGN(8);
	.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
		__machine_desc_start = . ;
		*(.machine.desc)
		__machine_desc_end = . ;
	}

	/* freed after init ends here */
	. = ALIGN(PAGE_SIZE);
	__init_end = .;

/*
 * And now the various read/write data
 */

	. = ALIGN(PAGE_SIZE);
	_sdata = .;

#ifdef CONFIG_PPC32
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		DATA_DATA
		*(.sdata)
		*(.got.plt) *(.got)
	}
#else
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		DATA_DATA
		*(.data.rel*)
		*(.toc1)
		*(.branch_lt)
	}

	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
		*(.opd)
	}

	.got : AT(ADDR(.got) - LOAD_OFFSET) {
		__toc_start = .;
		*(.got)
		*(.toc)
	}
#endif

	. = ALIGN(PAGE_SIZE);
	_edata = .;
	PROVIDE32 (edata = .);

	/* The initial task and kernel stack */
#ifdef CONFIG_PPC32
	. = ALIGN(8192);
#else
	. = ALIGN(16384);
#endif
	.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
		*(.data.init_task)
	}

	. = ALIGN(PAGE_SIZE);
	.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
		*(.data.page_aligned)
	}

	.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
		*(.data.cacheline_aligned)
	}

	. = ALIGN(L1_CACHE_BYTES);
	.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
		*(.data.read_mostly)
	}

	. = ALIGN(PAGE_SIZE);
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		__nosave_begin = .;
		*(.data.nosave)
		. = ALIGN(PAGE_SIZE);
		__nosave_end = .;
	}

/*
 * And finally the bss
 */

	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.sbss) *(.scommon)
		*(.dynbss)
		*(.bss)
		*(COMMON)
		__bss_stop = .;
	}

	. = ALIGN(PAGE_SIZE);
	_end = . ;
	PROVIDE32 (end = .);
}