/*
 * ARC CPU startup Code
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Vineetg: Dec 2007
 * -Check if we are running on Simulator or on real hardware
 *  to skip certain things during boot on simulator
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
#include <asm/irqflags.h>

.macro CPU_EARLY_SETUP

	; Set up Vector Table (in case an exception happens in early boot)
	sr	@_int_vec_base_lds, [AUX_INTR_VEC_BASE]

	; Disable I-cache/D-cache if kernel so configured
	lr	r5, [ARC_REG_IC_BCR]
	breq	r5, 0, 1f		; I$ doesn't exist
	lr	r5, [ARC_REG_IC_CTRL]
#ifdef CONFIG_ARC_HAS_ICACHE
	bclr	r5, r5, 0		; 0 = Enable, 1 = Disable
#else
	bset	r5, r5, 0		; I$ exists, but is not used
#endif
	sr	r5, [ARC_REG_IC_CTRL]

1:
	lr	r5, [ARC_REG_DC_BCR]
	breq	r5, 0, 1f		; D$ doesn't exist
	lr	r5, [ARC_REG_DC_CTRL]
	bclr	r5, r5, 6		; Invalidate (discard w/o wback)
#ifdef CONFIG_ARC_HAS_DCACHE
	bclr	r5, r5, 0		; Enable (+Inv)
#else
	bset	r5, r5, 0		; Disable (+Inv)
#endif
	sr	r5, [ARC_REG_DC_CTRL]

1:

#ifdef CONFIG_ISA_ARCV2
	; Unaligned access is disabled at reset, so re-enable it early as
	; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned accesses
	; by default
	lr	r5, [status32]
	bset	r5, r5, STATUS_AD_BIT
	kflag	r5
#endif
.endm

	.section .init.text, "ax",@progbits

;----------------------------------------------------------------
; Default Reset Handler (jumped into from Reset vector)
; - Don't clobber r0,r1,r2 as they might carry u-boot provided args
; - Platforms can override this weak version if needed
;----------------------------------------------------------------
WEAK(res_service)
	j	stext
END(res_service)

;----------------------------------------------------------------
; Kernel Entry point
;----------------------------------------------------------------
ENTRY(stext)

	CPU_EARLY_SETUP

#ifdef CONFIG_SMP
	GET_CPU_ID r5
	cmp	r5, 0
	mov.nz	r0, r5			; retain non-zero cpu-id in r0
	bz	.Lmaster_proceed

	; Non-Masters wait for Master to boot enough and bring them up;
	; when they resume, tail-call the entry point
	mov	blink, @first_lines_of_secondary
	j	arc_platform_smp_wait_to_boot

.Lmaster_proceed:
#endif

	; Clear BSS before updating any globals
	; (word-at-a-time, using a zero overhead loop)
	mov	r5, __bss_start
	sub	r6, __bss_stop, r5
	lsr.f	lp_count, r6, 2
	lpnz	1f
	st.ab	0, [r5, 4]
1:

	; U-boot - kernel ABI
	;    r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
	;    r1 = magic number (always zero as of now)
	;    r2 = pointer to uboot provided cmdline or external DTB in mem
	; These are handled later in handle_uboot_args()
	st	r0, [@uboot_tag]
	st	r2, [@uboot_arg]

	; setup "current" tsk and optionally cache it in dedicated r25
	mov	r9, @init_task
	SET_CURR_TASK_ON_CPU r9, r0	; r9 = tsk, r0 = scratch

	; setup stack (fp, sp)
	mov	fp, 0

	; tsk->thread_info is really a PAGE, whose bottom hosts the stack
	GET_TSK_STACK_BASE r9, sp	; r9 = tsk, sp = stack base (output)

	j	start_kernel		; "C" entry point
END(stext)

#ifdef CONFIG_SMP

;----------------------------------------------------------------
; First lines of code run by secondary before jumping to 'C'
;----------------------------------------------------------------
	.section .text, "ax",@progbits
ENTRY(first_lines_of_secondary)

	; setup per-cpu idle task as "current" on this CPU
	ld	r0, [@secondary_idle_tsk]
	SET_CURR_TASK_ON_CPU r0, r1

	; setup stack (fp, sp)
	mov	fp, 0

	; set its stack base to tsk->thread_info bottom
	GET_TSK_STACK_BASE r0, sp

	j	start_kernel_secondary
END(first_lines_of_secondary)
#endif