/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/linkage.h>
#include <abi/entry.h>
#include <abi/pgtable-bits.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <linux/threads.h>
#include <asm/page.h>
#include <asm/thread_info.h>

#define PTE_INDX_MSK    0xffc
#define PTE_INDX_SHIFT  10
#define _PGDIR_SHIFT    22

.macro  tlbop_begin name, val0, val1, val2
ENTRY(csky_\name)
        mtcr    a3, ss2                 /* Stash scratch registers in ss2-ss4 */
        mtcr    r6, ss3
        mtcr    a2, ss4

        RD_PGDR r6                      /* r6 = PGD base */
        RD_MEH  a3                      /* a3 = faulting virtual address */
#ifdef CONFIG_CPU_HAS_TLBI
        tlbi.vaas a3
        sync.is

        btsti   a3, 31                  /* Kernel address? Use the kernel PGD */
        bf      1f
        RD_PGDR_K r6
1:
#else
        bgeni   a2, 31
        WR_MCIR a2
        bgeni   a2, 25
        WR_MCIR a2
#endif
        bclri   r6, 0
        lrw     a2, PHYS_OFFSET         /* Convert PGD base to a kernel vaddr */
        subu    r6, a2
        bseti   r6, 31

        mov     a2, a3                  /* Walk level 1: index the PGD */
        lsri    a2, _PGDIR_SHIFT
        lsli    a2, 2
        addu    r6, a2
        ldw     r6, (r6)

        lrw     a2, PHYS_OFFSET         /* Convert PTE table to a kernel vaddr */
        subu    r6, a2
        bseti   r6, 31

        lsri    a3, PTE_INDX_SHIFT      /* Walk level 2: index the PTE table */
        lrw     a2, PTE_INDX_MSK
        and     a3, a2
        addu    r6, a3
        ldw     a3, (r6)

        movi    a2, (_PAGE_PRESENT | \val0)
        and     a3, a2
        cmpne   a3, a2                  /* Not present/permitted: take slow path */
        bt      \name

        /* First access to the page: just update the PTE flags */
        ldw     a3, (r6)
        bgeni   a2, PAGE_VALID_BIT
        bseti   a2, PAGE_ACCESSED_BIT
        bseti   a2, \val1
        bseti   a2, \val2
        or      a3, a2
        stw     a3, (r6)

        /* Some CPUs' TLB hardware refill bypasses the cache */
#ifdef CONFIG_CPU_NEED_TLBSYNC
        movi    a2, 0x22
        bseti   a2, 6
        mtcr    r6, cr22
        mtcr    a2, cr17
        sync
#endif

        mfcr    a3, ss2
        mfcr    r6, ss3
        mfcr    a2, ss4
        rte
\name:
        mfcr    a3, ss2                 /* Slow path: restore scratch regs, then fault */
        mfcr    r6, ss3
        mfcr    a2, ss4
        SAVE_ALL EPC_KEEP
.endm
.macro  tlbop_end is_write
        RD_MEH  a2
        psrset  ee, ie
        mov     a0, sp
        movi    a1, \is_write
        jbsr    do_page_fault
        movi    r11_sig, 0              /* r11 = 0, not a syscall */
        jmpi    ret_from_exception
.endm
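
/*
 * For reference, the fast path that tlbop_begin implements corresponds
 * roughly to the C pseudocode below. This is an illustrative sketch
 * only, not kernel API: vaddr, pgd_kva, pte_kva and ent are names
 * invented for this comment, and the phys-to-virt conversion is
 * open-coded as "subtract PHYS_OFFSET, set bit 31", exactly as the
 * assembly above does.
 *
 *	unsigned long vaddr = MEH;                  // faulting address
 *	unsigned long *pgd_kva =
 *		(void *)(((PGDR & ~1UL) - PHYS_OFFSET) | (1UL << 31));
 *	unsigned long ent = pgd_kva[vaddr >> _PGDIR_SHIFT];
 *	unsigned long *pte_kva =
 *		(void *)((ent - PHYS_OFFSET) | (1UL << 31));
 *	unsigned long pte = pte_kva[(vaddr >> 12) & 0x3ff];
 *	if ((pte & (_PAGE_PRESENT | val0)) != (_PAGE_PRESENT | val0))
 *		goto slow_path;                     // SAVE_ALL + do_page_fault()
 *	pte |= (1UL << PAGE_VALID_BIT) | (1UL << PAGE_ACCESSED_BIT) |
 *	       (1UL << val1) | (1UL << val2);
 *	pte_kva[(vaddr >> 12) & 0x3ff] = pte;       // rte retries the access
 */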

.text

tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT
tlbop_end 0

tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
tlbop_end 1

tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
#ifndef CONFIG_CPU_HAS_LDSTEX
jbsr csky_cmpxchg_fixup
#endif
tlbop_end 1

ENTRY(csky_systemcall)
        SAVE_ALL EPC_INCREASE

        psrset  ee, ie

        lrw     r11, __NR_syscalls
        cmphs   syscallid, r11          /* Check the syscall number */
        bt      ret_from_exception

        lrw     r13, sys_call_table     /* Fetch the handler */
        ixw     r13, syscallid
        ldw     r11, (r13)
        cmpnei  r11, 0
        bf      ret_from_exception

        mov     r9, sp                  /* Get current thread_info */
        bmaski  r10, THREAD_SHIFT
        andn    r9, r10
        ldw     r8, (r9, TINFO_FLAGS)
        btsti   r8, TIF_SYSCALL_TRACE
        bt      1f
#if defined(__CSKYABIV2__)
        subi    sp, 8                   /* ABIv2: args 5/6 go on the stack */
        stw     r5, (sp, 0x4)
        stw     r4, (sp, 0x0)
        jsr     r11                     /* Do system call */
        addi    sp, 8
#else
        jsr     r11
#endif
        stw     a0, (sp, LSAVE_A0)      /* Save return value */
        jmpi    ret_from_exception

1:
        movi    a0, 0                   /* enter system call */
        mov     a1, sp                  /* sp = pt_regs pointer */
        jbsr    syscall_trace
        /* Reload the args before doing the system call */
        ldw     a0, (sp, LSAVE_A0)
        ldw     a1, (sp, LSAVE_A1)
        ldw     a2, (sp, LSAVE_A2)
        ldw     a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV2__)
        subi    sp, 8
        stw     r5, (sp, 0x4)
        stw     r4, (sp, 0x0)
#else
        ldw     r6, (sp, LSAVE_A4)
        ldw     r7, (sp, LSAVE_A5)
#endif
        jsr     r11                     /* Do system call */
#if defined(__CSKYABIV2__)
        addi    sp, 8
#endif
        stw     a0, (sp, LSAVE_A0)      /* Save return value */

        movi    a0, 1                   /* leave system call */
        mov     a1, sp                  /* sp points at pt_regs */
        jbsr    syscall_trace
        br      ret_from_exception
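
/*
 * Schematically, the untraced dispatch above does the following
 * (illustrative C pseudocode only; "handler" and "regs" are names
 * invented for this comment):
 *
 *	if (syscallid >= __NR_syscalls)
 *		goto ret_from_exception;
 *	handler = sys_call_table[syscallid];    // ixw scales the index by 4
 *	if (!handler)
 *		goto ret_from_exception;
 *	regs->a0 = handler(a0, a1, a2, a3, ...);
 *
 * On ABIv2, arguments 5 and 6 arrive in r4/r5 and are spilled to the
 * stack around the call. The traced path brackets the call with
 * syscall_trace(0, regs) and syscall_trace(1, regs), reloading the
 * argument registers from the saved pt_regs in between.
 */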

ENTRY(ret_from_kernel_thread)
        jbsr    schedule_tail
        mov     a0, r8
        jsr     r9
        jbsr    ret_from_exception

ENTRY(ret_from_fork)
        jbsr    schedule_tail
        mov     r9, sp                  /* Get current thread_info */
        bmaski  r10, THREAD_SHIFT
        andn    r9, r10
        ldw     r8, (r9, TINFO_FLAGS)
        movi    r11_sig, 1              /* r11 = 1, it is a syscall */
        btsti   r8, TIF_SYSCALL_TRACE
        bf      3f
        movi    a0, 1
        mov     a1, sp                  /* sp = pt_regs pointer */
        jbsr    syscall_trace
3:
        jbsr    ret_from_exception

ret_from_exception:
        ld      syscallid, (sp, LSAVE_PSR)
        btsti   syscallid, 31           /* Returning to kernel mode? */
        bt      1f

        /*
         * Mask the kernel stack pointer down to current's thread_info
         * so we can check its work flags below.
         */
        mov     r9, sp
        bmaski  r10, THREAD_SHIFT
        andn    r9, r10

resume_userspace:
        ldw     r8, (r9, TINFO_FLAGS)
        andi    r8, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
        cmpnei  r8, 0
        bt      exit_work
1:      RESTORE_ALL

exit_work:
        btsti   r8, TIF_NEED_RESCHED
        bt      work_resched
        /* If no work flags are left, RESTORE_ALL */
        cmpnei  r8, 0
        bf      1b
        mov     a1, sp
        mov     a0, r8
        mov     a2, r11_sig             /* syscall? */
        btsti   r8, TIF_SIGPENDING      /* delivering a signal? */
        clrt    r11_sig                 /* prevent further restarts (r11 = 0) */
        jbsr    do_notify_resume        /* do signals */
        br      resume_userspace

work_resched:
        lrw     syscallid, ret_from_exception
        mov     r15, syscallid          /* Return address in link register */
        jmpi    schedule

ENTRY(sys_rt_sigreturn)
        movi    r11_sig, 0
        jmpi    do_rt_sigreturn

ENTRY(csky_trap)
        SAVE_ALL EPC_KEEP
        psrset  ee
        movi    r11_sig, 0              /* r11 = 0, not a syscall */
        mov     a0, sp                  /* Pass the stack pointer as arg */
        jbsr    trap_c                  /* Call C-level trap handler */
        jmpi    ret_from_exception

/*
 * Prototype from libc for abiv1:
 * register unsigned int __result asm("a0");
 * asm( "trap 3" :"=r"(__result)::);
 */
ENTRY(csky_get_tls)
        USPTOKSP

        /* Advance epc so execution continues after the trap */
        mfcr    a0, epc
        INCTRAP a0
        mtcr    a0, epc

        /* Get current thread_info from the kernel stack pointer */
        bmaski  a0, THREAD_SHIFT
        not     a0
        subi    sp, 1
        and     a0, sp
        addi    sp, 1

        /* Get tls */
        ldw     a0, (a0, TINFO_TP_VALUE)

        KSPTOUSP
        rte

ENTRY(csky_irq)
        SAVE_ALL EPC_KEEP
        psrset  ee
        movi    r11_sig, 0              /* r11 = 0, not a syscall */

#ifdef CONFIG_PREEMPT
        mov     r9, sp                  /* Get current stack pointer */
        bmaski  r10, THREAD_SHIFT
        andn    r9, r10                 /* Get thread_info */

        /*
         * Bump thread_info->preempt_count to disable preemption
         * while the IRQ is handled.
         */
        ldw     r8, (r9, TINFO_PREEMPT)
        addi    r8, 1
        stw     r8, (r9, TINFO_PREEMPT)
#endif

        mov     a0, sp
        jbsr    csky_do_IRQ

#ifdef CONFIG_PREEMPT
        subi    r8, 1
        stw     r8, (r9, TINFO_PREEMPT)
        cmpnei  r8, 0
        bt      2f
        ldw     r8, (r9, TINFO_FLAGS)
        btsti   r8, TIF_NEED_RESCHED
        bf      2f
1:
        jbsr    preempt_schedule_irq    /* irq en/disable is done inside */
        ldw     r7, (r9, TINFO_FLAGS)   /* get the new task's TI_FLAGS */
        btsti   r7, TIF_NEED_RESCHED
        bt      1b                      /* go again */
#endif
2:
        jmpi    ret_from_exception

/*
 * a0 = prev task_struct *
 * a1 = next task_struct *
 * a0 = return next
 */
ENTRY(__switch_to)
        lrw     a3, TASK_THREAD
        addu    a3, a0

        mfcr    a2, psr                 /* Save PSR value */
        stw     a2, (a3, THREAD_SR)     /* Save PSR in task struct */
        bclri   a2, 6                   /* Disable interrupts */
        mtcr    a2, psr

        SAVE_SWITCH_STACK

        stw     sp, (a3, THREAD_KSP)

        /* Set up next process to run */
        lrw     a3, TASK_THREAD
        addu    a3, a1

        ldw     sp, (a3, THREAD_KSP)    /* Set next kernel sp */

        ldw     a2, (a3, THREAD_SR)     /* Set next PSR */
        mtcr    a2, psr

#if defined(__CSKYABIV2__)
        addi    r7, a1, TASK_THREAD_INFO
        ldw     tls, (r7, TINFO_TP_VALUE)
#endif

        RESTORE_SWITCH_STACK

        rts
ENDPROC(__switch_to)
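
/*
 * In rough C terms, __switch_to amounts to the following (an
 * illustrative sketch only; the field names mirror the asm-offsets
 * used above):
 *
 *	prev->thread.sr = mfcr(psr);            // save flags, then mask irqs
 *	SAVE_SWITCH_STACK;                      // push callee-saved registers
 *	prev->thread.ksp = sp;
 *	sp = next->thread.ksp;                  // adopt next's kernel stack
 *	mtcr(psr, next->thread.sr);
 *	tls = task_thread_info(next)->tp_value; // ABIv2 only
 *	RESTORE_SWITCH_STACK;                   // pop next's callee-saved regs
 *	rts;                                    // resume in next's context
 */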