/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_IRQFLAGS_H
#define __ASM_IRQFLAGS_H

#ifdef __KERNEL__

#include <asm/alternative.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

/*
 * Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and
 * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'dai'
 * order:
 * Masking debug exceptions causes all other exceptions to be masked too.
 * Masking SError masks irq, but not debug exceptions. Masking irqs has no
 * side effects for other flags. Keeping to this order makes it easier for
 * entry.S to know which exceptions should be unmasked.
 *
 * FIQ is never expected, but we mask it when we disable debug exceptions, and
 * unmask it at all other times.
 */

/*
 * CPU interrupt mask handling.
 *
 * Each routine below is patched at boot via ALTERNATIVE(): the first asm
 * sequence (DAIF-based masking) is the default; the second (GICv3 priority
 * masking through ICC_PMR_EL1) is used when the CPU has the
 * ARM64_HAS_IRQ_PRIO_MASKING capability. Both sequences must occupy the
 * same number of instructions, hence the explicit "nop" padding.
 */

/*
 * Unmask IRQs: clear PSTATE.I (daifclr #2), or raise the PMR to
 * GIC_PRIO_IRQON when priority masking is in use.
 *
 * NOTE(review): the "dsb sy" after the PMR write appears to be there to
 * ensure the redistributor observes the new priority mask before we
 * proceed — confirm against the GICv3 architecture requirements.
 */
static inline void arch_local_irq_enable(void)
{
	asm volatile(ALTERNATIVE(
		"msr	daifclr, #2		// arch_local_irq_enable\n"
		"nop",
		__msr_s(SYS_ICC_PMR_EL1, "%0")
		"dsb	sy",
		ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" ((unsigned long) GIC_PRIO_IRQON)
		: "memory");
}

/*
 * Mask IRQs: set PSTATE.I (daifset #2), or drop the PMR to GIC_PRIO_IRQOFF.
 * No barrier is needed on the PMR path here: a locally-taken interrupt
 * after this point is the only hazard, and the sysreg write is
 * self-synchronizing with respect to this CPU's exception entry.
 */
static inline void arch_local_irq_disable(void)
{
	asm volatile(ALTERNATIVE(
		"msr	daifset, #2		// arch_local_irq_disable",
		__msr_s(SYS_ICC_PMR_EL1, "%0"),
		ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" ((unsigned long) GIC_PRIO_IRQOFF)
		: "memory");
}

/*
 * Save the current interrupt enable state.
 *
 * Without priority masking the returned flags are simply the DAIF bits.
 * With priority masking the flags are a PMR value: the live PMR if
 * PSTATE.I is clear, or GIC_PRIO_IRQOFF if PSTATE.I is set (interrupts
 * are effectively off regardless of the PMR in that case).
 */
static inline unsigned long arch_local_save_flags(void)
{
	unsigned long daif_bits;
	unsigned long flags;

	daif_bits = read_sysreg(daif);

	/*
	 * The asm is logically equivalent to:
	 *
	 * if (system_uses_irq_prio_masking())
	 *	flags = (daif_bits & PSR_I_BIT) ?
	 *		GIC_PRIO_IRQOFF :
	 *		read_sysreg_s(SYS_ICC_PMR_EL1);
	 * else
	 *	flags = daif_bits;
	 */
	asm volatile(ALTERNATIVE(
			"mov	%0, %1\n"
			"nop\n"
			"nop",
			__mrs_s("%0", SYS_ICC_PMR_EL1)
			"ands	%1, %1, " __stringify(PSR_I_BIT) "\n"
			"csel	%0, %0, %2, eq",
			ARM64_HAS_IRQ_PRIO_MASKING)
		: "=&r" (flags), "+r" (daif_bits)
		: "r" ((unsigned long) GIC_PRIO_IRQOFF)
		: "memory");

	return flags;
}

/*
 * Save the current interrupt enable state, then mask IRQs.
 * Returns the flags to be handed back to arch_local_irq_restore().
 */
static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags;

	flags = arch_local_save_flags();

	arch_local_irq_disable();

	return flags;
}

/*
 * restore saved IRQ state
 *
 * 'flags' must come from arch_local_save_flags()/arch_local_irq_save():
 * either a DAIF value (default) or a PMR value (priority masking).
 */
static inline void arch_local_irq_restore(unsigned long flags)
{
	asm volatile(ALTERNATIVE(
			"msr	daif, %0\n"
			"nop",
			__msr_s(SYS_ICC_PMR_EL1, "%0")
			"dsb	sy",
			ARM64_HAS_IRQ_PRIO_MASKING)
		: "+r" (flags)
		:
		: "memory");
}

/*
 * Test whether a saved flags value represents "IRQs masked".
 *
 * Default: non-zero iff PSR_I_BIT is set in the flags.
 * Priority masking: 1 iff the saved PMR is <= GIC_PRIO_IRQOFF
 * (unsigned "ls" comparison), i.e. the mask admits no normal IRQs.
 */
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	int res;

	asm volatile(ALTERNATIVE(
			"and	%w0, %w1, #" __stringify(PSR_I_BIT) "\n"
			"nop",
			"cmp	%w1, #" __stringify(GIC_PRIO_IRQOFF) "\n"
			"cset	%w0, ls",
			ARM64_HAS_IRQ_PRIO_MASKING)
		: "=&r" (res)
		: "r" ((int) flags)
		: "memory");

	return res;
}
#endif
#endif