/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 by Waldorf GMBH, written by Ralf Baechle
 * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
 */
#ifndef _ASM_IRQ_H
#define _ASM_IRQ_H

#include <linux/linkage.h>
#include <linux/smp.h>
#include <linux/irqdomain.h>

#include <asm/mipsmtregs.h>

#include <irq.h>

#ifdef CONFIG_I8259
static inline int irq_canonicalize(int irq)
{
	return ((irq == I8259A_IRQ_BASE + 2) ? I8259A_IRQ_BASE + 9 : irq);
}
#else
#define irq_canonicalize(irq) (irq)	/* Sane hardware, sane code ... */
#endif
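
/*
 * Illustrative sketch, not part of this header: with a PC-style pair of
 * cascaded i8259s, IRQ 2 is the cascade input of the master PIC, so a
 * request for it really targets IRQ 9 on the slave.  Legacy ISA code
 * should therefore canonicalize before requesting the line; handler,
 * "mydev" and dev below are placeholders:
 *
 *	irq = irq_canonicalize(irq);
 *	if (request_irq(irq, handler, 0, "mydev", dev))
 *		pr_err("mydev: could not claim IRQ %d\n", irq);
 */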

#ifdef CONFIG_MIPS_MT_SMTC

struct irqaction;

extern unsigned long irq_hwmask[];
extern int setup_irq_smtc(unsigned int irq, struct irqaction *new,
			  unsigned long hwmask);

static inline void smtc_im_ack_irq(unsigned int irq)
{
	if (irq_hwmask[irq] & ST0_IM)
		set_c0_status(irq_hwmask[irq] & ST0_IM);
}

#else

static inline void smtc_im_ack_irq(unsigned int irq)
{
}

#endif /* CONFIG_MIPS_MT_SMTC */

#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
#include <linux/cpumask.h>

extern int plat_set_irq_affinity(struct irq_data *d,
				 const struct cpumask *affinity, bool force);
extern void smtc_forward_irq(struct irq_data *d);

/*
 * IRQ affinity hook invoked at the beginning of interrupt dispatch
 * if the option is enabled.
 *
 * Up through Linux 2.6.22 (at least) cpumask operations are very
 * inefficient on MIPS.  Initial prototypes of SMTC IRQ affinity
 * used a "fast path" per-IRQ-descriptor cache of affinity information
 * to reduce latency.  As there is a project afoot to optimize the
 * cpumask implementations, this version is optimistically assuming
 * that cpumask.h macro overhead is reasonable during interrupt dispatch.
 */
static inline int handle_on_other_cpu(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);

	if (cpumask_test_cpu(smp_processor_id(), d->affinity))
		return 0;
	smtc_forward_irq(d);
	return 1;
}

#else /* Not doing SMTC affinity */

static inline int handle_on_other_cpu(unsigned int irq) { return 0; }

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */

#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP

/*
 * Clear the interrupt mask handling "backstop" if the irq_hwmask
 * entry so indicates.  This implies that the ack() or end()
 * functions will take over re-enabling the low-level mask.
 * Otherwise it will be done on return from exception.
 */
static inline void smtc_im_backstop(unsigned int irq)
{
	if (irq_hwmask[irq] & 0x0000ff00)
		write_c0_tccontext(read_c0_tccontext() &
				   ~(irq_hwmask[irq] & 0x0000ff00));
}

static inline int smtc_handle_on_other_cpu(unsigned int irq)
{
	int ret = handle_on_other_cpu(irq);

	if (!ret)
		smtc_im_backstop(irq);
	return ret;
}

#else

static inline void smtc_im_backstop(unsigned int irq) { }
static inline int smtc_handle_on_other_cpu(unsigned int irq)
{
	return handle_on_other_cpu(irq);
}

#endif
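
/*
 * Illustrative sketch, not part of this header: a dispatch routine in
 * the style of do_IRQ() would use the helpers above to forward the
 * interrupt when this CPU is not in the descriptor's affinity mask,
 * and otherwise to clear the IM backstop before handling it locally:
 *
 *	irq_enter();
 *	if (!smtc_handle_on_other_cpu(irq))
 *		generic_handle_irq(irq);
 *	irq_exit();
 */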

extern void do_IRQ(unsigned int irq);

#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF

extern void do_IRQ_no_affinity(unsigned int irq);

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */

extern void arch_init_irq(void);
extern void spurious_interrupt(void);

extern int allocate_irqno(void);
extern void alloc_legacy_irqno(void);
extern void free_irqno(unsigned int irq);

/*
 * Before R2 the timer and performance counter interrupts were both
 * hard-wired to CPU interrupt 7 (IE7).  Since R2 their numbers have to
 * be read from the IPTI and IPPCI fields of the c0_intctl register.
 */
#define CP0_LEGACY_COMPARE_IRQ 7

extern int cp0_compare_irq;
extern int cp0_compare_irq_shift;
extern int cp0_perfcount_irq;

#endif /* _ASM_IRQ_H */
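
/*
 * Illustrative sketch, not part of this header: platform/trap setup code
 * for an R2 core would typically derive the numbers above from IntCtl,
 * falling back to CP0_LEGACY_COMPARE_IRQ on older cores:
 *
 *	cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
 *	cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
 *
 * INTCTLB_IPTI and INTCTLB_IPPCI are the IntCtl field shifts defined in
 * <asm/mipsregs.h>.
 */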