/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 by Waldorf GMBH, written by Ralf Baechle
 * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
 */
#ifndef _ASM_IRQ_H
#define _ASM_IRQ_H

#include <linux/linkage.h>

#include <asm/mipsmtregs.h>

#include <irq.h>

#ifdef CONFIG_I8259
static inline int irq_canonicalize(int irq)
{
	return ((irq == I8259A_IRQ_BASE + 2) ? I8259A_IRQ_BASE + 9 : irq);
}
#else
#define irq_canonicalize(irq) (irq)	/* Sane hardware, sane code ... */
#endif

#ifdef CONFIG_MIPS_MT_SMTC

struct irqaction;

extern unsigned long irq_hwmask[];
extern int setup_irq_smtc(unsigned int irq, struct irqaction *new,
			  unsigned long hwmask);

static inline void smtc_im_ack_irq(unsigned int irq)
{
	if (irq_hwmask[irq] & ST0_IM)
		set_c0_status(irq_hwmask[irq] & ST0_IM);
}

#else

static inline void smtc_im_ack_irq(unsigned int irq)
{
}

#endif /* CONFIG_MIPS_MT_SMTC */

#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
#include <linux/cpumask.h>

extern void plat_set_irq_affinity(unsigned int irq,
				  const struct cpumask *affinity);
extern void smtc_forward_irq(unsigned int irq);

/*
 * IRQ affinity hook invoked at the beginning of interrupt dispatch
 * if option is enabled.
 *
 * Up through Linux 2.6.22 (at least) cpumask operations are very
 * inefficient on MIPS. Initial prototypes of SMTC IRQ affinity
 * used a "fast path" per-IRQ-descriptor cache of affinity information
 * to reduce latency. As there is a project afoot to optimize the
 * cpumask implementations, this version is optimistically assuming
 * that cpumask.h macro overhead is reasonable during interrupt dispatch.
 */
#define IRQ_AFFINITY_HOOK(irq) \
do { \
	if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) { \
		smtc_forward_irq(irq); \
		irq_exit(); \
		return; \
	} \
} while (0)

#else /* Not doing SMTC affinity */

#define IRQ_AFFINITY_HOOK(irq) do { } while (0)

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */

#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP

/*
 * Clear the interrupt mask handling "backstop" if the irq_hwmask
 * entry so indicates. This implies that the ack() or end()
 * functions will take over re-enabling the low-level mask.
 * Otherwise it will be done on return from exception.
 */
#define __DO_IRQ_SMTC_HOOK(irq) \
do { \
	IRQ_AFFINITY_HOOK(irq); \
	if (irq_hwmask[irq] & 0x0000ff00) \
		write_c0_tccontext(read_c0_tccontext() & \
				   ~(irq_hwmask[irq] & 0x0000ff00)); \
} while (0)

#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq) \
do { \
	if (irq_hwmask[irq] & 0x0000ff00) \
		write_c0_tccontext(read_c0_tccontext() & \
				   ~(irq_hwmask[irq] & 0x0000ff00)); \
} while (0)

#else

#define __DO_IRQ_SMTC_HOOK(irq) \
do { \
	IRQ_AFFINITY_HOOK(irq); \
} while (0)
#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq) do { } while (0)

#endif

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * Ideally there should be a way to get this into kernel/irq/handle.c to
 * avoid the overhead of a call for just a tiny function ...
 */
#define do_IRQ(irq) \
do { \
	irq_enter(); \
	__DO_IRQ_SMTC_HOOK(irq); \
	generic_handle_irq(irq); \
	irq_exit(); \
} while (0)

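/*
 * Usage sketch (illustrative only, not part of this interface): a
 * platform's plat_irq_dispatch() routine typically decodes the pending
 * CP0 Cause/Status bits and hands each decoded source to do_IRQ().
 * The mapping of cause bits to IRQ numbers shown here is hypothetical;
 * each board wires up its own sources:
 *
 *	asmlinkage void plat_irq_dispatch(void)
 *	{
 *		unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
 *
 *		if (pending & CAUSEF_IP7)
 *			do_IRQ(MIPS_CPU_IRQ_BASE + 7);	// e.g. CP0 timer
 *		else if (pending & CAUSEF_IP2)
 *			do_IRQ(MIPS_CPU_IRQ_BASE + 2);	// e.g. cascaded controller
 *		else
 *			spurious_interrupt();
 *	}
 */
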
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
 * To avoid inefficient and in some cases pathological re-checking of
 * IRQ affinity, we have this variant that skips the affinity check.
 */

#define do_IRQ_no_affinity(irq) \
do { \
	irq_enter(); \
	__NO_AFFINITY_IRQ_SMTC_HOOK(irq); \
	generic_handle_irq(irq); \
	irq_exit(); \
} while (0)

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */

extern void arch_init_irq(void);
extern void spurious_interrupt(void);

extern int allocate_irqno(void);
extern void alloc_legacy_irqno(void);
extern void free_irqno(unsigned int irq);

/*
 * Before R2 the timer and performance counter interrupts were both fixed to
 * IE7. Since R2 their numbers have to be read from the c0_intctl register.
 */
#define CP0_LEGACY_COMPARE_IRQ 7

extern int cp0_compare_irq;
extern int cp0_perfcount_irq;

#endif /* _ASM_IRQ_H */
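
/*
 * Illustrative sketch only (the real assignments live in the per-CPU trap
 * setup code, not in this header): on an R2 core the c0_intctl IPTI field
 * (bits 31:29) and IPPCI field (bits 28:26) report which interrupt pins the
 * timer and performance counters use, so cp0_compare_irq and
 * cp0_perfcount_irq would typically be derived along these lines:
 *
 *	if (cpu_has_mips_r2) {
 *		cp0_compare_irq = (read_c0_intctl() >> 29) & 7;
 *		cp0_perfcount_irq = (read_c0_intctl() >> 26) & 7;
 *	} else {
 *		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
 *		cp0_perfcount_irq = -1;
 *	}
 */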