/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 by Waldorf GMBH, written by Ralf Baechle
 * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
 */
#ifndef _ASM_IRQ_H
#define _ASM_IRQ_H

#include <linux/linkage.h>
#include <linux/smp.h>

#include <asm/mipsmtregs.h>

#include <irq.h>

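/*
 * With an i8259-style PIC the slave controller is cascaded into master
 * input 2, so a request raised on IRQ 2 is actually delivered on IRQ 9;
 * irq_canonicalize() folds the two together, following the classic
 * PC/AT convention.  Without an i8259 no remapping is needed.
 */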
#ifdef CONFIG_I8259
static inline int irq_canonicalize(int irq)
{
        return ((irq == I8259A_IRQ_BASE + 2) ? I8259A_IRQ_BASE + 9 : irq);
}
#else
#define irq_canonicalize(irq) (irq)     /* Sane hardware, sane code ... */
#endif

#ifdef CONFIG_MIPS_MT_SMTC

struct irqaction;

extern unsigned long irq_hwmask[];
extern int setup_irq_smtc(unsigned int irq, struct irqaction *new,
                          unsigned long hwmask);

static inline void smtc_im_ack_irq(unsigned int irq)
{
        if (irq_hwmask[irq] & ST0_IM)
                set_c0_status(irq_hwmask[irq] & ST0_IM);
}

#else

static inline void smtc_im_ack_irq(unsigned int irq)
{
}

#endif /* CONFIG_MIPS_MT_SMTC */

#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
#include <linux/cpumask.h>

extern int plat_set_irq_affinity(unsigned int irq,
                                 const struct cpumask *affinity);
extern void smtc_forward_irq(unsigned int irq);

/*
 * IRQ affinity hook invoked at the beginning of interrupt dispatch
 * if option is enabled.
 *
 * Up through Linux 2.6.22 (at least) cpumask operations are very
 * inefficient on MIPS.  Initial prototypes of SMTC IRQ affinity
 * used a "fast path" per-IRQ-descriptor cache of affinity information
 * to reduce latency.  As there is a project afoot to optimize the
 * cpumask implementations, this version is optimistically assuming
 * that cpumask.h macro overhead is reasonable during interrupt dispatch.
 */
#define IRQ_AFFINITY_HOOK(irq)                                          \
do {                                                                    \
        if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity)) {\
                smtc_forward_irq(irq);                                  \
                irq_exit();                                             \
                return;                                                 \
        }                                                               \
} while (0)

#else /* Not doing SMTC affinity */

#define IRQ_AFFINITY_HOOK(irq) do { } while (0)

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */

#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP

/*
 * Clear interrupt mask handling "backstop" if irq_hwmask
 * entry so indicates.  This implies that the ack() or end()
 * functions will take over re-enabling the low-level mask.
 * Otherwise it will be done on return from exception.
 */
#define __DO_IRQ_SMTC_HOOK(irq)                                         \
do {                                                                    \
        IRQ_AFFINITY_HOOK(irq);                                         \
        if (irq_hwmask[irq] & 0x0000ff00)                               \
                write_c0_tccontext(read_c0_tccontext() &                \
                                   ~(irq_hwmask[irq] & 0x0000ff00));    \
} while (0)

#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq)                                \
do {                                                                    \
        if (irq_hwmask[irq] & 0x0000ff00)                               \
                write_c0_tccontext(read_c0_tccontext() &                \
                                   ~(irq_hwmask[irq] & 0x0000ff00));    \
} while (0)

#else

#define __DO_IRQ_SMTC_HOOK(irq)                                         \
do {                                                                    \
        IRQ_AFFINITY_HOOK(irq);                                         \
} while (0)
#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq) do { } while (0)

#endif

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * Ideally there should be a way to get this into kernel/irq/handle.c to
 * avoid the overhead of a call for just a tiny function ...
 */
#define do_IRQ(irq)                                                     \
do {                                                                    \
        irq_enter();                                                    \
        __DO_IRQ_SMTC_HOOK(irq);                                        \
        generic_handle_irq(irq);                                        \
        irq_exit();                                                     \
} while (0)
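
/*
 * Illustrative sketch only, not part of the original header: platform
 * code normally provides plat_irq_dispatch(), called from the low-level
 * interrupt exception vector, which decodes the pending Cause/Status
 * bits and feeds each pending source to do_IRQ().  The exact decoding
 * below is board specific, and MIPS_CPU_IRQ_BASE is assumed to come
 * from the platform's <irq.h>:
 *
 *      asmlinkage void plat_irq_dispatch(void)
 *      {
 *              unsigned int pending = read_c0_cause() &
 *                                     read_c0_status() & ST0_IM;
 *
 *              if (pending & CAUSEF_IP7)
 *                      do_IRQ(MIPS_CPU_IRQ_BASE + 7);
 *              else if (pending & CAUSEF_IP2)
 *                      do_IRQ(MIPS_CPU_IRQ_BASE + 2);
 *              else
 *                      spurious_interrupt();
 *      }
 */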

#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
 * To avoid inefficient and in some cases pathological re-checking of
 * IRQ affinity, we have this variant that skips the affinity check.
 */

#define do_IRQ_no_affinity(irq)                                         \
do {                                                                    \
        irq_enter();                                                    \
        __NO_AFFINITY_IRQ_SMTC_HOOK(irq);                               \
        generic_handle_irq(irq);                                        \
        irq_exit();                                                     \
} while (0)

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */

extern void arch_init_irq(void);
extern void spurious_interrupt(void);

extern int allocate_irqno(void);
extern void alloc_legacy_irqno(void);
extern void free_irqno(unsigned int irq);

/*
 * Before R2 the timer and performance counter interrupts were both fixed to
 * IE7.  Since R2 their number has to be read from the c0_intctl register.
 */
#define CP0_LEGACY_COMPARE_IRQ 7

extern int cp0_compare_irq;
extern int cp0_perfcount_irq;

#endif /* _ASM_IRQ_H */