/*
 *	linux/arch/ia64/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * Copyright (C) Ashok Raj <ashok.raj@intel.com>, Intel Corporation 2004
 *
 * 4/14/2004: Added code to handle cpu migration and do safe irq
 *	      migration without losing interrupts for iosapic
 *	      architecture.
 */

#include <asm/delay.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <asm/mca.h>

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
/*
 * Handler invoked by the generic IRQ layer for an interrupt that arrived
 * on a vector nobody registered for: just log it.  We run with the vector
 * already acknowledged in hardware, so there is nothing else to do here.
 */
void ack_bad_irq(unsigned int irq)
{
	printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
}

#ifdef CONFIG_IA64_GENERIC
/* Map a Linux irq number to its ia64 hardware vector via the irq_cfg table. */
ia64_vector __ia64_irq_to_vector(int irq)
{
	return irq_cfg[irq].vector;
}

/*
 * Reverse mapping: look up the irq bound to a hardware vector on the
 * current CPU, using this CPU's vector_irq per-cpu translation table.
 */
unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
{
	return __get_cpu_var(vector_irq)[vec];
}
#endif

/*
 * Interrupt statistics:
 */

/* Count of spurious/erroneous interrupts, reported via /proc/interrupts. */
atomic_t irq_err_count;

/*
 * /proc/interrupts printing: emit the architecture-specific "ERR" line
 * (the accumulated error count above).  Always succeeds.
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
	return 0;
}

#ifdef CONFIG_SMP
/*
 * Per-irq redirection hint recorded by set_irq_affinity_info();
 * presumably consumed when reprogramming the iosapic RTE — not visible
 * from this file alone, so confirm against the iosapic code.
 */
static char irq_redir [NR_IRQS]; // = { [0 ...
/*
 * Record the affinity (single CPU identified by hardware id 'hwid') and the
 * redirection flag for 'irq'.  Out-of-range irq numbers are silently ignored.
 * Only the low 8 bits of 'redir' are kept.
 */
void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
{
	if (irq < NR_IRQS) {
		cpumask_copy(irq_get_irq_data(irq)->affinity,
			     cpumask_of(cpu_logical_id(hwid)));
		irq_redir[irq] = (char) (redir & 0xff);
	}
}

/*
 * Validate a user-supplied smp_affinity mask.  On sn2 the platform can only
 * target one CPU per interrupt, so exactly one bit must be set; every other
 * platform accepts any mask.
 */
bool is_affinity_mask_valid(const struct cpumask *cpumask)
{
	if (ia64_platform_is("sn2")) {
		/* Only allow one CPU to be specified in the smp_affinity mask */
		if (cpumask_weight(cpumask) != 1)
			return false;
	}
	return true;
}

#endif /* CONFIG_SMP */

/* Early arch hook in generic IRQ init: set up the MCA interrupts. */
int __init arch_early_irq_init(void)
{
	ia64_mca_irq_init();
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Phase-1/phase-3 handshake for CPU offline: migrate_irqs() marks each irq
 * it re-targets here (slot holds the irq number, 0 = untouched) so that
 * fixup_irqs() can replay any interrupt that may have been lost while its
 * RTE was being reprogrammed.
 */
unsigned int vectors_in_migration[NR_IRQS];

/*
 * Since cpu_online_mask is already updated, we just need to check for
 * affinity that has zeros
 */
static void migrate_irqs(void)
{
	int irq, new_cpu;

	for (irq=0; irq < NR_IRQS; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *data = irq_desc_get_irq_data(desc);
		struct irq_chip *chip = irq_data_get_irq_chip(data);

		/* Disabled irqs cannot fire; nothing to migrate. */
		if (irqd_irq_disabled(data))
			continue;

		/*
		 * No handling for now.
		 * TBD: Implement a disable function so we can now
		 * tell CPU not to respond to these local intr sources.
		 * such as ITV,CPEI,MCA etc.
		 */
		if (irqd_is_per_cpu(data))
			continue;

		/* Affinity no longer intersects the online set: re-target it. */
		if (cpumask_any_and(data->affinity, cpu_online_mask)
		    >= nr_cpu_ids) {
			/*
			 * Save it for phase 2 processing
			 */
			vectors_in_migration[irq] = irq;

			new_cpu = cpumask_any(cpu_online_mask);

			/*
			 * All three are essential, currently WARN_ON.. maybe panic?
			 */
			if (chip && chip->irq_disable &&
				chip->irq_enable && chip->irq_set_affinity) {
				chip->irq_disable(data);
				chip->irq_set_affinity(data,
						       cpumask_of(new_cpu), false);
				chip->irq_enable(data);
			} else {
				WARN_ON((!chip || !chip->irq_disable ||
					 !chip->irq_enable ||
					 !chip->irq_set_affinity));
			}
		}
	}
}

/*
 * Run on a CPU that is going offline: hand its irqs to surviving CPUs,
 * drain anything already latched locally, replay what may have been lost
 * during RTE reprogramming, and finally shut interrupts off so the
 * processor can die.
 */
void fixup_irqs(void)
{
	unsigned int irq;
	extern void ia64_process_pending_intr(void);
	extern volatile int time_keeper_id;

	/* Mask ITV to disable timer */
	ia64_set_itv(1 << 16);

	/*
	 * Find a new timesync master if this CPU currently holds the role.
	 */
	if (smp_processor_id() == time_keeper_id) {
		time_keeper_id = cpumask_first(cpu_online_mask);
		printk ("CPU %d is now promoted to time-keeper master\n", time_keeper_id);
	}

	/*
	 * Phase 1: Locate IRQs bound to this cpu and
	 * relocate them for cpu removal.
	 */
	migrate_irqs();

	/*
	 * Phase 2: Perform interrupt processing for all entries reported in
	 * local APIC.
	 */
	ia64_process_pending_intr();

	/*
	 * Phase 3: Now handle any interrupts not captured in local APIC.
	 * This is to account for cases that device interrupted during the time the
	 * rte was being disabled and re-programmed.
	 */
	for (irq=0; irq < NR_IRQS; irq++) {
		if (vectors_in_migration[irq]) {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			vectors_in_migration[irq]=0;
			generic_handle_irq(irq);
			set_irq_regs(old_regs);
		}
	}

	/*
	 * Now let processor die. We do irq disable and max_xtp() to
	 * ensure there is no more interrupts routed to this processor.
	 * But the local timer interrupt can have 1 pending which we
	 * take care in timer_interrupt().
	 */
	max_xtp();
	local_irq_disable();
}
#endif