/*
 * linux/arch/ia64/kernel/irq.c
 *
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * Copyright (C) Ashok Raj <ashok.raj@intel.com>, Intel Corporation 2004
 *
 * 4/14/2004: Added code to handle cpu migration and do safe irq
 *			migration without losing interrupts for iosapic
 *			architecture.
 */

#include <asm/delay.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
	printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n",
	       irq, smp_processor_id());
}

#ifdef CONFIG_IA64_GENERIC
ia64_vector __ia64_irq_to_vector(int irq)
{
	return irq_cfg[irq].vector;
}

unsigned int __ia64_local_vector_to_irq(ia64_vector vec)
{
	return __get_cpu_var(vector_irq)[vec];
}
#endif

/*
 * Interrupt statistics:
 */

atomic_t irq_err_count;

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		char cpuname[16];
		seq_printf(p, "     ");
		for_each_online_cpu(j) {
			snprintf(cpuname, sizeof(cpuname), "CPU%d", j);
			seq_printf(p, "%10s ", cpuname);
		}
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j) {
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
		}
#endif
		seq_printf(p, " %14s", irq_desc[i].chip->name);
		seq_printf(p, " %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS)
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
	return 0;
}

#ifdef CONFIG_SMP
static char irq_redir[NR_IRQS]; /* = { [0 ... NR_IRQS-1] = 1 }; */

void set_irq_affinity_info(unsigned int irq, int hwid, int redir)
{
	if (irq < NR_IRQS) {
		cpumask_copy(irq_desc[irq].affinity,
			     cpumask_of(cpu_logical_id(hwid)));
		irq_redir[irq] = (char) (redir & 0xff);
	}
}

bool is_affinity_mask_valid(const struct cpumask *cpumask)
{
	if (ia64_platform_is("sn2")) {
		/* Only allow one CPU to be specified in the smp_affinity mask */
		if (cpumask_weight(cpumask) != 1)
			return false;
	}
	return true;
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_HOTPLUG_CPU
/*
 * IRQs noted here during phase 1 of fixup_irqs() are replayed in
 * phase 3, in case the device interrupted while its RTE was being
 * re-programmed.
 */
unsigned int vectors_in_migration[NR_IRQS];

/*
 * Since cpu_online_mask is already updated, we just need to find the
 * IRQs whose affinity no longer includes any online CPU.
 */
static void migrate_irqs(void)
{
	int irq, new_cpu;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *data = irq_desc_get_irq_data(desc);
		struct irq_chip *chip = irq_data_get_irq_chip(data);

		if (desc->status & IRQ_DISABLED)
			continue;

		/*
		 * No handling for now.
		 * TBD: Implement a disable function so we can tell the
		 * CPU not to respond to these local intr sources,
		 * such as ITV, CPEI, MCA etc.
		 */
		if (irqd_is_per_cpu(data))
			continue;

		if (cpumask_any_and(data->affinity, cpu_online_mask)
		    >= nr_cpu_ids) {
			/*
			 * Save it, so fixup_irqs() can replay it in
			 * phase 3 below.
			 */
			vectors_in_migration[irq] = irq;

			new_cpu = cpumask_any(cpu_online_mask);

			/*
			 * All three are essential, currently WARN_ON.. maybe panic?
			 */
			if (chip && chip->irq_disable &&
			    chip->irq_enable && chip->irq_set_affinity) {
				chip->irq_disable(data);
				chip->irq_set_affinity(data,
						       cpumask_of(new_cpu), false);
				chip->irq_enable(data);
			} else {
				WARN_ON((!chip || !chip->irq_disable ||
					 !chip->irq_enable ||
					 !chip->irq_set_affinity));
			}
		}
	}
}

void fixup_irqs(void)
{
	unsigned int irq;
	extern void ia64_process_pending_intr(void);
	extern volatile int time_keeper_id;

	/* Mask ITV to disable timer */
	ia64_set_itv(1 << 16);

	/*
	 * Find a new timesync master
	 */
	if (smp_processor_id() == time_keeper_id) {
		time_keeper_id = cpumask_first(cpu_online_mask);
		printk(KERN_INFO "CPU %d is now promoted to time-keeper master\n",
		       time_keeper_id);
	}

	/*
	 * Phase 1: Locate IRQs bound to this cpu and
	 * relocate them for cpu removal.
	 */
	migrate_irqs();

	/*
	 * Phase 2: Perform interrupt processing for all entries reported in
	 * local APIC.
	 */
	ia64_process_pending_intr();

	/*
	 * Phase 3: Now handle any interrupts not captured in the local
	 * APIC. This accounts for cases where a device interrupted
	 * while its RTE was being disabled and re-programmed.
	 */
	for (irq = 0; irq < NR_IRQS; irq++) {
		if (vectors_in_migration[irq]) {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			vectors_in_migration[irq] = 0;
			generic_handle_irq(irq);
			set_irq_regs(old_regs);
		}
	}

	/*
	 * Now let the processor die. We do irq disable and max_xtp() to
	 * ensure no more interrupts are routed to this processor.
	 * But the local timer interrupt can have one pending, which we
	 * take care of in timer_interrupt().
	 */
	max_xtp();
	local_irq_disable();
}
#endif
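
/*
 * Illustrative sketch, not part of the original file: the header
 * comment above says drivers should ask for IRQs through the generic
 * routines rather than grabbing vectors directly. A minimal example of
 * that pattern follows, guarded out of compilation; the IRQ number,
 * name, handler and dev_id cookie are hypothetical placeholders, not
 * anything ia64-specific.
 */
#if 0	/* example only */
static irqreturn_t example_handler(int irq, void *dev_id)
{
	/* A real handler would acknowledge its device here. */
	return IRQ_HANDLED;
}

static int example_setup(void)
{
	/*
	 * 42 and "example" stand in for a real device's IRQ and name;
	 * a shared IRQ needs a unique non-NULL dev_id cookie.
	 */
	return request_irq(42, example_handler, IRQF_SHARED,
			   "example", &irq_err_count);
}
#endif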