/*
 * linux/drivers/s390/cio/thinint_qdio.c
 *
 * thin interrupt support for qdio
 *
 * Copyright 2000-2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/io.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/airq.h>
#include <asm/isc.h>

#include "cio.h"
#include "ioasm.h"
#include "qdio.h"
#include "qdio_debug.h"
#include "qdio_perf.h"

/*
 * Restriction: only 63 iqdio subchannels can have their own indicator;
 * all subsequent subchannels share one indicator.
 */
#define TIQDIO_NR_NONSHARED_IND		63
#define TIQDIO_NR_INDICATORS		(TIQDIO_NR_NONSHARED_IND + 1)
#define TIQDIO_SHARED_IND		63

/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);
DEFINE_MUTEX(tiq_list_lock);

/* adapter local summary indicator */
static unsigned char *tiqdio_alsi;

/* device state change indicators */
struct indicator_t {
	u32 ind;	/* u32 because of compare-and-swap performance */
	atomic_t count; /* use count, 0 or 1 for non-shared indicators */
};
static struct indicator_t *q_indicators;

static void tiqdio_tasklet_fn(unsigned long data);
static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0);

static int css_qdio_omit_svs;

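/*
 * Hand-coded SVS: function code 3 in register 1 clears the global
 * summary; the value left in register 3 is returned to the caller.
 * See the comment in tiqdio_thinint_handler() on when this is needed.
 */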
static inline unsigned long do_clear_global_summary(void)
{
	register unsigned long __fn asm("1") = 3;
	register unsigned long __tmp asm("2");
	register unsigned long __time asm("3");

	asm volatile(
		"	.insn	rre,0xb2650000,2,0"
		: "+d" (__fn), "=d" (__tmp), "=d" (__time));
	return __time;
}

/* returns addr for the device state change indicator */
static u32 *get_indicator(void)
{
	int i;

	for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
		if (!atomic_read(&q_indicators[i].count)) {
			atomic_set(&q_indicators[i].count, 1);
			return &q_indicators[i].ind;
		}

	/* use the shared indicator */
	atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
	return &q_indicators[TIQDIO_SHARED_IND].ind;
}

static void put_indicator(u32 *addr)
{
	int i;

	if (!addr)
		return;
	i = ((unsigned long)addr - (unsigned long)q_indicators) /
		sizeof(struct indicator_t);
	atomic_dec(&q_indicators[i].count);
}

void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	/* No TDD facility? If we must use SIGA-s we can also omit SVS. */
	if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
		css_qdio_omit_svs = 1;

	mutex_lock(&tiq_list_lock);
	for_each_input_queue(irq_ptr, q, i)
		list_add_rcu(&q->entry, &tiq_list);
	mutex_unlock(&tiq_list_lock);
	xchg(irq_ptr->dsci, 1);
}

/*
 * We cannot stop the tiqdio tasklet here since it is for all
 * thinint qdio devices and it must run as long as there is a
 * thinint device left.
 */
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for (i = 0; i < irq_ptr->nr_input_qs; i++) {
		q = irq_ptr->input_qs[i];
		/* if establish triggered an error */
		if (!q || !q->entry.prev || !q->entry.next)
			continue;

		mutex_lock(&tiq_list_lock);
		list_del_rcu(&q->entry);
		mutex_unlock(&tiq_list_lock);
		synchronize_rcu();
	}
}

static inline int tiqdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED)
		/* more work coming */
		return 0;
	return 1;
}

static inline int shared_ind(struct qdio_irq *irq_ptr)
{
	return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}

static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qdio_perf_stat_inc(&perf_stats.thinint_inbound);
	qdio_sync_after_thinint(q);

	/*
	 * Maybe we have work on our outbound queues... at least
	 * we have to check the PCI capable queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!tiqdio_inbound_q_done(q)) {
		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!tiqdio_inbound_q_done(q)) {
		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	__tiqdio_inbound_processing(q);
}

/* check for work on all inbound thinint queues */
static void tiqdio_tasklet_fn(unsigned long data)
{
	struct qdio_q *q;

	qdio_perf_stat_inc(&perf_stats.tasklet_thinint);
again:

	/* protect tiq_list entries, only changed in activate or shutdown */
	rcu_read_lock();

	list_for_each_entry_rcu(q, &tiq_list, entry)
		/* only process queues from changed sets */
		if (*q->irq_ptr->dsci) {

			/* only clear it if the indicator is non-shared */
			if (!shared_ind(q->irq_ptr))
				xchg(q->irq_ptr->dsci, 0);
			/*
			 * don't call inbound processing directly since
			 * that could starve other thinint queues
			 */
			tasklet_schedule(&q->tasklet);
		}

	rcu_read_unlock();

	/*
	 * If we used the shared indicator, clear it now after all
	 * queues were processed.
	 */
	if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) {
		xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);

		/* prevent racing */
		if (*tiqdio_alsi)
			xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
	}

	/* check for more work */
	if (*tiqdio_alsi) {
		xchg(tiqdio_alsi, 0);
		qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop);
		goto again;
	}
}

/**
 * tiqdio_thinint_handler - thin interrupt handler for qdio
 * @ind: pointer to adapter local summary indicator
 * @drv_data: NULL
 */
static void tiqdio_thinint_handler(void *ind, void *drv_data)
{
	qdio_perf_stat_inc(&perf_stats.thin_int);

	/*
	 * SVS only when needed: issue SVS to benefit from iqdio interrupt
	 * avoidance (SVS clears adapter interrupt suppression overwrite).
	 */
	if (!css_qdio_omit_svs)
		do_clear_global_summary();

	/*
	 * Reset the local summary indicator (tiqdio_alsi) to stop adapter
	 * interrupts for now; the tasklet will clean all dsci's.
	 */
	xchg((u8 *)ind, 0);
	tasklet_hi_schedule(&tiqdio_tasklet);
}

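/*
 * Issue a CHSC request (command code 0x0021, "SSI" in the debug log)
 * that installs the adapter local summary indicator and this
 * subchannel's device state change indicator, or resets both
 * addresses to 0 if @reset is set.
 */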
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
	struct scssc_area *scssc_area;
	int rc;

	scssc_area = (struct scssc_area *)irq_ptr->chsc_page;
	memset(scssc_area, 0, PAGE_SIZE);

	if (reset) {
		scssc_area->summary_indicator_addr = 0;
		scssc_area->subchannel_indicator_addr = 0;
	} else {
		scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi);
		scssc_area->subchannel_indicator_addr =
			virt_to_phys(irq_ptr->dsci);
	}

	scssc_area->request = (struct chsc_header) {
		.length = 0x0fe0,
		.code	= 0x0021,
	};
	scssc_area->operation_code = 0;
	scssc_area->ks = PAGE_DEFAULT_KEY;
	scssc_area->kc = PAGE_DEFAULT_KEY;
	scssc_area->isc = QDIO_AIRQ_ISC;
	scssc_area->schid = irq_ptr->schid;

	/* enable the time delay disablement facility */
	if (css_general_characteristics.aif_tdd)
		scssc_area->word_with_d_bit = 0x10000000;

	rc = chsc(scssc_area);
	if (rc)
		return -EIO;

	rc = chsc_error_from_response(scssc_area->response.code);
	if (rc) {
		DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
			  scssc_area->response.code);
		DBF_ERROR_HEX(&scssc_area->response, sizeof(void *));
		return rc;
	}

	DBF_EVENT("setscind");
	DBF_HEX(&scssc_area->summary_indicator_addr, sizeof(unsigned long));
	DBF_HEX(&scssc_area->subchannel_indicator_addr, sizeof(unsigned long));
	return 0;
}

/* allocate non-shared indicators and shared indicator */
int __init tiqdio_allocate_memory(void)
{
	q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS,
			       GFP_KERNEL);
	if (!q_indicators)
		return -ENOMEM;
	return 0;
}

void tiqdio_free_memory(void)
{
	kfree(q_indicators);
}

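/*
 * Register the thin interrupt handler on the QDIO adapter interruption
 * subclass. On success the adapter local summary indicator returned by
 * s390_register_adapter_interrupt() is kept in tiqdio_alsi; on failure
 * the ISC registration is rolled back again.
 */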
int __init tiqdio_register_thinints(void)
{
	isc_register(QDIO_AIRQ_ISC);
	tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
						      NULL, QDIO_AIRQ_ISC);
	if (IS_ERR(tiqdio_alsi)) {
		DBF_EVENT("RTI:%lx", PTR_ERR(tiqdio_alsi));
		tiqdio_alsi = NULL;
		isc_unregister(QDIO_AIRQ_ISC);
		return -ENOMEM;
	}
	return 0;
}

int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return 0;

	/*
	 * Check for aif time delay disablement. If installed,
	 * omit SVS even under LPAR.
	 */
	if (css_general_characteristics.aif_tdd)
		css_qdio_omit_svs = 1;
	return set_subchannel_ind(irq_ptr, 0);
}

void qdio_setup_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;
	irq_ptr->dsci = get_indicator();
	DBF_HEX(&irq_ptr->dsci, sizeof(void *));
}

void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;

	/* reset adapter interrupt indicators */
	put_indicator(irq_ptr->dsci);
	set_subchannel_ind(irq_ptr, 1);
}

void __exit tiqdio_unregister_thinints(void)
{
	WARN_ON(!list_empty(&tiq_list));

	if (tiqdio_alsi) {
		s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
		isc_unregister(QDIO_AIRQ_ISC);
	}
	tasklet_kill(&tiqdio_tasklet);
}