/*
 * linux/drivers/s390/cio/thinint_qdio.c
 *
 * thin interrupt support for qdio
 *
 * Copyright 2000-2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/io.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/airq.h>
#include <asm/isc.h>

#include "cio.h"
#include "ioasm.h"
#include "qdio.h"
#include "qdio_debug.h"
#include "qdio_perf.h"

/*
 * Restriction: only 63 iqdio subchannels can have their own indicator;
 * all subsequent subchannels share one indicator.
 */
#define TIQDIO_NR_NONSHARED_IND		63
#define TIQDIO_NR_INDICATORS		(TIQDIO_NR_NONSHARED_IND + 1)
#define TIQDIO_SHARED_IND		63

/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);

/* adapter local summary indicator */
static unsigned char *tiqdio_alsi;

/* device state change indicators */
struct indicator_t {
	u32 ind;	/* u32 because of compare-and-swap performance */
	atomic_t count; /* use count, 0 or 1 for non-shared indicators */
};
static struct indicator_t *q_indicators;

static void tiqdio_tasklet_fn(unsigned long data);
static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0);

static int css_qdio_omit_svs;

static inline unsigned long do_clear_global_summary(void)
{
	register unsigned long __fn asm("1") = 3;
	register unsigned long __tmp asm("2");
	register unsigned long __time asm("3");

	asm volatile(
		"	.insn	rre,0xb2650000,2,0"
		: "+d" (__fn), "=d" (__tmp), "=d" (__time));
	return __time;
}

/* returns addr for the device state change indicator */
static u32 *get_indicator(void)
{
	int i;

	for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
		if (!atomic_read(&q_indicators[i].count)) {
			atomic_set(&q_indicators[i].count, 1);
			return &q_indicators[i].ind;
		}

	/* use the shared indicator */
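	/*
	 * Every device beyond the first TIQDIO_NR_NONSHARED_IND ends up on
	 * this last array slot, so its count is a real reference count of
	 * the sharers, while the dedicated slots above only toggle 0/1.
	 */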
	atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
	return &q_indicators[TIQDIO_SHARED_IND].ind;
}

static void put_indicator(u32 *addr)
{
	int i;

	if (!addr)
		return;
	i = ((unsigned long)addr - (unsigned long)q_indicators) /
		sizeof(struct indicator_t);
	atomic_dec(&q_indicators[i].count);
}

void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	/* No TDD facility? If we must use SIGA-s we can also omit SVS. */
	if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
		css_qdio_omit_svs = 1;

	for_each_input_queue(irq_ptr, q, i) {
		list_add_rcu(&q->entry, &tiq_list);
		synchronize_rcu();
	}
	xchg(irq_ptr->dsci, 1);
	tasklet_schedule(&tiqdio_tasklet);
}

/*
 * we cannot stop the tiqdio tasklet here since it is for all
 * thinint qdio devices and it must run as long as there is a
 * thinint device left
 */
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for (i = 0; i < irq_ptr->nr_input_qs; i++) {
		q = irq_ptr->input_qs[i];
		/* if establish triggered an error */
		if (!q || !q->entry.prev || !q->entry.next)
			continue;
		list_del_rcu(&q->entry);
		synchronize_rcu();
	}
}

static inline int tiqdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state);

	if (state == SLSB_P_INPUT_PRIMED)
		/* more work coming */
		return 0;
	return 1;
}

static inline int shared_ind(struct qdio_irq *irq_ptr)
{
	return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}

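/*
 * Rough picture of the thin interrupt path, as far as this file shows it:
 * set_subchannel_ind() registers a queue's device state change indicator
 * (dsci) and the adapter local summary indicator (alsi) with the hardware,
 * tiqdio_thinint_handler() resets the alsi and kicks the global tasklet,
 * and the global tasklet schedules the per-queue inbound tasklets which
 * end up in __tiqdio_inbound_processing() below.
 */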
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qdio_perf_stat_inc(&perf_stats.thinint_inbound);
	qdio_sync_after_thinint(q);

	/*
	 * Maybe we have work on our outbound queues... at least
	 * we have to check the PCI capable queues.
	 */
	qdio_check_outbound_after_thinint(q);

again:
	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_inbound_handler(q);

	if (!tiqdio_inbound_q_done(q)) {
		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
		goto again;
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!tiqdio_inbound_q_done(q)) {
		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
		goto again;
	}
}

void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	__tiqdio_inbound_processing(q);
}

/* check for work on all inbound thinint queues */
static void tiqdio_tasklet_fn(unsigned long data)
{
	struct qdio_q *q;

	qdio_perf_stat_inc(&perf_stats.tasklet_thinint);
again:

	/* protect tiq_list entries, only changed in activate or shutdown */
	rcu_read_lock();

	list_for_each_entry_rcu(q, &tiq_list, entry)
		/* only process queues from changed sets */
		if (*q->irq_ptr->dsci) {

			/* only clear it if the indicator is non-shared */
			if (!shared_ind(q->irq_ptr))
				xchg(q->irq_ptr->dsci, 0);
			/*
			 * don't call inbound processing directly since
			 * that could starve other thinint queues
			 */
			tasklet_schedule(&q->tasklet);
		}

	rcu_read_unlock();

	/*
	 * if we used the shared indicator clear it now after all queues
	 * were processed
	 */
	if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) {
		xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);

		/* prevent racing */
		if (*tiqdio_alsi)
			xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
	}

	/* check for more work */
	if (*tiqdio_alsi) {
		xchg(tiqdio_alsi, 0);
		qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop);
		goto again;
	}
}

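/*
 * Note on the interplay with tiqdio_thinint_handler() below: the handler
 * only clears the adapter local summary indicator and defers all dsci
 * handling to the tasklet above. The final *tiqdio_alsi check in the
 * tasklet therefore repeats the scan whenever the adapter raised the
 * summary indicator again while the list was being walked, so a device
 * state change arriving during the scan should not be lost.
 */
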
/**
 * tiqdio_thinint_handler - thin interrupt handler for qdio
 * @ind: pointer to adapter local summary indicator
 * @drv_data: NULL
 */
static void tiqdio_thinint_handler(void *ind, void *drv_data)
{
	qdio_perf_stat_inc(&perf_stats.thin_int);

	/*
	 * SVS only when needed: issue SVS to benefit from iqdio interrupt
	 * avoidance (SVS clears adapter interrupt suppression overwrite)
	 */
	if (!css_qdio_omit_svs)
		do_clear_global_summary();

	/*
	 * reset local summary indicator (tiqdio_alsi) to stop adapter
	 * interrupts for now, the tasklet will clean all dsci's
	 */
	xchg((u8 *)ind, 0);
	tasklet_hi_schedule(&tiqdio_tasklet);
}

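/*
 * set_subchannel_ind() registers (or, with reset set, deregisters) the
 * indicators with the channel subsystem: a CHSC request with command code
 * 0x0021 is built in irq_ptr->chsc_page, carrying the physical addresses
 * of the adapter local summary indicator and the device state change
 * indicator, the adapter interruption subclass and the subchannel id.
 * The d-bit additionally enables the time delay disablement facility
 * when the machine offers it.
 */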
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
	struct scssc_area *scssc_area;
	char dbf_text[15];
	void *ptr;
	int rc;

	scssc_area = (struct scssc_area *)irq_ptr->chsc_page;
	memset(scssc_area, 0, PAGE_SIZE);

	if (reset) {
		scssc_area->summary_indicator_addr = 0;
		scssc_area->subchannel_indicator_addr = 0;
	} else {
		scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi);
		scssc_area->subchannel_indicator_addr =
			virt_to_phys(irq_ptr->dsci);
	}

	scssc_area->request = (struct chsc_header) {
		.length = 0x0fe0,
		.code	= 0x0021,
	};
	scssc_area->operation_code = 0;
	scssc_area->ks = PAGE_DEFAULT_KEY;
	scssc_area->kc = PAGE_DEFAULT_KEY;
	scssc_area->isc = QDIO_AIRQ_ISC;
	scssc_area->schid = irq_ptr->schid;

	/* enable the time delay disablement facility */
	if (css_general_characteristics.aif_tdd)
		scssc_area->word_with_d_bit = 0x10000000;

	rc = chsc(scssc_area);
	if (rc)
		return -EIO;

	rc = chsc_error_from_response(scssc_area->response.code);
	if (rc) {
		sprintf(dbf_text, "sidR%4x", scssc_area->response.code);
		QDIO_DBF_TEXT1(0, trace, dbf_text);
		QDIO_DBF_TEXT1(0, setup, dbf_text);
		ptr = &scssc_area->response;
		QDIO_DBF_HEX2(1, setup, &ptr, QDIO_DBF_SETUP_LEN);
		return rc;
	}

	QDIO_DBF_TEXT2(0, setup, "setscind");
	QDIO_DBF_HEX2(0, setup, &scssc_area->summary_indicator_addr,
		      sizeof(unsigned long));
	QDIO_DBF_HEX2(0, setup, &scssc_area->subchannel_indicator_addr,
		      sizeof(unsigned long));
	return 0;
}

/* allocate non-shared indicators and shared indicator */
int __init tiqdio_allocate_memory(void)
{
	q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS,
			       GFP_KERNEL);
	if (!q_indicators)
		return -ENOMEM;
	return 0;
}

void tiqdio_free_memory(void)
{
	kfree(q_indicators);
}

int __init tiqdio_register_thinints(void)
{
	char dbf_text[20];

	isc_register(QDIO_AIRQ_ISC);
	tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
						      NULL, QDIO_AIRQ_ISC);
	if (IS_ERR(tiqdio_alsi)) {
		sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_alsi));
		QDIO_DBF_TEXT0(0, setup, dbf_text);
		tiqdio_alsi = NULL;
		isc_unregister(QDIO_AIRQ_ISC);
		return -ENOMEM;
	}
	return 0;
}

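/*
 * Per-device lifecycle, presumably driven by the qdio core code:
 * qdio_setup_thinint() picks a dsci via get_indicator(),
 * qdio_establish_thinint() registers it with the hardware through
 * set_subchannel_ind(), and qdio_shutdown_thinint() releases both again.
 */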
int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return 0;

	/*
	 * Check for aif time delay disablement. If installed,
	 * omit SVS even under LPAR.
	 */
	if (css_general_characteristics.aif_tdd)
		css_qdio_omit_svs = 1;
	return set_subchannel_ind(irq_ptr, 0);
}

void qdio_setup_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;
	irq_ptr->dsci = get_indicator();
	QDIO_DBF_HEX1(0, setup, &irq_ptr->dsci, sizeof(void *));
}

void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;

	/* reset adapter interrupt indicators */
	put_indicator(irq_ptr->dsci);
	set_subchannel_ind(irq_ptr, 1);
}

void __exit tiqdio_unregister_thinints(void)
{
	tasklet_disable(&tiqdio_tasklet);

	if (tiqdio_alsi) {
		s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
		isc_unregister(QDIO_AIRQ_ISC);
	}
}