/*
 * linux/drivers/s390/cio/thinint_qdio.c
 *
 * thin interrupt support for qdio
 *
 * Copyright 2000-2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/io.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/airq.h>
#include <asm/isc.h>

#include "cio.h"
#include "ioasm.h"
#include "qdio.h"
#include "qdio_debug.h"
#include "qdio_perf.h"

/*
 * Restriction: only 63 iqdio subchannels can have their own indicator;
 * all subsequent subchannels share one indicator.
 */
#define TIQDIO_NR_NONSHARED_IND		63
#define TIQDIO_NR_INDICATORS		(TIQDIO_NR_NONSHARED_IND + 1)
#define TIQDIO_SHARED_IND		63

/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);
DEFINE_MUTEX(tiq_list_lock);

/* adapter local summary indicator */
static unsigned char *tiqdio_alsi;

/* device state change indicators */
struct indicator_t {
	u32 ind;	/* u32 because of compare-and-swap performance */
	atomic_t count; /* use count, 0 or 1 for non-shared indicators */
};
static struct indicator_t *q_indicators;

static void tiqdio_tasklet_fn(unsigned long data);
static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0);

static int css_qdio_omit_svs;

static inline unsigned long do_clear_global_summary(void)
{
	register unsigned long __fn asm("1") = 3;
	register unsigned long __tmp asm("2");
	register unsigned long __time asm("3");

	asm volatile(
		"	.insn	rre,0xb2650000,2,0"
		: "+d" (__fn), "=d" (__tmp), "=d" (__time));
	return __time;
}
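/*
 * The RRE-format opcode 0xb265 above has its operands fixed by the
 * hardware interface: the function code (3 = clear global summary) is
 * passed in general register 1 and the time of interrupt is returned
 * in register 3. Going by its only caller, tiqdio_thinint_handler(),
 * this is the SVS operation referenced in the comment there.
 */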
/* returns addr for the device state change indicator */
static u32 *get_indicator(void)
{
	int i;

	for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
		if (!atomic_read(&q_indicators[i].count)) {
			atomic_set(&q_indicators[i].count, 1);
			return &q_indicators[i].ind;
		}

	/* use the shared indicator */
	atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
	return &q_indicators[TIQDIO_SHARED_IND].ind;
}

static void put_indicator(u32 *addr)
{
	int i;

	if (!addr)
		return;
	i = ((unsigned long)addr - (unsigned long)q_indicators) /
		sizeof(struct indicator_t);
	atomic_dec(&q_indicators[i].count);
}

void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	/* No TDD facility? If we must use SIGA-s we can also omit SVS. */
	if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
		css_qdio_omit_svs = 1;

	mutex_lock(&tiq_list_lock);
	for_each_input_queue(irq_ptr, q, i)
		list_add_rcu(&q->entry, &tiq_list);
	mutex_unlock(&tiq_list_lock);
	xchg(irq_ptr->dsci, 1);
}

/*
 * we cannot stop the tiqdio tasklet here since it is for all
 * thinint qdio devices and it must run as long as there is a
 * thinint device left
 */
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for (i = 0; i < irq_ptr->nr_input_qs; i++) {
		q = irq_ptr->input_qs[i];
		/* if establish triggered an error */
		if (!q || !q->entry.prev || !q->entry.next)
			continue;

		mutex_lock(&tiq_list_lock);
		list_del_rcu(&q->entry);
		mutex_unlock(&tiq_list_lock);
		synchronize_rcu();
	}
}

static inline int tiqdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED)
		/* more work coming */
		return 0;
	return 1;
}

static inline int shared_ind(struct qdio_irq *irq_ptr)
{
	return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}
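/*
 * Locking overview for tiq_list: the writers above
 * (tiqdio_add_input_queues() and tiqdio_remove_input_queues())
 * serialize against each other with tiq_list_lock and publish/retract
 * entries with the RCU list primitives; the only reader,
 * tiqdio_tasklet_fn(), walks the list under rcu_read_lock(). The
 * synchronize_rcu() after list_del_rcu() ensures that no reader still
 * references a queue once its removal returns. The q->entry.prev/next
 * check covers queues that were never added because establish failed.
 */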
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qdio_perf_stat_inc(&perf_stats.thinint_inbound);
	qdio_sync_after_thinint(q);

	/*
	 * Maybe we have work on our outbound queues... at least
	 * we have to check the PCI capable queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_inbound_handler(q);

	if (!tiqdio_inbound_q_done(q)) {
		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!tiqdio_inbound_q_done(q)) {
		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	__tiqdio_inbound_processing(q);
}

/* check for work on all inbound thinint queues */
static void tiqdio_tasklet_fn(unsigned long data)
{
	struct qdio_q *q;

	qdio_perf_stat_inc(&perf_stats.tasklet_thinint);
again:

	/* protect tiq_list entries, only changed in activate or shutdown */
	rcu_read_lock();

	list_for_each_entry_rcu(q, &tiq_list, entry)
		/* only process queues from changed sets */
		if (*q->irq_ptr->dsci) {

			/* only clear it if the indicator is non-shared */
			if (!shared_ind(q->irq_ptr))
				xchg(q->irq_ptr->dsci, 0);
			/*
			 * don't call inbound processing directly since
			 * that could starve other thinint queues
			 */
			tasklet_schedule(&q->tasklet);
		}

	rcu_read_unlock();

	/*
	 * if we used the shared indicator clear it now after all queues
	 * were processed
	 */
	if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) {
		xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);

		/* prevent racing */
		if (*tiqdio_alsi)
			xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
	}

	/* check for more work */
	if (*tiqdio_alsi) {
		xchg(tiqdio_alsi, 0);
		qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop);
		goto again;
	}
}
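/*
 * How an interrupt cannot get lost between the handler and the
 * tasklet: tiqdio_thinint_handler() below clears the ALSI *before*
 * scheduling the tasklet, so a device raising its dsci afterwards
 * re-sets the ALSI; tiqdio_tasklet_fn() re-reads the ALSI after each
 * pass and loops via "again" until it stays zero. For the shared
 * indicator the tasklet additionally re-arms the dsci if the ALSI
 * fired while the indicator was being cleared (the "prevent racing"
 * case above).
 */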
/**
 * tiqdio_thinint_handler - thin interrupt handler for qdio
 * @ind: pointer to adapter local summary indicator
 * @drv_data: NULL
 */
static void tiqdio_thinint_handler(void *ind, void *drv_data)
{
	qdio_perf_stat_inc(&perf_stats.thin_int);

	/*
	 * SVS only when needed: issue SVS to benefit from iqdio interrupt
	 * avoidance (SVS clears adapter interrupt suppression overwrite)
	 */
	if (!css_qdio_omit_svs)
		do_clear_global_summary();

	/*
	 * reset local summary indicator (tiqdio_alsi) to stop adapter
	 * interrupts for now, the tasklet will clean all dsci's
	 */
	xchg((u8 *)ind, 0);
	tasklet_hi_schedule(&tiqdio_tasklet);
}

static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
	struct scssc_area *scssc_area;
	int rc;

	scssc_area = (struct scssc_area *)irq_ptr->chsc_page;
	memset(scssc_area, 0, PAGE_SIZE);

	if (reset) {
		scssc_area->summary_indicator_addr = 0;
		scssc_area->subchannel_indicator_addr = 0;
	} else {
		scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi);
		scssc_area->subchannel_indicator_addr =
			virt_to_phys(irq_ptr->dsci);
	}

	scssc_area->request = (struct chsc_header) {
		.length = 0x0fe0,
		.code	= 0x0021,
	};
	scssc_area->operation_code = 0;
	scssc_area->ks = PAGE_DEFAULT_KEY;
	scssc_area->kc = PAGE_DEFAULT_KEY;
	scssc_area->isc = QDIO_AIRQ_ISC;
	scssc_area->schid = irq_ptr->schid;

	/* enable the time delay disablement facility */
	if (css_general_characteristics.aif_tdd)
		scssc_area->word_with_d_bit = 0x10000000;

	rc = chsc(scssc_area);
	if (rc)
		return -EIO;

	rc = chsc_error_from_response(scssc_area->response.code);
	if (rc) {
		DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
			  scssc_area->response.code);
		DBF_ERROR_HEX(&scssc_area->response, sizeof(void *));
		return rc;
	}

	DBF_EVENT("setscind");
	DBF_HEX(&scssc_area->summary_indicator_addr, sizeof(unsigned long));
	DBF_HEX(&scssc_area->subchannel_indicator_addr, sizeof(unsigned long));
	return 0;
}
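/*
 * set_subchannel_ind() issues the CHSC "set subchannel indicator"
 * command (request code 0x0021, logged as "setscind"): with reset == 0
 * it registers the physical addresses of the adapter local summary
 * indicator and the per-device dsci with the channel subsystem; with
 * reset == 1 it passes zero addresses to revoke that registration.
 * The two callers are qdio_establish_thinint() and
 * qdio_shutdown_thinint() below.
 */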
/* allocate non-shared indicators and shared indicator */
int __init tiqdio_allocate_memory(void)
{
	q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS,
			       GFP_KERNEL);
	if (!q_indicators)
		return -ENOMEM;
	return 0;
}

void tiqdio_free_memory(void)
{
	kfree(q_indicators);
}

int __init tiqdio_register_thinints(void)
{
	isc_register(QDIO_AIRQ_ISC);
	tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
						      NULL, QDIO_AIRQ_ISC);
	if (IS_ERR(tiqdio_alsi)) {
		DBF_EVENT("RTI:%lx", PTR_ERR(tiqdio_alsi));
		tiqdio_alsi = NULL;
		isc_unregister(QDIO_AIRQ_ISC);
		return -ENOMEM;
	}
	return 0;
}
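/*
 * Once tiqdio_register_thinints() succeeds, tiqdio_alsi points at the
 * summary-indicator byte handed out by the adapter-interrupt layer for
 * QDIO_AIRQ_ISC: the machine sets that byte whenever one of the dscis
 * registered via set_subchannel_ind() is set, and
 * tiqdio_thinint_handler() then runs with it as @ind. Note that a
 * registration failure is reported as -ENOMEM regardless of the error
 * carried in the ERR_PTR.
 */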
int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return 0;

	/* Check for aif time delay disablement. If installed,
	 * omit SVS even under LPAR.
	 */
	if (css_general_characteristics.aif_tdd)
		css_qdio_omit_svs = 1;
	return set_subchannel_ind(irq_ptr, 0);
}

void qdio_setup_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;
	irq_ptr->dsci = get_indicator();
	DBF_HEX(&irq_ptr->dsci, sizeof(void *));
}

void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;

	/* reset adapter interrupt indicators */
	put_indicator(irq_ptr->dsci);
	set_subchannel_ind(irq_ptr, 1);
}

void __exit tiqdio_unregister_thinints(void)
{
	WARN_ON(!list_empty(&tiq_list));

	if (tiqdio_alsi) {
		s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
		isc_unregister(QDIO_AIRQ_ISC);
	}
	tasklet_kill(&tiqdio_tasklet);
}
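/*
 * Rough lifecycle, as suggested by the __init/__exit annotations and
 * the callers visible in this file: tiqdio_allocate_memory() and
 * tiqdio_register_thinints() run once at module init; per device,
 * qdio_setup_thinint() assigns a dsci via get_indicator(),
 * qdio_establish_thinint() registers it with the hardware and
 * tiqdio_add_input_queues() makes the queues visible to the tasklet.
 * Teardown is the reverse: tiqdio_remove_input_queues() and
 * qdio_shutdown_thinint() (which returns the dsci via put_indicator()),
 * then tiqdio_unregister_thinints() and tiqdio_free_memory() at module
 * exit.
 */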