// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2000, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/atomic.h>
#include <linux/rculist.h>

#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/airq.h>
#include <asm/isc.h>

#include "cio.h"
#include "ioasm.h"
#include "qdio.h"
#include "qdio_debug.h"

/*
 * Restriction: only 63 iqdio subchannels can have their own indicator;
 * all subsequent subchannels share one indicator.
 */
#define TIQDIO_NR_NONSHARED_IND		63
#define TIQDIO_NR_INDICATORS		(TIQDIO_NR_NONSHARED_IND + 1)
#define TIQDIO_SHARED_IND		63

/* device state change indicators */
struct indicator_t {
	u32 ind;	/* u32 because of compare-and-swap performance */
	atomic_t count; /* use count, 0 or 1 for non-shared indicators */
};

/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);
static DEFINE_MUTEX(tiq_list_lock);

static struct indicator_t *q_indicators;

u64 last_ai_time;

/* returns addr for the device state change indicator */
static u32 *get_indicator(void)
{
	int i;

	for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
		if (!atomic_cmpxchg(&q_indicators[i].count, 0, 1))
			return &q_indicators[i].ind;

	/* all non-shared indicators are taken, use the shared indicator */
	atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
	return &q_indicators[TIQDIO_SHARED_IND].ind;
}

static void put_indicator(u32 *addr)
{
	struct indicator_t *ind;

	if (!addr)
		return;

	ind = container_of(addr, struct indicator_t, ind);
	atomic_dec(&ind->count);
}

void tiqdio_add_device(struct qdio_irq *irq_ptr)
{
	mutex_lock(&tiq_list_lock);
	list_add_rcu(&irq_ptr->entry, &tiq_list);
	mutex_unlock(&tiq_list_lock);
}

void tiqdio_remove_device(struct qdio_irq *irq_ptr)
{
	mutex_lock(&tiq_list_lock);
	list_del_rcu(&irq_ptr->entry);
	mutex_unlock(&tiq_list_lock);
	synchronize_rcu();
	INIT_LIST_HEAD(&irq_ptr->entry);
}

static inline int references_shared_dsci(struct qdio_irq *irq_ptr)
{
	return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}

int test_nonshared_ind(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return 0;
	if (references_shared_dsci(irq_ptr))
		return 0;
	if (*irq_ptr->dsci)
		return 1;
	else
		return 0;
}

static inline u32 clear_shared_ind(void)
{
	if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count))
		return 0;
	return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
}

static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
{
	struct qdio_q *q;
	int i;

	/*
	 * Reset the non-shared indicator before scanning the queues, so
	 * that a state change arriving during processing sets it again.
	 */
	if (!references_shared_dsci(irq))
		xchg(irq->dsci, 0);

	if (irq->irq_poll) {
		if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq->poll_state))
			irq->irq_poll(irq->cdev, irq->int_parm);
		else
			QDIO_PERF_STAT_INC(irq, int_discarded);

		return;
	}

	for_each_input_queue(irq, q, i) {
		/*
		 * Call inbound processing but not directly
		 * since that could starve other thinint queues.
		 */
		tasklet_schedule(&q->tasklet);
	}
}

/**
 * tiqdio_thinint_handler - thin interrupt handler for qdio
 * @airq: pointer to adapter interrupt descriptor
 * @floating: flag to recognize floating vs. directed interrupts (unused)
 */
static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
{
	u32 si_used = clear_shared_ind();
	struct qdio_irq *irq;

	last_ai_time = S390_lowcore.int_clock;
	inc_irq_stat(IRQIO_QAI);

	/* protect tiq_list entries, only changed in activate or shutdown */
	rcu_read_lock();

	list_for_each_entry_rcu(irq, &tiq_list, entry) {
		/* only process queues from changed sets */
		if (unlikely(references_shared_dsci(irq))) {
			if (!si_used)
				continue;
		} else if (!*irq->dsci)
			continue;

		tiqdio_call_inq_handlers(irq);

		QDIO_PERF_STAT_INC(irq, adapter_int);
	}
	rcu_read_unlock();
}

static struct airq_struct tiqdio_airq = {
	.handler = tiqdio_thinint_handler,
	.isc = QDIO_AIRQ_ISC,
};

static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
	struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page;
	u64 summary_indicator_addr, subchannel_indicator_addr;
	int rc;

	if (reset) {
		summary_indicator_addr = 0;
		subchannel_indicator_addr = 0;
	} else {
		summary_indicator_addr = virt_to_phys(tiqdio_airq.lsi_ptr);
		subchannel_indicator_addr = virt_to_phys(irq_ptr->dsci);
	}

	rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr,
		       subchannel_indicator_addr, tiqdio_airq.isc);
	if (rc) {
		DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
			  scssc->response.code);
		goto out;
	}

	DBF_EVENT("setscind");
	DBF_HEX(&summary_indicator_addr, sizeof(summary_indicator_addr));
	DBF_HEX(&subchannel_indicator_addr, sizeof(subchannel_indicator_addr));
out:
	return rc;
}

/* allocate non-shared indicators and shared indicator */
int __init tiqdio_allocate_memory(void)
{
	q_indicators = kcalloc(TIQDIO_NR_INDICATORS,
			       sizeof(struct indicator_t),
			       GFP_KERNEL);
	if (!q_indicators)
		return -ENOMEM;
	return 0;
}

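/* free the non-shared indicators and the shared indicator */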
void tiqdio_free_memory(void)
{
	kfree(q_indicators);
}

int __init tiqdio_register_thinints(void)
{
	int rc;

	rc = register_adapter_interrupt(&tiqdio_airq);
	if (rc) {
		DBF_EVENT("RTI:%x", rc);
		return rc;
	}
	return 0;
}

int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
	int rc;

	if (!is_thinint_irq(irq_ptr))
		return 0;

	irq_ptr->dsci = get_indicator();
	DBF_HEX(&irq_ptr->dsci, sizeof(void *));

	rc = set_subchannel_ind(irq_ptr, 0);
	if (rc)
		put_indicator(irq_ptr->dsci);

	return rc;
}

void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;

	/* reset adapter interrupt indicators */
	set_subchannel_ind(irq_ptr, 1);
	put_indicator(irq_ptr->dsci);
}

void __exit tiqdio_unregister_thinints(void)
{
	WARN_ON(!list_empty(&tiq_list));
	unregister_adapter_interrupt(&tiqdio_airq);
}