/*
 * Copyright IBM Corp. 2000, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/airq.h>
#include <asm/isc.h>

#include "cio.h"
#include "ioasm.h"
#include "qdio.h"
#include "qdio_debug.h"

/*
 * Restriction: only 63 iqdio subchannels can have their own indicator;
 * all subsequent subchannels share one indicator.
 */
#define TIQDIO_NR_NONSHARED_IND		63
#define TIQDIO_NR_INDICATORS		(TIQDIO_NR_NONSHARED_IND + 1)
#define TIQDIO_SHARED_IND		63

/* device state change indicators */
struct indicator_t {
	u32 ind;	/* u32 because of compare-and-swap performance */
	atomic_t count; /* use count, 0 or 1 for non-shared indicators */
};

/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);
static DEFINE_MUTEX(tiq_list_lock);

/* adapter local summary indicator */
static u8 *tiqdio_alsi;

static struct indicator_t *q_indicators;

u64 last_ai_time;

/* returns addr for the device state change indicator */
static u32 *get_indicator(void)
{
	int i;

	for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
		if (!atomic_read(&q_indicators[i].count)) {
			atomic_set(&q_indicators[i].count, 1);
			return &q_indicators[i].ind;
		}

	/* use the shared indicator */
	atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
	return &q_indicators[TIQDIO_SHARED_IND].ind;
}

static void put_indicator(u32 *addr)
{
	int i;

	if (!addr)
		return;
	i = ((unsigned long)addr - (unsigned long)q_indicators) /
		sizeof(struct indicator_t);
	atomic_dec(&q_indicators[i].count);
}
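/*
 * Illustrative note (added; not from the original source): q_indicators[]
 * is a contiguous array, so put_indicator() can recover the slot index
 * from the address alone, as a byte offset divided by the element size:
 *
 *	u32 *ind = get_indicator();	ind == &q_indicators[i].ind
 *	put_indicator(ind);		i == ((char *)ind - (char *)q_indicators)
 *					     / sizeof(struct indicator_t)
 *
 * Non-shared slots toggle their use count between 0 and 1, while the
 * shared slot (TIQDIO_SHARED_IND) counts every additional user.
 */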
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
	mutex_lock(&tiq_list_lock);
	list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
	mutex_unlock(&tiq_list_lock);
	xchg(irq_ptr->dsci, 1 << 7);
}

void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;

	q = irq_ptr->input_qs[0];
	/* if establish triggered an error, the queue was never added */
	if (!q || !q->entry.prev || !q->entry.next)
		return;

	mutex_lock(&tiq_list_lock);
	list_del_rcu(&q->entry);
	mutex_unlock(&tiq_list_lock);
	synchronize_rcu();
}

static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
{
	return irq_ptr->nr_input_qs > 1;
}

static inline int references_shared_dsci(struct qdio_irq *irq_ptr)
{
	return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}

static inline int shared_ind(struct qdio_irq *irq_ptr)
{
	return references_shared_dsci(irq_ptr) ||
		has_multiple_inq_on_dsci(irq_ptr);
}

void clear_nonshared_ind(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;
	if (shared_ind(irq_ptr))
		return;
	xchg(irq_ptr->dsci, 0);
}

int test_nonshared_ind(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return 0;
	if (shared_ind(irq_ptr))
		return 0;
	if (*irq_ptr->dsci)
		return 1;
	else
		return 0;
}
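/*
 * Note (assumption, not stated in this file): clear_nonshared_ind() and
 * test_nonshared_ind() are non-static because the core qdio code is
 * expected to call them (via qdio.h) when a driver polls instead of
 * taking interrupts. For shared dscis both are deliberately no-ops,
 * since the indicator state cannot be attributed to a single subchannel.
 */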
static inline u32 clear_shared_ind(void)
{
	if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count))
		return 0;
	return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
}

static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq, q, i) {
		if (!references_shared_dsci(irq) &&
		    has_multiple_inq_on_dsci(irq))
			xchg(q->irq_ptr->dsci, 0);

		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in progress */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
					     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}

			/* avoid dsci clear here, done after processing */
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else {
			if (!shared_ind(q->irq_ptr))
				xchg(q->irq_ptr->dsci, 0);

			/*
			 * Call inbound processing but not directly
			 * since that could starve other thinint queues.
			 */
			tasklet_schedule(&q->tasklet);
		}
	}
}

/**
 * tiqdio_thinint_handler - thin interrupt handler for qdio
 * @alsi: pointer to adapter local summary indicator
 * @data: NULL
 */
static void tiqdio_thinint_handler(void *alsi, void *data)
{
	u32 si_used = clear_shared_ind();
	struct qdio_q *q;

	last_ai_time = S390_lowcore.int_clock;
	inc_irq_stat(IRQIO_QAI);

	/* protect tiq_list entries, only changed in activate or shutdown */
	rcu_read_lock();

	/* check for work on all inbound thinint queues */
	list_for_each_entry_rcu(q, &tiq_list, entry) {
		struct qdio_irq *irq;

		/* only process queues from changed sets */
		irq = q->irq_ptr;
		if (unlikely(references_shared_dsci(irq))) {
			if (!si_used)
				continue;
		} else if (!*irq->dsci)
			continue;

		tiqdio_call_inq_handlers(irq);

		qperf_inc(q, adapter_int);
	}
	rcu_read_unlock();
}
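/*
 * Locking sketch for tiq_list, derived from the code above: the reader
 * (tiqdio_thinint_handler, running in interrupt context) traverses the
 * list under rcu_read_lock() only; the writers (tiqdio_add_input_queues
 * and tiqdio_remove_input_queues) serialize against each other with
 * tiq_list_lock and, on removal, wait for all readers to drain via
 * synchronize_rcu() before the queue can safely go away.
 */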
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
	struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page;
	u64 summary_indicator_addr, subchannel_indicator_addr;
	int rc;

	if (reset) {
		summary_indicator_addr = 0;
		subchannel_indicator_addr = 0;
	} else {
		summary_indicator_addr = virt_to_phys(tiqdio_alsi);
		subchannel_indicator_addr = virt_to_phys(irq_ptr->dsci);
	}

	rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr,
		       subchannel_indicator_addr);
	if (rc) {
		DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
			  scssc->response.code);
		goto out;
	}

	DBF_EVENT("setscind");
	DBF_HEX(&summary_indicator_addr, sizeof(summary_indicator_addr));
	DBF_HEX(&subchannel_indicator_addr, sizeof(subchannel_indicator_addr));
out:
	return rc;
}

/* allocate non-shared indicators and shared indicator */
int __init tiqdio_allocate_memory(void)
{
	q_indicators = kcalloc(TIQDIO_NR_INDICATORS, sizeof(struct indicator_t),
			       GFP_KERNEL);
	if (!q_indicators)
		return -ENOMEM;
	return 0;
}

void tiqdio_free_memory(void)
{
	kfree(q_indicators);
}

int __init tiqdio_register_thinints(void)
{
	isc_register(QDIO_AIRQ_ISC);
	tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
						      NULL, QDIO_AIRQ_ISC);
	if (IS_ERR(tiqdio_alsi)) {
		DBF_EVENT("RTI:%lx", PTR_ERR(tiqdio_alsi));
		tiqdio_alsi = NULL;
		isc_unregister(QDIO_AIRQ_ISC);
		return -ENOMEM;
	}
	return 0;
}
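/*
 * Per-device thinint lifecycle, sketched from the functions below (the
 * callers live in the qdio core; naming the order here is an assumption):
 *
 *	qdio_setup_thinint()	 - assign a dsci via get_indicator()
 *	qdio_establish_thinint() - point the adapter at the alsi/dsci
 *				   pair through chsc_sadc()
 *	qdio_shutdown_thinint()	 - reset the adapter association and
 *				   return the dsci via put_indicator()
 */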
int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return 0;
	return set_subchannel_ind(irq_ptr, 0);
}

void qdio_setup_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;
	irq_ptr->dsci = get_indicator();
	DBF_HEX(&irq_ptr->dsci, sizeof(void *));
}

void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;

	/* reset adapter interrupt indicators */
	set_subchannel_ind(irq_ptr, 1);
	put_indicator(irq_ptr->dsci);
}

void __exit tiqdio_unregister_thinints(void)
{
	WARN_ON(!list_empty(&tiq_list));

	if (tiqdio_alsi) {
		s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
		isc_unregister(QDIO_AIRQ_ISC);
	}
}
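/*
 * Module-level ordering, hedged sketch (the init/exit callers are not
 * part of this file): tiqdio_allocate_memory() is expected to run before
 * tiqdio_register_thinints(), so that q_indicators exists before the
 * first adapter interrupt can fire; teardown unregisters the handler
 * first and frees the indicator array last.
 */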