/*
 * linux/drivers/s390/cio/thinint_qdio.c
 *
 * thin interrupt support for qdio
 *
 * Copyright 2000-2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/io.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/airq.h>
#include <asm/isc.h>

#include "cio.h"
#include "ioasm.h"
#include "qdio.h"
#include "qdio_debug.h"
#include "qdio_perf.h"

/*
 * Restriction: only 63 iqdio subchannels would have its own indicator,
 * after that, subsequent subchannels share one indicator
 */
#define TIQDIO_NR_NONSHARED_IND		63
#define TIQDIO_NR_INDICATORS		(TIQDIO_NR_NONSHARED_IND + 1)
#define TIQDIO_SHARED_IND		63	/* index of the shared indicator */

/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);

/* adapter local summary indicator */
static unsigned char *tiqdio_alsi;

/* device state change indicators */
struct indicator_t {
	u32 ind;	/* u32 because of compare-and-swap performance */
	atomic_t count; /* use count, 0 or 1 for non-shared indicators */
};
/* array of TIQDIO_NR_INDICATORS entries, see tiqdio_allocate_memory() */
static struct indicator_t *q_indicators;

static void tiqdio_tasklet_fn(unsigned long data);
static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0);

/*
 * Non-zero once it is known that SVS can be skipped in the thin
 * interrupt handler, either because SIGA-sync is required anyway or
 * because the time delay disablement facility is installed.
 */
static int css_qdio_omit_svs;

/*
 * Issue the SVS instruction (function code 3 in register 1) to clear
 * the global summary indicator; returns the value the instruction
 * leaves in register 3.
 * NOTE(review): the meaning of the register 3 result ("__time") is not
 * visible here — confirm against the instruction description before
 * relying on the return value.
 */
static inline unsigned long do_clear_global_summary(void)
{
	register unsigned long __fn asm("1") = 3;
	register unsigned long __tmp asm("2");
	register unsigned long __time asm("3");

	asm volatile(
		"	.insn	rre,0xb2650000,2,0"
		: "+d" (__fn), "=d" (__tmp), "=d" (__time));
	return __time;
}

/*
 * returns addr for the device state change indicator
 *
 * Scans for a free non-shared indicator first; if all 63 are in use,
 * every further subchannel gets a reference on the shared indicator.
 */
static u32 *get_indicator(void)
{
	int i;

	/*
	 * NOTE(review): atomic_read followed by atomic_set is not an
	 * atomic test-and-set, so two concurrent callers could claim
	 * the same slot; presumably allocation is serialized by the
	 * caller — confirm.
	 */
	for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
		if (!atomic_read(&q_indicators[i].count)) {
			atomic_set(&q_indicators[i].count, 1);
			return &q_indicators[i].ind;
		}

	/* use the shared indicator */
	atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
	return &q_indicators[TIQDIO_SHARED_IND].ind;
}

/* drop the use count taken by get_indicator() */
static void put_indicator(u32 *addr)
{
	int i;

	if (!addr)
		return;
	/* recover the q_indicators[] index from the indicator address */
	i = ((unsigned long)addr - (unsigned long)q_indicators) /
		sizeof(struct indicator_t);
	atomic_dec(&q_indicators[i].count);
}

/*
 * Hook all input queues of a thinint subchannel into the global
 * tiq_list and trigger an initial scan.
 */
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	/* No TDD facility? If we must use SIGA-s we can also omit SVS. */
	if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
		css_qdio_omit_svs = 1;

	for_each_input_queue(irq_ptr, q, i) {
		list_add_rcu(&q->entry, &tiq_list);
		/*
		 * NOTE(review): synchronize_rcu() after every list_add_rcu
		 * inside the loop looks unnecessary for additions (readers
		 * can only miss, not trip over, a new entry) — verify
		 * whether it can be dropped or moved out of the loop.
		 */
		synchronize_rcu();
	}
	/* mark the device state changed so the tasklet scans the queues */
	xchg(irq_ptr->dsci, 1);
	tasklet_schedule(&tiqdio_tasklet);
}

/*
 * we cannot stop the tiqdio tasklet here since it is for all
 * thinint qdio devices and it must run as long as there is a
 * thinint device left
 */
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i) {
		list_del_rcu(&q->entry);
		/* wait for concurrent tiq_list readers before the next del */
		synchronize_rcu();
	}
}

/*
 * Returns 1 if the input queue has no more work pending, 0 if the
 * adapter has primed further buffers.
 */
static inline int tiqdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	/* sync the queue state with the adapter before inspecting it */
	qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state);

	if (state == SLSB_P_INPUT_PRIMED)
		/* more work coming */
		return 0;
	return 1;
}

/* does this subchannel use the shared device state change indicator? */
static inline int shared_ind(struct qdio_irq *irq_ptr)
{
	return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}

/*
 * Process one thinint input queue: sync it, check the associated
 * outbound queues, then loop draining the inbound queue until it is
 * really done (re-checking after qdio_stop_polling to not lose
 * initiative, see below).
 */
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qdio_perf_stat_inc(&perf_stats.thinint_inbound);
	qdio_sync_after_thinint(q);

	/*
	 * Maybe we have work on our outbound queues... at least
	 * we have to check the PCI capable queues.
	 */
	qdio_check_outbound_after_thinint(q);

again:
	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_inbound_handler(q);

	if (!tiqdio_inbound_q_done(q)) {
		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
		goto again;
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!tiqdio_inbound_q_done(q)) {
		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
		goto again;
	}
}

/* per-queue tasklet entry point, data is the struct qdio_q pointer */
void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	__tiqdio_inbound_processing(q);
}

/* check for work on all inbound thinint queues */
static void tiqdio_tasklet_fn(unsigned long data)
{
	struct qdio_q *q;

	qdio_perf_stat_inc(&perf_stats.tasklet_thinint);
again:

	/* protect tiq_list entries, only changed in activate or shutdown */
	rcu_read_lock();

	list_for_each_entry_rcu(q, &tiq_list, entry)
		/* only process queues from changed sets */
		if (*q->irq_ptr->dsci) {

			/* only clear it if the indicator is non-shared */
			if (!shared_ind(q->irq_ptr))
				xchg(q->irq_ptr->dsci, 0);
			/*
			 * don't call inbound processing directly since
			 * that could starve other thinint queues
			 */
			tasklet_schedule(&q->tasklet);
		}

	rcu_read_unlock();

	/*
	 * if we used the shared indicator clear it now after all queues
	 * were processed
	 */
	if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) {
		xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);

		/* prevent racing */
		if (*tiqdio_alsi)
			xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
	}

	/* check for more work */
	if (*tiqdio_alsi) {
		xchg(tiqdio_alsi, 0);
		qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop);
		goto again;
	}
}

/**
 * tiqdio_thinint_handler - thin interrupt handler for qdio
 * @ind: pointer to adapter local summary indicator
 * @drv_data: NULL
 */
static void tiqdio_thinint_handler(void *ind, void *drv_data)
{
	qdio_perf_stat_inc(&perf_stats.thin_int);

	/*
	 * SVS only when needed: issue SVS to benefit from iqdio interrupt
	 * avoidance (SVS clears adapter interrupt suppression overwrite)
	 */
	if (!css_qdio_omit_svs)
		do_clear_global_summary();

	/*
	 * reset local summary indicator (tiqdio_alsi) to stop adapter
	 * interrupts for now, the tasklet will clean all dsci's
	 */
	xchg((u8 *)ind, 0);
	tasklet_hi_schedule(&tiqdio_tasklet);
}

/*
 * Issue CHSC command 0x0021 (set subchannel indicator) to tell the
 * channel subsystem which summary/subchannel indicators to use for
 * @irq_ptr, or to clear them again when @reset is non-zero.
 * Returns 0 on success, -EIO if the chsc instruction itself fails, or
 * the error mapped from the chsc response code.
 */
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
	struct scssc_area *scssc_area;
	char dbf_text[15];
	void *ptr;
	int rc;

	scssc_area = (struct scssc_area *)irq_ptr->chsc_page;
	memset(scssc_area, 0, PAGE_SIZE);

	if (reset) {
		scssc_area->summary_indicator_addr = 0;
		scssc_area->subchannel_indicator_addr = 0;
	} else {
		scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi);
		scssc_area->subchannel_indicator_addr =
			virt_to_phys(irq_ptr->dsci);
	}

	scssc_area->request = (struct chsc_header) {
		.length = 0x0fe0,
		.code	= 0x0021,
	};
	scssc_area->operation_code = 0;
	scssc_area->ks = PAGE_DEFAULT_KEY;
	scssc_area->kc = PAGE_DEFAULT_KEY;
	scssc_area->isc = QDIO_AIRQ_ISC;
	scssc_area->schid = irq_ptr->schid;

	/* enable the time delay disablement facility */
	if (css_general_characteristics.aif_tdd)
		scssc_area->word_with_d_bit = 0x10000000;

	rc = chsc(scssc_area);
	if (rc)
		return -EIO;

	rc = chsc_error_from_response(scssc_area->response.code);
	if (rc) {
		sprintf(dbf_text, "sidR%4x", scssc_area->response.code);
		QDIO_DBF_TEXT1(0, trace, dbf_text);
		QDIO_DBF_TEXT1(0, setup, dbf_text);
		ptr = &scssc_area->response;
		QDIO_DBF_HEX2(1, setup, &ptr, QDIO_DBF_SETUP_LEN);
		return rc;
	}

	QDIO_DBF_TEXT2(0, setup, "setscind");
	QDIO_DBF_HEX2(0, setup, &scssc_area->summary_indicator_addr,
		      sizeof(unsigned long));
	QDIO_DBF_HEX2(0, setup, &scssc_area->subchannel_indicator_addr,
		      sizeof(unsigned long));
	return 0;
}

/* allocate non-shared indicators and shared indicator */
int __init tiqdio_allocate_memory(void)
{
	/* zero-initialized: all indicators start out clear and unused */
	q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS,
			       GFP_KERNEL);
	if (!q_indicators)
		return -ENOMEM;
	return 0;
}

/* free the indicator array allocated by tiqdio_allocate_memory() */
void tiqdio_free_memory(void)
{
	kfree(q_indicators);
}

/*
 * Register the adapter interrupt handler on the qdio ISC; on success
 * tiqdio_alsi points to the adapter local summary indicator.
 * NOTE(review): any registration error is reported as -ENOMEM; the
 * real error code is only kept in the debug feature entry.
 */
int __init tiqdio_register_thinints(void)
{
	char dbf_text[20];

	isc_register(QDIO_AIRQ_ISC);
	tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
						      NULL, QDIO_AIRQ_ISC);
	if (IS_ERR(tiqdio_alsi)) {
		sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_alsi));
		QDIO_DBF_TEXT0(0, setup, dbf_text);
		tiqdio_alsi = NULL;
		isc_unregister(QDIO_AIRQ_ISC);
		return -ENOMEM;
	}
	return 0;
}

/*
 * Establish the subchannel indicators for a thinint-capable device;
 * no-op (success) for non-thinint subchannels.
 */
int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return 0;

	/* Check for aif time delay disablement. If installed,
	 * omit SVS even under LPAR
	 */
	if (css_general_characteristics.aif_tdd)
		css_qdio_omit_svs = 1;
	return set_subchannel_ind(irq_ptr, 0);
}

/* allocate a device state change indicator for a thinint subchannel */
void qdio_setup_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;
	irq_ptr->dsci = get_indicator();
	QDIO_DBF_HEX1(0, setup, &irq_ptr->dsci, sizeof(void *));
}

/*
 * Undo qdio_setup_thinint()/qdio_establish_thinint().
 * NOTE(review): the dsci is released (put_indicator) before the
 * subchannel is told to stop using it (set_subchannel_ind reset) —
 * check whether the reset should happen first to avoid a window where
 * a freed indicator could still be written by the adapter.
 */
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;

	/* reset adapter interrupt indicators */
	put_indicator(irq_ptr->dsci);
	set_subchannel_ind(irq_ptr, 1);
}

/* module exit: stop the tasklet and unregister the adapter interrupt */
void __exit tiqdio_unregister_thinints(void)
{
	tasklet_disable(&tiqdio_tasklet);

	if (tiqdio_alsi) {
		s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
		isc_unregister(QDIO_AIRQ_ISC);
	}
}