/*
 * drivers/s390/cio/qdio_setup.c
 *
 * qdio queue initialization
 *
 * Copyright (C) IBM Corp. 2008
 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "chsc.h"
#include "qdio.h"
#include "qdio_debug.h"

static struct kmem_cache *qdio_q_cache;
static struct kmem_cache *qdio_aob_cache;

struct qaob *qdio_allocate_aob(void)
{
        return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(qdio_allocate_aob);

void qdio_release_aob(struct qaob *aob)
{
        kmem_cache_free(qdio_aob_cache, aob);
}
EXPORT_SYMBOL_GPL(qdio_release_aob);

/*
 * qebsm is only available under 64bit but the adapter sets the feature
 * flag anyway, so we manually override it.
 */
static inline int qebsm_possible(void)
{
#ifdef CONFIG_64BIT
        return css_general_characteristics.qebsm;
#endif
        return 0;
}

/*
 * qib_param_field: pointer to 128 bytes or NULL, if no param field
 * input/output_slib_elements: pointer to nr_queues * 128 words of data
 * or NULL
 */
static void set_impl_params(struct qdio_irq *irq_ptr,
                            unsigned int qib_param_field_format,
                            unsigned char *qib_param_field,
                            unsigned long *input_slib_elements,
                            unsigned long *output_slib_elements)
{
        struct qdio_q *q;
        int i, j;

        if (!irq_ptr)
                return;

        irq_ptr->qib.pfmt = qib_param_field_format;
        if (qib_param_field)
                memcpy(irq_ptr->qib.parm, qib_param_field,
                       QDIO_MAX_BUFFERS_PER_Q);

        if (!input_slib_elements)
                goto output;

        for_each_input_queue(irq_ptr, q, i) {
                for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
                        q->slib->slibe[j].parms =
                                input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
        }
output:
        if (!output_slib_elements)
                return;

        for_each_output_queue(irq_ptr, q, i) {
                for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
                        q->slib->slibe[j].parms =
                                output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
        }
}

static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
{
        struct qdio_q *q;
        int i;

        for (i = 0; i < nr_queues; i++) {
                q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
                if (!q)
                        return -ENOMEM;

                q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
                if (!q->slib) {
                        kmem_cache_free(qdio_q_cache, q);
                        return -ENOMEM;
                }
                irq_ptr_qs[i] = q;
        }
        return 0;
}

int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs)
{
        int rc;

        rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);
        if (rc)
                return rc;
        rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
        return rc;
}

static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
                              qdio_handler_t *handler, int i)
{
        struct slib *slib = q->slib;

        /* queue must be cleared for qdio_establish */
        memset(q, 0, sizeof(*q));
        memset(slib, 0, PAGE_SIZE);
        q->slib = slib;
        q->irq_ptr = irq_ptr;
        q->mask = 1 << (31 - i);
        q->nr = i;
        q->handler = handler;
}
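/*
 * Wire up the storage structures for one queue: the storage list (SL)
 * lives in the second half of the SLIB page, each SL element points to
 * one SBAL supplied by the upper-layer driver, and the SLIB records the
 * addresses of the SL and of the SLSB state array. Consecutive queues
 * chain their SLIBs via nsliba. SBALs must be 256-byte aligned, which
 * the BUG_ON below asserts.
 */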
static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
                                void **sbals_array, int i)
{
        struct qdio_q *prev;
        int j;

        DBF_HEX(&q, sizeof(void *));
        q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);

        /* fill in sbal */
        for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) {
                q->sbal[j] = *sbals_array++;
                BUG_ON((unsigned long)q->sbal[j] & 0xff);
        }

        /* fill in slib */
        if (i > 0) {
                prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1]
                        : irq_ptr->output_qs[i - 1];
                prev->slib->nsliba = (unsigned long)q->slib;
        }

        q->slib->sla = (unsigned long)q->sl;
        q->slib->slsba = (unsigned long)&q->slsb.val[0];

        /* fill in sl */
        for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
                q->sl->element[j].sbal = (unsigned long)q->sbal[j];
}

static void setup_queues(struct qdio_irq *irq_ptr,
                         struct qdio_initialize *qdio_init)
{
        struct qdio_q *q;
        void **input_sbal_array = qdio_init->input_sbal_addr_array;
        void **output_sbal_array = qdio_init->output_sbal_addr_array;
        struct qdio_outbuf_state *output_sbal_state_array =
                qdio_init->output_sbal_state_array;
        int i;

        for_each_input_queue(irq_ptr, q, i) {
                DBF_EVENT("inq:%1d", i);
                setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);

                q->is_input_q = 1;
                q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ?
                        qdio_init->queue_start_poll_array[i] : NULL;

                setup_storage_lists(q, irq_ptr, input_sbal_array, i);
                input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;

                if (is_thinint_irq(irq_ptr)) {
                        tasklet_init(&q->tasklet, tiqdio_inbound_processing,
                                     (unsigned long) q);
                } else {
                        tasklet_init(&q->tasklet, qdio_inbound_processing,
                                     (unsigned long) q);
                }
        }

        for_each_output_queue(irq_ptr, q, i) {
                DBF_EVENT("outq:%1d", i);
                setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);

                q->u.out.sbal_state = output_sbal_state_array;
                output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;

                q->is_input_q = 0;
                q->u.out.scan_threshold = qdio_init->scan_threshold;
                setup_storage_lists(q, irq_ptr, output_sbal_array, i);
                output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;

                tasklet_init(&q->tasklet, qdio_outbound_processing,
                             (unsigned long) q);
                setup_timer(&q->u.out.timer, (void(*)(unsigned long))
                            &qdio_outbound_timer, (unsigned long)q);
        }
}

static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
{
        if (qdioac & AC1_SIGA_INPUT_NEEDED)
                irq_ptr->siga_flag.input = 1;
        if (qdioac & AC1_SIGA_OUTPUT_NEEDED)
                irq_ptr->siga_flag.output = 1;
        if (qdioac & AC1_SIGA_SYNC_NEEDED)
                irq_ptr->siga_flag.sync = 1;
        if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
                irq_ptr->siga_flag.sync_after_ai = 1;
        if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
                irq_ptr->siga_flag.sync_out_after_pci = 1;
}

static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
                                  unsigned char qdioac, unsigned long token)
{
        if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM))
                goto no_qebsm;
        if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) ||
            (!(qdioac & AC1_SC_QEBSM_ENABLED)))
                goto no_qebsm;

        irq_ptr->sch_token = token;

        DBF_EVENT("V=V:1");
        DBF_EVENT("%8lx", irq_ptr->sch_token);
        return;

no_qebsm:
        irq_ptr->sch_token = 0;
        irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
        DBF_EVENT("noV=V");
}
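/*
 * The qdioac characteristics evaluated by process_ac_flags() and
 * check_and_setup_qebsm() originate from the subchannel's SSQD block,
 * which is retrieved from the channel subsystem via CHSC below.
 */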
/*
 * If there is a qdio_irq we use the chsc_page and store the information
 * in the qdio_irq, otherwise we copy it to the specified structure.
 */
int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
                        struct subchannel_id *schid,
                        struct qdio_ssqd_desc *data)
{
        struct chsc_ssqd_area *ssqd;
        int rc;

        DBF_EVENT("getssqd:%4x", schid->sch_no);
        if (irq_ptr != NULL) {
                ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
        } else {
                ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL);
                if (!ssqd)
                        return -ENOMEM;
        }
        memset(ssqd, 0, PAGE_SIZE);

        ssqd->request = (struct chsc_header) {
                .length = 0x0010,
                .code = 0x0024,
        };
        ssqd->first_sch = schid->sch_no;
        ssqd->last_sch = schid->sch_no;
        ssqd->ssid = schid->ssid;

        if (chsc(ssqd)) {
                rc = -EIO;
                goto out;
        }
        rc = chsc_error_from_response(ssqd->response.code);
        if (rc)
                goto out;

        if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
            !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
            (ssqd->qdio_ssqd.sch != schid->sch_no)) {
                rc = -EINVAL;
                goto out;
        }

        if (irq_ptr != NULL)
                memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd,
                       sizeof(struct qdio_ssqd_desc));
        else
                memcpy(data, &ssqd->qdio_ssqd,
                       sizeof(struct qdio_ssqd_desc));
out:
        /* don't leak the temporary page on the error paths */
        if (irq_ptr == NULL)
                free_page((unsigned long)ssqd);
        return rc;
}

void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
{
        unsigned char qdioac;
        int rc;

        rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, NULL);
        if (rc) {
                DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%x", rc);
                /* all flags set, worst case */
                qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED |
                         AC1_SIGA_SYNC_NEEDED;
        } else
                qdioac = irq_ptr->ssqd_desc.qdioac1;

        check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
        process_ac_flags(irq_ptr, qdioac);
        DBF_EVENT("qdioac:%4x", qdioac);
}
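/*
 * Release everything that qdio_allocate()/qdio_establish() may have set
 * up for this irq: the queues with their SLIB pages, any AOBs still
 * attached to a completion-queue-enabled output queue, the QDR, the
 * CHSC scratch page and finally the qdio_irq itself.
 */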
void qdio_release_memory(struct qdio_irq *irq_ptr)
{
        struct qdio_q *q;
        int i;

        /*
         * Must check queue array manually since irq_ptr->nr_input_qs /
         * irq_ptr->nr_output_qs may not yet be set.
         */
        for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
                q = irq_ptr->input_qs[i];
                if (q) {
                        free_page((unsigned long) q->slib);
                        kmem_cache_free(qdio_q_cache, q);
                }
        }
        for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
                q = irq_ptr->output_qs[i];
                if (q) {
                        if (q->u.out.use_cq) {
                                int n;

                                for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) {
                                        struct qaob *aob = q->u.out.aobs[n];
                                        if (aob) {
                                                qdio_release_aob(aob);
                                                q->u.out.aobs[n] = NULL;
                                        }
                                }

                                qdio_disable_async_operation(&q->u.out);
                        }
                        free_page((unsigned long) q->slib);
                        kmem_cache_free(qdio_q_cache, q);
                }
        }
        free_page((unsigned long) irq_ptr->qdr);
        free_page(irq_ptr->chsc_page);
        free_page((unsigned long) irq_ptr);
}

static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
                                     struct qdio_q **irq_ptr_qs,
                                     int i, int nr)
{
        irq_ptr->qdr->qdf0[i + nr].sliba =
                (unsigned long)irq_ptr_qs[i]->slib;

        irq_ptr->qdr->qdf0[i + nr].sla =
                (unsigned long)irq_ptr_qs[i]->sl;

        irq_ptr->qdr->qdf0[i + nr].slsba =
                (unsigned long)&irq_ptr_qs[i]->slsb.val[0];

        irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4;
        irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4;
        irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4;
        irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4;
}

static void setup_qdr(struct qdio_irq *irq_ptr,
                      struct qdio_initialize *qdio_init)
{
        int i;

        irq_ptr->qdr->qfmt = qdio_init->q_format;
        irq_ptr->qdr->ac = qdio_init->qdr_ac;
        irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
        irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
        irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
        irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
        irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
        irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;

        for (i = 0; i < qdio_init->no_input_qs; i++)
                __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);

        for (i = 0; i < qdio_init->no_output_qs; i++)
                __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i,
                                         qdio_init->no_input_qs);
}

static void setup_qib(struct qdio_irq *irq_ptr,
                      struct qdio_initialize *init_data)
{
        if (qebsm_possible())
                irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;

        irq_ptr->qib.rflags |= init_data->qib_rflags;

        irq_ptr->qib.qfmt = init_data->q_format;
        if (init_data->no_input_qs)
                irq_ptr->qib.isliba =
                        (unsigned long)(irq_ptr->input_qs[0]->slib);
        if (init_data->no_output_qs)
                irq_ptr->qib.osliba =
                        (unsigned long)(irq_ptr->output_qs[0]->slib);
        memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8);
}
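/*
 * Establish-time setup: clear any state left over from a previous
 * establish, wire up the queues, QIB and QDR, and fetch the EQUEUE and
 * AQUEUE command information words needed for the qdio CCW commands.
 * On failure all allocated memory is released again.
 */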
int qdio_setup_irq(struct qdio_initialize *init_data)
{
        struct ciw *ciw;
        struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
        int rc;

        memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
        memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
        memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
        memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
        memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));

        irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL;
        irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0;

        /* wipes qib.ac, required by ar7063 */
        memset(irq_ptr->qdr, 0, sizeof(struct qdr));

        irq_ptr->int_parm = init_data->int_parm;
        irq_ptr->nr_input_qs = init_data->no_input_qs;
        irq_ptr->nr_output_qs = init_data->no_output_qs;

        irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
        irq_ptr->cdev = init_data->cdev;
        setup_queues(irq_ptr, init_data);

        setup_qib(irq_ptr, init_data);
        qdio_setup_thinint(irq_ptr);
        set_impl_params(irq_ptr, init_data->qib_param_field_format,
                        init_data->qib_param_field,
                        init_data->input_slib_elements,
                        init_data->output_slib_elements);

        /* fill input and output descriptors */
        setup_qdr(irq_ptr, init_data);

        /* qdr, qib, sls, slsbs, slibs, sbales are filled now */

        /* get qdio commands */
        ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
        if (!ciw) {
                DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
                rc = -EINVAL;
                goto out_err;
        }
        irq_ptr->equeue = *ciw;

        ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
        if (!ciw) {
                DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
                rc = -EINVAL;
                goto out_err;
        }
        irq_ptr->aqueue = *ciw;

        /* set new interrupt handler */
        irq_ptr->orig_handler = init_data->cdev->handler;
        init_data->cdev->handler = qdio_int_handler;
        return 0;
out_err:
        qdio_release_memory(irq_ptr);
        return rc;
}

void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
                                struct ccw_device *cdev)
{
        char s[80];

        snprintf(s, sizeof(s), "qdio: %s %s on SC %x using "
                 "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s\n",
                 dev_name(&cdev->dev),
                 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
                        ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
                 irq_ptr->schid.sch_no,
                 is_thinint_irq(irq_ptr),
                 (irq_ptr->sch_token) ? 1 : 0,
                 (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0,
                 css_general_characteristics.aif_tdd,
                 (irq_ptr->siga_flag.input) ? "R" : " ",
                 (irq_ptr->siga_flag.output) ? "W" : " ",
                 (irq_ptr->siga_flag.sync) ? "S" : " ",
                 (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
                 (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
        printk(KERN_INFO "%s", s);
}

int qdio_enable_async_operation(struct qdio_output_q *outq)
{
        outq->aobs = kcalloc(QDIO_MAX_BUFFERS_PER_Q, sizeof(struct qaob *),
                             GFP_ATOMIC);
        if (!outq->aobs) {
                outq->use_cq = 0;
                return -ENOMEM;
        }
        outq->use_cq = 1;
        return 0;
}

void qdio_disable_async_operation(struct qdio_output_q *q)
{
        kfree(q->aobs);
        q->aobs = NULL;
        q->use_cq = 0;
}

int __init qdio_setup_init(void)
{
        int rc;

        qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
                                         256, 0, NULL);
        if (!qdio_q_cache)
                return -ENOMEM;

        qdio_aob_cache = kmem_cache_create("qdio_aob",
                                           sizeof(struct qaob),
                                           sizeof(struct qaob),
                                           0,
                                           NULL);
        if (!qdio_aob_cache) {
                rc = -ENOMEM;
                goto free_qdio_q_cache;
        }

        /* Check for OSA/FCP thin interrupts (bit 67). */
        DBF_EVENT("thinint:%1d",
                  (css_general_characteristics.aif_osa) ? 1 : 0);

        /* Check for QEBSM support in general (bit 58). */
        DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
        return 0;

free_qdio_q_cache:
        kmem_cache_destroy(qdio_q_cache);
        return rc;
}

void qdio_setup_exit(void)
{
        kmem_cache_destroy(qdio_aob_cache);
        kmem_cache_destroy(qdio_q_cache);
}