/*
 * qdio queue initialization
 *
 * Copyright IBM Corp. 2008
 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "chsc.h"
#include "qdio.h"
#include "qdio_debug.h"

#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))

static struct kmem_cache *qdio_q_cache;
static struct kmem_cache *qdio_aob_cache;

struct qaob *qdio_allocate_aob(void)
{
	return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(qdio_allocate_aob);

void qdio_release_aob(struct qaob *aob)
{
	kmem_cache_free(qdio_aob_cache, aob);
}
EXPORT_SYMBOL_GPL(qdio_release_aob);

/**
 * qdio_free_buffers() - free qdio buffers
 * @buf: array of pointers to qdio buffers
 * @count: number of qdio buffers to free
 */
void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count)
{
	int pos;

	for (pos = 0; pos < count; pos += QBUFF_PER_PAGE)
		free_page((unsigned long) buf[pos]);
}
EXPORT_SYMBOL_GPL(qdio_free_buffers);

/**
 * qdio_alloc_buffers() - allocate qdio buffers
 * @buf: array of pointers to qdio buffers
 * @count: number of qdio buffers to allocate
 */
int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count)
{
	int pos;

	for (pos = 0; pos < count; pos += QBUFF_PER_PAGE) {
		buf[pos] = (void *) get_zeroed_page(GFP_KERNEL);
		if (!buf[pos]) {
			qdio_free_buffers(buf, count);
			return -ENOMEM;
		}
	}
	for (pos = 0; pos < count; pos++)
		if (pos % QBUFF_PER_PAGE)
			buf[pos] = buf[pos - 1] + 1;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_alloc_buffers);

/**
 * qdio_reset_buffers() - reset qdio buffers
 * @buf: array of pointers to qdio buffers
 * @count: number of qdio buffers that will be zeroed
 */
void qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count)
{
	int pos;

	for (pos = 0; pos < count; pos++)
		memset(buf[pos], 0, sizeof(struct qdio_buffer));
}
EXPORT_SYMBOL_GPL(qdio_reset_buffers);
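/*
 * Illustrative sketch only, not part of this file: how a qdio driver would
 * typically pair the buffer helpers above for one queue.  The local array
 * name is an assumption made for the example; the helpers merely expect an
 * array of @count buffer pointers, backed page-wise with QBUFF_PER_PAGE
 * buffers per page.
 *
 *	struct qdio_buffer *sbals[QDIO_MAX_BUFFERS_PER_Q];
 *
 *	if (qdio_alloc_buffers(sbals, QDIO_MAX_BUFFERS_PER_Q))
 *		return -ENOMEM;	(any already-allocated pages were freed)
 *	...
 *	qdio_reset_buffers(sbals, QDIO_MAX_BUFFERS_PER_Q);
 *	...
 *	qdio_free_buffers(sbals, QDIO_MAX_BUFFERS_PER_Q);
 */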
/*
 * qebsm is only available under 64bit but the adapter sets the feature
 * flag anyway, so we manually override it.
 */
static inline int qebsm_possible(void)
{
#ifdef CONFIG_64BIT
	return css_general_characteristics.qebsm;
#endif
	return 0;
}

/*
 * qib_param_field: pointer to 128 bytes or NULL, if no param field
 * nr_input_qs: pointer to nr_queues*128 words of data or NULL
 */
static void set_impl_params(struct qdio_irq *irq_ptr,
			    unsigned int qib_param_field_format,
			    unsigned char *qib_param_field,
			    unsigned long *input_slib_elements,
			    unsigned long *output_slib_elements)
{
	struct qdio_q *q;
	int i, j;

	if (!irq_ptr)
		return;

	irq_ptr->qib.pfmt = qib_param_field_format;
	if (qib_param_field)
		memcpy(irq_ptr->qib.parm, qib_param_field,
		       QDIO_MAX_BUFFERS_PER_Q);

	if (!input_slib_elements)
		goto output;

	for_each_input_queue(irq_ptr, q, i) {
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
			q->slib->slibe[j].parms =
				input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
	}
output:
	if (!output_slib_elements)
		return;

	for_each_output_queue(irq_ptr, q, i) {
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
			q->slib->slibe[j].parms =
				output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
	}
}

static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
{
	struct qdio_q *q;
	int i;

	for (i = 0; i < nr_queues; i++) {
		q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
		if (!q->slib) {
			kmem_cache_free(qdio_q_cache, q);
			return -ENOMEM;
		}
		irq_ptr_qs[i] = q;
	}
	return 0;
}

int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs)
{
	int rc;

	rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);
	if (rc)
		return rc;
	rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
	return rc;
}

static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
			      qdio_handler_t *handler, int i)
{
	struct slib *slib = q->slib;

	/* queue must be cleared for qdio_establish */
	memset(q, 0, sizeof(*q));
	memset(slib, 0, PAGE_SIZE);
	q->slib = slib;
	q->irq_ptr = irq_ptr;
	q->mask = 1 << (31 - i);
	q->nr = i;
	q->handler = handler;
}
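/*
 * Layout note: the page allocated for q->slib in __qdio_allocate_qs() is
 * shared.  The storage-list-information block (SLIB) sits in the first half
 * of the page; setup_storage_lists() places the storage list (SL) with its
 * QDIO_MAX_BUFFERS_PER_Q SBAL addresses in the second half.
 */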
static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
				void **sbals_array, int i)
{
	struct qdio_q *prev;
	int j;

	DBF_HEX(&q, sizeof(void *));
	q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);

	/* fill in sbal */
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
		q->sbal[j] = *sbals_array++;

	/* fill in slib */
	if (i > 0) {
		prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1]
			: irq_ptr->output_qs[i - 1];
		prev->slib->nsliba = (unsigned long)q->slib;
	}

	q->slib->sla = (unsigned long)q->sl;
	q->slib->slsba = (unsigned long)&q->slsb.val[0];

	/* fill in sl */
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
		q->sl->element[j].sbal = (unsigned long)q->sbal[j];
}

static void setup_queues(struct qdio_irq *irq_ptr,
			 struct qdio_initialize *qdio_init)
{
	struct qdio_q *q;
	void **input_sbal_array = qdio_init->input_sbal_addr_array;
	void **output_sbal_array = qdio_init->output_sbal_addr_array;
	struct qdio_outbuf_state *output_sbal_state_array =
		qdio_init->output_sbal_state_array;
	int i;

	for_each_input_queue(irq_ptr, q, i) {
		DBF_EVENT("inq:%1d", i);
		setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);

		q->is_input_q = 1;
		q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ?
				qdio_init->queue_start_poll_array[i] : NULL;

		setup_storage_lists(q, irq_ptr, input_sbal_array, i);
		input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;

		if (is_thinint_irq(irq_ptr)) {
			tasklet_init(&q->tasklet, tiqdio_inbound_processing,
				     (unsigned long) q);
		} else {
			tasklet_init(&q->tasklet, qdio_inbound_processing,
				     (unsigned long) q);
		}
	}

	for_each_output_queue(irq_ptr, q, i) {
		DBF_EVENT("outq:%1d", i);
		setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);

		q->u.out.sbal_state = output_sbal_state_array;
		output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;

		q->is_input_q = 0;
		q->u.out.scan_threshold = qdio_init->scan_threshold;
		setup_storage_lists(q, irq_ptr, output_sbal_array, i);
		output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;

		tasklet_init(&q->tasklet, qdio_outbound_processing,
			     (unsigned long) q);
		setup_timer(&q->u.out.timer, (void(*)(unsigned long))
			    &qdio_outbound_timer, (unsigned long)q);
	}
}

static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
{
	if (qdioac & AC1_SIGA_INPUT_NEEDED)
		irq_ptr->siga_flag.input = 1;
	if (qdioac & AC1_SIGA_OUTPUT_NEEDED)
		irq_ptr->siga_flag.output = 1;
	if (qdioac & AC1_SIGA_SYNC_NEEDED)
		irq_ptr->siga_flag.sync = 1;
	if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
		irq_ptr->siga_flag.sync_after_ai = 1;
	if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
		irq_ptr->siga_flag.sync_out_after_pci = 1;
}

static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
				  unsigned char qdioac, unsigned long token)
{
	if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM))
		goto no_qebsm;
	if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) ||
	    (!(qdioac & AC1_SC_QEBSM_ENABLED)))
		goto no_qebsm;

	irq_ptr->sch_token = token;

	DBF_EVENT("V=V:1");
	DBF_EVENT("%8lx", irq_ptr->sch_token);
	return;

no_qebsm:
	irq_ptr->sch_token = 0;
	irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
	DBF_EVENT("noV=V");
}

/*
 * If there is a qdio_irq we use the chsc_page and store the information
 * in the qdio_irq, otherwise we copy it to the specified structure.
 */
int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
			struct subchannel_id *schid,
			struct qdio_ssqd_desc *data)
{
	struct chsc_ssqd_area *ssqd;
	int rc;

	DBF_EVENT("getssqd:%4x", schid->sch_no);
	if (!irq_ptr) {
		ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL);
		if (!ssqd)
			return -ENOMEM;
	} else {
		ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
	}

	rc = chsc_ssqd(*schid, ssqd);
	if (rc)
		goto out;

	if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
	    !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
	    (ssqd->qdio_ssqd.sch != schid->sch_no))
		rc = -EINVAL;

	if (!rc)
		memcpy(data, &ssqd->qdio_ssqd, sizeof(*data));

out:
	if (!irq_ptr)
		free_page((unsigned long)ssqd);

	return rc;
}

void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
{
	unsigned char qdioac;
	int rc;

	rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, &irq_ptr->ssqd_desc);
	if (rc) {
		DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%x", rc);
		/* all flags set, worst case */
		qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED |
			 AC1_SIGA_SYNC_NEEDED;
	} else
		qdioac = irq_ptr->ssqd_desc.qdioac1;

	check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
	process_ac_flags(irq_ptr, qdioac);
	DBF_EVENT("ac 1:%2x 2:%4x", qdioac, irq_ptr->ssqd_desc.qdioac2);
	DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
}

void qdio_release_memory(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	/*
	 * Must check the queue arrays manually since irq_ptr->nr_input_qs /
	 * irq_ptr->nr_output_qs may not yet be set.
	 */
	for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
		q = irq_ptr->input_qs[i];
		if (q) {
			free_page((unsigned long) q->slib);
			kmem_cache_free(qdio_q_cache, q);
		}
	}
	for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
		q = irq_ptr->output_qs[i];
		if (q) {
			if (q->u.out.use_cq) {
				int n;

				for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) {
					struct qaob *aob = q->u.out.aobs[n];
					if (aob) {
						qdio_release_aob(aob);
						q->u.out.aobs[n] = NULL;
					}
				}

				qdio_disable_async_operation(&q->u.out);
			}
			free_page((unsigned long) q->slib);
			kmem_cache_free(qdio_q_cache, q);
		}
	}
	free_page((unsigned long) irq_ptr->qdr);
	free_page(irq_ptr->chsc_page);
	free_page((unsigned long) irq_ptr);
}

static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
				     struct qdio_q **irq_ptr_qs,
				     int i, int nr)
{
	irq_ptr->qdr->qdf0[i + nr].sliba =
		(unsigned long)irq_ptr_qs[i]->slib;

	irq_ptr->qdr->qdf0[i + nr].sla =
		(unsigned long)irq_ptr_qs[i]->sl;

	irq_ptr->qdr->qdf0[i + nr].slsba =
		(unsigned long)&irq_ptr_qs[i]->slsb.val[0];

	irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4;
	irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4;
	irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4;
	irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4;
}

static void setup_qdr(struct qdio_irq *irq_ptr,
		      struct qdio_initialize *qdio_init)
{
	int i;

	irq_ptr->qdr->qfmt = qdio_init->q_format;
	irq_ptr->qdr->ac = qdio_init->qdr_ac;
	irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
	irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
	irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
	irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
	irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
	irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;

	for (i = 0; i < qdio_init->no_input_qs; i++)
		__qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);

	for (i = 0; i < qdio_init->no_output_qs; i++)
		__qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i,
					 qdio_init->no_input_qs);
}

static void setup_qib(struct qdio_irq *irq_ptr,
		      struct qdio_initialize *init_data)
{
	if (qebsm_possible())
		irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;

	irq_ptr->qib.rflags |= init_data->qib_rflags;

	irq_ptr->qib.qfmt = init_data->q_format;
	if (init_data->no_input_qs)
		irq_ptr->qib.isliba =
			(unsigned long)(irq_ptr->input_qs[0]->slib);
	if (init_data->no_output_qs)
		irq_ptr->qib.osliba =
			(unsigned long)(irq_ptr->output_qs[0]->slib);
	memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8);
}
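/*
 * Fill the qdio_irq with the establish-time data from init_data: reset the
 * per-subchannel state, set up the queues, QIB, thinint indicator and
 * implementation parameters, build the QDR, fetch the EQUEUE/AQUEUE command
 * information words and install qdio_int_handler.  On error the qdio memory
 * is released again via qdio_release_memory().
 */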
int qdio_setup_irq(struct qdio_initialize *init_data)
{
	struct ciw *ciw;
	struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
	int rc;

	memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
	memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
	memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
	memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
	memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));

	irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL;
	irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0;

	/* wipes qib.ac, required by ar7063 */
	memset(irq_ptr->qdr, 0, sizeof(struct qdr));

	irq_ptr->int_parm = init_data->int_parm;
	irq_ptr->nr_input_qs = init_data->no_input_qs;
	irq_ptr->nr_output_qs = init_data->no_output_qs;
	irq_ptr->cdev = init_data->cdev;
	ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
	setup_queues(irq_ptr, init_data);

	setup_qib(irq_ptr, init_data);
	qdio_setup_thinint(irq_ptr);
	set_impl_params(irq_ptr, init_data->qib_param_field_format,
			init_data->qib_param_field,
			init_data->input_slib_elements,
			init_data->output_slib_elements);

	/* fill input and output descriptors */
	setup_qdr(irq_ptr, init_data);

	/* qdr, qib, sls, slsbs, slibs, sbales are filled now */

	/* get qdio commands */
	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
	if (!ciw) {
		DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
		rc = -EINVAL;
		goto out_err;
	}
	irq_ptr->equeue = *ciw;

	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
	if (!ciw) {
		DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
		rc = -EINVAL;
		goto out_err;
	}
	irq_ptr->aqueue = *ciw;

	/* set new interrupt handler */
	irq_ptr->orig_handler = init_data->cdev->handler;
	init_data->cdev->handler = qdio_int_handler;
	return 0;
out_err:
	qdio_release_memory(irq_ptr);
	return rc;
}

void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
				struct ccw_device *cdev)
{
	char s[80];

	snprintf(s, 80, "qdio: %s %s on SC %x using "
		 "AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n",
		 dev_name(&cdev->dev),
		 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
			((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
		 irq_ptr->schid.sch_no,
		 is_thinint_irq(irq_ptr),
		 (irq_ptr->sch_token) ? 1 : 0,
		 (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0,
		 css_general_characteristics.aif_tdd,
		 (irq_ptr->siga_flag.input) ? "R" : " ",
		 (irq_ptr->siga_flag.output) ? "W" : " ",
		 (irq_ptr->siga_flag.sync) ? "S" : " ",
		 (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
		 (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
	printk(KERN_INFO "%s", s);
}

int qdio_enable_async_operation(struct qdio_output_q *outq)
{
	outq->aobs = kzalloc(sizeof(struct qaob *) * QDIO_MAX_BUFFERS_PER_Q,
			     GFP_ATOMIC);
	if (!outq->aobs) {
		outq->use_cq = 0;
		return -ENOMEM;
	}
	outq->use_cq = 1;
	return 0;
}

void qdio_disable_async_operation(struct qdio_output_q *q)
{
	kfree(q->aobs);
	q->aobs = NULL;
	q->use_cq = 0;
}

int __init qdio_setup_init(void)
{
	int rc;

	qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
					 256, 0, NULL);
	if (!qdio_q_cache)
		return -ENOMEM;

	qdio_aob_cache = kmem_cache_create("qdio_aob",
					   sizeof(struct qaob),
					   sizeof(struct qaob),
					   0,
					   NULL);
	if (!qdio_aob_cache) {
		rc = -ENOMEM;
		goto free_qdio_q_cache;
	}

	/* Check for OSA/FCP thin interrupts (bit 67). */
	DBF_EVENT("thinint:%1d",
		  (css_general_characteristics.aif_osa) ? 1 : 0);

	/* Check for QEBSM support in general (bit 58). */
	DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
	rc = 0;
out:
	return rc;
free_qdio_q_cache:
	kmem_cache_destroy(qdio_q_cache);
	goto out;
}

void qdio_setup_exit(void)
{
	kmem_cache_destroy(qdio_aob_cache);
	kmem_cache_destroy(qdio_q_cache);
}