// SPDX-License-Identifier: GPL-2.0
/*
 * qdio queue initialization
 *
 * Copyright IBM Corp. 2008
 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/io.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "chsc.h"
#include "qdio.h"
#include "qdio_debug.h"

#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))

static struct kmem_cache *qdio_q_cache;
static struct kmem_cache *qdio_aob_cache;

struct qaob *qdio_allocate_aob(void)
{
	return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
}

void qdio_release_aob(struct qaob *aob)
{
	kmem_cache_free(qdio_aob_cache, aob);
}
EXPORT_SYMBOL_GPL(qdio_release_aob);

/**
 * qdio_free_buffers() - free qdio buffers
 * @buf: array of pointers to qdio buffers
 * @count: number of qdio buffers to free
 */
void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count)
{
	int pos;

	for (pos = 0; pos < count; pos += QBUFF_PER_PAGE)
		free_page((unsigned long) buf[pos]);
}
EXPORT_SYMBOL_GPL(qdio_free_buffers);

/**
 * qdio_alloc_buffers() - allocate qdio buffers
 * @buf: array of pointers to qdio buffers
 * @count: number of qdio buffers to allocate
 */
int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count)
{
	int pos;

	for (pos = 0; pos < count; pos += QBUFF_PER_PAGE) {
		buf[pos] = (void *) get_zeroed_page(GFP_KERNEL);
		if (!buf[pos]) {
			qdio_free_buffers(buf, count);
			return -ENOMEM;
		}
	}
	for (pos = 0; pos < count; pos++)
		if (pos % QBUFF_PER_PAGE)
			buf[pos] = buf[pos - 1] + 1;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_alloc_buffers);

/**
 * qdio_reset_buffers() - reset qdio buffers
 * @buf: array of pointers to qdio buffers
 * @count: number of qdio buffers that will be zeroed
 */
void qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count)
{
	int pos;

	for (pos = 0; pos < count; pos++)
		memset(buf[pos], 0, sizeof(struct qdio_buffer));
}
EXPORT_SYMBOL_GPL(qdio_reset_buffers);
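
/*
 * Illustrative sketch only (not part of this driver): callers typically size
 * the pointer array to QDIO_MAX_BUFFERS_PER_Q per queue and pair the two
 * helpers above. The array name and error handling below are assumptions:
 *
 *	struct qdio_buffer *sbals[QDIO_MAX_BUFFERS_PER_Q];
 *
 *	if (qdio_alloc_buffers(sbals, QDIO_MAX_BUFFERS_PER_Q))
 *		return -ENOMEM;
 *	...
 *	qdio_free_buffers(sbals, QDIO_MAX_BUFFERS_PER_Q);
 */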

/*
 * qebsm is only available under 64bit but the adapter sets the feature
 * flag anyway, so we manually override it.
 */
static inline int qebsm_possible(void)
{
	return css_general_characteristics.qebsm;
}

/*
 * qib_param_field: pointer to 128 bytes or NULL, if no param field
 * input/output_slib_elements: pointer to nr_queues * 128 words of data or NULL
 */
static void set_impl_params(struct qdio_irq *irq_ptr,
			    unsigned int qib_param_field_format,
			    unsigned char *qib_param_field,
			    unsigned long *input_slib_elements,
			    unsigned long *output_slib_elements)
{
	struct qdio_q *q;
	int i, j;

	if (!irq_ptr)
		return;

	irq_ptr->qib.pfmt = qib_param_field_format;
	if (qib_param_field)
		memcpy(irq_ptr->qib.parm, qib_param_field,
		       sizeof(irq_ptr->qib.parm));

	if (!input_slib_elements)
		goto output;

	for_each_input_queue(irq_ptr, q, i) {
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
			q->slib->slibe[j].parms =
				input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
	}
output:
	if (!output_slib_elements)
		return;

	for_each_output_queue(irq_ptr, q, i) {
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
			q->slib->slibe[j].parms =
				output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
	}
}

static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
{
	struct qdio_q *q;
	int i;

	for (i = 0; i < nr_queues; i++) {
		q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
		if (!q->slib) {
			kmem_cache_free(qdio_q_cache, q);
			return -ENOMEM;
		}
		irq_ptr_qs[i] = q;
	}
	return 0;
}

int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs)
{
	int rc;

	rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);
	if (rc)
		return rc;
	rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
	return rc;
}

static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
			      qdio_handler_t *handler, int i)
{
	struct slib *slib = q->slib;

	/* queue must be cleared for qdio_establish */
	memset(q, 0, sizeof(*q));
	memset(slib, 0, PAGE_SIZE);
	q->slib = slib;
	q->irq_ptr = irq_ptr;
	q->mask = 1 << (31 - i);
	q->nr = i;
	q->handler = handler;
}
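
/*
 * Layout note (derived from __qdio_allocate_qs() above and
 * setup_storage_lists() below): each queue owns a single page that holds both
 * its SLIB and its storage list, with the SL starting at PAGE_SIZE / 2:
 *
 *	q->slib ----> +------------------+  start of page
 *	              | struct slib      |
 *	q->sl ------> +------------------+  q->slib + PAGE_SIZE / 2
 *	              | struct sl        |
 *	              +------------------+  end of page
 */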

static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
				struct qdio_buffer **sbals_array, int i)
{
	struct qdio_q *prev;
	int j;

	DBF_HEX(&q, sizeof(void *));
	q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);

	/* fill in sbal */
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
		q->sbal[j] = *sbals_array++;

	/* fill in slib */
	if (i > 0) {
		prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1]
			: irq_ptr->output_qs[i - 1];
		prev->slib->nsliba = (unsigned long)q->slib;
	}

	q->slib->sla = (unsigned long)q->sl;
	q->slib->slsba = (unsigned long)&q->slsb.val[0];

	/* fill in sl */
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
		q->sl->element[j].sbal = virt_to_phys(q->sbal[j]);
}

static void setup_queues(struct qdio_irq *irq_ptr,
			 struct qdio_initialize *qdio_init)
{
	struct qdio_q *q;
	struct qdio_buffer **input_sbal_array = qdio_init->input_sbal_addr_array;
	struct qdio_buffer **output_sbal_array = qdio_init->output_sbal_addr_array;
	struct qdio_outbuf_state *output_sbal_state_array =
				  qdio_init->output_sbal_state_array;
	int i;

	for_each_input_queue(irq_ptr, q, i) {
		DBF_EVENT("inq:%1d", i);
		setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);

		q->is_input_q = 1;

		setup_storage_lists(q, irq_ptr, input_sbal_array, i);
		input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;

		if (is_thinint_irq(irq_ptr)) {
			tasklet_init(&q->tasklet, tiqdio_inbound_processing,
				     (unsigned long) q);
		} else {
			tasklet_init(&q->tasklet, qdio_inbound_processing,
				     (unsigned long) q);
		}
	}

	for_each_output_queue(irq_ptr, q, i) {
		DBF_EVENT("outq:%1d", i);
		setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);

		q->u.out.sbal_state = output_sbal_state_array;
		output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;

		q->is_input_q = 0;
		setup_storage_lists(q, irq_ptr, output_sbal_array, i);
		output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;

		tasklet_init(&q->tasklet, qdio_outbound_processing,
			     (unsigned long) q);
		timer_setup(&q->u.out.timer, qdio_outbound_timer, 0);
	}
}

static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
{
	if (qdioac & AC1_SIGA_INPUT_NEEDED)
		irq_ptr->siga_flag.input = 1;
	if (qdioac & AC1_SIGA_OUTPUT_NEEDED)
		irq_ptr->siga_flag.output = 1;
	if (qdioac & AC1_SIGA_SYNC_NEEDED)
		irq_ptr->siga_flag.sync = 1;
	if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
		irq_ptr->siga_flag.sync_after_ai = 1;
	if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
		irq_ptr->siga_flag.sync_out_after_pci = 1;
}

static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
				  unsigned char qdioac, unsigned long token)
{
	if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM))
		goto no_qebsm;
	if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) ||
	    (!(qdioac & AC1_SC_QEBSM_ENABLED)))
		goto no_qebsm;

	irq_ptr->sch_token = token;

	DBF_EVENT("V=V:1");
	DBF_EVENT("%8lx", irq_ptr->sch_token);
	return;

no_qebsm:
	irq_ptr->sch_token = 0;
	irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
	DBF_EVENT("noV=V");
}
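
/*
 * Illustrative sketch only: when no qdio_irq exists yet, qdio_setup_get_ssqd()
 * below allocates a temporary chsc page itself, so a caller only needs a
 * subchannel id and a result buffer. "schid" and "rc" are assumed locals:
 *
 *	struct qdio_ssqd_desc ssqd;
 *	int rc;
 *
 *	rc = qdio_setup_get_ssqd(NULL, &schid, &ssqd);
 *	if (rc)
 *		return rc;
 */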

/*
 * If there is a qdio_irq we use the chsc_page and store the information
 * in the qdio_irq, otherwise we copy it to the specified structure.
 */
int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
			struct subchannel_id *schid,
			struct qdio_ssqd_desc *data)
{
	struct chsc_ssqd_area *ssqd;
	int rc;

	DBF_EVENT("getssqd:%4x", schid->sch_no);
	if (!irq_ptr) {
		ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL);
		if (!ssqd)
			return -ENOMEM;
	} else {
		ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
	}

	rc = chsc_ssqd(*schid, ssqd);
	if (rc)
		goto out;

	if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
	    !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
	    (ssqd->qdio_ssqd.sch != schid->sch_no))
		rc = -EINVAL;

	if (!rc)
		memcpy(data, &ssqd->qdio_ssqd, sizeof(*data));

out:
	if (!irq_ptr)
		free_page((unsigned long)ssqd);

	return rc;
}

void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
{
	unsigned char qdioac;
	int rc;

	rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, &irq_ptr->ssqd_desc);
	if (rc) {
		DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%x", rc);
		/* all flags set, worst case */
		qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED |
			 AC1_SIGA_SYNC_NEEDED;
	} else
		qdioac = irq_ptr->ssqd_desc.qdioac1;

	check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
	process_ac_flags(irq_ptr, qdioac);
	DBF_EVENT("ac 1:%2x 2:%4x", qdioac, irq_ptr->ssqd_desc.qdioac2);
	DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
}
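
/*
 * Note (derived from qdio_setup_ssqd_info() above): if the SSQD query fails,
 * qdioac falls back to requiring SIGA for input, output and sync. Presumably
 * this is the defensive choice: issuing SIGA when it is not strictly needed
 * costs performance but stays correct.
 */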

void qdio_release_memory(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	/*
	 * Must check queue array manually since irq_ptr->nr_input_qs /
	 * irq_ptr->nr_output_qs may not yet be set.
	 */
	for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
		q = irq_ptr->input_qs[i];
		if (q) {
			free_page((unsigned long) q->slib);
			kmem_cache_free(qdio_q_cache, q);
		}
	}
	for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
		q = irq_ptr->output_qs[i];
		if (q) {
			if (q->u.out.use_cq) {
				int n;

				for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) {
					struct qaob *aob = q->u.out.aobs[n];

					if (aob) {
						qdio_release_aob(aob);
						q->u.out.aobs[n] = NULL;
					}
				}

				qdio_disable_async_operation(&q->u.out);
			}
			free_page((unsigned long) q->slib);
			kmem_cache_free(qdio_q_cache, q);
		}
	}
	free_page((unsigned long) irq_ptr->qdr);
	free_page(irq_ptr->chsc_page);
	free_page((unsigned long) irq_ptr);
}

static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
				     struct qdio_q **irq_ptr_qs,
				     int i, int nr)
{
	irq_ptr->qdr->qdf0[i + nr].sliba =
		(unsigned long)irq_ptr_qs[i]->slib;

	irq_ptr->qdr->qdf0[i + nr].sla =
		(unsigned long)irq_ptr_qs[i]->sl;

	irq_ptr->qdr->qdf0[i + nr].slsba =
		(unsigned long)&irq_ptr_qs[i]->slsb.val[0];

	irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4;
	irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4;
	irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4;
	irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4;
}

static void setup_qdr(struct qdio_irq *irq_ptr,
		      struct qdio_initialize *qdio_init)
{
	int i;

	irq_ptr->qdr->qfmt = qdio_init->q_format;
	irq_ptr->qdr->ac = qdio_init->qdr_ac;
	irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
	irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
	irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
	irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
	irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
	irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;

	for (i = 0; i < qdio_init->no_input_qs; i++)
		__qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);

	for (i = 0; i < qdio_init->no_output_qs; i++)
		__qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i,
					 qdio_init->no_input_qs);
}
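
/*
 * Layout note (derived from setup_qdr() above): the QDR's qdf0[] array holds
 * one descriptor record per queue, input queues first, output queues
 * following at offset no_input_qs:
 *
 *	qdf0[0 .. no_input_qs - 1]                           input queues
 *	qdf0[no_input_qs .. no_input_qs + no_output_qs - 1]  output queues
 */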

static void setup_qib(struct qdio_irq *irq_ptr,
		      struct qdio_initialize *init_data)
{
	if (qebsm_possible())
		irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;

	irq_ptr->qib.rflags |= init_data->qib_rflags;

	irq_ptr->qib.qfmt = init_data->q_format;
	if (init_data->no_input_qs)
		irq_ptr->qib.isliba =
			(unsigned long)(irq_ptr->input_qs[0]->slib);
	if (init_data->no_output_qs)
		irq_ptr->qib.osliba =
			(unsigned long)(irq_ptr->output_qs[0]->slib);
	memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8);
}

int qdio_setup_irq(struct qdio_initialize *init_data)
{
	struct ciw *ciw;
	struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;

	memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
	memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
	memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
	memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
	memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));

	irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL;
	irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0;

	/* wipes qib.ac, required by ar7063 */
	memset(irq_ptr->qdr, 0, sizeof(struct qdr));

	irq_ptr->int_parm = init_data->int_parm;
	irq_ptr->nr_input_qs = init_data->no_input_qs;
	irq_ptr->nr_output_qs = init_data->no_output_qs;
	irq_ptr->cdev = init_data->cdev;
	irq_ptr->scan_threshold = init_data->scan_threshold;
	ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
	setup_queues(irq_ptr, init_data);

	if (init_data->irq_poll) {
		irq_ptr->irq_poll = init_data->irq_poll;
		set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
	} else {
		irq_ptr->irq_poll = NULL;
	}

	setup_qib(irq_ptr, init_data);
	qdio_setup_thinint(irq_ptr);
	set_impl_params(irq_ptr, init_data->qib_param_field_format,
			init_data->qib_param_field,
			init_data->input_slib_elements,
			init_data->output_slib_elements);

	/* fill input and output descriptors */
	setup_qdr(irq_ptr, init_data);

	/* qdr, qib, sls, slsbs, slibs, sbales are filled now */

	/* get qdio commands */
	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
	if (!ciw) {
		DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
		return -EINVAL;
	}
	irq_ptr->equeue = *ciw;

	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
	if (!ciw) {
		DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
		return -EINVAL;
	}
	irq_ptr->aqueue = *ciw;

	/* set new interrupt handler */
	spin_lock_irq(get_ccwdev_lock(irq_ptr->cdev));
	irq_ptr->orig_handler = init_data->cdev->handler;
	init_data->cdev->handler = qdio_int_handler;
	spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev));
	return 0;
}

void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
				struct ccw_device *cdev)
{
	char s[80];

	snprintf(s, 80, "qdio: %s %s on SC %x using "
		 "AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n",
		 dev_name(&cdev->dev),
		 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
			((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
		 irq_ptr->schid.sch_no,
		 is_thinint_irq(irq_ptr),
		 (irq_ptr->sch_token) ? 1 : 0,
		 pci_out_supported(irq_ptr) ? 1 : 0,
		 css_general_characteristics.aif_tdd,
		 (irq_ptr->siga_flag.input) ? "R" : " ",
		 (irq_ptr->siga_flag.output) ? "W" : " ",
		 (irq_ptr->siga_flag.sync) ? "S" : " ",
		 (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
		 (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
	printk(KERN_INFO "%s", s);
}

int qdio_enable_async_operation(struct qdio_output_q *outq)
{
	outq->aobs = kcalloc(QDIO_MAX_BUFFERS_PER_Q, sizeof(struct qaob *),
			     GFP_KERNEL);
	if (!outq->aobs) {
		outq->use_cq = 0;
		return -ENOMEM;
	}
	outq->use_cq = 1;
	return 0;
}

void qdio_disable_async_operation(struct qdio_output_q *q)
{
	kfree(q->aobs);
	q->aobs = NULL;
	q->use_cq = 0;
}
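
/*
 * Illustrative sketch only: the two helpers above are meant to be used as a
 * pair on an outbound queue. The caller shown here is an assumption, not
 * taken from this file:
 *
 *	if (qdio_enable_async_operation(&q->u.out) < 0)
 *		return -ENOMEM;
 *	...
 *	qdio_disable_async_operation(&q->u.out);
 */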

int __init qdio_setup_init(void)
{
	int rc;

	qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
					 256, 0, NULL);
	if (!qdio_q_cache)
		return -ENOMEM;

	qdio_aob_cache = kmem_cache_create("qdio_aob",
					   sizeof(struct qaob),
					   sizeof(struct qaob),
					   0,
					   NULL);
	if (!qdio_aob_cache) {
		rc = -ENOMEM;
		goto free_qdio_q_cache;
	}

	/* Check for OSA/FCP thin interrupts (bit 67). */
	DBF_EVENT("thinint:%1d",
		  (css_general_characteristics.aif_osa) ? 1 : 0);

	/* Check for QEBSM support in general (bit 58). */
	DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
	rc = 0;
out:
	return rc;
free_qdio_q_cache:
	kmem_cache_destroy(qdio_q_cache);
	goto out;
}

void qdio_setup_exit(void)
{
	kmem_cache_destroy(qdio_aob_cache);
	kmem_cache_destroy(qdio_q_cache);
}