// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *	      Frank Pavlic <fpavlic@de.ibm.com>,
 *	      Thomas Spatzier <tspat@de.ibm.com>,
 *	      Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
#include <net/sock.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	/*                   N  P  A  M  L  V                        H  */
	[QETH_DBF_SETUP] = {"qeth_setup",
			    8, 1, 8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]	 = {"qeth_control",
			    8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct kmem_cache *qeth_qdio_outbuf_cache;

static struct device *qeth_core_root_dev;
static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);
static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
			     struct qeth_qdio_out_buffer *buf,
			     enum iucv_tx_notify notification);
static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
				 int budget);

static void qeth_close_dev_handler(struct work_struct *work)
{
	struct qeth_card *card;

	card = container_of(work, struct qeth_card, close_dev_work);
	QETH_CARD_TEXT(card, 2, "cldevhdl");
	ccwgroup_set_offline(card->gdev);
}

static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return " OSN QDIO";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
	return " n/a";
}
/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "Virt.NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return "Virt.NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return "Virt.NIC OSM";
		case QETH_CARD_TYPE_OSX:
			return "Virt.NIC OSX";
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return "OSN";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			return "unknown";
		}
	}
	return "n/a";
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			      int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

static void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	struct qeth_qdio_q *queue = card->qdio.in_q;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.in_buf_pool.entry_list, list)
		list_del(&pool_entry->list);

	for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
		queue->bufs[i].pool_entry = NULL;
}

static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
		if (entry->elements[i])
			__free_page(entry->elements[i]);
	}

	kfree(entry);
}

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
				 init_list) {
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);
	}
}
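/* Allocate a pool entry with @pages pre-allocated pages that back the
 * elements of one inbound buffer. A partially allocated entry is freed
 * again on failure.
 */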
static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
{
	struct qeth_buffer_pool_entry *entry;
	unsigned int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	for (i = 0; i < pages; i++) {
		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);

		if (!entry->elements[i]) {
			qeth_free_pool_entry(entry);
			return NULL;
		}
	}

	return entry;
}

static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		struct qeth_buffer_pool_entry *entry;

		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}

		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
	}
	return 0;
}

int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
	struct qeth_buffer_pool_entry *entry, *tmp;
	int delta = count - pool->buf_count;
	LIST_HEAD(entries);

	QETH_CARD_TEXT(card, 2, "realcbp");

	/* Defer until queue is allocated: */
	if (!card->qdio.in_q)
		goto out;

	/* Remove entries from the pool: */
	while (delta < 0) {
		entry = list_first_entry(&pool->entry_list,
					 struct qeth_buffer_pool_entry,
					 init_list);
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);

		delta++;
	}

	/* Allocate additional entries: */
	while (delta > 0) {
		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			list_for_each_entry_safe(entry, tmp, &entries,
						 init_list) {
				list_del(&entry->init_list);
				qeth_free_pool_entry(entry);
			}

			return -ENOMEM;
		}

		list_add(&entry->init_list, &entries);

		delta--;
	}

	list_splice(&entries, &pool->entry_list);

out:
	card->qdio.in_buf_pool.buf_count = count;
	pool->buf_count = count;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
			     card->qdio.no_in_queues - 1, 0, 127);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
			goto out;
		}
	}
	rc = 0;
out:
	return rc;
}
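/* When requested, set up a Completion Queue as additional input queue.
 * The CQ delivers QAOBs for asynchronous TX completion; each outbound
 * queue also gets its slice of the shared out_bufstates array, in which
 * QDIO reports per-buffer state.
 */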
static int qeth_alloc_cq(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		int i;
		struct qdio_outbuf_state *outbuf_states;

		QETH_CARD_TEXT(card, 2, "cqon");
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
			rc = -1;
			goto kmsg_out;
		}
		card->qdio.no_in_queues = 2;
		card->qdio.out_bufstates =
			kcalloc(card->qdio.no_out_queues *
				QDIO_MAX_BUFFERS_PER_Q,
				sizeof(struct qdio_outbuf_state),
				GFP_KERNEL);
		outbuf_states = card->qdio.out_bufstates;
		if (outbuf_states == NULL) {
			rc = -1;
			goto free_cq_out;
		}
		for (i = 0; i < card->qdio.no_out_queues; ++i) {
			card->qdio.out_qs[i]->bufstates = outbuf_states;
			outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
		}
	} else {
		QETH_CARD_TEXT(card, 2, "nocq");
		card->qdio.c_q = NULL;
		card->qdio.no_in_queues = 1;
	}
	QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
	rc = 0;
out:
	return rc;
free_cq_out:
	qeth_free_qdio_queue(card->qdio.c_q);
	card->qdio.c_q = NULL;
kmsg_out:
	dev_err(&card->gdev->dev, "Failed to create completion queue\n");
	goto out;
}

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		--card->qdio.no_in_queues;
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
	kfree(card->qdio.out_bufstates);
	card->qdio.out_bufstates = NULL;
}

static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			      TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			      TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}

static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
					 int forced_cleanup)
{
	if (q->card->options.cq != QETH_CQ_ENABLED)
		return;

	if (q->bufs[bidx]->next_pending != NULL) {
		struct qeth_qdio_out_buffer *head = q->bufs[bidx];
		struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;

		while (c) {
			if (forced_cleanup ||
			    atomic_read(&c->state) == QETH_QDIO_BUF_EMPTY) {
				struct qeth_qdio_out_buffer *f = c;

				QETH_CARD_TEXT(f->q->card, 5, "fp");
				QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
				/* release here to avoid interleaving between
				   outbound tasklet and inbound tasklet
				   regarding notifications and lifecycle */
				qeth_tx_complete_buf(c, forced_cleanup, 0);

				c = f->next_pending;
				WARN_ON_ONCE(head->next_pending != f);
				head->next_pending = c;
				kmem_cache_free(qeth_qdio_outbuf_cache, f);
			} else {
				head = c;
				c = c->next_pending;
			}
		}
	}
}
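/* A QAOB from the CQ signals the async completion of a pending TX buffer.
 * Depending on how far the TX completion path has progressed (buffer still
 * PRIMED or PENDING), let that path finish the work. If it is already done
 * (NEED_QAOB), complete the buffer here: notify the af_iucv sockets, free
 * any dangling header allocations and mark the buffer EMPTY for reuse.
 */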
static void qeth_qdio_handle_aob(struct qeth_card *card,
				 unsigned long phys_aob_addr)
{
	enum qeth_qdio_out_buffer_state new_state = QETH_QDIO_BUF_QAOB_OK;
	struct qaob *aob;
	struct qeth_qdio_out_buffer *buffer;
	enum iucv_tx_notify notification;
	unsigned int i;

	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
	QETH_CARD_TEXT(card, 5, "haob");
	QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);

	if (aob->aorc) {
		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
		new_state = QETH_QDIO_BUF_QAOB_ERROR;
	}

	switch (atomic_xchg(&buffer->state, new_state)) {
	case QETH_QDIO_BUF_PRIMED:
		/* Faster than TX completion code, let it handle the async
		 * completion for us.
		 */
		break;
	case QETH_QDIO_BUF_PENDING:
		/* TX completion code is active and will handle the async
		 * completion for us.
		 */
		break;
	case QETH_QDIO_BUF_NEED_QAOB:
		/* TX completion code is already finished. */
		notification = qeth_compute_cq_notification(aob->aorc, 1);
		qeth_notify_skbs(buffer->q, buffer, notification);

		/* Free dangling allocations. The attached skbs are handled by
		 * qeth_cleanup_handled_pending().
		 */
		for (i = 0;
		     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
		     i++) {
			void *data = phys_to_virt(aob->sba[i]);

			if (data && buffer->is_header[i])
				kmem_cache_free(qeth_core_header_cache, data);
			buffer->is_header[i] = 0;
		}

		atomic_set(&buffer->state, QETH_QDIO_BUF_EMPTY);
		break;
	default:
		WARN_ON_ONCE(1);
	}

	qdio_release_aob(aob);
}

static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = (__u32) __pa(data);
}

static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (!rc) {
		channel->active_cmd = iob;
	} else {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		qeth_unlock_channel(card, channel);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
	}
	return rc;
}

static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list_entry, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list_entry);
	spin_unlock_irq(&card->lock);
}

void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}
EXPORT_SYMBOL_GPL(qeth_notify_cmd);
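/* The device reports which unicast addresses are local to it (via
 * REGISTER/UNREGISTER LOCAL ADDR events). Cache them in RCU-protected
 * hashtables, so that the TX path can cheaply test whether a packet's
 * next hop is such a local address.
 */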
static void qeth_flush_local_addrs4(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs4_lock);
	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs4_lock);
}

static void qeth_flush_local_addrs6(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs6_lock);
	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs6_lock);
}

static void qeth_flush_local_addrs(struct qeth_card *card)
{
	qeth_flush_local_addrs4(card);
	qeth_flush_local_addrs6(card);
}

static void qeth_add_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs4_lock);
}
static void qeth_add_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		addr->addr = cmd->addrs[i].addr;
		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs6_lock);
}

static void qeth_del_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
		unsigned int key = ipv4_addr_hash(addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
			if (tmp->addr.s6_addr32[3] == addr->addr) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_del_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
		u32 key = ipv6_addr_hash(&addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs6_lock);
}

static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	bool is_local = false;
	unsigned int key;
	__be32 next_hop;

	if (hash_empty(card->local_addrs4))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v4_rcu(skb, qeth_dst_check_rcu(skb, 4));
	key = ipv4_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
		if (tmp->addr.s6_addr32[3] == next_hop) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	struct in6_addr *next_hop;
	bool is_local = false;
	u32 key;

	if (hash_empty(card->local_addrs6))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v6_rcu(skb, qeth_dst_check_rcu(skb, 6));
	key = ipv6_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}
"%pI6c\n", &tmp->addr); 879 rcu_read_unlock(); 880 881 return 0; 882 } 883 884 DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr); 885 886 static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, 887 struct qeth_card *card) 888 { 889 const char *ipa_name; 890 int com = cmd->hdr.command; 891 892 ipa_name = qeth_get_ipa_cmd_name(com); 893 894 if (rc) 895 QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n", 896 ipa_name, com, CARD_DEVID(card), rc, 897 qeth_get_ipa_msg(rc)); 898 else 899 QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n", 900 ipa_name, com, CARD_DEVID(card)); 901 } 902 903 static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, 904 struct qeth_ipa_cmd *cmd) 905 { 906 QETH_CARD_TEXT(card, 5, "chkipad"); 907 908 if (IS_IPA_REPLY(cmd)) { 909 if (cmd->hdr.command != IPA_CMD_SETCCID && 910 cmd->hdr.command != IPA_CMD_DELCCID && 911 cmd->hdr.command != IPA_CMD_MODCCID && 912 cmd->hdr.command != IPA_CMD_SET_DIAG_ASS) 913 qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card); 914 return cmd; 915 } 916 917 /* handle unsolicited event: */ 918 switch (cmd->hdr.command) { 919 case IPA_CMD_STOPLAN: 920 if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) { 921 dev_err(&card->gdev->dev, 922 "Interface %s is down because the adjacent port is no longer in reflective relay mode\n", 923 netdev_name(card->dev)); 924 schedule_work(&card->close_dev_work); 925 } else { 926 dev_warn(&card->gdev->dev, 927 "The link for interface %s on CHPID 0x%X failed\n", 928 netdev_name(card->dev), card->info.chpid); 929 qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card); 930 netif_carrier_off(card->dev); 931 } 932 return NULL; 933 case IPA_CMD_STARTLAN: 934 dev_info(&card->gdev->dev, 935 "The link for %s on CHPID 0x%X has been restored\n", 936 netdev_name(card->dev), card->info.chpid); 937 if (card->info.hwtrap) 938 card->info.hwtrap = 2; 939 qeth_schedule_recovery(card); 940 return NULL; 941 case IPA_CMD_SETBRIDGEPORT_IQD: 942 case IPA_CMD_SETBRIDGEPORT_OSA: 943 case IPA_CMD_ADDRESS_CHANGE_NOTIF: 944 if (card->discipline->control_event_handler(card, cmd)) 945 return cmd; 946 return NULL; 947 case IPA_CMD_MODCCID: 948 return cmd; 949 case IPA_CMD_REGISTER_LOCAL_ADDR: 950 if (cmd->hdr.prot_version == QETH_PROT_IPV4) 951 qeth_add_local_addrs4(card, &cmd->data.local_addrs4); 952 else if (cmd->hdr.prot_version == QETH_PROT_IPV6) 953 qeth_add_local_addrs6(card, &cmd->data.local_addrs6); 954 955 QETH_CARD_TEXT(card, 3, "irla"); 956 return NULL; 957 case IPA_CMD_UNREGISTER_LOCAL_ADDR: 958 if (cmd->hdr.prot_version == QETH_PROT_IPV4) 959 qeth_del_local_addrs4(card, &cmd->data.local_addrs4); 960 else if (cmd->hdr.prot_version == QETH_PROT_IPV6) 961 qeth_del_local_addrs6(card, &cmd->data.local_addrs6); 962 963 QETH_CARD_TEXT(card, 3, "urla"); 964 return NULL; 965 default: 966 QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n"); 967 return cmd; 968 } 969 } 970 971 static void qeth_clear_ipacmd_list(struct qeth_card *card) 972 { 973 struct qeth_cmd_buffer *iob; 974 unsigned long flags; 975 976 QETH_CARD_TEXT(card, 4, "clipalst"); 977 978 spin_lock_irqsave(&card->lock, flags); 979 list_for_each_entry(iob, &card->cmd_waiter_list, list_entry) 980 qeth_notify_cmd(iob, -ECANCELED); 981 spin_unlock_irqrestore(&card->lock, flags); 982 } 983 984 static int qeth_check_idx_response(struct qeth_card *card, 985 unsigned char *buffer) 986 { 987 QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN); 988 if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) { 989 
static int qeth_check_idx_response(struct qeth_card *card,
				   unsigned char *buffer)
{
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
			dev_err(&card->gdev->dev,
				"The device does not support the configured transport mode\n");
			return -EPROTONOSUPPORT;
		}
		return -EIO;
	}
	return 0;
}

void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}
EXPORT_SYMBOL_GPL(qeth_put_cmd);

static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	qeth_put_cmd(iob);
}

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
				       unsigned int length, unsigned int ccws,
				       long timeout)
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	refcount_set(&iob->ref_count, 1);
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_alloc_cmd);
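/* Completion callback for the READ channel: filter out IDX errors and
 * unsolicited events, then match the response against the pending cmds
 * and run the requestor's reply callback.
 */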
static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
{
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_schedule_recovery(card);
		fallthrough;
	default:
		qeth_clear_ipacmd_list(card);
		goto err_idx;
	}

	cmd = __ipa_reply(iob);
	if (cmd) {
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
		if (IS_OSN(card) && card->osn_info.assist_cb &&
		    cmd->hdr.command != IPA_CMD_STARTLAN) {
			card->osn_info.assist_cb(card->dev, cmd);
			goto out;
		}
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list_entry) {
		if (tmp->match && tmp->match(tmp, iob)) {
			request = tmp;
			/* take the object outside the lock */
			qeth_get_cmd(request);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!request)
		goto out;

	reply = &request->reply;
	if (!reply->callback) {
		rc = 0;
		goto no_callback;
	}

	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
		/* Bail out when the requestor has already left: */
		rc = request->rc;
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
							(unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
	       QETH_PDU_HEADER_SEQ_NO(iob->data),
	       QETH_SEQ_NO_LENGTH);
	__qeth_issue_next_read(card);
err_idx:
	qeth_put_cmd(iob);
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
				     unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread))
		rc = -EPERM;
	else if (card->thread_start_mask & thread)
		rc = -EBUSY;
	else
		card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);

	return rc;
}

static void qeth_clear_thread_start_bit(struct qeth_card *card,
					unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}

static void qeth_clear_thread_running_bit(struct qeth_card *card,
					  unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}

int qeth_schedule_recovery(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "startrec");

	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		schedule_work(&card->kernel_thread_starter);

	return rc;
}
static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			 "failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
			       16, 1, irb, 64, 1);
		return -EIO;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return -EIO;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return -EIO;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return -EIO;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return -EIO;
	}
	return 0;
}

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
				struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
		return -EIO;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			 " on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
		return -ETIMEDOUT;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, " rc???");
		return PTR_ERR(irb);
	}
}
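/* Interrupt handler shared by all three CCW channels: identify the
 * channel and its active cmd, handle channel checks and unit checks,
 * and pass received data on to the cmd's completion callback.
 */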
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		     struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	if (intparm == 0) {
		QETH_CARD_TEXT(card, 5, "irqunsol");
	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
		QETH_CARD_TEXT(card, 5, "irqunexp");

		dev_err(&cdev->dev,
			"Received IRQ with intparm %lx, expected %px\n",
			intparm, channel->active_cmd);
		if (channel->active_cmd)
			qeth_cancel_cmd(channel->active_cmd, -EIO);
	} else {
		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
	}

	channel->active_cmd = NULL;
	qeth_unlock_channel(card, channel);

	rc = qeth_check_irb_error(card, cdev, irb);
	if (rc) {
		/* IO was terminated, free its resources. */
		if (iob)
			qeth_cancel_cmd(iob, rc);
		return;
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		channel->state = CH_STATE_STOPPED;
		wake_up(&card->wait_q);
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		channel->state = CH_STATE_HALTED;
		wake_up(&card->wait_q);
	}

	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
					  SCSW_FCTL_HALT_FUNC))) {
		qeth_cancel_cmd(iob, -ECANCELED);
		iob = NULL;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				 "The qeth device driver failed to recover "
				 "an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				       DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				       DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}

		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			return;
		}
	}

	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
			return;
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}
}

static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
			     struct qeth_qdio_out_buffer *buf,
			     enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (skb->sk && skb->sk->sk_family == PF_IUCV)
			iucv_sk(skb->sk)->sk_txnotify(skb, notification);
	}
}
static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
{
	struct qeth_qdio_out_q *queue = buf->q;
	struct sk_buff *skb;

	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
		qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	if (error) {
		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
	} else {
		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
	}

	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
		if (!error) {
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}

		napi_consume_skb(skb, budget);
	}
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
		atomic_dec(&queue->set_pci_flags_count);

	qeth_tx_complete_buf(buf, error, budget);

	for (i = 0; i < queue->max_elements; ++i) {
		void *data = phys_to_virt(buf->buffer->element[i].addr);

		if (data && buf->is_header[i])
			kmem_cache_free(qeth_core_header_cache, data);
		buf->is_header[i] = 0;
	}

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;
	buf->frames = 0;
	buf->bytes = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
	int j;

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;
		qeth_cleanup_handled_pending(q, j, 1);
		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
		if (free) {
			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}

static void qeth_drain_output_queues(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
	}
}
static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int max = single ? 1 : card->dev->num_tx_queues;

	if (card->qdio.no_out_queues == max)
		return;

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.no_out_queues = max;
}

static int qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;

	QETH_CARD_TEXT(card, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		return -ENOMEM;

	card->info.func_level = 0x4100 + chp_dsc->desc;

	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
		qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

	kfree(chp_dsc);
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
	return 0;
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

	/* inbound */
	card->qdio.no_in_queues = 1;
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (IS_IQD(card))
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, " %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					      kernel_thread_starter);
	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
						      QETH_RECOVER_THREAD);
		}
	}
}

static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "setupcrd");

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
	hash_init(card->rx_mode_addrs);
	hash_init(card->local_addrs4);
	hash_init(card->local_addrs6);
	spin_lock_init(&card->local_addrs4_lock);
	spin_lock_init(&card->local_addrs6_lock);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					      qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			   CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;

	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
					   qeth_debugfs_root);
	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
			    &qeth_debugfs_local_addr_fops);

	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_read_cmd:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}

static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
					      channel->state == CH_STATE_STOPPED,
					      QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}
static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
					      channel->state == CH_STATE_HALTED,
					      QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

static int qeth_stop_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	rc = ccw_device_set_offline(cdev);

	spin_lock_irq(get_ccwdev_lock(cdev));
	if (channel->active_cmd) {
		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
			channel->active_cmd);
		channel->active_cmd = NULL;
	}
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	return rc;
}

static int qeth_start_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	channel->state = CH_STATE_DOWN;
	atomic_set(&channel->irq_pending, 0);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	rc = ccw_device_set_online(cdev);
	if (rc)
		goto err;

	return 0;

err:
	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
	return rc;
}

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
			       QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (IS_IQD(card))
			rc = qdio_shutdown(CARD_DDEV(card),
					   QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
					   QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	return rc;
}
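/* For a z/VM NIC, query the hypervisor (DIAG 0x26C) for the VNIC's
 * protocol, to learn whether the NIC must be operated in layer 2 or
 * layer 3 mode.
 */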
"vmlayer"); 1867 1868 cpcmd("QUERY USERID", userid, sizeof(userid), &rc); 1869 if (rc) 1870 goto out; 1871 1872 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA); 1873 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA); 1874 if (!request || !response) { 1875 rc = -ENOMEM; 1876 goto out; 1877 } 1878 1879 ccw_device_get_id(CARD_RDEV(card), &id); 1880 request->resp_buf_len = sizeof(*response); 1881 request->resp_version = DIAG26C_VERSION6_VM65918; 1882 request->req_format = DIAG26C_VNIC_INFO; 1883 ASCEBC(userid, 8); 1884 memcpy(&request->sys_name, userid, 8); 1885 request->devno = id.devno; 1886 1887 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 1888 rc = diag26c(request, response, DIAG26C_PORT_VNIC); 1889 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 1890 if (rc) 1891 goto out; 1892 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response)); 1893 1894 if (request->resp_buf_len < sizeof(*response) || 1895 response->version != request->resp_version) { 1896 rc = -EIO; 1897 goto out; 1898 } 1899 1900 if (response->protocol == VNIC_INFO_PROT_L2) 1901 disc = QETH_DISCIPLINE_LAYER2; 1902 else if (response->protocol == VNIC_INFO_PROT_L3) 1903 disc = QETH_DISCIPLINE_LAYER3; 1904 1905 out: 1906 kfree(response); 1907 kfree(request); 1908 if (rc) 1909 QETH_CARD_TEXT_(card, 2, "err%x", rc); 1910 return disc; 1911 } 1912 1913 /* Determine whether the device requires a specific layer discipline */ 1914 static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card) 1915 { 1916 enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED; 1917 1918 if (IS_OSM(card) || IS_OSN(card)) 1919 disc = QETH_DISCIPLINE_LAYER2; 1920 else if (IS_VM_NIC(card)) 1921 disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 : 1922 qeth_vm_detect_layer(card); 1923 1924 switch (disc) { 1925 case QETH_DISCIPLINE_LAYER2: 1926 QETH_CARD_TEXT(card, 3, "force l2"); 1927 break; 1928 case QETH_DISCIPLINE_LAYER3: 1929 QETH_CARD_TEXT(card, 3, "force l3"); 1930 break; 1931 default: 1932 QETH_CARD_TEXT(card, 3, "force no"); 1933 } 1934 1935 return disc; 1936 } 1937 1938 static void qeth_set_blkt_defaults(struct qeth_card *card) 1939 { 1940 QETH_CARD_TEXT(card, 2, "cfgblkt"); 1941 1942 if (card->info.use_v1_blkt) { 1943 card->info.blkt.time_total = 0; 1944 card->info.blkt.inter_packet = 0; 1945 card->info.blkt.inter_packet_jumbo = 0; 1946 } else { 1947 card->info.blkt.time_total = 250; 1948 card->info.blkt.inter_packet = 5; 1949 card->info.blkt.inter_packet_jumbo = 15; 1950 } 1951 } 1952 1953 static void qeth_idx_init(struct qeth_card *card) 1954 { 1955 memset(&card->seqno, 0, sizeof(card->seqno)); 1956 1957 card->token.issuer_rm_w = 0x00010103UL; 1958 card->token.cm_filter_w = 0x00010108UL; 1959 card->token.cm_connection_w = 0x0001010aUL; 1960 card->token.ulp_filter_w = 0x0001010bUL; 1961 card->token.ulp_connection_w = 0x0001010dUL; 1962 1963 switch (card->info.type) { 1964 case QETH_CARD_TYPE_IQD: 1965 card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD; 1966 break; 1967 case QETH_CARD_TYPE_OSD: 1968 case QETH_CARD_TYPE_OSN: 1969 card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD; 1970 break; 1971 default: 1972 break; 1973 } 1974 } 1975 1976 static void qeth_idx_finalize_cmd(struct qeth_card *card, 1977 struct qeth_cmd_buffer *iob) 1978 { 1979 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr, 1980 QETH_SEQ_NO_LENGTH); 1981 if (iob->channel == &card->write) 1982 card->seqno.trans_hdr++; 1983 } 1984 1985 static int qeth_peer_func_level(int level) 1986 { 1987 if ((level & 0xff) == 8) 1988 return (level & 
static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

static void qeth_mpc_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_idx_finalize_cmd(card, iob);

	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);

	iob->callback = qeth_release_buffer_cb;
}

static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	/* MPC cmds are issued strictly in sequence. */
	return !IS_IPA(reply->data);
}

static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
						  const void *data,
						  unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;

	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
	if (!iob)
		return NULL;

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
		       iob->data);
	iob->finalize = qeth_mpc_finalize_cmd;
	iob->match = qeth_mpc_match_reply;
	return iob;
}

/**
 * qeth_send_control_data() - send control command to the card
 * @card:	qeth_card structure pointer
 * @iob:	qeth_cmd_buffer pointer
 * @reply_cb:	callback function pointer
 *  @cb_card:	pointer to the qeth_card structure
 *  @cb_reply:	pointer to the qeth_reply structure
 *  @cb_cmd:	pointer to the original iob for non-IPA
 *		commands, or to the qeth_ipa_cmd structure
 *		for the IPA commands.
 * @reply_param: private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
 * function must return
 *   > 0 if more reply blocks are expected,
 *     0 if the last or only reply block is received, and
 *   < 0 on error.
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
 */
static int qeth_send_control_data(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
{
	struct qeth_channel *channel = iob->channel;
	struct qeth_reply *reply = &iob->reply;
	long timeout = iob->timeout;
	int rc;

	QETH_CARD_TEXT(card, 2, "sendctl");

	reply->callback = reply_cb;
	reply->param = reply_param;

	timeout = wait_event_interruptible_timeout(card->wait_q,
						   qeth_trylock_channel(channel),
						   timeout);
	if (timeout <= 0) {
		qeth_put_cmd(iob);
-EINTR : -ETIME; 2078 } 2079 2080 if (iob->finalize) 2081 iob->finalize(card, iob); 2082 QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN)); 2083 2084 qeth_enqueue_cmd(card, iob); 2085 2086 /* This pairs with iob->callback, and keeps the iob alive after IO: */ 2087 qeth_get_cmd(iob); 2088 2089 QETH_CARD_TEXT(card, 6, "noirqpnd"); 2090 spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); 2091 rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob), 2092 (addr_t) iob, 0, 0, timeout); 2093 if (!rc) 2094 channel->active_cmd = iob; 2095 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); 2096 if (rc) { 2097 QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n", 2098 CARD_DEVID(card), rc); 2099 QETH_CARD_TEXT_(card, 2, " err%d", rc); 2100 qeth_dequeue_cmd(card, iob); 2101 qeth_put_cmd(iob); 2102 qeth_unlock_channel(card, channel); 2103 goto out; 2104 } 2105 2106 timeout = wait_for_completion_interruptible_timeout(&iob->done, 2107 timeout); 2108 if (timeout <= 0) 2109 rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME; 2110 2111 qeth_dequeue_cmd(card, iob); 2112 2113 if (reply_cb) { 2114 /* Wait until the callback for a late reply has completed: */ 2115 spin_lock_irq(&iob->lock); 2116 if (rc) 2117 /* Zap any callback that's still pending: */ 2118 iob->rc = rc; 2119 spin_unlock_irq(&iob->lock); 2120 } 2121 2122 if (!rc) 2123 rc = iob->rc; 2124 2125 out: 2126 qeth_put_cmd(iob); 2127 return rc; 2128 } 2129 2130 struct qeth_node_desc { 2131 struct node_descriptor nd1; 2132 struct node_descriptor nd2; 2133 struct node_descriptor nd3; 2134 }; 2135 2136 static void qeth_read_conf_data_cb(struct qeth_card *card, 2137 struct qeth_cmd_buffer *iob, 2138 unsigned int data_length) 2139 { 2140 struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data; 2141 int rc = 0; 2142 u8 *tag; 2143 2144 QETH_CARD_TEXT(card, 2, "cfgunit"); 2145 2146 if (data_length < sizeof(*nd)) { 2147 rc = -EINVAL; 2148 goto out; 2149 } 2150 2151 card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] && 2152 nd->nd1.plant[1] == _ascebc['M']; 2153 tag = (u8 *)&nd->nd1.tag; 2154 card->info.chpid = tag[0]; 2155 card->info.unit_addr2 = tag[1]; 2156 2157 tag = (u8 *)&nd->nd2.tag; 2158 card->info.cula = tag[1]; 2159 2160 card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 && 2161 nd->nd3.model[1] == 0xF0 && 2162 nd->nd3.model[2] >= 0xF1 && 2163 nd->nd3.model[2] <= 0xF4; 2164 2165 out: 2166 qeth_notify_cmd(iob, rc); 2167 qeth_put_cmd(iob); 2168 } 2169 2170 static int qeth_read_conf_data(struct qeth_card *card) 2171 { 2172 struct qeth_channel *channel = &card->data; 2173 struct qeth_cmd_buffer *iob; 2174 struct ciw *ciw; 2175 2176 /* scan for RCD command in extended SenseID data */ 2177 ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD); 2178 if (!ciw || ciw->cmd == 0) 2179 return -EOPNOTSUPP; 2180 if (ciw->count < sizeof(struct qeth_node_desc)) 2181 return -EINVAL; 2182 2183 iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT); 2184 if (!iob) 2185 return -ENOMEM; 2186 2187 iob->callback = qeth_read_conf_data_cb; 2188 qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length, 2189 iob->data); 2190 2191 return qeth_send_control_data(card, iob, NULL, NULL); 2192 } 2193 2194 static int qeth_idx_check_activate_response(struct qeth_card *card, 2195 struct qeth_channel *channel, 2196 struct qeth_cmd_buffer *iob) 2197 { 2198 int rc; 2199 2200 rc = qeth_check_idx_response(card, iob->data); 2201 if (rc) 2202 return rc; 2203 2204 if (QETH_IS_IDX_ACT_POS_REPLY(iob->data)) 2205 
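		/* positive reply: the IDX handshake for this channel succeeded */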
return 0; 2206 2207 /* negative reply: */ 2208 QETH_CARD_TEXT_(card, 2, "idxneg%c", 2209 QETH_IDX_ACT_CAUSE_CODE(iob->data)); 2210 2211 switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) { 2212 case QETH_IDX_ACT_ERR_EXCL: 2213 dev_err(&channel->ccwdev->dev, 2214 "The adapter is used exclusively by another host\n"); 2215 return -EBUSY; 2216 case QETH_IDX_ACT_ERR_AUTH: 2217 case QETH_IDX_ACT_ERR_AUTH_USER: 2218 dev_err(&channel->ccwdev->dev, 2219 "Setting the device online failed because of insufficient authorization\n"); 2220 return -EPERM; 2221 default: 2222 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n", 2223 CCW_DEVID(channel->ccwdev)); 2224 return -EIO; 2225 } 2226 } 2227 2228 static void qeth_idx_activate_read_channel_cb(struct qeth_card *card, 2229 struct qeth_cmd_buffer *iob, 2230 unsigned int data_length) 2231 { 2232 struct qeth_channel *channel = iob->channel; 2233 u16 peer_level; 2234 int rc; 2235 2236 QETH_CARD_TEXT(card, 2, "idxrdcb"); 2237 2238 rc = qeth_idx_check_activate_response(card, channel, iob); 2239 if (rc) 2240 goto out; 2241 2242 memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); 2243 if (peer_level != qeth_peer_func_level(card->info.func_level)) { 2244 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", 2245 CCW_DEVID(channel->ccwdev), 2246 card->info.func_level, peer_level); 2247 rc = -EINVAL; 2248 goto out; 2249 } 2250 2251 memcpy(&card->token.issuer_rm_r, 2252 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), 2253 QETH_MPC_TOKEN_LENGTH); 2254 memcpy(&card->info.mcl_level[0], 2255 QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH); 2256 2257 out: 2258 qeth_notify_cmd(iob, rc); 2259 qeth_put_cmd(iob); 2260 } 2261 2262 static void qeth_idx_activate_write_channel_cb(struct qeth_card *card, 2263 struct qeth_cmd_buffer *iob, 2264 unsigned int data_length) 2265 { 2266 struct qeth_channel *channel = iob->channel; 2267 u16 peer_level; 2268 int rc; 2269 2270 QETH_CARD_TEXT(card, 2, "idxwrcb"); 2271 2272 rc = qeth_idx_check_activate_response(card, channel, iob); 2273 if (rc) 2274 goto out; 2275 2276 memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); 2277 if ((peer_level & ~0x0100) != 2278 qeth_peer_func_level(card->info.func_level)) { 2279 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", 2280 CCW_DEVID(channel->ccwdev), 2281 card->info.func_level, peer_level); 2282 rc = -EINVAL; 2283 } 2284 2285 out: 2286 qeth_notify_cmd(iob, rc); 2287 qeth_put_cmd(iob); 2288 } 2289 2290 static void qeth_idx_setup_activate_cmd(struct qeth_card *card, 2291 struct qeth_cmd_buffer *iob) 2292 { 2293 u16 addr = (card->info.cula << 8) + card->info.unit_addr2; 2294 u8 port = ((u8)card->dev->dev_port) | 0x80; 2295 struct ccw1 *ccw = __ccw_from_cmd(iob); 2296 2297 qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE, 2298 iob->data); 2299 qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data); 2300 iob->finalize = qeth_idx_finalize_cmd; 2301 2302 port |= QETH_IDX_ACT_INVAL_FRAME; 2303 memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1); 2304 memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), 2305 &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH); 2306 memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2307 &card->info.func_level, 2); 2308 memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2); 2309 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2); 2310 } 2311 2312 static int qeth_idx_activate_read_channel(struct qeth_card *card) 2313 { 
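	/* IDX ACTIVATE is sent as a chained WRITE + READ CCW pair, built from
	 * the IDX_ACTIVATE_READ/WRITE template by qeth_idx_setup_activate_cmd().
	 * The read and write channel differ only in the template used and in
	 * the callback that validates the response.
	 */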
2314 struct qeth_channel *channel = &card->read; 2315 struct qeth_cmd_buffer *iob; 2316 int rc; 2317 2318 QETH_CARD_TEXT(card, 2, "idxread"); 2319 2320 iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT); 2321 if (!iob) 2322 return -ENOMEM; 2323 2324 memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE); 2325 qeth_idx_setup_activate_cmd(card, iob); 2326 iob->callback = qeth_idx_activate_read_channel_cb; 2327 2328 rc = qeth_send_control_data(card, iob, NULL, NULL); 2329 if (rc) 2330 return rc; 2331 2332 channel->state = CH_STATE_UP; 2333 return 0; 2334 } 2335 2336 static int qeth_idx_activate_write_channel(struct qeth_card *card) 2337 { 2338 struct qeth_channel *channel = &card->write; 2339 struct qeth_cmd_buffer *iob; 2340 int rc; 2341 2342 QETH_CARD_TEXT(card, 2, "idxwrite"); 2343 2344 iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT); 2345 if (!iob) 2346 return -ENOMEM; 2347 2348 memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE); 2349 qeth_idx_setup_activate_cmd(card, iob); 2350 iob->callback = qeth_idx_activate_write_channel_cb; 2351 2352 rc = qeth_send_control_data(card, iob, NULL, NULL); 2353 if (rc) 2354 return rc; 2355 2356 channel->state = CH_STATE_UP; 2357 return 0; 2358 } 2359 2360 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply, 2361 unsigned long data) 2362 { 2363 struct qeth_cmd_buffer *iob; 2364 2365 QETH_CARD_TEXT(card, 2, "cmenblcb"); 2366 2367 iob = (struct qeth_cmd_buffer *) data; 2368 memcpy(&card->token.cm_filter_r, 2369 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data), 2370 QETH_MPC_TOKEN_LENGTH); 2371 return 0; 2372 } 2373 2374 static int qeth_cm_enable(struct qeth_card *card) 2375 { 2376 struct qeth_cmd_buffer *iob; 2377 2378 QETH_CARD_TEXT(card, 2, "cmenable"); 2379 2380 iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE); 2381 if (!iob) 2382 return -ENOMEM; 2383 2384 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data), 2385 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); 2386 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data), 2387 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH); 2388 2389 return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL); 2390 } 2391 2392 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply, 2393 unsigned long data) 2394 { 2395 struct qeth_cmd_buffer *iob; 2396 2397 QETH_CARD_TEXT(card, 2, "cmsetpcb"); 2398 2399 iob = (struct qeth_cmd_buffer *) data; 2400 memcpy(&card->token.cm_connection_r, 2401 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data), 2402 QETH_MPC_TOKEN_LENGTH); 2403 return 0; 2404 } 2405 2406 static int qeth_cm_setup(struct qeth_card *card) 2407 { 2408 struct qeth_cmd_buffer *iob; 2409 2410 QETH_CARD_TEXT(card, 2, "cmsetup"); 2411 2412 iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE); 2413 if (!iob) 2414 return -ENOMEM; 2415 2416 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data), 2417 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); 2418 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data), 2419 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH); 2420 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data), 2421 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH); 2422 return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL); 2423 } 2424 2425 static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type) 2426 { 2427 if (link_type == QETH_LINK_TYPE_LANE_TR || 2428 link_type == QETH_LINK_TYPE_HSTR) { 2429 dev_err(&card->gdev->dev, "Unsupported Token Ring device\n"); 2430 return false; 2431 } 2432 2433 return true; 2434 } 2435 2436 static int 
qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu) 2437 { 2438 struct net_device *dev = card->dev; 2439 unsigned int new_mtu; 2440 2441 if (!max_mtu) { 2442 /* IQD needs accurate max MTU to set up its RX buffers: */ 2443 if (IS_IQD(card)) 2444 return -EINVAL; 2445 /* tolerate quirky HW: */ 2446 max_mtu = ETH_MAX_MTU; 2447 } 2448 2449 rtnl_lock(); 2450 if (IS_IQD(card)) { 2451 /* move any device with default MTU to new max MTU: */ 2452 new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu; 2453 2454 /* adjust RX buffer size to new max MTU: */ 2455 card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE; 2456 if (dev->max_mtu && dev->max_mtu != max_mtu) 2457 qeth_free_qdio_queues(card); 2458 } else { 2459 if (dev->mtu) 2460 new_mtu = dev->mtu; 2461 /* default MTUs for first setup: */ 2462 else if (IS_LAYER2(card)) 2463 new_mtu = ETH_DATA_LEN; 2464 else 2465 new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */ 2466 } 2467 2468 dev->max_mtu = max_mtu; 2469 dev->mtu = min(new_mtu, max_mtu); 2470 rtnl_unlock(); 2471 return 0; 2472 } 2473 2474 static int qeth_get_mtu_outof_framesize(int framesize) 2475 { 2476 switch (framesize) { 2477 case 0x4000: 2478 return 8192; 2479 case 0x6000: 2480 return 16384; 2481 case 0xa000: 2482 return 32768; 2483 case 0xffff: 2484 return 57344; 2485 default: 2486 return 0; 2487 } 2488 } 2489 2490 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, 2491 unsigned long data) 2492 { 2493 __u16 mtu, framesize; 2494 __u16 len; 2495 struct qeth_cmd_buffer *iob; 2496 u8 link_type = 0; 2497 2498 QETH_CARD_TEXT(card, 2, "ulpenacb"); 2499 2500 iob = (struct qeth_cmd_buffer *) data; 2501 memcpy(&card->token.ulp_filter_r, 2502 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data), 2503 QETH_MPC_TOKEN_LENGTH); 2504 if (IS_IQD(card)) { 2505 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2); 2506 mtu = qeth_get_mtu_outof_framesize(framesize); 2507 } else { 2508 mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data); 2509 } 2510 *(u16 *)reply->param = mtu; 2511 2512 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2); 2513 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) { 2514 memcpy(&link_type, 2515 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1); 2516 if (!qeth_is_supported_link_type(card, link_type)) 2517 return -EPROTONOSUPPORT; 2518 } 2519 2520 card->info.link_type = link_type; 2521 QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type); 2522 return 0; 2523 } 2524 2525 static u8 qeth_mpc_select_prot_type(struct qeth_card *card) 2526 { 2527 if (IS_OSN(card)) 2528 return QETH_PROT_OSN2; 2529 return IS_LAYER2(card) ? 
QETH_PROT_LAYER2 : QETH_PROT_TCPIP; 2530 } 2531 2532 static int qeth_ulp_enable(struct qeth_card *card) 2533 { 2534 u8 prot_type = qeth_mpc_select_prot_type(card); 2535 struct qeth_cmd_buffer *iob; 2536 u16 max_mtu; 2537 int rc; 2538 2539 QETH_CARD_TEXT(card, 2, "ulpenabl"); 2540 2541 iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE); 2542 if (!iob) 2543 return -ENOMEM; 2544 2545 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port; 2546 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1); 2547 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data), 2548 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2549 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data), 2550 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH); 2551 rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu); 2552 if (rc) 2553 return rc; 2554 return qeth_update_max_mtu(card, max_mtu); 2555 } 2556 2557 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, 2558 unsigned long data) 2559 { 2560 struct qeth_cmd_buffer *iob; 2561 2562 QETH_CARD_TEXT(card, 2, "ulpstpcb"); 2563 2564 iob = (struct qeth_cmd_buffer *) data; 2565 memcpy(&card->token.ulp_connection_r, 2566 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), 2567 QETH_MPC_TOKEN_LENGTH); 2568 if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), 2569 3)) { 2570 QETH_CARD_TEXT(card, 2, "olmlimit"); 2571 dev_err(&card->gdev->dev, "A connection could not be " 2572 "established because of an OLM limit\n"); 2573 return -EMLINK; 2574 } 2575 return 0; 2576 } 2577 2578 static int qeth_ulp_setup(struct qeth_card *card) 2579 { 2580 __u16 temp; 2581 struct qeth_cmd_buffer *iob; 2582 2583 QETH_CARD_TEXT(card, 2, "ulpsetup"); 2584 2585 iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE); 2586 if (!iob) 2587 return -ENOMEM; 2588 2589 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data), 2590 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2591 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data), 2592 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH); 2593 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data), 2594 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH); 2595 2596 memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2); 2597 temp = (card->info.cula << 8) + card->info.unit_addr2; 2598 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2); 2599 return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL); 2600 } 2601 2602 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) 2603 { 2604 struct qeth_qdio_out_buffer *newbuf; 2605 2606 newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC); 2607 if (!newbuf) 2608 return -ENOMEM; 2609 2610 newbuf->buffer = q->qdio_bufs[bidx]; 2611 skb_queue_head_init(&newbuf->skb_list); 2612 lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key); 2613 newbuf->q = q; 2614 newbuf->next_pending = q->bufs[bidx]; 2615 atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY); 2616 q->bufs[bidx] = newbuf; 2617 return 0; 2618 } 2619 2620 static void qeth_free_output_queue(struct qeth_qdio_out_q *q) 2621 { 2622 if (!q) 2623 return; 2624 2625 qeth_drain_output_queue(q, true); 2626 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2627 kfree(q); 2628 } 2629 2630 static struct qeth_qdio_out_q *qeth_alloc_output_queue(void) 2631 { 2632 struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL); 2633 2634 if (!q) 2635 return NULL; 2636 2637 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) { 2638 kfree(q); 2639 return NULL; 2640 } 
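	/* q->bufs[] stays empty here; qeth_alloc_qdio_queues() attaches the
	 * per-buffer metadata later via qeth_init_qdio_out_buf().
	 */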
2641 return q; 2642 } 2643 2644 static void qeth_tx_completion_timer(struct timer_list *timer) 2645 { 2646 struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer); 2647 2648 napi_schedule(&queue->napi); 2649 QETH_TXQ_STAT_INC(queue, completion_timer); 2650 } 2651 2652 static int qeth_alloc_qdio_queues(struct qeth_card *card) 2653 { 2654 int i, j; 2655 2656 QETH_CARD_TEXT(card, 2, "allcqdbf"); 2657 2658 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED, 2659 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED) 2660 return 0; 2661 2662 QETH_CARD_TEXT(card, 2, "inq"); 2663 card->qdio.in_q = qeth_alloc_qdio_queue(); 2664 if (!card->qdio.in_q) 2665 goto out_nomem; 2666 2667 /* inbound buffer pool */ 2668 if (qeth_alloc_buffer_pool(card)) 2669 goto out_freeinq; 2670 2671 /* outbound */ 2672 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2673 struct qeth_qdio_out_q *queue; 2674 2675 queue = qeth_alloc_output_queue(); 2676 if (!queue) 2677 goto out_freeoutq; 2678 QETH_CARD_TEXT_(card, 2, "outq %i", i); 2679 QETH_CARD_HEX(card, 2, &queue, sizeof(void *)); 2680 card->qdio.out_qs[i] = queue; 2681 queue->card = card; 2682 queue->queue_no = i; 2683 spin_lock_init(&queue->lock); 2684 timer_setup(&queue->timer, qeth_tx_completion_timer, 0); 2685 queue->coalesce_usecs = QETH_TX_COALESCE_USECS; 2686 queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES; 2687 queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT; 2688 2689 /* give outbound qeth_qdio_buffers their qdio_buffers */ 2690 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { 2691 WARN_ON(queue->bufs[j]); 2692 if (qeth_init_qdio_out_buf(queue, j)) 2693 goto out_freeoutqbufs; 2694 } 2695 } 2696 2697 /* completion */ 2698 if (qeth_alloc_cq(card)) 2699 goto out_freeoutq; 2700 2701 return 0; 2702 2703 out_freeoutqbufs: 2704 while (j > 0) { 2705 --j; 2706 kmem_cache_free(qeth_qdio_outbuf_cache, 2707 card->qdio.out_qs[i]->bufs[j]); 2708 card->qdio.out_qs[i]->bufs[j] = NULL; 2709 } 2710 out_freeoutq: 2711 while (i > 0) { 2712 qeth_free_output_queue(card->qdio.out_qs[--i]); 2713 card->qdio.out_qs[i] = NULL; 2714 } 2715 qeth_free_buffer_pool(card); 2716 out_freeinq: 2717 qeth_free_qdio_queue(card->qdio.in_q); 2718 card->qdio.in_q = NULL; 2719 out_nomem: 2720 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); 2721 return -ENOMEM; 2722 } 2723 2724 static void qeth_free_qdio_queues(struct qeth_card *card) 2725 { 2726 int i, j; 2727 2728 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == 2729 QETH_QDIO_UNINITIALIZED) 2730 return; 2731 2732 qeth_free_cq(card); 2733 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { 2734 if (card->qdio.in_q->bufs[j].rx_skb) 2735 dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb); 2736 } 2737 qeth_free_qdio_queue(card->qdio.in_q); 2738 card->qdio.in_q = NULL; 2739 /* inbound buffer pool */ 2740 qeth_free_buffer_pool(card); 2741 /* free outbound qdio_qs */ 2742 for (i = 0; i < card->qdio.no_out_queues; i++) { 2743 qeth_free_output_queue(card->qdio.out_qs[i]); 2744 card->qdio.out_qs[i] = NULL; 2745 } 2746 } 2747 2748 static void qeth_fill_qib_parms(struct qeth_card *card, 2749 struct qeth_qib_parms *parms) 2750 { 2751 struct qeth_qdio_out_q *queue; 2752 unsigned int i; 2753 2754 parms->pcit_magic[0] = 'P'; 2755 parms->pcit_magic[1] = 'C'; 2756 parms->pcit_magic[2] = 'I'; 2757 parms->pcit_magic[3] = 'T'; 2758 ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic)); 2759 parms->pcit_a = QETH_PCI_THRESHOLD_A(card); 2760 parms->pcit_b = QETH_PCI_THRESHOLD_B(card); 2761 parms->pcit_c = QETH_PCI_TIMER_VALUE(card); 2762 
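	/* BLKT: the blocking-threshold values chosen in qeth_set_blkt_defaults(): */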
	parms->blkt_magic[0] = 'B';
	parms->blkt_magic[1] = 'L';
	parms->blkt_magic[2] = 'K';
	parms->blkt_magic[3] = 'T';
	ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
	parms->blkt_total = card->info.blkt.time_total;
	parms->blkt_inter_packet = card->info.blkt.inter_packet;
	parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;

	/* Prio-queueing implicitly uses the default priorities: */
	if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
		return;

	parms->pque_magic[0] = 'P';
	parms->pque_magic[1] = 'Q';
	parms->pque_magic[2] = 'U';
	parms->pque_magic[3] = 'E';
	ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
	parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
	parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;

	qeth_for_each_output_queue(card, queue, i)
		parms->pque_priority[i] = queue->priority;
}

static int qeth_qdio_activate(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 3, "qdioact");
	return qdio_activate(CARD_DDEV(card));
}

static int qeth_dm_act(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "dmact");

	iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, NULL, NULL);
}

static int qeth_mpc_initialize(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "mpcinit");

	rc = qeth_issue_next_read(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		return rc;
	}
	rc = qeth_cm_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%d", rc);
		return rc;
	}
	rc = qeth_cm_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
		return rc;
	}
	rc = qeth_ulp_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
		return rc;
	}
	rc = qeth_ulp_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		return rc;
	}
	rc = qeth_alloc_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		return rc;
	}
	rc = qeth_qdio_establish(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		qeth_free_qdio_queues(card);
		return rc;
	}
	rc = qeth_qdio_activate(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "7err%d", rc);
		return rc;
	}
	rc = qeth_dm_act(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "8err%d", rc);
		return rc;
	}

	return 0;
}

static void qeth_print_status_message(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSM:
	case QETH_CARD_TYPE_OSX:
		/* VM uses a non-zero first character to indicate a
		 * HiperSockets-like reporting of the level;
		 * OSA sets the first character to zero.
		 */
		if (!card->info.mcl_level[0]) {
			sprintf(card->info.mcl_level, "%02x%02x",
				card->info.mcl_level[2],
				card->info.mcl_level[3]);
			break;
		}
		fallthrough;
	case QETH_CARD_TYPE_IQD:
		if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
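			/* The microcode level is reported in EBCDIC; convert
			 * it in place so the dev_info() below can print it.
			 */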
			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
						      card->info.mcl_level[0]];
			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
						      card->info.mcl_level[1]];
			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
						      card->info.mcl_level[2]];
			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
						      card->info.mcl_level[3]];
			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
		}
		break;
	default:
		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
	}
	dev_info(&card->gdev->dev,
		 "Device is a%s card%s%s%s\nwith link type %s.\n",
		 qeth_get_cardname(card),
		 (card->info.mcl_level[0]) ? " (level: " : "",
		 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
		 (card->info.mcl_level[0]) ? ")" : "",
		 qeth_get_cardname_short(card));
}

static void qeth_initialize_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;

	QETH_CARD_TEXT(card, 5, "inwrklst");

	list_for_each_entry(entry,
			    &card->qdio.init_pool.entry_list, init_list) {
		qeth_put_buffer_pool_entry(card, entry);
	}
}

static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
					struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;
	int i, free;

	if (list_empty(&card->qdio.in_buf_pool.entry_list))
		return NULL;

	list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
		free = 1;
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
			if (page_count(entry->elements[i]) > 1) {
				free = 0;
				break;
			}
		}
		if (free) {
			list_del_init(&entry->list);
			return entry;
		}
	}

	/* no free buffer in pool so take first one and swap pages */
	entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
				 struct qeth_buffer_pool_entry, list);
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		if (page_count(entry->elements[i]) > 1) {
			struct page *page = dev_alloc_page();

			if (!page)
				return NULL;

			__free_page(entry->elements[i]);
			entry->elements[i] = page;
			QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
		}
	}
	list_del_init(&entry->list);
	return entry;
}

static int qeth_init_input_buffer(struct qeth_card *card,
				  struct qeth_qdio_buffer *buf)
{
	struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
	int i;

	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
		buf->rx_skb = netdev_alloc_skb(card->dev,
					       ETH_HLEN +
					       sizeof(struct ipv6hdr));
		if (!buf->rx_skb)
			return -ENOMEM;
	}

	if (!pool_entry) {
		pool_entry = qeth_find_free_buffer_pool_entry(card);
		if (!pool_entry)
			return -ENOBUFS;

		buf->pool_entry = pool_entry;
	}

	/*
	 * since the buffer is accessed only from the input_tasklet
	 * there shouldn't be a need to synchronize; also, since we use
	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
	 * buffers
	 */
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		buf->buffer->element[i].length = PAGE_SIZE;
		buf->buffer->element[i].addr =
			page_to_phys(pool_entry->elements[i]);
		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
		else
			buf->buffer->element[i].eflags = 0;
		buf->buffer->element[i].sflags = 0;
	}
	return 0;
}

static unsigned int
qeth_tx_select_bulk_max(struct qeth_card *card, 3005 struct qeth_qdio_out_q *queue) 3006 { 3007 if (!IS_IQD(card) || 3008 qeth_iqd_is_mcast_queue(card, queue) || 3009 card->options.cq == QETH_CQ_ENABLED || 3010 qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd)) 3011 return 1; 3012 3013 return card->ssqd.mmwc ? card->ssqd.mmwc : 1; 3014 } 3015 3016 static int qeth_init_qdio_queues(struct qeth_card *card) 3017 { 3018 unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count; 3019 unsigned int i; 3020 int rc; 3021 3022 QETH_CARD_TEXT(card, 2, "initqdqs"); 3023 3024 /* inbound queue */ 3025 qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 3026 memset(&card->rx, 0, sizeof(struct qeth_rx)); 3027 3028 qeth_initialize_working_pool_list(card); 3029 /*give only as many buffers to hardware as we have buffer pool entries*/ 3030 for (i = 0; i < rx_bufs; i++) { 3031 rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); 3032 if (rc) 3033 return rc; 3034 } 3035 3036 card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs); 3037 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs); 3038 if (rc) { 3039 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 3040 return rc; 3041 } 3042 3043 /* completion */ 3044 rc = qeth_cq_init(card); 3045 if (rc) { 3046 return rc; 3047 } 3048 3049 /* outbound queue */ 3050 for (i = 0; i < card->qdio.no_out_queues; ++i) { 3051 struct qeth_qdio_out_q *queue = card->qdio.out_qs[i]; 3052 3053 qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 3054 queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card); 3055 queue->next_buf_to_fill = 0; 3056 queue->do_pack = 0; 3057 queue->prev_hdr = NULL; 3058 queue->coalesced_frames = 0; 3059 queue->bulk_start = 0; 3060 queue->bulk_count = 0; 3061 queue->bulk_max = qeth_tx_select_bulk_max(card, queue); 3062 atomic_set(&queue->used_buffers, 0); 3063 atomic_set(&queue->set_pci_flags_count, 0); 3064 netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i)); 3065 } 3066 return 0; 3067 } 3068 3069 static void qeth_ipa_finalize_cmd(struct qeth_card *card, 3070 struct qeth_cmd_buffer *iob) 3071 { 3072 qeth_mpc_finalize_cmd(card, iob); 3073 3074 /* override with IPA-specific values: */ 3075 __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++; 3076 } 3077 3078 void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, 3079 u16 cmd_length, 3080 bool (*match)(struct qeth_cmd_buffer *iob, 3081 struct qeth_cmd_buffer *reply)) 3082 { 3083 u8 prot_type = qeth_mpc_select_prot_type(card); 3084 u16 total_length = iob->length; 3085 3086 qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length, 3087 iob->data); 3088 iob->finalize = qeth_ipa_finalize_cmd; 3089 iob->match = match; 3090 3091 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); 3092 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2); 3093 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1); 3094 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2); 3095 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2); 3096 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), 3097 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); 3098 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2); 3099 } 3100 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); 3101 3102 static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob, 3103 struct qeth_cmd_buffer *reply) 3104 { 3105 struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply); 3106 3107 return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno); 3108 } 3109 3110 struct qeth_cmd_buffer 
*qeth_ipa_alloc_cmd(struct qeth_card *card, 3111 enum qeth_ipa_cmds cmd_code, 3112 enum qeth_prot_versions prot, 3113 unsigned int data_length) 3114 { 3115 struct qeth_cmd_buffer *iob; 3116 struct qeth_ipacmd_hdr *hdr; 3117 3118 data_length += offsetof(struct qeth_ipa_cmd, data); 3119 iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1, 3120 QETH_IPA_TIMEOUT); 3121 if (!iob) 3122 return NULL; 3123 3124 qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply); 3125 3126 hdr = &__ipa_cmd(iob)->hdr; 3127 hdr->command = cmd_code; 3128 hdr->initiator = IPA_CMD_INITIATOR_HOST; 3129 /* hdr->seqno is set by qeth_send_control_data() */ 3130 hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH; 3131 hdr->rel_adapter_no = (u8) card->dev->dev_port; 3132 hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1; 3133 hdr->param_count = 1; 3134 hdr->prot_version = prot; 3135 return iob; 3136 } 3137 EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd); 3138 3139 static int qeth_send_ipa_cmd_cb(struct qeth_card *card, 3140 struct qeth_reply *reply, unsigned long data) 3141 { 3142 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3143 3144 return (cmd->hdr.return_code) ? -EIO : 0; 3145 } 3146 3147 /** 3148 * qeth_send_ipa_cmd() - send an IPA command 3149 * 3150 * See qeth_send_control_data() for explanation of the arguments. 3151 */ 3152 3153 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, 3154 int (*reply_cb)(struct qeth_card *, struct qeth_reply*, 3155 unsigned long), 3156 void *reply_param) 3157 { 3158 int rc; 3159 3160 QETH_CARD_TEXT(card, 4, "sendipa"); 3161 3162 if (card->read_or_write_problem) { 3163 qeth_put_cmd(iob); 3164 return -EIO; 3165 } 3166 3167 if (reply_cb == NULL) 3168 reply_cb = qeth_send_ipa_cmd_cb; 3169 rc = qeth_send_control_data(card, iob, reply_cb, reply_param); 3170 if (rc == -ETIME) { 3171 qeth_clear_ipacmd_list(card); 3172 qeth_schedule_recovery(card); 3173 } 3174 return rc; 3175 } 3176 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); 3177 3178 static int qeth_send_startlan_cb(struct qeth_card *card, 3179 struct qeth_reply *reply, unsigned long data) 3180 { 3181 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3182 3183 if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE) 3184 return -ENETDOWN; 3185 3186 return (cmd->hdr.return_code) ? 
-EIO : 0; 3187 } 3188 3189 static int qeth_send_startlan(struct qeth_card *card) 3190 { 3191 struct qeth_cmd_buffer *iob; 3192 3193 QETH_CARD_TEXT(card, 2, "strtlan"); 3194 3195 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0); 3196 if (!iob) 3197 return -ENOMEM; 3198 return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL); 3199 } 3200 3201 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd) 3202 { 3203 if (!cmd->hdr.return_code) 3204 cmd->hdr.return_code = 3205 cmd->data.setadapterparms.hdr.return_code; 3206 return cmd->hdr.return_code; 3207 } 3208 3209 static int qeth_query_setadapterparms_cb(struct qeth_card *card, 3210 struct qeth_reply *reply, unsigned long data) 3211 { 3212 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3213 struct qeth_query_cmds_supp *query_cmd; 3214 3215 QETH_CARD_TEXT(card, 3, "quyadpcb"); 3216 if (qeth_setadpparms_inspect_rc(cmd)) 3217 return -EIO; 3218 3219 query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp; 3220 if (query_cmd->lan_type & 0x7f) { 3221 if (!qeth_is_supported_link_type(card, query_cmd->lan_type)) 3222 return -EPROTONOSUPPORT; 3223 3224 card->info.link_type = query_cmd->lan_type; 3225 QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type); 3226 } 3227 3228 card->options.adp.supported = query_cmd->supported_cmds; 3229 return 0; 3230 } 3231 3232 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, 3233 enum qeth_ipa_setadp_cmd adp_cmd, 3234 unsigned int data_length) 3235 { 3236 struct qeth_ipacmd_setadpparms_hdr *hdr; 3237 struct qeth_cmd_buffer *iob; 3238 3239 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4, 3240 data_length + 3241 offsetof(struct qeth_ipacmd_setadpparms, 3242 data)); 3243 if (!iob) 3244 return NULL; 3245 3246 hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr; 3247 hdr->cmdlength = sizeof(*hdr) + data_length; 3248 hdr->command_code = adp_cmd; 3249 hdr->used_total = 1; 3250 hdr->seq_no = 1; 3251 return iob; 3252 } 3253 3254 static int qeth_query_setadapterparms(struct qeth_card *card) 3255 { 3256 int rc; 3257 struct qeth_cmd_buffer *iob; 3258 3259 QETH_CARD_TEXT(card, 3, "queryadp"); 3260 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, 3261 SETADP_DATA_SIZEOF(query_cmds_supp)); 3262 if (!iob) 3263 return -ENOMEM; 3264 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); 3265 return rc; 3266 } 3267 3268 static int qeth_query_ipassists_cb(struct qeth_card *card, 3269 struct qeth_reply *reply, unsigned long data) 3270 { 3271 struct qeth_ipa_cmd *cmd; 3272 3273 QETH_CARD_TEXT(card, 2, "qipasscb"); 3274 3275 cmd = (struct qeth_ipa_cmd *) data; 3276 3277 switch (cmd->hdr.return_code) { 3278 case IPA_RC_SUCCESS: 3279 break; 3280 case IPA_RC_NOTSUPP: 3281 case IPA_RC_L2_UNSUPPORTED_CMD: 3282 QETH_CARD_TEXT(card, 2, "ipaunsup"); 3283 card->options.ipa4.supported |= IPA_SETADAPTERPARMS; 3284 card->options.ipa6.supported |= IPA_SETADAPTERPARMS; 3285 return -EOPNOTSUPP; 3286 default: 3287 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n", 3288 CARD_DEVID(card), cmd->hdr.return_code); 3289 return -EIO; 3290 } 3291 3292 if (cmd->hdr.prot_version == QETH_PROT_IPV4) 3293 card->options.ipa4 = cmd->hdr.assists; 3294 else if (cmd->hdr.prot_version == QETH_PROT_IPV6) 3295 card->options.ipa6 = cmd->hdr.assists; 3296 else 3297 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n", 3298 CARD_DEVID(card)); 3299 return 0; 3300 } 3301 3302 static int 
qeth_query_ipassists(struct qeth_card *card, 3303 enum qeth_prot_versions prot) 3304 { 3305 int rc; 3306 struct qeth_cmd_buffer *iob; 3307 3308 QETH_CARD_TEXT_(card, 2, "qipassi%i", prot); 3309 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0); 3310 if (!iob) 3311 return -ENOMEM; 3312 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); 3313 return rc; 3314 } 3315 3316 static int qeth_query_switch_attributes_cb(struct qeth_card *card, 3317 struct qeth_reply *reply, unsigned long data) 3318 { 3319 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3320 struct qeth_query_switch_attributes *attrs; 3321 struct qeth_switch_info *sw_info; 3322 3323 QETH_CARD_TEXT(card, 2, "qswiatcb"); 3324 if (qeth_setadpparms_inspect_rc(cmd)) 3325 return -EIO; 3326 3327 sw_info = (struct qeth_switch_info *)reply->param; 3328 attrs = &cmd->data.setadapterparms.data.query_switch_attributes; 3329 sw_info->capabilities = attrs->capabilities; 3330 sw_info->settings = attrs->settings; 3331 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities, 3332 sw_info->settings); 3333 return 0; 3334 } 3335 3336 int qeth_query_switch_attributes(struct qeth_card *card, 3337 struct qeth_switch_info *sw_info) 3338 { 3339 struct qeth_cmd_buffer *iob; 3340 3341 QETH_CARD_TEXT(card, 2, "qswiattr"); 3342 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES)) 3343 return -EOPNOTSUPP; 3344 if (!netif_carrier_ok(card->dev)) 3345 return -ENOMEDIUM; 3346 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0); 3347 if (!iob) 3348 return -ENOMEM; 3349 return qeth_send_ipa_cmd(card, iob, 3350 qeth_query_switch_attributes_cb, sw_info); 3351 } 3352 3353 struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card, 3354 enum qeth_diags_cmds sub_cmd, 3355 unsigned int data_length) 3356 { 3357 struct qeth_ipacmd_diagass *cmd; 3358 struct qeth_cmd_buffer *iob; 3359 3360 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE, 3361 DIAG_HDR_LEN + data_length); 3362 if (!iob) 3363 return NULL; 3364 3365 cmd = &__ipa_cmd(iob)->data.diagass; 3366 cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length; 3367 cmd->subcmd = sub_cmd; 3368 return iob; 3369 } 3370 EXPORT_SYMBOL_GPL(qeth_get_diag_cmd); 3371 3372 static int qeth_query_setdiagass_cb(struct qeth_card *card, 3373 struct qeth_reply *reply, unsigned long data) 3374 { 3375 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3376 u16 rc = cmd->hdr.return_code; 3377 3378 if (rc) { 3379 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc); 3380 return -EIO; 3381 } 3382 3383 card->info.diagass_support = cmd->data.diagass.ext; 3384 return 0; 3385 } 3386 3387 static int qeth_query_setdiagass(struct qeth_card *card) 3388 { 3389 struct qeth_cmd_buffer *iob; 3390 3391 QETH_CARD_TEXT(card, 2, "qdiagass"); 3392 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0); 3393 if (!iob) 3394 return -ENOMEM; 3395 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL); 3396 } 3397 3398 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid) 3399 { 3400 unsigned long info = get_zeroed_page(GFP_KERNEL); 3401 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info; 3402 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info; 3403 struct ccw_dev_id ccwid; 3404 int level; 3405 3406 tid->chpid = card->info.chpid; 3407 ccw_device_get_id(CARD_RDEV(card), &ccwid); 3408 tid->ssid = ccwid.ssid; 3409 tid->devno = ccwid.devno; 3410 if (!info) 3411 return; 3412 level = stsi(NULL, 0, 0, 0); 3413 if ((level >= 2) && 
(stsi(info222, 2, 2, 2) == 0))
		tid->lparnr = info222->lpar_number;
	if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
	}
	free_page(info);
}

static int qeth_hw_trap_cb(struct qeth_card *card,
			   struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;

	if (rc) {
		QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
		return -EIO;
	}
	return 0;
}

int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "diagtrap");
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	cmd->data.diagass.type = 1;
	cmd->data.diagass.action = action;
	switch (action) {
	case QETH_DIAGS_TRAP_ARM:
		cmd->data.diagass.options = 0x0003;
		cmd->data.diagass.ext = 0x00010000 +
			sizeof(struct qeth_trap_id);
		qeth_get_trap_id(card,
				 (struct qeth_trap_id *)cmd->data.diagass.cdata);
		break;
	case QETH_DIAGS_TRAP_DISARM:
		cmd->data.diagass.options = 0x0001;
		break;
	case QETH_DIAGS_TRAP_CAPTURE:
		break;
	}
	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
}

static int qeth_check_qdio_errors(struct qeth_card *card,
				  struct qdio_buffer *buf,
				  unsigned int qdio_error,
				  const char *dbftext)
{
	if (qdio_error) {
		QETH_CARD_TEXT(card, 2, dbftext);
		QETH_CARD_TEXT_(card, 2, " F15=%02X",
				buf->element[15].sflags);
		QETH_CARD_TEXT_(card, 2, " F14=%02X",
				buf->element[14].sflags);
		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
		if ((buf->element[15].sflags) == 0x12) {
			QETH_CARD_STAT_INC(card, rx_fifo_errors);
			return 0;
		} else
			return 1;
	}
	return 0;
}

static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
					 unsigned int count)
{
	struct qeth_qdio_q *queue = card->qdio.in_q;
	struct list_head *lh;
	int i;
	int rc;
	int newcount = 0;

	/* only requeue at a certain threshold to avoid SIGAs */
	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
		for (i = queue->next_buf_to_init;
		     i < queue->next_buf_to_init + count; ++i) {
			if (qeth_init_input_buffer(card,
				&queue->bufs[QDIO_BUFNR(i)])) {
				break;
			} else {
				newcount++;
			}
		}

		if (newcount < count) {
			/* we are short on memory, so we switch back to
			 * traditional skb allocation and drop packets
			 */
			atomic_set(&card->force_alloc_skb, 3);
			count = newcount;
		} else {
			atomic_add_unless(&card->force_alloc_skb, -1, 0);
		}

		if (!count) {
			i = 0;
			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
				i++;
			if (i == card->qdio.in_buf_pool.buf_count) {
				QETH_CARD_TEXT(card, 2, "qsarbw");
				schedule_delayed_work(
					&card->buffer_reclaim_work,
					QETH_RECLAIM_WORK_TIME);
			}
			return 0;
		}

		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
			     queue->next_buf_to_init, count);
		if (rc) {
			QETH_CARD_TEXT(card, 2, "qinberr");
		}
		queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
						     count);
		return count;
	}

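	/* Below the requeue threshold: hold the completed buffers back and
	 * wait for more to accumulate.
	 */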
	return 0;
}

static void qeth_buffer_reclaim_work(struct work_struct *work)
{
	struct qeth_card *card = container_of(to_delayed_work(work),
					      struct qeth_card,
					      buffer_reclaim_work);

	local_bh_disable();
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();
}

static void qeth_handle_send_error(struct qeth_card *card,
		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
{
	int sbalf15 = buffer->buffer->element[15].sflags;

	QETH_CARD_TEXT(card, 6, "hdsnderr");
	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");

	if (!qdio_err)
		return;

	if ((sbalf15 >= 15) && (sbalf15 <= 31))
		return;

	QETH_CARD_TEXT(card, 1, "lnkfail");
	QETH_CARD_TEXT_(card, 1, "%04x %02x",
			(u16)qdio_err, (u8)sbalf15);
}

/**
 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
 * @queue: queue to check for packing buffer
 *
 * Returns number of buffers that were prepared for flush.
 */
static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;

	buffer = queue->bufs[queue->next_buf_to_fill];
	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
	    (buffer->next_element_to_fill > 0)) {
		/* it's a packing buffer */
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
			QDIO_BUFNR(queue->next_buf_to_fill + 1);
		return 1;
	}
	return 0;
}

/*
 * Switch to packing state when the number of used buffers on a queue
 * reaches a certain limit.
 */
static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
{
	if (!queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    >= QETH_HIGH_WATERMARK_PACK) {
			/* switch non-PACKING -> PACKING */
			QETH_CARD_TEXT(queue->card, 6, "np->pack");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 1;
		}
	}
}

/*
 * Switches from packing to non-packing mode. If there is a packing
 * buffer on the queue, this buffer will be prepared to be flushed.
 * In that case 1 is returned to inform the caller. If no buffer
 * has to be flushed, zero is returned.
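 * qeth_check_outbound_queue() invokes this with queue->lock held.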
 */
static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
{
	if (queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    <= QETH_LOW_WATERMARK_PACK) {
			/* switch PACKING -> non-PACKING */
			QETH_CARD_TEXT(queue->card, 6, "pack->np");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 0;
			return qeth_prep_flush_pack_buffer(queue);
		}
	}
	return 0;
}

static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
			       int count)
{
	struct qeth_qdio_out_buffer *buf = queue->bufs[index];
	unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
	struct qeth_card *card = queue->card;
	int rc;
	int i;

	for (i = index; i < index + count; ++i) {
		unsigned int bidx = QDIO_BUFNR(i);
		struct sk_buff *skb;

		buf = queue->bufs[bidx];
		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
			SBAL_EFLAGS_LAST_ENTRY;
		queue->coalesced_frames += buf->frames;

		if (queue->bufstates)
			queue->bufstates[bidx].user = buf;

		if (IS_IQD(card)) {
			skb_queue_walk(&buf->skb_list, skb)
				skb_tx_timestamp(skb);
		}
	}

	if (!IS_IQD(card)) {
		if (!queue->do_pack) {
			if ((atomic_read(&queue->used_buffers) >=
				(QETH_HIGH_WATERMARK_PACK -
				 QETH_WATERMARK_PACK_FUZZ)) &&
			    !atomic_read(&queue->set_pci_flags_count)) {
				/* it's likely that we'll go to packing
				 * mode soon */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		} else {
			if (!atomic_read(&queue->set_pci_flags_count)) {
				/*
				 * there's no outstanding PCI any more, so we
				 * have to request a PCI to be sure that the
				 * PCI will wake at some time in the future;
				 * then we can flush packed buffers that might
				 * still be hanging around, which can happen
				 * if no further send was requested by the
				 * stack
				 */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		}

		if (atomic_read(&queue->set_pci_flags_count))
			qdio_flags |= QDIO_FLAG_PCI_OUT;
	}

	QETH_TXQ_STAT_INC(queue, doorbell);
	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
		     queue->queue_no, index, count);

	/* Fake the TX completion interrupt: */
	if (IS_IQD(card)) {
		unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
		unsigned int usecs = READ_ONCE(queue->coalesce_usecs);

		if (frames && queue->coalesced_frames >= frames) {
			napi_schedule(&queue->napi);
			queue->coalesced_frames = 0;
			QETH_TXQ_STAT_INC(queue, coal_frames);
		} else if (usecs) {
			qeth_tx_arm_timer(queue, usecs);
		}
	}

	if (rc) {
		/* ignore temporary SIGA errors without busy condition */
		if (rc == -ENOBUFS)
			return;
		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);

		/* this must not happen under normal circumstances. If it
		 * happens, something is really wrong -> recover */
		qeth_schedule_recovery(queue->card);
		return;
	}
}

static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
{
	qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);

	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
	queue->prev_hdr = NULL;
	queue->bulk_count = 0;
}

static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
	/*
	 * check if we have to switch to non-packing mode or if
	 * we have to get a PCI flag out on the queue
	 */
	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
	    !atomic_read(&queue->set_pci_flags_count)) {
		unsigned int index, flush_cnt;
		bool q_was_packing;

		spin_lock(&queue->lock);

		index = queue->next_buf_to_fill;
		q_was_packing = queue->do_pack;

		flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
		if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
			flush_cnt = qeth_prep_flush_pack_buffer(queue);

		if (flush_cnt) {
			qeth_flush_buffers(queue, index, flush_cnt);
			if (q_was_packing)
				QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
		}

		spin_unlock(&queue->lock);
	}
}

static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	napi_schedule_irqoff(&card->napi);
}

int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
	int rc;

	if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
		rc = -1;
		goto out;
	} else {
		if (card->options.cq == cq) {
			rc = 0;
			goto out;
		}

		qeth_free_qdio_queues(card);
		card->options.cq = cq;
		rc = 0;
	}
out:
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_configure_cq);

static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
				 unsigned int queue, int first_element,
				 int count)
{
	struct qeth_qdio_q *cq = card->qdio.c_q;
	int i;
	int rc;

	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);

	if (qdio_err) {
		netif_tx_stop_all_queues(card->dev);
		qeth_schedule_recovery(card);
		return;
	}

	for (i = first_element; i < first_element + count; ++i) {
		struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
		int e = 0;

		while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
		       buffer->element[e].addr) {
			unsigned long phys_aob_addr = buffer->element[e].addr;

			qeth_qdio_handle_aob(card, phys_aob_addr);
			++e;
		}
		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
	}
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
		     card->qdio.c_q->next_buf_to_init,
		     count);
	if (rc) {
		dev_warn(&card->gdev->dev,
			 "QDIO reported an error, rc=%i\n", rc);
		QETH_CARD_TEXT(card, 2, "qcqherr");
	}

	cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
}

static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
				    unsigned int qdio_err, int queue,
				    int first_elem, int count,
				    unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	QETH_CARD_TEXT_(card, 2, "qihq%d",
queue); 3844 QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err); 3845 3846 if (qdio_err) 3847 qeth_schedule_recovery(card); 3848 } 3849 3850 static void qeth_qdio_output_handler(struct ccw_device *ccwdev, 3851 unsigned int qdio_error, int __queue, 3852 int first_element, int count, 3853 unsigned long card_ptr) 3854 { 3855 struct qeth_card *card = (struct qeth_card *) card_ptr; 3856 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; 3857 struct net_device *dev = card->dev; 3858 struct netdev_queue *txq; 3859 int i; 3860 3861 QETH_CARD_TEXT(card, 6, "qdouhdl"); 3862 if (qdio_error & QDIO_ERROR_FATAL) { 3863 QETH_CARD_TEXT(card, 2, "achkcond"); 3864 netif_tx_stop_all_queues(dev); 3865 qeth_schedule_recovery(card); 3866 return; 3867 } 3868 3869 for (i = first_element; i < (first_element + count); ++i) { 3870 struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)]; 3871 3872 qeth_handle_send_error(card, buf, qdio_error); 3873 qeth_clear_output_buffer(queue, buf, qdio_error, 0); 3874 } 3875 3876 atomic_sub(count, &queue->used_buffers); 3877 qeth_check_outbound_queue(queue); 3878 3879 txq = netdev_get_tx_queue(dev, __queue); 3880 /* xmit may have observed the full-condition, but not yet stopped the 3881 * txq. In which case the code below won't trigger. So before returning, 3882 * xmit will re-check the txq's fill level and wake it up if needed. 3883 */ 3884 if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue)) 3885 netif_tx_wake_queue(txq); 3886 } 3887 3888 /** 3889 * Note: Function assumes that we have 4 outbound queues. 3890 */ 3891 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb) 3892 { 3893 struct vlan_ethhdr *veth = vlan_eth_hdr(skb); 3894 u8 tos; 3895 3896 switch (card->qdio.do_prio_queueing) { 3897 case QETH_PRIO_Q_ING_TOS: 3898 case QETH_PRIO_Q_ING_PREC: 3899 switch (qeth_get_ip_version(skb)) { 3900 case 4: 3901 tos = ipv4_get_dsfield(ip_hdr(skb)); 3902 break; 3903 case 6: 3904 tos = ipv6_get_dsfield(ipv6_hdr(skb)); 3905 break; 3906 default: 3907 return card->qdio.default_out_queue; 3908 } 3909 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC) 3910 return ~tos >> 6 & 3; 3911 if (tos & IPTOS_MINCOST) 3912 return 3; 3913 if (tos & IPTOS_RELIABILITY) 3914 return 2; 3915 if (tos & IPTOS_THROUGHPUT) 3916 return 1; 3917 if (tos & IPTOS_LOWDELAY) 3918 return 0; 3919 break; 3920 case QETH_PRIO_Q_ING_SKB: 3921 if (skb->priority > 5) 3922 return 0; 3923 return ~skb->priority >> 1 & 3; 3924 case QETH_PRIO_Q_ING_VLAN: 3925 if (veth->h_vlan_proto == htons(ETH_P_8021Q)) 3926 return ~ntohs(veth->h_vlan_TCI) >> 3927 (VLAN_PRIO_SHIFT + 1) & 3; 3928 break; 3929 case QETH_PRIO_Q_ING_FIXED: 3930 return card->qdio.default_out_queue; 3931 default: 3932 break; 3933 } 3934 return card->qdio.default_out_queue; 3935 } 3936 EXPORT_SYMBOL_GPL(qeth_get_priority_queue); 3937 3938 /** 3939 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags. 3940 * @skb: SKB address 3941 * 3942 * Returns the number of pages, and thus QDIO buffer elements, needed to cover 3943 * fragmented part of the SKB. Returns zero for linear SKB. 
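 * Each fragment is measured with qeth_get_elements_for_range(), so a
 * fragment that crosses page boundaries counts one element per page spanned.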
3944 */ 3945 static int qeth_get_elements_for_frags(struct sk_buff *skb) 3946 { 3947 int cnt, elements = 0; 3948 3949 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { 3950 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; 3951 3952 elements += qeth_get_elements_for_range( 3953 (addr_t)skb_frag_address(frag), 3954 (addr_t)skb_frag_address(frag) + skb_frag_size(frag)); 3955 } 3956 return elements; 3957 } 3958 3959 /** 3960 * qeth_count_elements() - Counts the number of QDIO buffer elements needed 3961 * to transmit an skb. 3962 * @skb: the skb to operate on. 3963 * @data_offset: skip this part of the skb's linear data 3964 * 3965 * Returns the number of pages, and thus QDIO buffer elements, needed to map the 3966 * skb's data (both its linear part and paged fragments). 3967 */ 3968 unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset) 3969 { 3970 unsigned int elements = qeth_get_elements_for_frags(skb); 3971 addr_t end = (addr_t)skb->data + skb_headlen(skb); 3972 addr_t start = (addr_t)skb->data + data_offset; 3973 3974 if (start != end) 3975 elements += qeth_get_elements_for_range(start, end); 3976 return elements; 3977 } 3978 EXPORT_SYMBOL_GPL(qeth_count_elements); 3979 3980 #define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \ 3981 MAX_TCP_HEADER) 3982 3983 /** 3984 * qeth_add_hw_header() - add a HW header to an skb. 3985 * @skb: skb that the HW header should be added to. 3986 * @hdr: double pointer to a qeth_hdr. When returning with >= 0, 3987 * it contains a valid pointer to a qeth_hdr. 3988 * @hdr_len: length of the HW header. 3989 * @proto_len: length of protocol headers that need to be in same page as the 3990 * HW header. 3991 * 3992 * Returns the pushed length. If the header can't be pushed on 3993 * (eg. because it would cross a page boundary), it is allocated from 3994 * the cache instead and 0 is returned. 3995 * The number of needed buffer elements is returned in @elements. 3996 * Error to create the hdr is indicated by returning with < 0. 3997 */ 3998 static int qeth_add_hw_header(struct qeth_qdio_out_q *queue, 3999 struct sk_buff *skb, struct qeth_hdr **hdr, 4000 unsigned int hdr_len, unsigned int proto_len, 4001 unsigned int *elements) 4002 { 4003 gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0); 4004 const unsigned int contiguous = proto_len ? proto_len : 1; 4005 const unsigned int max_elements = queue->max_elements; 4006 unsigned int __elements; 4007 addr_t start, end; 4008 bool push_ok; 4009 int rc; 4010 4011 check_layout: 4012 start = (addr_t)skb->data - hdr_len; 4013 end = (addr_t)skb->data; 4014 4015 if (qeth_get_elements_for_range(start, end + contiguous) == 1) { 4016 /* Push HW header into same page as first protocol header. */ 4017 push_ok = true; 4018 /* ... but TSO always needs a separate element for headers: */ 4019 if (skb_is_gso(skb)) 4020 __elements = 1 + qeth_count_elements(skb, proto_len); 4021 else 4022 __elements = qeth_count_elements(skb, 0); 4023 } else if (!proto_len && PAGE_ALIGNED(skb->data)) { 4024 /* Push HW header into preceding page, flush with skb->data. */ 4025 push_ok = true; 4026 __elements = 1 + qeth_count_elements(skb, 0); 4027 } else { 4028 /* Use header cache, copy protocol headers up. */ 4029 push_ok = false; 4030 __elements = 1 + qeth_count_elements(skb, proto_len); 4031 } 4032 4033 /* Compress skb to fit into one IO buffer: */ 4034 if (__elements > max_elements) { 4035 if (!skb_is_nonlinear(skb)) { 4036 /* Drop it, no easy way of shrinking it further. 
*/ 4037 QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n", 4038 max_elements, __elements, skb->len); 4039 return -E2BIG; 4040 } 4041 4042 rc = skb_linearize(skb); 4043 if (rc) { 4044 QETH_TXQ_STAT_INC(queue, skbs_linearized_fail); 4045 return rc; 4046 } 4047 4048 QETH_TXQ_STAT_INC(queue, skbs_linearized); 4049 /* Linearization changed the layout, re-evaluate: */ 4050 goto check_layout; 4051 } 4052 4053 *elements = __elements; 4054 /* Add the header: */ 4055 if (push_ok) { 4056 *hdr = skb_push(skb, hdr_len); 4057 return hdr_len; 4058 } 4059 4060 /* Fall back to cache element with known-good alignment: */ 4061 if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE) 4062 return -E2BIG; 4063 *hdr = kmem_cache_alloc(qeth_core_header_cache, gfp); 4064 if (!*hdr) 4065 return -ENOMEM; 4066 /* Copy protocol headers behind HW header: */ 4067 skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len); 4068 return 0; 4069 } 4070 4071 static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue, 4072 struct sk_buff *curr_skb, 4073 struct qeth_hdr *curr_hdr) 4074 { 4075 struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start]; 4076 struct qeth_hdr *prev_hdr = queue->prev_hdr; 4077 4078 if (!prev_hdr) 4079 return true; 4080 4081 /* All packets must have the same target: */ 4082 if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { 4083 struct sk_buff *prev_skb = skb_peek(&buffer->skb_list); 4084 4085 return ether_addr_equal(eth_hdr(prev_skb)->h_dest, 4086 eth_hdr(curr_skb)->h_dest) && 4087 qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2); 4088 } 4089 4090 return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) && 4091 qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3); 4092 } 4093 4094 /** 4095 * qeth_fill_buffer() - map skb into an output buffer 4096 * @buf: buffer to transport the skb 4097 * @skb: skb to map into the buffer 4098 * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated 4099 * from qeth_core_header_cache. 
4100 * @offset: when mapping the skb, start at skb->data + offset 4101 * @hd_len: if > 0, build a dedicated header element of this size 4102 */ 4103 static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf, 4104 struct sk_buff *skb, struct qeth_hdr *hdr, 4105 unsigned int offset, unsigned int hd_len) 4106 { 4107 struct qdio_buffer *buffer = buf->buffer; 4108 int element = buf->next_element_to_fill; 4109 int length = skb_headlen(skb) - offset; 4110 char *data = skb->data + offset; 4111 unsigned int elem_length, cnt; 4112 bool is_first_elem = true; 4113 4114 __skb_queue_tail(&buf->skb_list, skb); 4115 4116 /* build dedicated element for HW Header */ 4117 if (hd_len) { 4118 is_first_elem = false; 4119 4120 buffer->element[element].addr = virt_to_phys(hdr); 4121 buffer->element[element].length = hd_len; 4122 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; 4123 4124 /* HW header is allocated from cache: */ 4125 if ((void *)hdr != skb->data) 4126 buf->is_header[element] = 1; 4127 /* HW header was pushed and is contiguous with linear part: */ 4128 else if (length > 0 && !PAGE_ALIGNED(data) && 4129 (data == (char *)hdr + hd_len)) 4130 buffer->element[element].eflags |= 4131 SBAL_EFLAGS_CONTIGUOUS; 4132 4133 element++; 4134 } 4135 4136 /* map linear part into buffer element(s) */ 4137 while (length > 0) { 4138 elem_length = min_t(unsigned int, length, 4139 PAGE_SIZE - offset_in_page(data)); 4140 4141 buffer->element[element].addr = virt_to_phys(data); 4142 buffer->element[element].length = elem_length; 4143 length -= elem_length; 4144 if (is_first_elem) { 4145 is_first_elem = false; 4146 if (length || skb_is_nonlinear(skb)) 4147 /* skb needs additional elements */ 4148 buffer->element[element].eflags = 4149 SBAL_EFLAGS_FIRST_FRAG; 4150 else 4151 buffer->element[element].eflags = 0; 4152 } else { 4153 buffer->element[element].eflags = 4154 SBAL_EFLAGS_MIDDLE_FRAG; 4155 } 4156 4157 data += elem_length; 4158 element++; 4159 } 4160 4161 /* map page frags into buffer element(s) */ 4162 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { 4163 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; 4164 4165 data = skb_frag_address(frag); 4166 length = skb_frag_size(frag); 4167 while (length > 0) { 4168 elem_length = min_t(unsigned int, length, 4169 PAGE_SIZE - offset_in_page(data)); 4170 4171 buffer->element[element].addr = virt_to_phys(data); 4172 buffer->element[element].length = elem_length; 4173 buffer->element[element].eflags = 4174 SBAL_EFLAGS_MIDDLE_FRAG; 4175 4176 length -= elem_length; 4177 data += elem_length; 4178 element++; 4179 } 4180 } 4181 4182 if (buffer->element[element - 1].eflags) 4183 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG; 4184 buf->next_element_to_fill = element; 4185 return element; 4186 } 4187 4188 static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue, 4189 struct sk_buff *skb, unsigned int elements, 4190 struct qeth_hdr *hdr, unsigned int offset, 4191 unsigned int hd_len) 4192 { 4193 unsigned int bytes = qdisc_pkt_len(skb); 4194 struct qeth_qdio_out_buffer *buffer; 4195 unsigned int next_element; 4196 struct netdev_queue *txq; 4197 bool stopped = false; 4198 bool flush; 4199 4200 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)]; 4201 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb)); 4202 4203 /* Just a sanity check, the wake/stop logic should ensure that we always 4204 * get a free buffer. 
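	 * The txq is stopped once used_buffers reaches QDIO_MAX_BUFFERS_PER_Q,
	 * and is only woken again after completed buffers were released, so a
	 * non-EMPTY buffer here indicates broken accounting rather than a
	 * transient state.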
4205 */ 4206 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) 4207 return -EBUSY; 4208 4209 flush = !qeth_iqd_may_bulk(queue, skb, hdr); 4210 4211 if (flush || 4212 (buffer->next_element_to_fill + elements > queue->max_elements)) { 4213 if (buffer->next_element_to_fill > 0) { 4214 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4215 queue->bulk_count++; 4216 } 4217 4218 if (queue->bulk_count >= queue->bulk_max) 4219 flush = true; 4220 4221 if (flush) 4222 qeth_flush_queue(queue); 4223 4224 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + 4225 queue->bulk_count)]; 4226 4227 /* Sanity-check again: */ 4228 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) 4229 return -EBUSY; 4230 } 4231 4232 if (buffer->next_element_to_fill == 0 && 4233 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { 4234 /* If a TX completion happens right _here_ and misses to wake 4235 * the txq, then our re-check below will catch the race. 4236 */ 4237 QETH_TXQ_STAT_INC(queue, stopped); 4238 netif_tx_stop_queue(txq); 4239 stopped = true; 4240 } 4241 4242 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len); 4243 buffer->bytes += bytes; 4244 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; 4245 queue->prev_hdr = hdr; 4246 4247 flush = __netdev_tx_sent_queue(txq, bytes, 4248 !stopped && netdev_xmit_more()); 4249 4250 if (flush || next_element >= queue->max_elements) { 4251 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4252 queue->bulk_count++; 4253 4254 if (queue->bulk_count >= queue->bulk_max) 4255 flush = true; 4256 4257 if (flush) 4258 qeth_flush_queue(queue); 4259 } 4260 4261 if (stopped && !qeth_out_queue_is_full(queue)) 4262 netif_tx_start_queue(txq); 4263 return 0; 4264 } 4265 4266 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, 4267 struct sk_buff *skb, struct qeth_hdr *hdr, 4268 unsigned int offset, unsigned int hd_len, 4269 int elements_needed) 4270 { 4271 unsigned int start_index = queue->next_buf_to_fill; 4272 struct qeth_qdio_out_buffer *buffer; 4273 unsigned int next_element; 4274 struct netdev_queue *txq; 4275 bool stopped = false; 4276 int flush_count = 0; 4277 int do_pack = 0; 4278 int rc = 0; 4279 4280 buffer = queue->bufs[queue->next_buf_to_fill]; 4281 4282 /* Just a sanity check, the wake/stop logic should ensure that we always 4283 * get a free buffer. 4284 */ 4285 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) 4286 return -EBUSY; 4287 4288 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb)); 4289 4290 /* check if we need to switch packing state of this queue */ 4291 qeth_switch_to_packing_if_needed(queue); 4292 if (queue->do_pack) { 4293 do_pack = 1; 4294 /* does packet fit in current buffer? */ 4295 if (buffer->next_element_to_fill + elements_needed > 4296 queue->max_elements) { 4297 /* ... 
no -> set state PRIMED */ 4298 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4299 flush_count++; 4300 queue->next_buf_to_fill = 4301 QDIO_BUFNR(queue->next_buf_to_fill + 1); 4302 buffer = queue->bufs[queue->next_buf_to_fill]; 4303 4304 /* We stepped forward, so sanity-check again: */ 4305 if (atomic_read(&buffer->state) != 4306 QETH_QDIO_BUF_EMPTY) { 4307 qeth_flush_buffers(queue, start_index, 4308 flush_count); 4309 rc = -EBUSY; 4310 goto out; 4311 } 4312 } 4313 } 4314 4315 if (buffer->next_element_to_fill == 0 && 4316 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { 4317 /* If a TX completion happens right _here_ and misses to wake 4318 * the txq, then our re-check below will catch the race. 4319 */ 4320 QETH_TXQ_STAT_INC(queue, stopped); 4321 netif_tx_stop_queue(txq); 4322 stopped = true; 4323 } 4324 4325 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len); 4326 buffer->bytes += qdisc_pkt_len(skb); 4327 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; 4328 4329 if (queue->do_pack) 4330 QETH_TXQ_STAT_INC(queue, skbs_pack); 4331 if (!queue->do_pack || stopped || next_element >= queue->max_elements) { 4332 flush_count++; 4333 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4334 queue->next_buf_to_fill = 4335 QDIO_BUFNR(queue->next_buf_to_fill + 1); 4336 } 4337 4338 if (flush_count) 4339 qeth_flush_buffers(queue, start_index, flush_count); 4340 4341 out: 4342 if (do_pack) 4343 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count); 4344 4345 if (stopped && !qeth_out_queue_is_full(queue)) 4346 netif_tx_start_queue(txq); 4347 return rc; 4348 } 4349 EXPORT_SYMBOL_GPL(qeth_do_send_packet); 4350 4351 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, 4352 unsigned int payload_len, struct sk_buff *skb, 4353 unsigned int proto_len) 4354 { 4355 struct qeth_hdr_ext_tso *ext = &hdr->ext; 4356 4357 ext->hdr_tot_len = sizeof(*ext); 4358 ext->imb_hdr_no = 1; 4359 ext->hdr_type = 1; 4360 ext->hdr_version = 1; 4361 ext->hdr_len = 28; 4362 ext->payload_len = payload_len; 4363 ext->mss = skb_shinfo(skb)->gso_size; 4364 ext->dg_hdr_len = proto_len; 4365 } 4366 4367 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, 4368 struct qeth_qdio_out_q *queue, int ipv, 4369 void (*fill_header)(struct qeth_qdio_out_q *queue, 4370 struct qeth_hdr *hdr, struct sk_buff *skb, 4371 int ipv, unsigned int data_len)) 4372 { 4373 unsigned int proto_len, hw_hdr_len; 4374 unsigned int frame_len = skb->len; 4375 bool is_tso = skb_is_gso(skb); 4376 unsigned int data_offset = 0; 4377 struct qeth_hdr *hdr = NULL; 4378 unsigned int hd_len = 0; 4379 unsigned int elements; 4380 int push_len, rc; 4381 4382 if (is_tso) { 4383 hw_hdr_len = sizeof(struct qeth_hdr_tso); 4384 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 4385 } else { 4386 hw_hdr_len = sizeof(struct qeth_hdr); 4387 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0; 4388 } 4389 4390 rc = skb_cow_head(skb, hw_hdr_len); 4391 if (rc) 4392 return rc; 4393 4394 push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len, 4395 &elements); 4396 if (push_len < 0) 4397 return push_len; 4398 if (is_tso || !push_len) { 4399 /* HW header needs its own buffer element. 
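		 * hd_len covers the qeth header plus any protocol headers
		 * placed in front of the payload, and data_offset skips that
		 * same region in the skb so it isn't mapped twice. For
		 * example, a pushed TSO header with proto_len == 54 maps the
		 * payload starting at skb->data + push_len + 54.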
*/ 4400 hd_len = hw_hdr_len + proto_len; 4401 data_offset = push_len + proto_len; 4402 } 4403 memset(hdr, 0, hw_hdr_len); 4404 fill_header(queue, hdr, skb, ipv, frame_len); 4405 if (is_tso) 4406 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr, 4407 frame_len - proto_len, skb, proto_len); 4408 4409 if (IS_IQD(card)) { 4410 rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset, 4411 hd_len); 4412 } else { 4413 /* TODO: drop skb_orphan() once TX completion is fast enough */ 4414 skb_orphan(skb); 4415 spin_lock(&queue->lock); 4416 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset, 4417 hd_len, elements); 4418 spin_unlock(&queue->lock); 4419 } 4420 4421 if (rc && !push_len) 4422 kmem_cache_free(qeth_core_header_cache, hdr); 4423 4424 return rc; 4425 } 4426 EXPORT_SYMBOL_GPL(qeth_xmit); 4427 4428 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, 4429 struct qeth_reply *reply, unsigned long data) 4430 { 4431 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4432 struct qeth_ipacmd_setadpparms *setparms; 4433 4434 QETH_CARD_TEXT(card, 4, "prmadpcb"); 4435 4436 setparms = &(cmd->data.setadapterparms); 4437 if (qeth_setadpparms_inspect_rc(cmd)) { 4438 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code); 4439 setparms->data.mode = SET_PROMISC_MODE_OFF; 4440 } 4441 card->info.promisc_mode = setparms->data.mode; 4442 return (cmd->hdr.return_code) ? -EIO : 0; 4443 } 4444 4445 void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable) 4446 { 4447 enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON : 4448 SET_PROMISC_MODE_OFF; 4449 struct qeth_cmd_buffer *iob; 4450 struct qeth_ipa_cmd *cmd; 4451 4452 QETH_CARD_TEXT(card, 4, "setprom"); 4453 QETH_CARD_TEXT_(card, 4, "mode:%x", mode); 4454 4455 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, 4456 SETADP_DATA_SIZEOF(mode)); 4457 if (!iob) 4458 return; 4459 cmd = __ipa_cmd(iob); 4460 cmd->data.setadapterparms.data.mode = mode; 4461 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); 4462 } 4463 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode); 4464 4465 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, 4466 struct qeth_reply *reply, unsigned long data) 4467 { 4468 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4469 struct qeth_ipacmd_setadpparms *adp_cmd; 4470 4471 QETH_CARD_TEXT(card, 4, "chgmaccb"); 4472 if (qeth_setadpparms_inspect_rc(cmd)) 4473 return -EIO; 4474 4475 adp_cmd = &cmd->data.setadapterparms; 4476 if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr)) 4477 return -EADDRNOTAVAIL; 4478 4479 if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) && 4480 !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC)) 4481 return -EADDRNOTAVAIL; 4482 4483 ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr); 4484 return 0; 4485 } 4486 4487 int qeth_setadpparms_change_macaddr(struct qeth_card *card) 4488 { 4489 int rc; 4490 struct qeth_cmd_buffer *iob; 4491 struct qeth_ipa_cmd *cmd; 4492 4493 QETH_CARD_TEXT(card, 4, "chgmac"); 4494 4495 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, 4496 SETADP_DATA_SIZEOF(change_addr)); 4497 if (!iob) 4498 return -ENOMEM; 4499 cmd = __ipa_cmd(iob); 4500 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; 4501 cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN; 4502 ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr, 4503 card->dev->dev_addr); 4504 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb, 4505 NULL); 4506 
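	/* On success, qeth_setadpparms_change_macaddr_cb() has copied the
	 * adapter-provided MAC address into card->dev->dev_addr.
	 */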
return rc; 4507 } 4508 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); 4509 4510 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, 4511 struct qeth_reply *reply, unsigned long data) 4512 { 4513 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4514 struct qeth_set_access_ctrl *access_ctrl_req; 4515 4516 QETH_CARD_TEXT(card, 4, "setaccb"); 4517 4518 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; 4519 QETH_CARD_TEXT_(card, 2, "rc=%d", 4520 cmd->data.setadapterparms.hdr.return_code); 4521 if (cmd->data.setadapterparms.hdr.return_code != 4522 SET_ACCESS_CTRL_RC_SUCCESS) 4523 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n", 4524 access_ctrl_req->subcmd_code, CARD_DEVID(card), 4525 cmd->data.setadapterparms.hdr.return_code); 4526 switch (qeth_setadpparms_inspect_rc(cmd)) { 4527 case SET_ACCESS_CTRL_RC_SUCCESS: 4528 if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE) 4529 dev_info(&card->gdev->dev, 4530 "QDIO data connection isolation is deactivated\n"); 4531 else 4532 dev_info(&card->gdev->dev, 4533 "QDIO data connection isolation is activated\n"); 4534 return 0; 4535 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED: 4536 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n", 4537 CARD_DEVID(card)); 4538 return 0; 4539 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED: 4540 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n", 4541 CARD_DEVID(card)); 4542 return 0; 4543 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED: 4544 dev_err(&card->gdev->dev, "Adapter does not " 4545 "support QDIO data connection isolation\n"); 4546 return -EOPNOTSUPP; 4547 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER: 4548 dev_err(&card->gdev->dev, 4549 "Adapter is dedicated. 
" 4550 "QDIO data connection isolation not supported\n"); 4551 return -EOPNOTSUPP; 4552 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF: 4553 dev_err(&card->gdev->dev, 4554 "TSO does not permit QDIO data connection isolation\n"); 4555 return -EPERM; 4556 case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED: 4557 dev_err(&card->gdev->dev, "The adjacent switch port does not " 4558 "support reflective relay mode\n"); 4559 return -EOPNOTSUPP; 4560 case SET_ACCESS_CTRL_RC_REFLREL_FAILED: 4561 dev_err(&card->gdev->dev, "The reflective relay mode cannot be " 4562 "enabled at the adjacent switch port"); 4563 return -EREMOTEIO; 4564 case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED: 4565 dev_warn(&card->gdev->dev, "Turning off reflective relay mode " 4566 "at the adjacent switch failed\n"); 4567 /* benign error while disabling ISOLATION_MODE_FWD */ 4568 return 0; 4569 default: 4570 return -EIO; 4571 } 4572 } 4573 4574 int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, 4575 enum qeth_ipa_isolation_modes mode) 4576 { 4577 int rc; 4578 struct qeth_cmd_buffer *iob; 4579 struct qeth_ipa_cmd *cmd; 4580 struct qeth_set_access_ctrl *access_ctrl_req; 4581 4582 QETH_CARD_TEXT(card, 4, "setacctl"); 4583 4584 if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { 4585 dev_err(&card->gdev->dev, 4586 "Adapter does not support QDIO data connection isolation\n"); 4587 return -EOPNOTSUPP; 4588 } 4589 4590 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, 4591 SETADP_DATA_SIZEOF(set_access_ctrl)); 4592 if (!iob) 4593 return -ENOMEM; 4594 cmd = __ipa_cmd(iob); 4595 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; 4596 access_ctrl_req->subcmd_code = mode; 4597 4598 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb, 4599 NULL); 4600 if (rc) { 4601 QETH_CARD_TEXT_(card, 2, "rc=%d", rc); 4602 QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n", 4603 rc, CARD_DEVID(card)); 4604 } 4605 4606 return rc; 4607 } 4608 4609 void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue) 4610 { 4611 struct qeth_card *card; 4612 4613 card = dev->ml_priv; 4614 QETH_CARD_TEXT(card, 4, "txtimeo"); 4615 qeth_schedule_recovery(card); 4616 } 4617 EXPORT_SYMBOL_GPL(qeth_tx_timeout); 4618 4619 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) 4620 { 4621 struct qeth_card *card = dev->ml_priv; 4622 int rc = 0; 4623 4624 switch (regnum) { 4625 case MII_BMCR: /* Basic mode control register */ 4626 rc = BMCR_FULLDPLX; 4627 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) && 4628 (card->info.link_type != QETH_LINK_TYPE_OSN) && 4629 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) && 4630 (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH)) 4631 rc |= BMCR_SPEED100; 4632 break; 4633 case MII_BMSR: /* Basic mode status register */ 4634 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS | 4635 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL | 4636 BMSR_100BASE4; 4637 break; 4638 case MII_PHYSID1: /* PHYS ID 1 */ 4639 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) | 4640 dev->dev_addr[2]; 4641 rc = (rc >> 5) & 0xFFFF; 4642 break; 4643 case MII_PHYSID2: /* PHYS ID 2 */ 4644 rc = (dev->dev_addr[2] << 10) & 0xFFFF; 4645 break; 4646 case MII_ADVERTISE: /* Advertisement control reg */ 4647 rc = ADVERTISE_ALL; 4648 break; 4649 case MII_LPA: /* Link partner ability reg */ 4650 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL | 4651 LPA_100BASE4 | LPA_LPACK; 4652 break; 4653 case MII_EXPANSION: /* Expansion register */ 
4654 break; 4655 case MII_DCOUNTER: /* disconnect counter */ 4656 break; 4657 case MII_FCSCOUNTER: /* false carrier counter */ 4658 break; 4659 case MII_NWAYTEST: /* N-way auto-neg test register */ 4660 break; 4661 case MII_RERRCOUNTER: /* rx error counter */ 4662 rc = card->stats.rx_length_errors + 4663 card->stats.rx_frame_errors + 4664 card->stats.rx_fifo_errors; 4665 break; 4666 case MII_SREVISION: /* silicon revision */ 4667 break; 4668 case MII_RESV1: /* reserved 1 */ 4669 break; 4670 case MII_LBRERROR: /* loopback, rx, bypass error */ 4671 break; 4672 case MII_PHYADDR: /* physical address */ 4673 break; 4674 case MII_RESV2: /* reserved 2 */ 4675 break; 4676 case MII_TPISTATUS: /* TPI status for 10mbps */ 4677 break; 4678 case MII_NCONFIG: /* network interface config */ 4679 break; 4680 default: 4681 break; 4682 } 4683 return rc; 4684 } 4685 4686 static int qeth_snmp_command_cb(struct qeth_card *card, 4687 struct qeth_reply *reply, unsigned long data) 4688 { 4689 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4690 struct qeth_arp_query_info *qinfo = reply->param; 4691 struct qeth_ipacmd_setadpparms *adp_cmd; 4692 unsigned int data_len; 4693 void *snmp_data; 4694 4695 QETH_CARD_TEXT(card, 3, "snpcmdcb"); 4696 4697 if (cmd->hdr.return_code) { 4698 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code); 4699 return -EIO; 4700 } 4701 if (cmd->data.setadapterparms.hdr.return_code) { 4702 cmd->hdr.return_code = 4703 cmd->data.setadapterparms.hdr.return_code; 4704 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code); 4705 return -EIO; 4706 } 4707 4708 adp_cmd = &cmd->data.setadapterparms; 4709 data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr); 4710 if (adp_cmd->hdr.seq_no == 1) { 4711 snmp_data = &adp_cmd->data.snmp; 4712 } else { 4713 snmp_data = &adp_cmd->data.snmp.request; 4714 data_len -= offsetof(struct qeth_snmp_cmd, request); 4715 } 4716 4717 /* check if there is enough room in userspace */ 4718 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { 4719 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC); 4720 return -ENOSPC; 4721 } 4722 QETH_CARD_TEXT_(card, 4, "snore%i", 4723 cmd->data.setadapterparms.hdr.used_total); 4724 QETH_CARD_TEXT_(card, 4, "sseqn%i", 4725 cmd->data.setadapterparms.hdr.seq_no); 4726 /*copy entries to user buffer*/ 4727 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len); 4728 qinfo->udata_offset += data_len; 4729 4730 if (cmd->data.setadapterparms.hdr.seq_no < 4731 cmd->data.setadapterparms.hdr.used_total) 4732 return 1; 4733 return 0; 4734 } 4735 4736 static int qeth_snmp_command(struct qeth_card *card, char __user *udata) 4737 { 4738 struct qeth_snmp_ureq __user *ureq; 4739 struct qeth_cmd_buffer *iob; 4740 unsigned int req_len; 4741 struct qeth_arp_query_info qinfo = {0, }; 4742 int rc = 0; 4743 4744 QETH_CARD_TEXT(card, 3, "snmpcmd"); 4745 4746 if (IS_VM_NIC(card)) 4747 return -EOPNOTSUPP; 4748 4749 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && 4750 IS_LAYER3(card)) 4751 return -EOPNOTSUPP; 4752 4753 ureq = (struct qeth_snmp_ureq __user *) udata; 4754 if (get_user(qinfo.udata_len, &ureq->hdr.data_len) || 4755 get_user(req_len, &ureq->hdr.req_len)) 4756 return -EFAULT; 4757 4758 /* Sanitize user input, to avoid overflows in iob size calculation: */ 4759 if (req_len > QETH_BUFSIZE) 4760 return -EINVAL; 4761 4762 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len); 4763 if (!iob) 4764 return -ENOMEM; 4765 4766 if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp, 4767 
&ureq->cmd, req_len)) { 4768 qeth_put_cmd(iob); 4769 return -EFAULT; 4770 } 4771 4772 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); 4773 if (!qinfo.udata) { 4774 qeth_put_cmd(iob); 4775 return -ENOMEM; 4776 } 4777 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr); 4778 4779 rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo); 4780 if (rc) 4781 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n", 4782 CARD_DEVID(card), rc); 4783 else { 4784 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) 4785 rc = -EFAULT; 4786 } 4787 4788 kfree(qinfo.udata); 4789 return rc; 4790 } 4791 4792 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, 4793 struct qeth_reply *reply, 4794 unsigned long data) 4795 { 4796 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4797 struct qeth_qoat_priv *priv = reply->param; 4798 int resdatalen; 4799 4800 QETH_CARD_TEXT(card, 3, "qoatcb"); 4801 if (qeth_setadpparms_inspect_rc(cmd)) 4802 return -EIO; 4803 4804 resdatalen = cmd->data.setadapterparms.hdr.cmdlength; 4805 4806 if (resdatalen > (priv->buffer_len - priv->response_len)) 4807 return -ENOSPC; 4808 4809 memcpy(priv->buffer + priv->response_len, 4810 &cmd->data.setadapterparms.hdr, resdatalen); 4811 priv->response_len += resdatalen; 4812 4813 if (cmd->data.setadapterparms.hdr.seq_no < 4814 cmd->data.setadapterparms.hdr.used_total) 4815 return 1; 4816 return 0; 4817 } 4818 4819 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) 4820 { 4821 int rc = 0; 4822 struct qeth_cmd_buffer *iob; 4823 struct qeth_ipa_cmd *cmd; 4824 struct qeth_query_oat *oat_req; 4825 struct qeth_query_oat_data oat_data; 4826 struct qeth_qoat_priv priv; 4827 void __user *tmp; 4828 4829 QETH_CARD_TEXT(card, 3, "qoatcmd"); 4830 4831 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) 4832 return -EOPNOTSUPP; 4833 4834 if (copy_from_user(&oat_data, udata, sizeof(oat_data))) 4835 return -EFAULT; 4836 4837 priv.buffer_len = oat_data.buffer_len; 4838 priv.response_len = 0; 4839 priv.buffer = vzalloc(oat_data.buffer_len); 4840 if (!priv.buffer) 4841 return -ENOMEM; 4842 4843 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, 4844 SETADP_DATA_SIZEOF(query_oat)); 4845 if (!iob) { 4846 rc = -ENOMEM; 4847 goto out_free; 4848 } 4849 cmd = __ipa_cmd(iob); 4850 oat_req = &cmd->data.setadapterparms.data.query_oat; 4851 oat_req->subcmd_code = oat_data.command; 4852 4853 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv); 4854 if (!rc) { 4855 tmp = is_compat_task() ? 
compat_ptr(oat_data.ptr) : 4856 u64_to_user_ptr(oat_data.ptr); 4857 oat_data.response_len = priv.response_len; 4858 4859 if (copy_to_user(tmp, priv.buffer, priv.response_len) || 4860 copy_to_user(udata, &oat_data, sizeof(oat_data))) 4861 rc = -EFAULT; 4862 } 4863 4864 out_free: 4865 vfree(priv.buffer); 4866 return rc; 4867 } 4868 4869 static int qeth_query_card_info_cb(struct qeth_card *card, 4870 struct qeth_reply *reply, unsigned long data) 4871 { 4872 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4873 struct qeth_link_info *link_info = reply->param; 4874 struct qeth_query_card_info *card_info; 4875 4876 QETH_CARD_TEXT(card, 2, "qcrdincb"); 4877 if (qeth_setadpparms_inspect_rc(cmd)) 4878 return -EIO; 4879 4880 card_info = &cmd->data.setadapterparms.data.card_info; 4881 netdev_dbg(card->dev, 4882 "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n", 4883 card_info->card_type, card_info->port_mode, 4884 card_info->port_speed); 4885 4886 switch (card_info->port_mode) { 4887 case CARD_INFO_PORTM_FULLDUPLEX: 4888 link_info->duplex = DUPLEX_FULL; 4889 break; 4890 case CARD_INFO_PORTM_HALFDUPLEX: 4891 link_info->duplex = DUPLEX_HALF; 4892 break; 4893 default: 4894 link_info->duplex = DUPLEX_UNKNOWN; 4895 } 4896 4897 switch (card_info->card_type) { 4898 case CARD_INFO_TYPE_1G_COPPER_A: 4899 case CARD_INFO_TYPE_1G_COPPER_B: 4900 link_info->speed = SPEED_1000; 4901 link_info->port = PORT_TP; 4902 break; 4903 case CARD_INFO_TYPE_1G_FIBRE_A: 4904 case CARD_INFO_TYPE_1G_FIBRE_B: 4905 link_info->speed = SPEED_1000; 4906 link_info->port = PORT_FIBRE; 4907 break; 4908 case CARD_INFO_TYPE_10G_FIBRE_A: 4909 case CARD_INFO_TYPE_10G_FIBRE_B: 4910 link_info->speed = SPEED_10000; 4911 link_info->port = PORT_FIBRE; 4912 break; 4913 default: 4914 switch (card_info->port_speed) { 4915 case CARD_INFO_PORTS_10M: 4916 link_info->speed = SPEED_10; 4917 break; 4918 case CARD_INFO_PORTS_100M: 4919 link_info->speed = SPEED_100; 4920 break; 4921 case CARD_INFO_PORTS_1G: 4922 link_info->speed = SPEED_1000; 4923 break; 4924 case CARD_INFO_PORTS_10G: 4925 link_info->speed = SPEED_10000; 4926 break; 4927 case CARD_INFO_PORTS_25G: 4928 link_info->speed = SPEED_25000; 4929 break; 4930 default: 4931 link_info->speed = SPEED_UNKNOWN; 4932 } 4933 4934 link_info->port = PORT_OTHER; 4935 } 4936 4937 return 0; 4938 } 4939 4940 int qeth_query_card_info(struct qeth_card *card, 4941 struct qeth_link_info *link_info) 4942 { 4943 struct qeth_cmd_buffer *iob; 4944 4945 QETH_CARD_TEXT(card, 2, "qcrdinfo"); 4946 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO)) 4947 return -EOPNOTSUPP; 4948 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0); 4949 if (!iob) 4950 return -ENOMEM; 4951 4952 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info); 4953 } 4954 4955 static int qeth_init_link_info_oat_cb(struct qeth_card *card, 4956 struct qeth_reply *reply_priv, 4957 unsigned long data) 4958 { 4959 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4960 struct qeth_link_info *link_info = reply_priv->param; 4961 struct qeth_query_oat_physical_if *phys_if; 4962 struct qeth_query_oat_reply *reply; 4963 4964 if (qeth_setadpparms_inspect_rc(cmd)) 4965 return -EIO; 4966 4967 /* Multi-part reply is unexpected, don't bother: */ 4968 if (cmd->data.setadapterparms.hdr.used_total > 1) 4969 return -EINVAL; 4970 4971 /* Expect the reply to start with phys_if data: */ 4972 reply = &cmd->data.setadapterparms.data.query_oat.reply[0]; 4973 if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF || 4974 
reply->length < sizeof(*reply)) 4975 return -EINVAL; 4976 4977 phys_if = &reply->phys_if; 4978 4979 switch (phys_if->speed_duplex) { 4980 case QETH_QOAT_PHYS_SPEED_10M_HALF: 4981 link_info->speed = SPEED_10; 4982 link_info->duplex = DUPLEX_HALF; 4983 break; 4984 case QETH_QOAT_PHYS_SPEED_10M_FULL: 4985 link_info->speed = SPEED_10; 4986 link_info->duplex = DUPLEX_FULL; 4987 break; 4988 case QETH_QOAT_PHYS_SPEED_100M_HALF: 4989 link_info->speed = SPEED_100; 4990 link_info->duplex = DUPLEX_HALF; 4991 break; 4992 case QETH_QOAT_PHYS_SPEED_100M_FULL: 4993 link_info->speed = SPEED_100; 4994 link_info->duplex = DUPLEX_FULL; 4995 break; 4996 case QETH_QOAT_PHYS_SPEED_1000M_HALF: 4997 link_info->speed = SPEED_1000; 4998 link_info->duplex = DUPLEX_HALF; 4999 break; 5000 case QETH_QOAT_PHYS_SPEED_1000M_FULL: 5001 link_info->speed = SPEED_1000; 5002 link_info->duplex = DUPLEX_FULL; 5003 break; 5004 case QETH_QOAT_PHYS_SPEED_10G_FULL: 5005 link_info->speed = SPEED_10000; 5006 link_info->duplex = DUPLEX_FULL; 5007 break; 5008 case QETH_QOAT_PHYS_SPEED_25G_FULL: 5009 link_info->speed = SPEED_25000; 5010 link_info->duplex = DUPLEX_FULL; 5011 break; 5012 case QETH_QOAT_PHYS_SPEED_UNKNOWN: 5013 default: 5014 link_info->speed = SPEED_UNKNOWN; 5015 link_info->duplex = DUPLEX_UNKNOWN; 5016 break; 5017 } 5018 5019 switch (phys_if->media_type) { 5020 case QETH_QOAT_PHYS_MEDIA_COPPER: 5021 link_info->port = PORT_TP; 5022 link_info->link_mode = QETH_LINK_MODE_UNKNOWN; 5023 break; 5024 case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT: 5025 link_info->port = PORT_FIBRE; 5026 link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT; 5027 break; 5028 case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG: 5029 link_info->port = PORT_FIBRE; 5030 link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG; 5031 break; 5032 default: 5033 link_info->port = PORT_OTHER; 5034 link_info->link_mode = QETH_LINK_MODE_UNKNOWN; 5035 break; 5036 } 5037 5038 return 0; 5039 } 5040 5041 static void qeth_init_link_info(struct qeth_card *card) 5042 { 5043 card->info.link_info.duplex = DUPLEX_FULL; 5044 5045 if (IS_IQD(card) || IS_VM_NIC(card)) { 5046 card->info.link_info.speed = SPEED_10000; 5047 card->info.link_info.port = PORT_FIBRE; 5048 card->info.link_info.link_mode = QETH_LINK_MODE_FIBRE_SHORT; 5049 } else { 5050 switch (card->info.link_type) { 5051 case QETH_LINK_TYPE_FAST_ETH: 5052 case QETH_LINK_TYPE_LANE_ETH100: 5053 card->info.link_info.speed = SPEED_100; 5054 card->info.link_info.port = PORT_TP; 5055 break; 5056 case QETH_LINK_TYPE_GBIT_ETH: 5057 case QETH_LINK_TYPE_LANE_ETH1000: 5058 card->info.link_info.speed = SPEED_1000; 5059 card->info.link_info.port = PORT_FIBRE; 5060 break; 5061 case QETH_LINK_TYPE_10GBIT_ETH: 5062 card->info.link_info.speed = SPEED_10000; 5063 card->info.link_info.port = PORT_FIBRE; 5064 break; 5065 case QETH_LINK_TYPE_25GBIT_ETH: 5066 card->info.link_info.speed = SPEED_25000; 5067 card->info.link_info.port = PORT_FIBRE; 5068 break; 5069 default: 5070 dev_info(&card->gdev->dev, "Unknown link type %x\n", 5071 card->info.link_type); 5072 card->info.link_info.speed = SPEED_UNKNOWN; 5073 card->info.link_info.port = PORT_OTHER; 5074 } 5075 5076 card->info.link_info.link_mode = QETH_LINK_MODE_UNKNOWN; 5077 } 5078 5079 /* Get more accurate data via QUERY OAT: */ 5080 if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) { 5081 struct qeth_link_info link_info; 5082 struct qeth_cmd_buffer *iob; 5083 5084 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, 5085 SETADP_DATA_SIZEOF(query_oat)); 5086 if (iob) { 5087 struct qeth_ipa_cmd *cmd = __ipa_cmd(iob); 
5088 struct qeth_query_oat *oat_req; 5089 5090 oat_req = &cmd->data.setadapterparms.data.query_oat; 5091 oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE; 5092 5093 if (!qeth_send_ipa_cmd(card, iob, 5094 qeth_init_link_info_oat_cb, 5095 &link_info)) { 5096 if (link_info.speed != SPEED_UNKNOWN) 5097 card->info.link_info.speed = link_info.speed; 5098 if (link_info.duplex != DUPLEX_UNKNOWN) 5099 card->info.link_info.duplex = link_info.duplex; 5100 if (link_info.port != PORT_OTHER) 5101 card->info.link_info.port = link_info.port; 5102 if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN) 5103 card->info.link_info.link_mode = link_info.link_mode; 5104 } 5105 } 5106 } 5107 } 5108 5109 /** 5110 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address 5111 * @card: pointer to a qeth_card 5112 * 5113 * Returns 5114 * 0, if a MAC address has been set for the card's netdevice 5115 * a return code, for various error conditions 5116 */ 5117 int qeth_vm_request_mac(struct qeth_card *card) 5118 { 5119 struct diag26c_mac_resp *response; 5120 struct diag26c_mac_req *request; 5121 int rc; 5122 5123 QETH_CARD_TEXT(card, 2, "vmreqmac"); 5124 5125 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA); 5126 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA); 5127 if (!request || !response) { 5128 rc = -ENOMEM; 5129 goto out; 5130 } 5131 5132 request->resp_buf_len = sizeof(*response); 5133 request->resp_version = DIAG26C_VERSION2; 5134 request->op_code = DIAG26C_GET_MAC; 5135 request->devno = card->info.ddev_devno; 5136 5137 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 5138 rc = diag26c(request, response, DIAG26C_MAC_SERVICES); 5139 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 5140 if (rc) 5141 goto out; 5142 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response)); 5143 5144 if (request->resp_buf_len < sizeof(*response) || 5145 response->version != request->resp_version) { 5146 rc = -EIO; 5147 QETH_CARD_TEXT(card, 2, "badresp"); 5148 QETH_CARD_HEX(card, 2, &request->resp_buf_len, 5149 sizeof(request->resp_buf_len)); 5150 } else if (!is_valid_ether_addr(response->mac)) { 5151 rc = -EINVAL; 5152 QETH_CARD_TEXT(card, 2, "badmac"); 5153 QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN); 5154 } else { 5155 ether_addr_copy(card->dev->dev_addr, response->mac); 5156 } 5157 5158 out: 5159 kfree(response); 5160 kfree(request); 5161 return rc; 5162 } 5163 EXPORT_SYMBOL_GPL(qeth_vm_request_mac); 5164 5165 static void qeth_determine_capabilities(struct qeth_card *card) 5166 { 5167 struct qeth_channel *channel = &card->data; 5168 struct ccw_device *ddev = channel->ccwdev; 5169 int rc; 5170 int ddev_offline = 0; 5171 5172 QETH_CARD_TEXT(card, 2, "detcapab"); 5173 if (!ddev->online) { 5174 ddev_offline = 1; 5175 rc = qeth_start_channel(channel); 5176 if (rc) { 5177 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 5178 goto out; 5179 } 5180 } 5181 5182 rc = qeth_read_conf_data(card); 5183 if (rc) { 5184 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n", 5185 CARD_DEVID(card), rc); 5186 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 5187 goto out_offline; 5188 } 5189 5190 rc = qdio_get_ssqd_desc(ddev, &card->ssqd); 5191 if (rc) 5192 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 5193 5194 QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt); 5195 QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1); 5196 QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2); 5197 QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3); 5198 QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt); 5199 if (!((card->ssqd.qfmt 
!= QDIO_IQDIO_QFMT) || 5200 ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) || 5201 ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) { 5202 dev_info(&card->gdev->dev, 5203 "Completion Queueing supported\n"); 5204 } else { 5205 card->options.cq = QETH_CQ_NOTAVAILABLE; 5206 } 5207 5208 out_offline: 5209 if (ddev_offline == 1) 5210 qeth_stop_channel(channel); 5211 out: 5212 return; 5213 } 5214 5215 static void qeth_read_ccw_conf_data(struct qeth_card *card) 5216 { 5217 struct qeth_card_info *info = &card->info; 5218 struct ccw_device *cdev = CARD_DDEV(card); 5219 struct ccw_dev_id dev_id; 5220 5221 QETH_CARD_TEXT(card, 2, "ccwconfd"); 5222 ccw_device_get_id(cdev, &dev_id); 5223 5224 info->ddev_devno = dev_id.devno; 5225 info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) && 5226 !ccw_device_get_iid(cdev, &info->iid) && 5227 !ccw_device_get_chid(cdev, 0, &info->chid); 5228 info->ssid = dev_id.ssid; 5229 5230 dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n", 5231 info->chid, info->chpid); 5232 5233 QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno); 5234 QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid); 5235 QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid); 5236 QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid); 5237 QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid); 5238 QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid); 5239 QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid); 5240 } 5241 5242 static int qeth_qdio_establish(struct qeth_card *card) 5243 { 5244 struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES]; 5245 struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES]; 5246 struct qeth_qib_parms *qib_parms = NULL; 5247 struct qdio_initialize init_data; 5248 unsigned int i; 5249 int rc = 0; 5250 5251 QETH_CARD_TEXT(card, 2, "qdioest"); 5252 5253 if (!IS_IQD(card) && !IS_VM_NIC(card)) { 5254 qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL); 5255 if (!qib_parms) 5256 return -ENOMEM; 5257 5258 qeth_fill_qib_parms(card, qib_parms); 5259 } 5260 5261 in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs; 5262 if (card->options.cq == QETH_CQ_ENABLED) 5263 in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs; 5264 5265 for (i = 0; i < card->qdio.no_out_queues; i++) 5266 out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs; 5267 5268 memset(&init_data, 0, sizeof(struct qdio_initialize)); 5269 init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT : 5270 QDIO_QETH_QFMT; 5271 init_data.qib_param_field_format = 0; 5272 init_data.qib_param_field = (void *)qib_parms; 5273 init_data.no_input_qs = card->qdio.no_in_queues; 5274 init_data.no_output_qs = card->qdio.no_out_queues; 5275 init_data.input_handler = qeth_qdio_input_handler; 5276 init_data.output_handler = qeth_qdio_output_handler; 5277 init_data.irq_poll = qeth_qdio_poll; 5278 init_data.int_parm = (unsigned long) card; 5279 init_data.input_sbal_addr_array = in_sbal_ptrs; 5280 init_data.output_sbal_addr_array = out_sbal_ptrs; 5281 init_data.output_sbal_state_array = card->qdio.out_bufstates; 5282 init_data.scan_threshold = IS_IQD(card) ? 
0 : 32; 5283 5284 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, 5285 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) { 5286 rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs, 5287 init_data.no_output_qs); 5288 if (rc) { 5289 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 5290 goto out; 5291 } 5292 rc = qdio_establish(CARD_DDEV(card), &init_data); 5293 if (rc) { 5294 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 5295 qdio_free(CARD_DDEV(card)); 5296 } 5297 } 5298 5299 switch (card->options.cq) { 5300 case QETH_CQ_ENABLED: 5301 dev_info(&card->gdev->dev, "Completion Queue support enabled"); 5302 break; 5303 case QETH_CQ_DISABLED: 5304 dev_info(&card->gdev->dev, "Completion Queue support disabled"); 5305 break; 5306 default: 5307 break; 5308 } 5309 5310 out: 5311 kfree(qib_parms); 5312 return rc; 5313 } 5314 5315 static void qeth_core_free_card(struct qeth_card *card) 5316 { 5317 QETH_CARD_TEXT(card, 2, "freecrd"); 5318 5319 unregister_service_level(&card->qeth_service_level); 5320 debugfs_remove_recursive(card->debugfs); 5321 qeth_put_cmd(card->read_cmd); 5322 destroy_workqueue(card->event_wq); 5323 dev_set_drvdata(&card->gdev->dev, NULL); 5324 kfree(card); 5325 } 5326 5327 static void qeth_trace_features(struct qeth_card *card) 5328 { 5329 QETH_CARD_TEXT(card, 2, "features"); 5330 QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4)); 5331 QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6)); 5332 QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp)); 5333 QETH_CARD_HEX(card, 2, &card->info.diagass_support, 5334 sizeof(card->info.diagass_support)); 5335 } 5336 5337 static struct ccw_device_id qeth_ids[] = { 5338 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01), 5339 .driver_info = QETH_CARD_TYPE_OSD}, 5340 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05), 5341 .driver_info = QETH_CARD_TYPE_IQD}, 5342 #ifdef CONFIG_QETH_OSN 5343 {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06), 5344 .driver_info = QETH_CARD_TYPE_OSN}, 5345 #endif 5346 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03), 5347 .driver_info = QETH_CARD_TYPE_OSM}, 5348 #ifdef CONFIG_QETH_OSX 5349 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02), 5350 .driver_info = QETH_CARD_TYPE_OSX}, 5351 #endif 5352 {}, 5353 }; 5354 MODULE_DEVICE_TABLE(ccw, qeth_ids); 5355 5356 static struct ccw_driver qeth_ccw_driver = { 5357 .driver = { 5358 .owner = THIS_MODULE, 5359 .name = "qeth", 5360 }, 5361 .ids = qeth_ids, 5362 .probe = ccwgroup_probe_ccwdev, 5363 .remove = ccwgroup_remove_ccwdev, 5364 }; 5365 5366 static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok) 5367 { 5368 int retries = 3; 5369 int rc; 5370 5371 QETH_CARD_TEXT(card, 2, "hrdsetup"); 5372 atomic_set(&card->force_alloc_skb, 0); 5373 rc = qeth_update_from_chp_desc(card); 5374 if (rc) 5375 return rc; 5376 retry: 5377 if (retries < 3) 5378 QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n", 5379 CARD_DEVID(card)); 5380 rc = qeth_qdio_clear_card(card, !IS_IQD(card)); 5381 qeth_stop_channel(&card->data); 5382 qeth_stop_channel(&card->write); 5383 qeth_stop_channel(&card->read); 5384 qdio_free(CARD_DDEV(card)); 5385 5386 rc = qeth_start_channel(&card->read); 5387 if (rc) 5388 goto retriable; 5389 rc = qeth_start_channel(&card->write); 5390 if (rc) 5391 goto retriable; 5392 rc = qeth_start_channel(&card->data); 5393 if (rc) 5394 goto retriable; 5395 retriable: 5396 if (rc == -ERESTARTSYS) { 5397 QETH_CARD_TEXT(card, 2, "break1"); 5398 return rc; 5399 } else if (rc) { 5400 
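		/* A failed channel start may be a transient condition, so
		 * retry up to three times before giving up.
		 */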
QETH_CARD_TEXT_(card, 2, "1err%d", rc); 5401 if (--retries < 0) 5402 goto out; 5403 else 5404 goto retry; 5405 } 5406 5407 qeth_determine_capabilities(card); 5408 qeth_read_ccw_conf_data(card); 5409 qeth_idx_init(card); 5410 5411 rc = qeth_idx_activate_read_channel(card); 5412 if (rc == -EINTR) { 5413 QETH_CARD_TEXT(card, 2, "break2"); 5414 return rc; 5415 } else if (rc) { 5416 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 5417 if (--retries < 0) 5418 goto out; 5419 else 5420 goto retry; 5421 } 5422 5423 rc = qeth_idx_activate_write_channel(card); 5424 if (rc == -EINTR) { 5425 QETH_CARD_TEXT(card, 2, "break3"); 5426 return rc; 5427 } else if (rc) { 5428 QETH_CARD_TEXT_(card, 2, "4err%d", rc); 5429 if (--retries < 0) 5430 goto out; 5431 else 5432 goto retry; 5433 } 5434 card->read_or_write_problem = 0; 5435 rc = qeth_mpc_initialize(card); 5436 if (rc) { 5437 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 5438 goto out; 5439 } 5440 5441 rc = qeth_send_startlan(card); 5442 if (rc) { 5443 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 5444 if (rc == -ENETDOWN) { 5445 dev_warn(&card->gdev->dev, "The LAN is offline\n"); 5446 *carrier_ok = false; 5447 } else { 5448 goto out; 5449 } 5450 } else { 5451 *carrier_ok = true; 5452 } 5453 5454 card->options.ipa4.supported = 0; 5455 card->options.ipa6.supported = 0; 5456 card->options.adp.supported = 0; 5457 card->options.sbp.supported_funcs = 0; 5458 card->info.diagass_support = 0; 5459 rc = qeth_query_ipassists(card, QETH_PROT_IPV4); 5460 if (rc == -ENOMEM) 5461 goto out; 5462 if (qeth_is_supported(card, IPA_IPV6)) { 5463 rc = qeth_query_ipassists(card, QETH_PROT_IPV6); 5464 if (rc == -ENOMEM) 5465 goto out; 5466 } 5467 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { 5468 rc = qeth_query_setadapterparms(card); 5469 if (rc < 0) { 5470 QETH_CARD_TEXT_(card, 2, "7err%d", rc); 5471 goto out; 5472 } 5473 } 5474 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { 5475 rc = qeth_query_setdiagass(card); 5476 if (rc) 5477 QETH_CARD_TEXT_(card, 2, "8err%d", rc); 5478 } 5479 5480 qeth_trace_features(card); 5481 5482 if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) || 5483 (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))) 5484 card->info.hwtrap = 0; 5485 5486 if (card->options.isolation != ISOLATION_MODE_NONE) { 5487 rc = qeth_setadpparms_set_access_ctrl(card, 5488 card->options.isolation); 5489 if (rc) 5490 goto out; 5491 } 5492 5493 qeth_init_link_info(card); 5494 5495 rc = qeth_init_qdio_queues(card); 5496 if (rc) { 5497 QETH_CARD_TEXT_(card, 2, "9err%d", rc); 5498 goto out; 5499 } 5500 5501 return 0; 5502 out: 5503 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " 5504 "an error on the device\n"); 5505 QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! 
rc=%d\n", 5506 CARD_DEVID(card), rc); 5507 return rc; 5508 } 5509 5510 static int qeth_set_online(struct qeth_card *card) 5511 { 5512 bool carrier_ok; 5513 int rc; 5514 5515 mutex_lock(&card->discipline_mutex); 5516 mutex_lock(&card->conf_mutex); 5517 QETH_CARD_TEXT(card, 2, "setonlin"); 5518 5519 rc = qeth_hardsetup_card(card, &carrier_ok); 5520 if (rc) { 5521 QETH_CARD_TEXT_(card, 2, "2err%04x", rc); 5522 rc = -ENODEV; 5523 goto err_hardsetup; 5524 } 5525 5526 qeth_print_status_message(card); 5527 5528 if (card->dev->reg_state != NETREG_REGISTERED) 5529 /* no need for locking / error handling at this early stage: */ 5530 qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card)); 5531 5532 rc = card->discipline->set_online(card, carrier_ok); 5533 if (rc) 5534 goto err_online; 5535 5536 /* let user_space know that device is online */ 5537 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE); 5538 5539 mutex_unlock(&card->conf_mutex); 5540 mutex_unlock(&card->discipline_mutex); 5541 return 0; 5542 5543 err_online: 5544 err_hardsetup: 5545 qeth_qdio_clear_card(card, 0); 5546 qeth_clear_working_pool_list(card); 5547 qeth_flush_local_addrs(card); 5548 5549 qeth_stop_channel(&card->data); 5550 qeth_stop_channel(&card->write); 5551 qeth_stop_channel(&card->read); 5552 qdio_free(CARD_DDEV(card)); 5553 5554 mutex_unlock(&card->conf_mutex); 5555 mutex_unlock(&card->discipline_mutex); 5556 return rc; 5557 } 5558 5559 int qeth_set_offline(struct qeth_card *card, bool resetting) 5560 { 5561 int rc, rc2, rc3; 5562 5563 mutex_lock(&card->discipline_mutex); 5564 mutex_lock(&card->conf_mutex); 5565 QETH_CARD_TEXT(card, 3, "setoffl"); 5566 5567 if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) { 5568 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); 5569 card->info.hwtrap = 1; 5570 } 5571 5572 /* cancel any stalled cmd that might block the rtnl: */ 5573 qeth_clear_ipacmd_list(card); 5574 5575 rtnl_lock(); 5576 card->info.open_when_online = card->dev->flags & IFF_UP; 5577 dev_close(card->dev); 5578 netif_device_detach(card->dev); 5579 netif_carrier_off(card->dev); 5580 rtnl_unlock(); 5581 5582 cancel_work_sync(&card->rx_mode_work); 5583 5584 card->discipline->set_offline(card); 5585 5586 qeth_qdio_clear_card(card, 0); 5587 qeth_drain_output_queues(card); 5588 qeth_clear_working_pool_list(card); 5589 qeth_flush_local_addrs(card); 5590 card->info.promisc_mode = 0; 5591 5592 rc = qeth_stop_channel(&card->data); 5593 rc2 = qeth_stop_channel(&card->write); 5594 rc3 = qeth_stop_channel(&card->read); 5595 if (!rc) 5596 rc = (rc2) ? 
rc2 : rc3; 5597 if (rc) 5598 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 5599 qdio_free(CARD_DDEV(card)); 5600 5601 /* let user_space know that device is offline */ 5602 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE); 5603 5604 mutex_unlock(&card->conf_mutex); 5605 mutex_unlock(&card->discipline_mutex); 5606 return 0; 5607 } 5608 EXPORT_SYMBOL_GPL(qeth_set_offline); 5609 5610 static int qeth_do_reset(void *data) 5611 { 5612 struct qeth_card *card = data; 5613 int rc; 5614 5615 QETH_CARD_TEXT(card, 2, "recover1"); 5616 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) 5617 return 0; 5618 QETH_CARD_TEXT(card, 2, "recover2"); 5619 dev_warn(&card->gdev->dev, 5620 "A recovery process has been started for the device\n"); 5621 5622 qeth_set_offline(card, true); 5623 rc = qeth_set_online(card); 5624 if (!rc) { 5625 dev_info(&card->gdev->dev, 5626 "Device successfully recovered!\n"); 5627 } else { 5628 ccwgroup_set_offline(card->gdev); 5629 dev_warn(&card->gdev->dev, 5630 "The qeth device driver failed to recover an error on the device\n"); 5631 } 5632 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); 5633 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); 5634 return 0; 5635 } 5636 5637 #if IS_ENABLED(CONFIG_QETH_L3) 5638 static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, 5639 struct qeth_hdr *hdr) 5640 { 5641 struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data; 5642 struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3; 5643 struct net_device *dev = skb->dev; 5644 5645 if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) { 5646 dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr, 5647 "FAKELL", skb->len); 5648 return; 5649 } 5650 5651 if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) { 5652 u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 : 5653 ETH_P_IP; 5654 unsigned char tg_addr[ETH_ALEN]; 5655 5656 skb_reset_network_header(skb); 5657 switch (l3_hdr->flags & QETH_HDR_CAST_MASK) { 5658 case QETH_CAST_MULTICAST: 5659 if (prot == ETH_P_IP) 5660 ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr); 5661 else 5662 ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr); 5663 QETH_CARD_STAT_INC(card, rx_multicast); 5664 break; 5665 case QETH_CAST_BROADCAST: 5666 ether_addr_copy(tg_addr, dev->broadcast); 5667 QETH_CARD_STAT_INC(card, rx_multicast); 5668 break; 5669 default: 5670 if (card->options.sniffer) 5671 skb->pkt_type = PACKET_OTHERHOST; 5672 ether_addr_copy(tg_addr, dev->dev_addr); 5673 } 5674 5675 if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR) 5676 dev_hard_header(skb, dev, prot, tg_addr, 5677 &l3_hdr->next_hop.rx.src_mac, skb->len); 5678 else 5679 dev_hard_header(skb, dev, prot, tg_addr, "FAKELL", 5680 skb->len); 5681 } 5682 5683 /* copy VLAN tag from hdr into skb */ 5684 if (!card->options.sniffer && 5685 (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME | 5686 QETH_HDR_EXT_INCLUDE_VLAN_TAG))) { 5687 u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ? 
5688 l3_hdr->vlan_id : 5689 l3_hdr->next_hop.rx.vlan_id; 5690 5691 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); 5692 } 5693 } 5694 #endif 5695 5696 static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb, 5697 struct qeth_hdr *hdr, bool uses_frags) 5698 { 5699 struct napi_struct *napi = &card->napi; 5700 bool is_cso; 5701 5702 switch (hdr->hdr.l2.id) { 5703 case QETH_HEADER_TYPE_OSN: 5704 skb_push(skb, sizeof(*hdr)); 5705 skb_copy_to_linear_data(skb, hdr, sizeof(*hdr)); 5706 QETH_CARD_STAT_ADD(card, rx_bytes, skb->len); 5707 QETH_CARD_STAT_INC(card, rx_packets); 5708 5709 card->osn_info.data_cb(skb); 5710 return; 5711 #if IS_ENABLED(CONFIG_QETH_L3) 5712 case QETH_HEADER_TYPE_LAYER3: 5713 qeth_l3_rebuild_skb(card, skb, hdr); 5714 is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ; 5715 break; 5716 #endif 5717 case QETH_HEADER_TYPE_LAYER2: 5718 is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ; 5719 break; 5720 default: 5721 /* never happens */ 5722 if (uses_frags) 5723 napi_free_frags(napi); 5724 else 5725 dev_kfree_skb_any(skb); 5726 return; 5727 } 5728 5729 if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) { 5730 skb->ip_summed = CHECKSUM_UNNECESSARY; 5731 QETH_CARD_STAT_INC(card, rx_skb_csum); 5732 } else { 5733 skb->ip_summed = CHECKSUM_NONE; 5734 } 5735 5736 QETH_CARD_STAT_ADD(card, rx_bytes, skb->len); 5737 QETH_CARD_STAT_INC(card, rx_packets); 5738 if (skb_is_nonlinear(skb)) { 5739 QETH_CARD_STAT_INC(card, rx_sg_skbs); 5740 QETH_CARD_STAT_ADD(card, rx_sg_frags, 5741 skb_shinfo(skb)->nr_frags); 5742 } 5743 5744 if (uses_frags) { 5745 napi_gro_frags(napi); 5746 } else { 5747 skb->protocol = eth_type_trans(skb, skb->dev); 5748 napi_gro_receive(napi, skb); 5749 } 5750 } 5751 5752 static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len) 5753 { 5754 struct page *page = virt_to_page(data); 5755 unsigned int next_frag; 5756 5757 next_frag = skb_shinfo(skb)->nr_frags; 5758 get_page(page); 5759 skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len, 5760 data_len); 5761 } 5762 5763 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) 5764 { 5765 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY); 5766 } 5767 5768 static int qeth_extract_skb(struct qeth_card *card, 5769 struct qeth_qdio_buffer *qethbuffer, u8 *element_no, 5770 int *__offset) 5771 { 5772 struct qeth_priv *priv = netdev_priv(card->dev); 5773 struct qdio_buffer *buffer = qethbuffer->buffer; 5774 struct napi_struct *napi = &card->napi; 5775 struct qdio_buffer_element *element; 5776 unsigned int linear_len = 0; 5777 bool uses_frags = false; 5778 int offset = *__offset; 5779 bool use_rx_sg = false; 5780 unsigned int headroom; 5781 struct qeth_hdr *hdr; 5782 struct sk_buff *skb; 5783 int skb_len = 0; 5784 5785 element = &buffer->element[*element_no]; 5786 5787 next_packet: 5788 /* qeth_hdr must not cross element boundaries */ 5789 while (element->length < offset + sizeof(struct qeth_hdr)) { 5790 if (qeth_is_last_sbale(element)) 5791 return -ENODATA; 5792 element++; 5793 offset = 0; 5794 } 5795 5796 hdr = phys_to_virt(element->addr) + offset; 5797 offset += sizeof(*hdr); 5798 skb = NULL; 5799 5800 switch (hdr->hdr.l2.id) { 5801 case QETH_HEADER_TYPE_LAYER2: 5802 skb_len = hdr->hdr.l2.pkt_length; 5803 linear_len = ETH_HLEN; 5804 headroom = 0; 5805 break; 5806 case QETH_HEADER_TYPE_LAYER3: 5807 skb_len = hdr->hdr.l3.length; 5808 if (!IS_LAYER3(card)) { 5809 QETH_CARD_STAT_INC(card, rx_dropped_notsupp); 5810 goto walk_packet; 5811 } 
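		/* linear_len is sized so that the IP header consulted by
		 * qeth_l3_rebuild_skb() ends up in the skb's linear part;
		 * the remaining payload may stay in page fragments.
		 */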
5812 5813 if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) { 5814 linear_len = ETH_HLEN; 5815 headroom = 0; 5816 break; 5817 } 5818 5819 if (hdr->hdr.l3.flags & QETH_HDR_IPV6) 5820 linear_len = sizeof(struct ipv6hdr); 5821 else 5822 linear_len = sizeof(struct iphdr); 5823 headroom = ETH_HLEN; 5824 break; 5825 case QETH_HEADER_TYPE_OSN: 5826 skb_len = hdr->hdr.osn.pdu_length; 5827 if (!IS_OSN(card)) { 5828 QETH_CARD_STAT_INC(card, rx_dropped_notsupp); 5829 goto walk_packet; 5830 } 5831 5832 linear_len = skb_len; 5833 headroom = sizeof(struct qeth_hdr); 5834 break; 5835 default: 5836 if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL) 5837 QETH_CARD_STAT_INC(card, rx_frame_errors); 5838 else 5839 QETH_CARD_STAT_INC(card, rx_dropped_notsupp); 5840 5841 /* Can't determine packet length, drop the whole buffer. */ 5842 return -EPROTONOSUPPORT; 5843 } 5844 5845 if (skb_len < linear_len) { 5846 QETH_CARD_STAT_INC(card, rx_dropped_runt); 5847 goto walk_packet; 5848 } 5849 5850 use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) || 5851 (skb_len > READ_ONCE(priv->rx_copybreak) && 5852 !atomic_read(&card->force_alloc_skb) && 5853 !IS_OSN(card)); 5854 5855 if (use_rx_sg) { 5856 /* QETH_CQ_ENABLED only: */ 5857 if (qethbuffer->rx_skb && 5858 skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) { 5859 skb = qethbuffer->rx_skb; 5860 qethbuffer->rx_skb = NULL; 5861 goto use_skb; 5862 } 5863 5864 skb = napi_get_frags(napi); 5865 if (!skb) { 5866 /* -ENOMEM, no point in falling back further. */ 5867 QETH_CARD_STAT_INC(card, rx_dropped_nomem); 5868 goto walk_packet; 5869 } 5870 5871 if (skb_tailroom(skb) >= linear_len + headroom) { 5872 uses_frags = true; 5873 goto use_skb; 5874 } 5875 5876 netdev_info_once(card->dev, 5877 "Insufficient linear space in NAPI frags skb, need %u but have %u\n", 5878 linear_len + headroom, skb_tailroom(skb)); 5879 /* Shouldn't happen. Don't optimize, fall back to linear skb. 
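	 * (napi_get_frags() skbs are expected to provide enough tailroom for
	 * any linear_len used here, so reaching this path would hint at a
	 * sizing bug rather than normal operation.)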
*/ 5880 } 5881 5882 linear_len = skb_len; 5883 skb = napi_alloc_skb(napi, linear_len + headroom); 5884 if (!skb) { 5885 QETH_CARD_STAT_INC(card, rx_dropped_nomem); 5886 goto walk_packet; 5887 } 5888 5889 use_skb: 5890 if (headroom) 5891 skb_reserve(skb, headroom); 5892 walk_packet: 5893 while (skb_len) { 5894 int data_len = min(skb_len, (int)(element->length - offset)); 5895 char *data = phys_to_virt(element->addr) + offset; 5896 5897 skb_len -= data_len; 5898 offset += data_len; 5899 5900 /* Extract data from current element: */ 5901 if (skb && data_len) { 5902 if (linear_len) { 5903 unsigned int copy_len; 5904 5905 copy_len = min_t(unsigned int, linear_len, 5906 data_len); 5907 5908 skb_put_data(skb, data, copy_len); 5909 linear_len -= copy_len; 5910 data_len -= copy_len; 5911 data += copy_len; 5912 } 5913 5914 if (data_len) 5915 qeth_create_skb_frag(skb, data, data_len); 5916 } 5917 5918 /* Step forward to next element: */ 5919 if (skb_len) { 5920 if (qeth_is_last_sbale(element)) { 5921 QETH_CARD_TEXT(card, 4, "unexeob"); 5922 QETH_CARD_HEX(card, 2, buffer, sizeof(void *)); 5923 if (skb) { 5924 if (uses_frags) 5925 napi_free_frags(napi); 5926 else 5927 dev_kfree_skb_any(skb); 5928 QETH_CARD_STAT_INC(card, 5929 rx_length_errors); 5930 } 5931 return -EMSGSIZE; 5932 } 5933 element++; 5934 offset = 0; 5935 } 5936 } 5937 5938 /* This packet was skipped, go get another one: */ 5939 if (!skb) 5940 goto next_packet; 5941 5942 *element_no = element - &buffer->element[0]; 5943 *__offset = offset; 5944 5945 qeth_receive_skb(card, skb, hdr, uses_frags); 5946 return 0; 5947 } 5948 5949 static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget, 5950 struct qeth_qdio_buffer *buf, bool *done) 5951 { 5952 unsigned int work_done = 0; 5953 5954 while (budget) { 5955 if (qeth_extract_skb(card, buf, &card->rx.buf_element, 5956 &card->rx.e_offset)) { 5957 *done = true; 5958 break; 5959 } 5960 5961 work_done++; 5962 budget--; 5963 } 5964 5965 return work_done; 5966 } 5967 5968 static unsigned int qeth_rx_poll(struct qeth_card *card, int budget) 5969 { 5970 struct qeth_rx *ctx = &card->rx; 5971 unsigned int work_done = 0; 5972 5973 while (budget > 0) { 5974 struct qeth_qdio_buffer *buffer; 5975 unsigned int skbs_done = 0; 5976 bool done = false; 5977 5978 /* Fetch completed RX buffers: */ 5979 if (!card->rx.b_count) { 5980 card->rx.qdio_err = 0; 5981 card->rx.b_count = qdio_get_next_buffers( 5982 card->data.ccwdev, 0, &card->rx.b_index, 5983 &card->rx.qdio_err); 5984 if (card->rx.b_count <= 0) { 5985 card->rx.b_count = 0; 5986 break; 5987 } 5988 } 5989 5990 /* Process one completed RX buffer: */ 5991 buffer = &card->qdio.in_q->bufs[card->rx.b_index]; 5992 if (!(card->rx.qdio_err && 5993 qeth_check_qdio_errors(card, buffer->buffer, 5994 card->rx.qdio_err, "qinerr"))) 5995 skbs_done = qeth_extract_skbs(card, budget, buffer, 5996 &done); 5997 else 5998 done = true; 5999 6000 work_done += skbs_done; 6001 budget -= skbs_done; 6002 6003 if (done) { 6004 QETH_CARD_STAT_INC(card, rx_bufs); 6005 qeth_put_buffer_pool_entry(card, buffer->pool_entry); 6006 buffer->pool_entry = NULL; 6007 card->rx.b_count--; 6008 ctx->bufs_refill++; 6009 ctx->bufs_refill -= qeth_rx_refill_queue(card, 6010 ctx->bufs_refill); 6011 6012 /* Step forward to next buffer: */ 6013 card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1); 6014 card->rx.buf_element = 0; 6015 card->rx.e_offset = 0; 6016 } 6017 } 6018 6019 return work_done; 6020 } 6021 6022 static void qeth_cq_poll(struct qeth_card *card) 6023 { 6024 unsigned int work_done = 
0; 6025 6026 while (work_done < QDIO_MAX_BUFFERS_PER_Q) { 6027 unsigned int start, error; 6028 int completed; 6029 6030 completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start, 6031 &error); 6032 if (completed <= 0) 6033 return; 6034 6035 qeth_qdio_cq_handler(card, error, 1, start, completed); 6036 work_done += completed; 6037 } 6038 } 6039 6040 int qeth_poll(struct napi_struct *napi, int budget) 6041 { 6042 struct qeth_card *card = container_of(napi, struct qeth_card, napi); 6043 unsigned int work_done; 6044 6045 work_done = qeth_rx_poll(card, budget); 6046 6047 if (card->options.cq == QETH_CQ_ENABLED) 6048 qeth_cq_poll(card); 6049 6050 if (budget) { 6051 struct qeth_rx *ctx = &card->rx; 6052 6053 /* Process any substantial refill backlog: */ 6054 ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill); 6055 6056 /* Exhausted the RX budget. Keep IRQ disabled, we get called again. */ 6057 if (work_done >= budget) 6058 return work_done; 6059 } 6060 6061 if (napi_complete_done(napi, work_done) && 6062 qdio_start_irq(CARD_DDEV(card))) 6063 napi_schedule(napi); 6064 6065 return work_done; 6066 } 6067 EXPORT_SYMBOL_GPL(qeth_poll); 6068 6069 static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue, 6070 unsigned int bidx, bool error, int budget) 6071 { 6072 struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx]; 6073 u8 sflags = buffer->buffer->element[15].sflags; 6074 struct qeth_card *card = queue->card; 6075 6076 if (queue->bufstates && (queue->bufstates[bidx].flags & 6077 QDIO_OUTBUF_STATE_FLAG_PENDING)) { 6078 WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED); 6079 6080 QETH_CARD_TEXT_(card, 5, "pel%u", bidx); 6081 6082 switch (atomic_cmpxchg(&buffer->state, 6083 QETH_QDIO_BUF_PRIMED, 6084 QETH_QDIO_BUF_PENDING)) { 6085 case QETH_QDIO_BUF_PRIMED: 6086 /* We have initial ownership, no QAOB (yet): */ 6087 qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING); 6088 6089 /* Handle race with qeth_qdio_handle_aob(): */ 6090 switch (atomic_xchg(&buffer->state, 6091 QETH_QDIO_BUF_NEED_QAOB)) { 6092 case QETH_QDIO_BUF_PENDING: 6093 /* No concurrent QAOB notification. 
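			 * The buffer now sits in NEED_QAOB state and is
			 * kept around until its QAOB finally arrives, while
			 * a fresh buffer is swapped into the queue slot
			 * right below so that transmission can continue.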
*/ 6094 6095 /* Prepare the queue slot for immediate re-use: */ 6096 qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements); 6097 if (qeth_init_qdio_out_buf(queue, bidx)) { 6098 QETH_CARD_TEXT(card, 2, "outofbuf"); 6099 qeth_schedule_recovery(card); 6100 } 6101 6102 /* Skip clearing the buffer: */ 6103 return; 6104 case QETH_QDIO_BUF_QAOB_OK: 6105 qeth_notify_skbs(queue, buffer, 6106 TX_NOTIFY_DELAYED_OK); 6107 error = false; 6108 break; 6109 case QETH_QDIO_BUF_QAOB_ERROR: 6110 qeth_notify_skbs(queue, buffer, 6111 TX_NOTIFY_DELAYED_GENERALERROR); 6112 error = true; 6113 break; 6114 default: 6115 WARN_ON_ONCE(1); 6116 } 6117 6118 break; 6119 case QETH_QDIO_BUF_QAOB_OK: 6120 /* qeth_qdio_handle_aob() already received a QAOB: */ 6121 qeth_notify_skbs(queue, buffer, TX_NOTIFY_OK); 6122 error = false; 6123 break; 6124 case QETH_QDIO_BUF_QAOB_ERROR: 6125 /* qeth_qdio_handle_aob() already received a QAOB: */ 6126 qeth_notify_skbs(queue, buffer, TX_NOTIFY_GENERALERROR); 6127 error = true; 6128 break; 6129 default: 6130 WARN_ON_ONCE(1); 6131 } 6132 } else if (card->options.cq == QETH_CQ_ENABLED) { 6133 qeth_notify_skbs(queue, buffer, 6134 qeth_compute_cq_notification(sflags, 0)); 6135 } 6136 6137 qeth_clear_output_buffer(queue, buffer, error, budget); 6138 } 6139 6140 static int qeth_tx_poll(struct napi_struct *napi, int budget) 6141 { 6142 struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi); 6143 unsigned int queue_no = queue->queue_no; 6144 struct qeth_card *card = queue->card; 6145 struct net_device *dev = card->dev; 6146 unsigned int work_done = 0; 6147 struct netdev_queue *txq; 6148 6149 txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no)); 6150 6151 while (1) { 6152 unsigned int start, error, i; 6153 unsigned int packets = 0; 6154 unsigned int bytes = 0; 6155 int completed; 6156 6157 if (qeth_out_queue_is_empty(queue)) { 6158 napi_complete(napi); 6159 return 0; 6160 } 6161 6162 /* Give the CPU a breather: */ 6163 if (work_done >= QDIO_MAX_BUFFERS_PER_Q) { 6164 QETH_TXQ_STAT_INC(queue, completion_yield); 6165 if (napi_complete_done(napi, 0)) 6166 napi_schedule(napi); 6167 return 0; 6168 } 6169 6170 completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false, 6171 &start, &error); 6172 if (completed <= 0) { 6173 /* Ensure we see TX completion for pending work: */ 6174 if (napi_complete_done(napi, 0)) 6175 qeth_tx_arm_timer(queue, QETH_TX_TIMER_USECS); 6176 return 0; 6177 } 6178 6179 for (i = start; i < start + completed; i++) { 6180 struct qeth_qdio_out_buffer *buffer; 6181 unsigned int bidx = QDIO_BUFNR(i); 6182 6183 buffer = queue->bufs[bidx]; 6184 packets += buffer->frames; 6185 bytes += buffer->bytes; 6186 6187 qeth_handle_send_error(card, buffer, error); 6188 qeth_iqd_tx_complete(queue, bidx, error, budget); 6189 qeth_cleanup_handled_pending(queue, bidx, false); 6190 } 6191 6192 netdev_tx_completed_queue(txq, packets, bytes); 6193 atomic_sub(completed, &queue->used_buffers); 6194 work_done += completed; 6195 6196 /* xmit may have observed the full-condition, but not yet 6197 * stopped the txq. In which case the code below won't trigger. 6198 * So before returning, xmit will re-check the txq's fill level 6199 * and wake it up if needed. 
6200 */ 6201 if (netif_tx_queue_stopped(txq) && 6202 !qeth_out_queue_is_full(queue)) 6203 netif_tx_wake_queue(txq); 6204 } 6205 } 6206 6207 static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd) 6208 { 6209 if (!cmd->hdr.return_code) 6210 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 6211 return cmd->hdr.return_code; 6212 } 6213 6214 static int qeth_setassparms_get_caps_cb(struct qeth_card *card, 6215 struct qeth_reply *reply, 6216 unsigned long data) 6217 { 6218 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 6219 struct qeth_ipa_caps *caps = reply->param; 6220 6221 if (qeth_setassparms_inspect_rc(cmd)) 6222 return -EIO; 6223 6224 caps->supported = cmd->data.setassparms.data.caps.supported; 6225 caps->enabled = cmd->data.setassparms.data.caps.enabled; 6226 return 0; 6227 } 6228 6229 int qeth_setassparms_cb(struct qeth_card *card, 6230 struct qeth_reply *reply, unsigned long data) 6231 { 6232 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 6233 6234 QETH_CARD_TEXT(card, 4, "defadpcb"); 6235 6236 if (cmd->hdr.return_code) 6237 return -EIO; 6238 6239 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 6240 if (cmd->hdr.prot_version == QETH_PROT_IPV4) 6241 card->options.ipa4.enabled = cmd->hdr.assists.enabled; 6242 if (cmd->hdr.prot_version == QETH_PROT_IPV6) 6243 card->options.ipa6.enabled = cmd->hdr.assists.enabled; 6244 return 0; 6245 } 6246 EXPORT_SYMBOL_GPL(qeth_setassparms_cb); 6247 6248 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card, 6249 enum qeth_ipa_funcs ipa_func, 6250 u16 cmd_code, 6251 unsigned int data_length, 6252 enum qeth_prot_versions prot) 6253 { 6254 struct qeth_ipacmd_setassparms *setassparms; 6255 struct qeth_ipacmd_setassparms_hdr *hdr; 6256 struct qeth_cmd_buffer *iob; 6257 6258 QETH_CARD_TEXT(card, 4, "getasscm"); 6259 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot, 6260 data_length + 6261 offsetof(struct qeth_ipacmd_setassparms, 6262 data)); 6263 if (!iob) 6264 return NULL; 6265 6266 setassparms = &__ipa_cmd(iob)->data.setassparms; 6267 setassparms->assist_no = ipa_func; 6268 6269 hdr = &setassparms->hdr; 6270 hdr->length = sizeof(*hdr) + data_length; 6271 hdr->command_code = cmd_code; 6272 return iob; 6273 } 6274 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd); 6275 6276 int qeth_send_simple_setassparms_prot(struct qeth_card *card, 6277 enum qeth_ipa_funcs ipa_func, 6278 u16 cmd_code, u32 *data, 6279 enum qeth_prot_versions prot) 6280 { 6281 unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0; 6282 struct qeth_cmd_buffer *iob; 6283 6284 QETH_CARD_TEXT_(card, 4, "simassp%i", prot); 6285 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot); 6286 if (!iob) 6287 return -ENOMEM; 6288 6289 if (data) 6290 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data; 6291 return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL); 6292 } 6293 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot); 6294 6295 static void qeth_unregister_dbf_views(void) 6296 { 6297 int x; 6298 6299 for (x = 0; x < QETH_DBF_INFOS; x++) { 6300 debug_unregister(qeth_dbf[x].id); 6301 qeth_dbf[x].id = NULL; 6302 } 6303 } 6304 6305 void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...) 
6306 { 6307 char dbf_txt_buf[32]; 6308 va_list args; 6309 6310 if (!debug_level_enabled(id, level)) 6311 return; 6312 va_start(args, fmt); 6313 vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args); 6314 va_end(args); 6315 debug_text_event(id, level, dbf_txt_buf); 6316 } 6317 EXPORT_SYMBOL_GPL(qeth_dbf_longtext); 6318 6319 static int qeth_register_dbf_views(void) 6320 { 6321 int ret; 6322 int x; 6323 6324 for (x = 0; x < QETH_DBF_INFOS; x++) { 6325 /* register the areas */ 6326 qeth_dbf[x].id = debug_register(qeth_dbf[x].name, 6327 qeth_dbf[x].pages, 6328 qeth_dbf[x].areas, 6329 qeth_dbf[x].len); 6330 if (qeth_dbf[x].id == NULL) { 6331 qeth_unregister_dbf_views(); 6332 return -ENOMEM; 6333 } 6334 6335 /* register a view */ 6336 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view); 6337 if (ret) { 6338 qeth_unregister_dbf_views(); 6339 return ret; 6340 } 6341 6342 /* set a passing level */ 6343 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level); 6344 } 6345 6346 return 0; 6347 } 6348 6349 static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */ 6350 6351 int qeth_core_load_discipline(struct qeth_card *card, 6352 enum qeth_discipline_id discipline) 6353 { 6354 mutex_lock(&qeth_mod_mutex); 6355 switch (discipline) { 6356 case QETH_DISCIPLINE_LAYER3: 6357 card->discipline = try_then_request_module( 6358 symbol_get(qeth_l3_discipline), "qeth_l3"); 6359 break; 6360 case QETH_DISCIPLINE_LAYER2: 6361 card->discipline = try_then_request_module( 6362 symbol_get(qeth_l2_discipline), "qeth_l2"); 6363 break; 6364 default: 6365 break; 6366 } 6367 mutex_unlock(&qeth_mod_mutex); 6368 6369 if (!card->discipline) { 6370 dev_err(&card->gdev->dev, "There is no kernel module to " 6371 "support discipline %d\n", discipline); 6372 return -EINVAL; 6373 } 6374 6375 card->options.layer = discipline; 6376 return 0; 6377 } 6378 6379 void qeth_core_free_discipline(struct qeth_card *card) 6380 { 6381 if (IS_LAYER2(card)) 6382 symbol_put(qeth_l2_discipline); 6383 else 6384 symbol_put(qeth_l3_discipline); 6385 card->options.layer = QETH_DISCIPLINE_UNDETERMINED; 6386 card->discipline = NULL; 6387 } 6388 6389 const struct device_type qeth_generic_devtype = { 6390 .name = "qeth_generic", 6391 }; 6392 EXPORT_SYMBOL_GPL(qeth_generic_devtype); 6393 6394 static const struct device_type qeth_osn_devtype = { 6395 .name = "qeth_osn", 6396 }; 6397 6398 #define DBF_NAME_LEN 20 6399 6400 struct qeth_dbf_entry { 6401 char dbf_name[DBF_NAME_LEN]; 6402 debug_info_t *dbf_info; 6403 struct list_head dbf_list; 6404 }; 6405 6406 static LIST_HEAD(qeth_dbf_list); 6407 static DEFINE_MUTEX(qeth_dbf_list_mutex); 6408 6409 static debug_info_t *qeth_get_dbf_entry(char *name) 6410 { 6411 struct qeth_dbf_entry *entry; 6412 debug_info_t *rc = NULL; 6413 6414 mutex_lock(&qeth_dbf_list_mutex); 6415 list_for_each_entry(entry, &qeth_dbf_list, dbf_list) { 6416 if (strcmp(entry->dbf_name, name) == 0) { 6417 rc = entry->dbf_info; 6418 break; 6419 } 6420 } 6421 mutex_unlock(&qeth_dbf_list_mutex); 6422 return rc; 6423 } 6424 6425 static int qeth_add_dbf_entry(struct qeth_card *card, char *name) 6426 { 6427 struct qeth_dbf_entry *new_entry; 6428 6429 card->debug = debug_register(name, 2, 1, 8); 6430 if (!card->debug) { 6431 QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf"); 6432 goto err; 6433 } 6434 if (debug_register_view(card->debug, &debug_hex_ascii_view)) 6435 goto err_dbg; 6436 new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL); 6437 if (!new_entry) 6438 goto err_dbg; 6439 strncpy(new_entry->dbf_name, name, DBF_NAME_LEN); 6440 
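	/* Keep the entry cached on qeth_dbf_list: when the same device is
	 * probed again, qeth_core_probe_device() reuses it via
	 * qeth_get_dbf_entry() instead of registering a second debug area.
	 * Entries are only freed by qeth_clear_dbf_list() at module exit.
	 */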
new_entry->dbf_info = card->debug; 6441 mutex_lock(&qeth_dbf_list_mutex); 6442 list_add(&new_entry->dbf_list, &qeth_dbf_list); 6443 mutex_unlock(&qeth_dbf_list_mutex); 6444 6445 return 0; 6446 6447 err_dbg: 6448 debug_unregister(card->debug); 6449 err: 6450 return -ENOMEM; 6451 } 6452 6453 static void qeth_clear_dbf_list(void) 6454 { 6455 struct qeth_dbf_entry *entry, *tmp; 6456 6457 mutex_lock(&qeth_dbf_list_mutex); 6458 list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) { 6459 list_del(&entry->dbf_list); 6460 debug_unregister(entry->dbf_info); 6461 kfree(entry); 6462 } 6463 mutex_unlock(&qeth_dbf_list_mutex); 6464 } 6465 6466 static struct net_device *qeth_alloc_netdev(struct qeth_card *card) 6467 { 6468 struct net_device *dev; 6469 struct qeth_priv *priv; 6470 6471 switch (card->info.type) { 6472 case QETH_CARD_TYPE_IQD: 6473 dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN, 6474 ether_setup, QETH_MAX_OUT_QUEUES, 1); 6475 break; 6476 case QETH_CARD_TYPE_OSM: 6477 dev = alloc_etherdev(sizeof(*priv)); 6478 break; 6479 case QETH_CARD_TYPE_OSN: 6480 dev = alloc_netdev(sizeof(*priv), "osn%d", NET_NAME_UNKNOWN, 6481 ether_setup); 6482 break; 6483 default: 6484 dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1); 6485 } 6486 6487 if (!dev) 6488 return NULL; 6489 6490 priv = netdev_priv(dev); 6491 priv->rx_copybreak = QETH_RX_COPYBREAK; 6492 priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1; 6493 6494 dev->ml_priv = card; 6495 dev->watchdog_timeo = QETH_TX_TIMEOUT; 6496 dev->min_mtu = IS_OSN(card) ? 64 : 576; 6497 /* initialized when device first goes online: */ 6498 dev->max_mtu = 0; 6499 dev->mtu = 0; 6500 SET_NETDEV_DEV(dev, &card->gdev->dev); 6501 netif_carrier_off(dev); 6502 6503 if (IS_OSN(card)) { 6504 dev->ethtool_ops = &qeth_osn_ethtool_ops; 6505 } else { 6506 dev->ethtool_ops = &qeth_ethtool_ops; 6507 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 6508 dev->hw_features |= NETIF_F_SG; 6509 dev->vlan_features |= NETIF_F_SG; 6510 if (IS_IQD(card)) 6511 dev->features |= NETIF_F_SG; 6512 } 6513 6514 return dev; 6515 } 6516 6517 struct net_device *qeth_clone_netdev(struct net_device *orig) 6518 { 6519 struct net_device *clone = qeth_alloc_netdev(orig->ml_priv); 6520 6521 if (!clone) 6522 return NULL; 6523 6524 clone->dev_port = orig->dev_port; 6525 return clone; 6526 } 6527 6528 static int qeth_core_probe_device(struct ccwgroup_device *gdev) 6529 { 6530 struct qeth_card *card; 6531 struct device *dev; 6532 int rc; 6533 enum qeth_discipline_id enforced_disc; 6534 char dbf_name[DBF_NAME_LEN]; 6535 6536 QETH_DBF_TEXT(SETUP, 2, "probedev"); 6537 6538 dev = &gdev->dev; 6539 if (!get_device(dev)) 6540 return -ENODEV; 6541 6542 QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev)); 6543 6544 card = qeth_alloc_card(gdev); 6545 if (!card) { 6546 QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM); 6547 rc = -ENOMEM; 6548 goto err_dev; 6549 } 6550 6551 snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s", 6552 dev_name(&gdev->dev)); 6553 card->debug = qeth_get_dbf_entry(dbf_name); 6554 if (!card->debug) { 6555 rc = qeth_add_dbf_entry(card, dbf_name); 6556 if (rc) 6557 goto err_card; 6558 } 6559 6560 qeth_setup_card(card); 6561 card->dev = qeth_alloc_netdev(card); 6562 if (!card->dev) { 6563 rc = -ENOMEM; 6564 goto err_card; 6565 } 6566 6567 qeth_determine_capabilities(card); 6568 qeth_set_blkt_defaults(card); 6569 6570 card->qdio.no_out_queues = card->dev->num_tx_queues; 6571 rc = qeth_update_from_chp_desc(card); 6572 if (rc) 6573 goto err_chp_desc; 6574 6575 if 
(IS_OSN(card)) 6576 gdev->dev.groups = qeth_osn_dev_groups; 6577 else 6578 gdev->dev.groups = qeth_dev_groups; 6579 6580 enforced_disc = qeth_enforce_discipline(card); 6581 switch (enforced_disc) { 6582 case QETH_DISCIPLINE_UNDETERMINED: 6583 gdev->dev.type = &qeth_generic_devtype; 6584 break; 6585 default: 6586 card->info.layer_enforced = true; 6587 rc = qeth_core_load_discipline(card, enforced_disc); 6588 if (rc) 6589 goto err_load; 6590 6591 gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype : 6592 card->discipline->devtype; 6593 rc = card->discipline->setup(card->gdev); 6594 if (rc) 6595 goto err_disc; 6596 break; 6597 } 6598 6599 return 0; 6600 6601 err_disc: 6602 qeth_core_free_discipline(card); 6603 err_load: 6604 err_chp_desc: 6605 free_netdev(card->dev); 6606 err_card: 6607 qeth_core_free_card(card); 6608 err_dev: 6609 put_device(dev); 6610 return rc; 6611 } 6612 6613 static void qeth_core_remove_device(struct ccwgroup_device *gdev) 6614 { 6615 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 6616 6617 QETH_CARD_TEXT(card, 2, "removedv"); 6618 6619 if (card->discipline) { 6620 card->discipline->remove(gdev); 6621 qeth_core_free_discipline(card); 6622 } 6623 6624 qeth_free_qdio_queues(card); 6625 6626 free_netdev(card->dev); 6627 qeth_core_free_card(card); 6628 put_device(&gdev->dev); 6629 } 6630 6631 static int qeth_core_set_online(struct ccwgroup_device *gdev) 6632 { 6633 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 6634 int rc = 0; 6635 enum qeth_discipline_id def_discipline; 6636 6637 if (!card->discipline) { 6638 def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 : 6639 QETH_DISCIPLINE_LAYER2; 6640 rc = qeth_core_load_discipline(card, def_discipline); 6641 if (rc) 6642 goto err; 6643 rc = card->discipline->setup(card->gdev); 6644 if (rc) { 6645 qeth_core_free_discipline(card); 6646 goto err; 6647 } 6648 } 6649 6650 rc = qeth_set_online(card); 6651 err: 6652 return rc; 6653 } 6654 6655 static int qeth_core_set_offline(struct ccwgroup_device *gdev) 6656 { 6657 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 6658 6659 return qeth_set_offline(card, false); 6660 } 6661 6662 static void qeth_core_shutdown(struct ccwgroup_device *gdev) 6663 { 6664 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 6665 6666 qeth_set_allowed_threads(card, 0, 1); 6667 if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) 6668 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); 6669 qeth_qdio_clear_card(card, 0); 6670 qeth_drain_output_queues(card); 6671 qdio_free(CARD_DDEV(card)); 6672 } 6673 6674 static ssize_t group_store(struct device_driver *ddrv, const char *buf, 6675 size_t count) 6676 { 6677 int err; 6678 6679 err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3, 6680 buf); 6681 6682 return err ? 
err : count; 6683 } 6684 static DRIVER_ATTR_WO(group); 6685 6686 static struct attribute *qeth_drv_attrs[] = { 6687 &driver_attr_group.attr, 6688 NULL, 6689 }; 6690 static struct attribute_group qeth_drv_attr_group = { 6691 .attrs = qeth_drv_attrs, 6692 }; 6693 static const struct attribute_group *qeth_drv_attr_groups[] = { 6694 &qeth_drv_attr_group, 6695 NULL, 6696 }; 6697 6698 static struct ccwgroup_driver qeth_core_ccwgroup_driver = { 6699 .driver = { 6700 .groups = qeth_drv_attr_groups, 6701 .owner = THIS_MODULE, 6702 .name = "qeth", 6703 }, 6704 .ccw_driver = &qeth_ccw_driver, 6705 .setup = qeth_core_probe_device, 6706 .remove = qeth_core_remove_device, 6707 .set_online = qeth_core_set_online, 6708 .set_offline = qeth_core_set_offline, 6709 .shutdown = qeth_core_shutdown, 6710 }; 6711 6712 struct qeth_card *qeth_get_card_by_busid(char *bus_id) 6713 { 6714 struct ccwgroup_device *gdev; 6715 struct qeth_card *card; 6716 6717 gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id); 6718 if (!gdev) 6719 return NULL; 6720 6721 card = dev_get_drvdata(&gdev->dev); 6722 put_device(&gdev->dev); 6723 return card; 6724 } 6725 EXPORT_SYMBOL_GPL(qeth_get_card_by_busid); 6726 6727 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 6728 { 6729 struct qeth_card *card = dev->ml_priv; 6730 struct mii_ioctl_data *mii_data; 6731 int rc = 0; 6732 6733 switch (cmd) { 6734 case SIOC_QETH_ADP_SET_SNMP_CONTROL: 6735 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data); 6736 break; 6737 case SIOC_QETH_GET_CARD_TYPE: 6738 if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) && 6739 !IS_VM_NIC(card)) 6740 return 1; 6741 return 0; 6742 case SIOCGMIIPHY: 6743 mii_data = if_mii(rq); 6744 mii_data->phy_id = 0; 6745 break; 6746 case SIOCGMIIREG: 6747 mii_data = if_mii(rq); 6748 if (mii_data->phy_id != 0) 6749 rc = -EINVAL; 6750 else 6751 mii_data->val_out = qeth_mdio_read(dev, 6752 mii_data->phy_id, mii_data->reg_num); 6753 break; 6754 case SIOC_QETH_QUERY_OAT: 6755 rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data); 6756 break; 6757 default: 6758 if (card->discipline->do_ioctl) 6759 rc = card->discipline->do_ioctl(dev, rq, cmd); 6760 else 6761 rc = -EOPNOTSUPP; 6762 } 6763 if (rc) 6764 QETH_CARD_TEXT_(card, 2, "ioce%x", rc); 6765 return rc; 6766 } 6767 EXPORT_SYMBOL_GPL(qeth_do_ioctl); 6768 6769 static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply, 6770 unsigned long data) 6771 { 6772 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 6773 u32 *features = reply->param; 6774 6775 if (qeth_setassparms_inspect_rc(cmd)) 6776 return -EIO; 6777 6778 *features = cmd->data.setassparms.data.flags_32bit; 6779 return 0; 6780 } 6781 6782 static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype, 6783 enum qeth_prot_versions prot) 6784 { 6785 return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP, 6786 NULL, prot); 6787 } 6788 6789 static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype, 6790 enum qeth_prot_versions prot, u8 *lp2lp) 6791 { 6792 u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP; 6793 struct qeth_cmd_buffer *iob; 6794 struct qeth_ipa_caps caps; 6795 u32 features; 6796 int rc; 6797 6798 /* some L3 HW requires combined L3+L4 csum offload: */ 6799 if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 && 6800 cstype == IPA_OUTBOUND_CHECKSUM) 6801 required_features |= QETH_IPA_CHECKSUM_IP_HDR; 6802 6803 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0, 6804 prot); 
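	/* Enabling checksum offload is a multi-step IPA conversation:
	 * ASS_START queries which checksum features the card supports,
	 * ASS_ENABLE then requests the required feature bits, and the
	 * returned caps are checked to confirm they are really enabled.
	 * Every failure path rolls back via qeth_set_csum_off().
	 */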
6805 if (!iob) 6806 return -ENOMEM; 6807 6808 rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features); 6809 if (rc) 6810 return rc; 6811 6812 if ((required_features & features) != required_features) { 6813 qeth_set_csum_off(card, cstype, prot); 6814 return -EOPNOTSUPP; 6815 } 6816 6817 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE, 6818 SETASS_DATA_SIZEOF(flags_32bit), 6819 prot); 6820 if (!iob) { 6821 qeth_set_csum_off(card, cstype, prot); 6822 return -ENOMEM; 6823 } 6824 6825 if (features & QETH_IPA_CHECKSUM_LP2LP) 6826 required_features |= QETH_IPA_CHECKSUM_LP2LP; 6827 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features; 6828 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps); 6829 if (rc) { 6830 qeth_set_csum_off(card, cstype, prot); 6831 return rc; 6832 } 6833 6834 if (!qeth_ipa_caps_supported(&caps, required_features) || 6835 !qeth_ipa_caps_enabled(&caps, required_features)) { 6836 qeth_set_csum_off(card, cstype, prot); 6837 return -EOPNOTSUPP; 6838 } 6839 6840 dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n", 6841 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot); 6842 6843 if (lp2lp) 6844 *lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP); 6845 6846 return 0; 6847 } 6848 6849 static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype, 6850 enum qeth_prot_versions prot, u8 *lp2lp) 6851 { 6852 return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) : 6853 qeth_set_csum_off(card, cstype, prot); 6854 } 6855 6856 static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply, 6857 unsigned long data) 6858 { 6859 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 6860 struct qeth_tso_start_data *tso_data = reply->param; 6861 6862 if (qeth_setassparms_inspect_rc(cmd)) 6863 return -EIO; 6864 6865 tso_data->mss = cmd->data.setassparms.data.tso.mss; 6866 tso_data->supported = cmd->data.setassparms.data.tso.supported; 6867 return 0; 6868 } 6869 6870 static int qeth_set_tso_off(struct qeth_card *card, 6871 enum qeth_prot_versions prot) 6872 { 6873 return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO, 6874 IPA_CMD_ASS_STOP, NULL, prot); 6875 } 6876 6877 static int qeth_set_tso_on(struct qeth_card *card, 6878 enum qeth_prot_versions prot) 6879 { 6880 struct qeth_tso_start_data tso_data; 6881 struct qeth_cmd_buffer *iob; 6882 struct qeth_ipa_caps caps; 6883 int rc; 6884 6885 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO, 6886 IPA_CMD_ASS_START, 0, prot); 6887 if (!iob) 6888 return -ENOMEM; 6889 6890 rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data); 6891 if (rc) 6892 return rc; 6893 6894 if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) { 6895 qeth_set_tso_off(card, prot); 6896 return -EOPNOTSUPP; 6897 } 6898 6899 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO, 6900 IPA_CMD_ASS_ENABLE, 6901 SETASS_DATA_SIZEOF(caps), prot); 6902 if (!iob) { 6903 qeth_set_tso_off(card, prot); 6904 return -ENOMEM; 6905 } 6906 6907 /* enable TSO capability */ 6908 __ipa_cmd(iob)->data.setassparms.data.caps.enabled = 6909 QETH_IPA_LARGE_SEND_TCP; 6910 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps); 6911 if (rc) { 6912 qeth_set_tso_off(card, prot); 6913 return rc; 6914 } 6915 6916 if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) || 6917 !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) { 6918 qeth_set_tso_off(card, prot); 6919 return -EOPNOTSUPP; 6920 } 6921 6922 
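	/* Success: the card segments TCP payload into chunks of at most
	 * tso_data.mss bytes. IPv4 TSO is wired up to NETIF_F_TSO and
	 * IPv6 TSO to NETIF_F_TSO6, see qeth_set_features().
	 */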
dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot, 6923 tso_data.mss); 6924 return 0; 6925 } 6926 6927 static int qeth_set_ipa_tso(struct qeth_card *card, bool on, 6928 enum qeth_prot_versions prot) 6929 { 6930 return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot); 6931 } 6932 6933 static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on) 6934 { 6935 int rc_ipv4 = (on) ? -EOPNOTSUPP : 0; 6936 int rc_ipv6; 6937 6938 if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) 6939 rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM, 6940 QETH_PROT_IPV4, NULL); 6941 if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) 6942 /* no/one Offload Assist available, so the rc is trivial */ 6943 return rc_ipv4; 6944 6945 rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM, 6946 QETH_PROT_IPV6, NULL); 6947 6948 if (on) 6949 /* enable: success if any Assist is active */ 6950 return (rc_ipv6) ? rc_ipv4 : 0; 6951 6952 /* disable: failure if any Assist is still active */ 6953 return (rc_ipv6) ? rc_ipv6 : rc_ipv4; 6954 } 6955 6956 /** 6957 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features 6958 * @dev: a net_device 6959 */ 6960 void qeth_enable_hw_features(struct net_device *dev) 6961 { 6962 struct qeth_card *card = dev->ml_priv; 6963 netdev_features_t features; 6964 6965 features = dev->features; 6966 /* force-off any feature that might need an IPA sequence. 6967 * netdev_update_features() will restart them. 6968 */ 6969 dev->features &= ~dev->hw_features; 6970 /* toggle VLAN filter, so that VIDs are re-programmed: */ 6971 if (IS_LAYER2(card) && IS_VM_NIC(card)) { 6972 dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 6973 dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 6974 } 6975 netdev_update_features(dev); 6976 if (features != dev->features) 6977 dev_warn(&card->gdev->dev, 6978 "Device recovery failed to restore all offload features\n"); 6979 } 6980 EXPORT_SYMBOL_GPL(qeth_enable_hw_features); 6981 6982 static void qeth_check_restricted_features(struct qeth_card *card, 6983 netdev_features_t changed, 6984 netdev_features_t actual) 6985 { 6986 netdev_features_t ipv6_features = NETIF_F_TSO6; 6987 netdev_features_t ipv4_features = NETIF_F_TSO; 6988 6989 if (!card->info.has_lp2lp_cso_v6) 6990 ipv6_features |= NETIF_F_IPV6_CSUM; 6991 if (!card->info.has_lp2lp_cso_v4) 6992 ipv4_features |= NETIF_F_IP_CSUM; 6993 6994 if ((changed & ipv6_features) && !(actual & ipv6_features)) 6995 qeth_flush_local_addrs6(card); 6996 if ((changed & ipv4_features) && !(actual & ipv4_features)) 6997 qeth_flush_local_addrs4(card); 6998 } 6999 7000 int qeth_set_features(struct net_device *dev, netdev_features_t features) 7001 { 7002 struct qeth_card *card = dev->ml_priv; 7003 netdev_features_t changed = dev->features ^ features; 7004 int rc = 0; 7005 7006 QETH_CARD_TEXT(card, 2, "setfeat"); 7007 QETH_CARD_HEX(card, 2, &features, sizeof(features)); 7008 7009 if ((changed & NETIF_F_IP_CSUM)) { 7010 rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM, 7011 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4, 7012 &card->info.has_lp2lp_cso_v4); 7013 if (rc) 7014 changed ^= NETIF_F_IP_CSUM; 7015 } 7016 if (changed & NETIF_F_IPV6_CSUM) { 7017 rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM, 7018 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6, 7019 &card->info.has_lp2lp_cso_v6); 7020 if (rc) 7021 changed ^= NETIF_F_IPV6_CSUM; 7022 } 7023 if (changed & NETIF_F_RXCSUM) { 7024 rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM); 7025 if (rc) 7026 changed ^= NETIF_F_RXCSUM; 
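		/* Removing a bit from 'changed' marks that toggle as
		 * failed, so the reconciliation at the end of this
		 * function keeps the feature in its previous state.
		 */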
7027 } 7028 if (changed & NETIF_F_TSO) { 7029 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO, 7030 QETH_PROT_IPV4); 7031 if (rc) 7032 changed ^= NETIF_F_TSO; 7033 } 7034 if (changed & NETIF_F_TSO6) { 7035 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6, 7036 QETH_PROT_IPV6); 7037 if (rc) 7038 changed ^= NETIF_F_TSO6; 7039 } 7040 7041 qeth_check_restricted_features(card, dev->features ^ features, 7042 dev->features ^ changed); 7043 7044 /* everything changed successfully? */ 7045 if ((dev->features ^ features) == changed) 7046 return 0; 7047 /* something went wrong. save changed features and return error */ 7048 dev->features ^= changed; 7049 return -EIO; 7050 } 7051 EXPORT_SYMBOL_GPL(qeth_set_features); 7052 7053 netdev_features_t qeth_fix_features(struct net_device *dev, 7054 netdev_features_t features) 7055 { 7056 struct qeth_card *card = dev->ml_priv; 7057 7058 QETH_CARD_TEXT(card, 2, "fixfeat"); 7059 if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) 7060 features &= ~NETIF_F_IP_CSUM; 7061 if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) 7062 features &= ~NETIF_F_IPV6_CSUM; 7063 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) && 7064 !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) 7065 features &= ~NETIF_F_RXCSUM; 7066 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) 7067 features &= ~NETIF_F_TSO; 7068 if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO)) 7069 features &= ~NETIF_F_TSO6; 7070 7071 QETH_CARD_HEX(card, 2, &features, sizeof(features)); 7072 return features; 7073 } 7074 EXPORT_SYMBOL_GPL(qeth_fix_features); 7075 7076 netdev_features_t qeth_features_check(struct sk_buff *skb, 7077 struct net_device *dev, 7078 netdev_features_t features) 7079 { 7080 struct qeth_card *card = dev->ml_priv; 7081 7082 /* Traffic with local next-hop is not eligible for some offloads: */ 7083 if (skb->ip_summed == CHECKSUM_PARTIAL && 7084 READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) { 7085 netdev_features_t restricted = 0; 7086 7087 if (skb_is_gso(skb) && !netif_needs_gso(skb, features)) 7088 restricted |= NETIF_F_ALL_TSO; 7089 7090 switch (vlan_get_protocol(skb)) { 7091 case htons(ETH_P_IP): 7092 if (!card->info.has_lp2lp_cso_v4) 7093 restricted |= NETIF_F_IP_CSUM; 7094 7095 if (restricted && qeth_next_hop_is_local_v4(card, skb)) 7096 features &= ~restricted; 7097 break; 7098 case htons(ETH_P_IPV6): 7099 if (!card->info.has_lp2lp_cso_v6) 7100 restricted |= NETIF_F_IPV6_CSUM; 7101 7102 if (restricted && qeth_next_hop_is_local_v6(card, skb)) 7103 features &= ~restricted; 7104 break; 7105 default: 7106 break; 7107 } 7108 } 7109 7110 /* GSO segmentation builds skbs with 7111 * a (small) linear part for the headers, and 7112 * page frags for the data. 7113 * Compared to a linear skb, the header-only part consumes an 7114 * additional buffer element. This reduces buffer utilization, and 7115 * hurts throughput. So compress small segments into one element. 
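	 * E.g. with ~66 bytes of headers and a gso_size of 1400, each
	 * segment fits comfortably into one order-0 allocation, so one
	 * buffer element carries what would otherwise need a header
	 * element plus a separate data frag element.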
7116 */ 7117 if (netif_needs_gso(skb, features)) { 7118 /* match skb_segment(): */ 7119 unsigned int doffset = skb->data - skb_mac_header(skb); 7120 unsigned int hsize = skb_shinfo(skb)->gso_size; 7121 unsigned int hroom = skb_headroom(skb); 7122 7123 /* linearize only if resulting skb allocations are order-0: */ 7124 if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0)) 7125 features &= ~NETIF_F_SG; 7126 } 7127 7128 return vlan_features_check(skb, features); 7129 } 7130 EXPORT_SYMBOL_GPL(qeth_features_check); 7131 7132 void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 7133 { 7134 struct qeth_card *card = dev->ml_priv; 7135 struct qeth_qdio_out_q *queue; 7136 unsigned int i; 7137 7138 QETH_CARD_TEXT(card, 5, "getstat"); 7139 7140 stats->rx_packets = card->stats.rx_packets; 7141 stats->rx_bytes = card->stats.rx_bytes; 7142 stats->rx_errors = card->stats.rx_length_errors + 7143 card->stats.rx_frame_errors + 7144 card->stats.rx_fifo_errors; 7145 stats->rx_dropped = card->stats.rx_dropped_nomem + 7146 card->stats.rx_dropped_notsupp + 7147 card->stats.rx_dropped_runt; 7148 stats->multicast = card->stats.rx_multicast; 7149 stats->rx_length_errors = card->stats.rx_length_errors; 7150 stats->rx_frame_errors = card->stats.rx_frame_errors; 7151 stats->rx_fifo_errors = card->stats.rx_fifo_errors; 7152 7153 for (i = 0; i < card->qdio.no_out_queues; i++) { 7154 queue = card->qdio.out_qs[i]; 7155 7156 stats->tx_packets += queue->stats.tx_packets; 7157 stats->tx_bytes += queue->stats.tx_bytes; 7158 stats->tx_errors += queue->stats.tx_errors; 7159 stats->tx_dropped += queue->stats.tx_dropped; 7160 } 7161 } 7162 EXPORT_SYMBOL_GPL(qeth_get_stats64); 7163 7164 #define TC_IQD_UCAST 0 7165 static void qeth_iqd_set_prio_tc_map(struct net_device *dev, 7166 unsigned int ucast_txqs) 7167 { 7168 unsigned int prio; 7169 7170 /* IQD requires mcast traffic to be placed on a dedicated queue, and 7171 * qeth_iqd_select_queue() deals with this. 7172 * For unicast traffic, we defer the queue selection to the stack. 7173 * By installing a trivial prio map that spans over only the unicast 7174 * queues, we can encourage the stack to spread the ucast traffic evenly 7175 * without selecting the mcast queue. 7176 */ 7177 7178 /* One traffic class, spanning over all active ucast queues: */ 7179 netdev_set_num_tc(dev, 1); 7180 netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs, 7181 QETH_IQD_MIN_UCAST_TXQ); 7182 7183 /* Map all priorities to this traffic class: */ 7184 for (prio = 0; prio <= TC_BITMASK; prio++) 7185 netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST); 7186 } 7187 7188 int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count) 7189 { 7190 struct net_device *dev = card->dev; 7191 int rc; 7192 7193 /* Per netif_setup_tc(), adjust the mapping first: */ 7194 if (IS_IQD(card)) 7195 qeth_iqd_set_prio_tc_map(dev, count - 1); 7196 7197 rc = netif_set_real_num_tx_queues(dev, count); 7198 7199 if (rc && IS_IQD(card)) 7200 qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1); 7201 7202 return rc; 7203 } 7204 EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues); 7205 7206 u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb, 7207 u8 cast_type, struct net_device *sb_dev) 7208 { 7209 u16 txq; 7210 7211 if (cast_type != RTN_UNICAST) 7212 return QETH_IQD_MCAST_TXQ; 7213 if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ) 7214 return QETH_IQD_MIN_UCAST_TXQ; 7215 7216 txq = netdev_pick_tx(dev, skb, sb_dev); 7217 return (txq == QETH_IQD_MCAST_TXQ) ? 
QETH_IQD_MIN_UCAST_TXQ : txq; 7218 } 7219 EXPORT_SYMBOL_GPL(qeth_iqd_select_queue); 7220 7221 int qeth_open(struct net_device *dev) 7222 { 7223 struct qeth_card *card = dev->ml_priv; 7224 7225 QETH_CARD_TEXT(card, 4, "qethopen"); 7226 7227 card->data.state = CH_STATE_UP; 7228 netif_tx_start_all_queues(dev); 7229 7230 napi_enable(&card->napi); 7231 local_bh_disable(); 7232 napi_schedule(&card->napi); 7233 if (IS_IQD(card)) { 7234 struct qeth_qdio_out_q *queue; 7235 unsigned int i; 7236 7237 qeth_for_each_output_queue(card, queue, i) { 7238 netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll, 7239 QETH_NAPI_WEIGHT); 7240 napi_enable(&queue->napi); 7241 napi_schedule(&queue->napi); 7242 } 7243 } 7244 /* kick-start the NAPI softirq: */ 7245 local_bh_enable(); 7246 return 0; 7247 } 7248 EXPORT_SYMBOL_GPL(qeth_open); 7249 7250 int qeth_stop(struct net_device *dev) 7251 { 7252 struct qeth_card *card = dev->ml_priv; 7253 7254 QETH_CARD_TEXT(card, 4, "qethstop"); 7255 if (IS_IQD(card)) { 7256 struct qeth_qdio_out_q *queue; 7257 unsigned int i; 7258 7259 /* Quiesce the NAPI instances: */ 7260 qeth_for_each_output_queue(card, queue, i) 7261 napi_disable(&queue->napi); 7262 7263 /* Stop .ndo_start_xmit, might still access queue->napi. */ 7264 netif_tx_disable(dev); 7265 7266 qeth_for_each_output_queue(card, queue, i) { 7267 del_timer_sync(&queue->timer); 7268 /* Queues may get re-allocated, so remove the NAPIs. */ 7269 netif_napi_del(&queue->napi); 7270 } 7271 } else { 7272 netif_tx_disable(dev); 7273 } 7274 7275 napi_disable(&card->napi); 7276 cancel_delayed_work_sync(&card->buffer_reclaim_work); 7277 qdio_stop_irq(CARD_DDEV(card)); 7278 7279 return 0; 7280 } 7281 EXPORT_SYMBOL_GPL(qeth_stop); 7282 7283 static int __init qeth_core_init(void) 7284 { 7285 int rc; 7286 7287 pr_info("loading core functions\n"); 7288 7289 qeth_debugfs_root = debugfs_create_dir("qeth", NULL); 7290 7291 rc = qeth_register_dbf_views(); 7292 if (rc) 7293 goto dbf_err; 7294 qeth_core_root_dev = root_device_register("qeth"); 7295 rc = PTR_ERR_OR_ZERO(qeth_core_root_dev); 7296 if (rc) 7297 goto register_err; 7298 qeth_core_header_cache = 7299 kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE, 7300 roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE), 7301 0, NULL); 7302 if (!qeth_core_header_cache) { 7303 rc = -ENOMEM; 7304 goto slab_err; 7305 } 7306 qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf", 7307 sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL); 7308 if (!qeth_qdio_outbuf_cache) { 7309 rc = -ENOMEM; 7310 goto cqslab_err; 7311 } 7312 rc = ccw_driver_register(&qeth_ccw_driver); 7313 if (rc) 7314 goto ccw_err; 7315 rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver); 7316 if (rc) 7317 goto ccwgroup_err; 7318 7319 return 0; 7320 7321 ccwgroup_err: 7322 ccw_driver_unregister(&qeth_ccw_driver); 7323 ccw_err: 7324 kmem_cache_destroy(qeth_qdio_outbuf_cache); 7325 cqslab_err: 7326 kmem_cache_destroy(qeth_core_header_cache); 7327 slab_err: 7328 root_device_unregister(qeth_core_root_dev); 7329 register_err: 7330 qeth_unregister_dbf_views(); 7331 dbf_err: 7332 debugfs_remove_recursive(qeth_debugfs_root); 7333 pr_err("Initializing the qeth device driver failed\n"); 7334 return rc; 7335 } 7336 7337 static void __exit qeth_core_exit(void) 7338 { 7339 qeth_clear_dbf_list(); 7340 ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver); 7341 ccw_driver_unregister(&qeth_ccw_driver); 7342 kmem_cache_destroy(qeth_qdio_outbuf_cache); 7343 kmem_cache_destroy(qeth_core_header_cache); 7344 root_device_unregister(qeth_core_root_dev); 7345 
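	/* The dbf views are still registered at this point, so the
	 * teardown steps above could still be traced through them.
	 */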
qeth_unregister_dbf_views(); 7346 debugfs_remove_recursive(qeth_debugfs_root); 7347 pr_info("core functions removed\n"); 7348 } 7349 7350 module_init(qeth_core_init); 7351 module_exit(qeth_core_exit); 7352 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>"); 7353 MODULE_DESCRIPTION("qeth core functions"); 7354 MODULE_LICENSE("GPL"); 7355
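/* Typical lifecycle from userspace (device numbers are just examples):
 * group the three CCW devices (read/write/data channel) through the
 * sysfs attribute implemented by group_store() above, then set the
 * resulting ccwgroup device online:
 *
 *   echo 0.0.f500,0.0.f501,0.0.f502 > /sys/bus/ccwgroup/drivers/qeth/group
 *   echo 1 > /sys/bus/ccwgroup/devices/0.0.f500/online
 */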