// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *	      Frank Pavlic <fpavlic@de.ibm.com>,
 *	      Thomas Spatzier <tspat@de.ibm.com>,
 *	      Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
#include <net/sock.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	/*                   N  P  A  M  L  V                        H  */
	[QETH_DBF_SETUP] = {"qeth_setup",
			    8, 1, 8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]	 = {"qeth_control",
			    8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

static struct kmem_cache *qeth_core_header_cache;
static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct kmem_cache *qeth_qaob_cache;

static struct device *qeth_core_root_dev;
static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);

static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
	return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "Virt.NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return "Virt.NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return "Virt.NIC OSM";
		case QETH_CARD_TYPE_OSX:
			return "Virt.NIC OSX";
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			return "unknown";
		}
	}
	return "n/a";
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			      int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

static void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	struct qeth_qdio_q *queue = card->qdio.in_q;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.in_buf_pool.entry_list, list)
		list_del(&pool_entry->list);

	if (!queue)
		return;

	for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
		queue->bufs[i].pool_entry = NULL;
}

static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
		if (entry->elements[i])
			__free_page(entry->elements[i]);
	}

	kfree(entry);
}

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
				 init_list) {
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);
	}
}

static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
{
	struct qeth_buffer_pool_entry *entry;
	unsigned int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	for (i = 0; i < pages; i++) {
		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);

		if (!entry->elements[i]) {
			qeth_free_pool_entry(entry);
			return NULL;
		}
	}

	return entry;
}

static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		struct qeth_buffer_pool_entry *entry;

		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}

		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
	}
	return 0;
}
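/*
 * Grow or shrink the pool of pre-allocated RX buffer pages by whole pool
 * entries (each entry backs QETH_MAX_BUFFER_ELEMENTS(card) pages). If
 * allocating the additional entries fails, the entries that were already
 * allocated are rolled back and the pool is left unchanged.
 */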
int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
	struct qeth_buffer_pool_entry *entry, *tmp;
	int delta = count - pool->buf_count;
	LIST_HEAD(entries);

	QETH_CARD_TEXT(card, 2, "realcbp");

	/* Defer until queue is allocated: */
	if (!card->qdio.in_q)
		goto out;

	/* Remove entries from the pool: */
	while (delta < 0) {
		entry = list_first_entry(&pool->entry_list,
					 struct qeth_buffer_pool_entry,
					 init_list);
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);

		delta++;
	}

	/* Allocate additional entries: */
	while (delta > 0) {
		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			list_for_each_entry_safe(entry, tmp, &entries,
						 init_list) {
				list_del(&entry->init_list);
				qeth_free_pool_entry(entry);
			}

			return -ENOMEM;
		}

		list_add(&entry->init_list, &entries);

		delta--;
	}

	list_splice(&entries, &pool->entry_list);

out:
	card->qdio.in_buf_pool.buf_count = count;
	pool->buf_count = count;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 1, 0, 127,
			     NULL);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
			goto out;
		}
	}
	rc = 0;
out:
	return rc;
}

static int qeth_alloc_cq(struct qeth_card *card)
{
	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqon");
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
			dev_err(&card->gdev->dev, "Failed to create completion queue\n");
			return -ENOMEM;
		}
	} else {
		QETH_CARD_TEXT(card, 2, "nocq");
		card->qdio.c_q = NULL;
	}
	return 0;
}

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
}

static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			      TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			      TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}

static void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}

static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = (__u32) __pa(data);
}

static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (!rc) {
		channel->active_cmd = iob;
	} else {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		qeth_unlock_channel(card, channel);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
	}
	return rc;
}

static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list_entry, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list_entry);
	spin_unlock_irq(&card->lock);
}

static void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}

static void qeth_flush_local_addrs4(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs4_lock);
	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs4_lock);
}

static void qeth_flush_local_addrs6(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs6_lock);
	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs6_lock);
}

static void qeth_flush_local_addrs(struct qeth_card *card)
{
	qeth_flush_local_addrs4(card);
	qeth_flush_local_addrs6(card);
}
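/*
 * The card reports addresses that are local to this host via unsolicited
 * IPA events. They are stored in per-protocol hash tables so that the TX
 * path can cheaply test whether a packet's next hop is one of our own
 * addresses; writers serialize on the local_addrs*_lock, readers walk the
 * tables under RCU.
 */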
static void qeth_add_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_add_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		addr->addr = cmd->addrs[i].addr;
		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs6_lock);
}

static void qeth_del_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
		unsigned int key = ipv4_addr_hash(addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
			if (tmp->addr.s6_addr32[3] == addr->addr) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_del_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
		u32 key = ipv6_addr_hash(&addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs6_lock);
}

static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	bool is_local = false;
	unsigned int key;
	__be32 next_hop;

	if (hash_empty(card->local_addrs4))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v4_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IP)));
	key = ipv4_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
		if (tmp->addr.s6_addr32[3] == next_hop) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	struct in6_addr *next_hop;
	bool is_local = false;
	u32 key;

	if (hash_empty(card->local_addrs6))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v6_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IPV6)));
	key = ipv6_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}
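/* Dump both local-address tables via the per-card "local_addrs" debugfs file. */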
static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
{
	struct qeth_card *card = m->private;
	struct qeth_local_addr *tmp;
	unsigned int i;

	rcu_read_lock();
	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
		seq_printf(m, "%pI6c\n", &tmp->addr);
	rcu_read_unlock();

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
			       struct qeth_card *card)
{
	const char *ipa_name;
	int com = cmd->hdr.command;

	ipa_name = qeth_get_ipa_cmd_name(com);

	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
}

static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Adjacent port of interface %s is no longer in reflective relay mode, trigger recovery\n",
				netdev_name(card->dev));
			/* Set offline, then probably fail to set online: */
			qeth_schedule_recovery(card);
		} else {
			/* stay online for subsequent STARTLAN */
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 netdev_name(card->dev), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 netdev_name(card->dev), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}

static void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list_entry)
		qeth_notify_cmd(iob, -ECANCELED);
	spin_unlock_irqrestore(&card->lock, flags);
}

static int qeth_check_idx_response(struct qeth_card *card,
				   unsigned char *buffer)
{
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
			dev_err(&card->gdev->dev,
				"The device does not support the configured transport mode\n");
			return -EPROTONOSUPPORT;
		}
		return -EIO;
	}
	return 0;
}

static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	qeth_put_cmd(iob);
}

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
					      unsigned int length,
					      unsigned int ccws, long timeout)
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	refcount_set(&iob->ref_count, 1);
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}
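/*
 * Completion callback for the long-running READ ccw: evaluate the IDX/IPA
 * response, match it against the list of pending requests, run the
 * requestor's callback, and re-arm the next read on the channel.
 */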
static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
{
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_schedule_recovery(card);
		fallthrough;
	default:
		qeth_clear_ipacmd_list(card);
		goto err_idx;
	}

	cmd = __ipa_reply(iob);
	if (cmd) {
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list_entry) {
		if (tmp->match && tmp->match(tmp, iob)) {
			request = tmp;
			/* take the object outside the lock */
			qeth_get_cmd(request);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!request)
		goto out;

	reply = &request->reply;
	if (!reply->callback) {
		rc = 0;
		goto no_callback;
	}

	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
		/* Bail out when the requestor has already left: */
		rc = request->rc;
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
							(unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
	       QETH_PDU_HEADER_SEQ_NO(iob->data),
	       QETH_SEQ_NO_LENGTH);
	__qeth_issue_next_read(card);
err_idx:
	qeth_put_cmd(iob);
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
				     unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread))
		rc = -EPERM;
	else if (card->thread_start_mask & thread)
		rc = -EBUSY;
	else
		card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);

	return rc;
}

static void qeth_clear_thread_start_bit(struct qeth_card *card,
					unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}

static void qeth_clear_thread_running_bit(struct qeth_card *card,
					  unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}

int qeth_schedule_recovery(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "startrec");

	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		schedule_work(&card->kernel_thread_starter);

	return rc;
}

static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
			       16, 1, irb, 64, 1);
		return -EIO;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return -EIO;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return -EIO;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return -EIO;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return -EIO;
	}
	return 0;
}

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
				struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
		return -EIO;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
		return -ETIMEDOUT;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, " rc???");
		return PTR_ERR(irb);
	}
}
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		     struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	if (intparm == 0) {
		QETH_CARD_TEXT(card, 5, "irqunsol");
	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
		QETH_CARD_TEXT(card, 5, "irqunexp");

		dev_err(&cdev->dev,
			"Received IRQ with intparm %lx, expected %px\n",
			intparm, channel->active_cmd);
		if (channel->active_cmd)
			qeth_cancel_cmd(channel->active_cmd, -EIO);
	} else {
		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
	}

	qeth_unlock_channel(card, channel);

	rc = qeth_check_irb_error(card, cdev, irb);
	if (rc) {
		/* IO was terminated, free its resources. */
		if (iob)
			qeth_cancel_cmd(iob, rc);
		return;
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		channel->state = CH_STATE_STOPPED;
		wake_up(&card->wait_q);
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		channel->state = CH_STATE_HALTED;
		wake_up(&card->wait_q);
	}

	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
					  SCSW_FCTL_HALT_FUNC))) {
		qeth_cancel_cmd(iob, -ECANCELED);
		iob = NULL;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				 "The qeth device driver failed to recover an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				       DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				       DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}

		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			return;
		}
	}

	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
			return;
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}
}

static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
			     struct qeth_qdio_out_buffer *buf,
			     enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		struct sock *sk = skb->sk;

		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (sk && sk->sk_family == PF_IUCV)
			iucv_sk(sk)->sk_txnotify(sk, notification);
	}
}

static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue,
				 struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
{
	struct sk_buff *skb;

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	if (error) {
		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
	} else {
		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
	}

	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
		if (!error) {
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}

		napi_consume_skb(skb, budget);
	}
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) {
		atomic_dec(&queue->set_pci_flags_count);
		QETH_TXQ_STAT_INC(queue, completion_irq);
	}

	qeth_tx_complete_buf(queue, buf, error, budget);

	for (i = 0; i < queue->max_elements; ++i) {
		void *data = phys_to_virt(buf->buffer->element[i].addr);

		if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
			kmem_cache_free(qeth_core_header_cache, data);
	}

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;
	buf->frames = 0;
	buf->bytes = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
{
	if (buf->aob)
		kmem_cache_free(qeth_qaob_cache, buf->aob);
	kmem_cache_free(qeth_qdio_outbuf_cache, buf);
}

static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
					  struct qeth_qdio_out_q *queue,
					  bool drain, int budget)
{
	struct qeth_qdio_out_buffer *buf, *tmp;

	list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
		struct qeth_qaob_priv1 *priv;
		struct qaob *aob = buf->aob;
		enum iucv_tx_notify notify;
		unsigned int i;

		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) {
			QETH_CARD_TEXT(card, 5, "fp");
			QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);

			notify = drain ? TX_NOTIFY_GENERALERROR :
					 qeth_compute_cq_notification(aob->aorc, 1);
			qeth_notify_skbs(queue, buf, notify);
			qeth_tx_complete_buf(queue, buf, drain, budget);

			for (i = 0;
			     i < aob->sb_count && i < queue->max_elements;
			     i++) {
				void *data = phys_to_virt(aob->sba[i]);

				if (test_bit(i, buf->from_kmem_cache) && data)
					kmem_cache_free(qeth_core_header_cache,
							data);
			}

			list_del(&buf->list_entry);
			qeth_free_out_buf(buf);
		}
	}
}

static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
	int j;

	qeth_tx_complete_pending_bufs(q->card, q, true, 0);

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;

		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
		if (free) {
			qeth_free_out_buf(q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}

static void qeth_drain_output_queues(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
	}
}
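/*
 * Switch an OSA device between a single TX queue and the netdev's full
 * queue set. Any existing QDIO queues are freed first, since the queue
 * count can only change while no queues are allocated.
 */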
static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int max = single ? 1 : card->dev->num_tx_queues;

	if (card->qdio.no_out_queues == max)
		return;

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.no_out_queues = max;
}

static int qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;

	QETH_CARD_TEXT(card, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		return -ENOMEM;

	card->info.func_level = 0x4100 + chp_dsc->desc;

	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
		qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

	kfree(chp_dsc);
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
	return 0;
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

	/* inbound */
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (IS_IQD(card))
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, " %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					      kernel_thread_starter);

	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
						      QETH_RECOVER_THREAD);
		}
	}
}

static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "setupcrd");

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	hash_init(card->rx_mode_addrs);
	hash_init(card->local_addrs4);
	hash_init(card->local_addrs6);
	spin_lock_init(&card->local_addrs4_lock);
	spin_lock_init(&card->local_addrs6_lock);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					      qeth_service_level);

	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			   CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;

	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
					   qeth_debugfs_root);
	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
			    &qeth_debugfs_local_addr_fops);

	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_read_cmd:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}

static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
					      channel->state == CH_STATE_STOPPED,
					      QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}
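/*
 * Like qeth_clear_channel(), but issues HALT SUBCHANNEL and thus waits
 * for the channel to reach CH_STATE_HALTED.
 */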
static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
					      channel->state == CH_STATE_HALTED,
					      QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

static int qeth_stop_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	rc = ccw_device_set_offline(cdev);

	spin_lock_irq(get_ccwdev_lock(cdev));
	if (channel->active_cmd)
		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
			channel->active_cmd);

	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	return rc;
}

static int qeth_start_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	channel->state = CH_STATE_DOWN;
	xchg(&channel->active_cmd, NULL);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	rc = ccw_device_set_online(cdev);
	if (rc)
		goto err;

	return 0;

err:
	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
	return rc;
}

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (IS_IQD(card))
			rc = qdio_shutdown(CARD_DDEV(card),
					   QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
					   QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	return rc;
}

static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
	struct diag26c_vnic_resp *response = NULL;
	struct diag26c_vnic_req *request = NULL;
	struct ccw_dev_id id;
	char userid[80];
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vmlayer");

	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
	if (rc)
		goto out;

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	ccw_device_get_id(CARD_RDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION6_VM65918;
	request->req_format = DIAG26C_VNIC_INFO;
	ASCEBC(userid, 8);
	memcpy(&request->sys_name, userid, 8);
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		goto out;
	}

	if (response->protocol == VNIC_INFO_PROT_L2)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (response->protocol == VNIC_INFO_PROT_L3)
		disc = QETH_DISCIPLINE_LAYER3;

out:
	kfree(response);
	kfree(request);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "err%x", rc);
	return disc;
}

/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;

	if (IS_OSM(card))
		disc = QETH_DISCIPLINE_LAYER2;
	else if (IS_VM_NIC(card))
		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
				      qeth_vm_detect_layer(card);

	switch (disc) {
	case QETH_DISCIPLINE_LAYER2:
		QETH_CARD_TEXT(card, 3, "force l2");
		break;
	case QETH_DISCIPLINE_LAYER3:
		QETH_CARD_TEXT(card, 3, "force l3");
		break;
	default:
		QETH_CARD_TEXT(card, 3, "force no");
	}

	return disc;
}

static void qeth_set_blkt_defaults(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "cfgblkt");

	if (card->info.use_v1_blkt) {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	} else {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	}
}

static void qeth_idx_init(struct qeth_card *card)
{
	memset(&card->seqno, 0, sizeof(card->seqno));

	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
		break;
	case QETH_CARD_TYPE_OSD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
	}
}

static void qeth_idx_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
	       QETH_SEQ_NO_LENGTH);
	if (iob->channel == &card->write)
		card->seqno.trans_hdr++;
}

static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}
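/*
 * MPC commands carry a PDU sequence number on top of the IDX transport
 * header; the ack field echoes the last PDU sequence number received from
 * the card (tracked in qeth_issue_next_read_cb()).
 */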
static void qeth_mpc_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_idx_finalize_cmd(card, iob);

	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);

	iob->callback = qeth_release_buffer_cb;
}

static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	/* MPC cmds are issued strictly in sequence. */
	return !IS_IPA(reply->data);
}

static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
						  const void *data,
						  unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;

	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
	if (!iob)
		return NULL;

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
		       iob->data);
	iob->finalize = qeth_mpc_finalize_cmd;
	iob->match = qeth_mpc_match_reply;
	return iob;
}

/**
 * qeth_send_control_data() - send control command to the card
 * @card:	qeth_card structure pointer
 * @iob:	qeth_cmd_buffer pointer
 * @reply_cb:	callback function pointer
 *  cb_card:	pointer to the qeth_card structure
 *  cb_reply:	pointer to the qeth_reply structure
 *  cb_cmd:	pointer to the original iob for non-IPA
 *		commands, or to the qeth_ipa_cmd structure
 *		for the IPA commands.
 * @reply_param: private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback function
 * must return
 *   > 0 if more reply blocks are expected,
 *     0 if the last or only reply block is received, and
 *   < 0 on error.
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
 */
static int qeth_send_control_data(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
{
	struct qeth_channel *channel = iob->channel;
	struct qeth_reply *reply = &iob->reply;
	long timeout = iob->timeout;
	int rc;

	QETH_CARD_TEXT(card, 2, "sendctl");

	reply->callback = reply_cb;
	reply->param = reply_param;

	timeout = wait_event_interruptible_timeout(card->wait_q,
						   qeth_trylock_channel(channel, iob),
						   timeout);
	if (timeout <= 0) {
		qeth_put_cmd(iob);
		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
	}

	if (iob->finalize)
		iob->finalize(card, iob);
	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));

	qeth_enqueue_cmd(card, iob);

	/* This pairs with iob->callback, and keeps the iob alive after IO: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
				      (addr_t) iob, 0, 0, timeout);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		qeth_dequeue_cmd(card, iob);
		qeth_put_cmd(iob);
		qeth_unlock_channel(card, channel);
		goto out;
	}

	timeout = wait_for_completion_interruptible_timeout(&iob->done,
							    timeout);
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;

	qeth_dequeue_cmd(card, iob);

	if (reply_cb) {
		/* Wait until the callback for a late reply has completed: */
		spin_lock_irq(&iob->lock);
		if (rc)
			/* Zap any callback that's still pending: */
			iob->rc = rc;
		spin_unlock_irq(&iob->lock);
	}

	if (!rc)
		rc = iob->rc;

out:
	qeth_put_cmd(iob);
	return rc;
}

struct qeth_node_desc {
	struct node_descriptor nd1;
	struct node_descriptor nd2;
	struct node_descriptor nd3;
};

static void qeth_read_conf_data_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
	int rc = 0;
	u8 *tag;

	QETH_CARD_TEXT(card, 2, "cfgunit");

	if (data_length < sizeof(*nd)) {
		rc = -EINVAL;
		goto out;
	}

	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
			       nd->nd1.plant[1] == _ascebc['M'];
	tag = (u8 *)&nd->nd1.tag;
	card->info.chpid = tag[0];
	card->info.unit_addr2 = tag[1];

	tag = (u8 *)&nd->nd2.tag;
	card->info.cula = tag[1];

	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
				 nd->nd3.model[1] == 0xF0 &&
				 nd->nd3.model[2] >= 0xF1 &&
				 nd->nd3.model[2] <= 0xF4;

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static int qeth_read_conf_data(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct qeth_cmd_buffer *iob;
	struct ciw *ciw;

	/* scan for RCD command in extended SenseID data */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	if (ciw->count < sizeof(struct qeth_node_desc))
		return -EINVAL;

	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	iob->callback = qeth_read_conf_data_cb;
	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
		       iob->data);

	return qeth_send_control_data(card, iob, NULL, NULL);
}
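/*
 * Evaluate an IDX ACTIVATE response: positive replies return 0, negative
 * replies are mapped to a distinct errno based on the cause code.
 */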
QETH_CARD_TEXT_(card, 2, "idxneg%c", 2097 QETH_IDX_ACT_CAUSE_CODE(iob->data)); 2098 2099 switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) { 2100 case QETH_IDX_ACT_ERR_EXCL: 2101 dev_err(&channel->ccwdev->dev, 2102 "The adapter is used exclusively by another host\n"); 2103 return -EBUSY; 2104 case QETH_IDX_ACT_ERR_AUTH: 2105 case QETH_IDX_ACT_ERR_AUTH_USER: 2106 dev_err(&channel->ccwdev->dev, 2107 "Setting the device online failed because of insufficient authorization\n"); 2108 return -EPERM; 2109 default: 2110 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n", 2111 CCW_DEVID(channel->ccwdev)); 2112 return -EIO; 2113 } 2114 } 2115 2116 static void qeth_idx_activate_read_channel_cb(struct qeth_card *card, 2117 struct qeth_cmd_buffer *iob, 2118 unsigned int data_length) 2119 { 2120 struct qeth_channel *channel = iob->channel; 2121 u16 peer_level; 2122 int rc; 2123 2124 QETH_CARD_TEXT(card, 2, "idxrdcb"); 2125 2126 rc = qeth_idx_check_activate_response(card, channel, iob); 2127 if (rc) 2128 goto out; 2129 2130 memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); 2131 if (peer_level != qeth_peer_func_level(card->info.func_level)) { 2132 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", 2133 CCW_DEVID(channel->ccwdev), 2134 card->info.func_level, peer_level); 2135 rc = -EINVAL; 2136 goto out; 2137 } 2138 2139 memcpy(&card->token.issuer_rm_r, 2140 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), 2141 QETH_MPC_TOKEN_LENGTH); 2142 memcpy(&card->info.mcl_level[0], 2143 QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH); 2144 2145 out: 2146 qeth_notify_cmd(iob, rc); 2147 qeth_put_cmd(iob); 2148 } 2149 2150 static void qeth_idx_activate_write_channel_cb(struct qeth_card *card, 2151 struct qeth_cmd_buffer *iob, 2152 unsigned int data_length) 2153 { 2154 struct qeth_channel *channel = iob->channel; 2155 u16 peer_level; 2156 int rc; 2157 2158 QETH_CARD_TEXT(card, 2, "idxwrcb"); 2159 2160 rc = qeth_idx_check_activate_response(card, channel, iob); 2161 if (rc) 2162 goto out; 2163 2164 memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); 2165 if ((peer_level & ~0x0100) != 2166 qeth_peer_func_level(card->info.func_level)) { 2167 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", 2168 CCW_DEVID(channel->ccwdev), 2169 card->info.func_level, peer_level); 2170 rc = -EINVAL; 2171 } 2172 2173 out: 2174 qeth_notify_cmd(iob, rc); 2175 qeth_put_cmd(iob); 2176 } 2177 2178 static void qeth_idx_setup_activate_cmd(struct qeth_card *card, 2179 struct qeth_cmd_buffer *iob) 2180 { 2181 u16 addr = (card->info.cula << 8) + card->info.unit_addr2; 2182 u8 port = ((u8)card->dev->dev_port) | 0x80; 2183 struct ccw1 *ccw = __ccw_from_cmd(iob); 2184 2185 qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE, 2186 iob->data); 2187 qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data); 2188 iob->finalize = qeth_idx_finalize_cmd; 2189 2190 port |= QETH_IDX_ACT_INVAL_FRAME; 2191 memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1); 2192 memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), 2193 &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH); 2194 memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2195 &card->info.func_level, 2); 2196 memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2); 2197 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2); 2198 } 2199 2200 static int qeth_idx_activate_read_channel(struct qeth_card *card) 2201 { 2202 struct qeth_channel *channel = &card->read; 
2203 struct qeth_cmd_buffer *iob; 2204 int rc; 2205 2206 QETH_CARD_TEXT(card, 2, "idxread"); 2207 2208 iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT); 2209 if (!iob) 2210 return -ENOMEM; 2211 2212 memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE); 2213 qeth_idx_setup_activate_cmd(card, iob); 2214 iob->callback = qeth_idx_activate_read_channel_cb; 2215 2216 rc = qeth_send_control_data(card, iob, NULL, NULL); 2217 if (rc) 2218 return rc; 2219 2220 channel->state = CH_STATE_UP; 2221 return 0; 2222 } 2223 2224 static int qeth_idx_activate_write_channel(struct qeth_card *card) 2225 { 2226 struct qeth_channel *channel = &card->write; 2227 struct qeth_cmd_buffer *iob; 2228 int rc; 2229 2230 QETH_CARD_TEXT(card, 2, "idxwrite"); 2231 2232 iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT); 2233 if (!iob) 2234 return -ENOMEM; 2235 2236 memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE); 2237 qeth_idx_setup_activate_cmd(card, iob); 2238 iob->callback = qeth_idx_activate_write_channel_cb; 2239 2240 rc = qeth_send_control_data(card, iob, NULL, NULL); 2241 if (rc) 2242 return rc; 2243 2244 channel->state = CH_STATE_UP; 2245 return 0; 2246 } 2247 2248 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply, 2249 unsigned long data) 2250 { 2251 struct qeth_cmd_buffer *iob; 2252 2253 QETH_CARD_TEXT(card, 2, "cmenblcb"); 2254 2255 iob = (struct qeth_cmd_buffer *) data; 2256 memcpy(&card->token.cm_filter_r, 2257 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data), 2258 QETH_MPC_TOKEN_LENGTH); 2259 return 0; 2260 } 2261 2262 static int qeth_cm_enable(struct qeth_card *card) 2263 { 2264 struct qeth_cmd_buffer *iob; 2265 2266 QETH_CARD_TEXT(card, 2, "cmenable"); 2267 2268 iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE); 2269 if (!iob) 2270 return -ENOMEM; 2271 2272 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data), 2273 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); 2274 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data), 2275 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH); 2276 2277 return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL); 2278 } 2279 2280 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply, 2281 unsigned long data) 2282 { 2283 struct qeth_cmd_buffer *iob; 2284 2285 QETH_CARD_TEXT(card, 2, "cmsetpcb"); 2286 2287 iob = (struct qeth_cmd_buffer *) data; 2288 memcpy(&card->token.cm_connection_r, 2289 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data), 2290 QETH_MPC_TOKEN_LENGTH); 2291 return 0; 2292 } 2293 2294 static int qeth_cm_setup(struct qeth_card *card) 2295 { 2296 struct qeth_cmd_buffer *iob; 2297 2298 QETH_CARD_TEXT(card, 2, "cmsetup"); 2299 2300 iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE); 2301 if (!iob) 2302 return -ENOMEM; 2303 2304 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data), 2305 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); 2306 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data), 2307 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH); 2308 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data), 2309 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH); 2310 return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL); 2311 } 2312 2313 static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type) 2314 { 2315 if (link_type == QETH_LINK_TYPE_LANE_TR || 2316 link_type == QETH_LINK_TYPE_HSTR) { 2317 dev_err(&card->gdev->dev, "Unsupported Token Ring device\n"); 2318 return false; 2319 } 2320 2321 return true; 2322 } 2323 2324 static int qeth_update_max_mtu(struct qeth_card *card, unsigned 
int max_mtu) 2325 { 2326 struct net_device *dev = card->dev; 2327 unsigned int new_mtu; 2328 2329 if (!max_mtu) { 2330 /* IQD needs accurate max MTU to set up its RX buffers: */ 2331 if (IS_IQD(card)) 2332 return -EINVAL; 2333 /* tolerate quirky HW: */ 2334 max_mtu = ETH_MAX_MTU; 2335 } 2336 2337 rtnl_lock(); 2338 if (IS_IQD(card)) { 2339 /* move any device with default MTU to new max MTU: */ 2340 new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu; 2341 2342 /* adjust RX buffer size to new max MTU: */ 2343 card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE; 2344 if (dev->max_mtu && dev->max_mtu != max_mtu) 2345 qeth_free_qdio_queues(card); 2346 } else { 2347 if (dev->mtu) 2348 new_mtu = dev->mtu; 2349 /* default MTUs for first setup: */ 2350 else if (IS_LAYER2(card)) 2351 new_mtu = ETH_DATA_LEN; 2352 else 2353 new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */ 2354 } 2355 2356 dev->max_mtu = max_mtu; 2357 dev->mtu = min(new_mtu, max_mtu); 2358 rtnl_unlock(); 2359 return 0; 2360 } 2361 2362 static int qeth_get_mtu_outof_framesize(int framesize) 2363 { 2364 switch (framesize) { 2365 case 0x4000: 2366 return 8192; 2367 case 0x6000: 2368 return 16384; 2369 case 0xa000: 2370 return 32768; 2371 case 0xffff: 2372 return 57344; 2373 default: 2374 return 0; 2375 } 2376 } 2377 2378 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, 2379 unsigned long data) 2380 { 2381 __u16 mtu, framesize; 2382 __u16 len; 2383 struct qeth_cmd_buffer *iob; 2384 u8 link_type = 0; 2385 2386 QETH_CARD_TEXT(card, 2, "ulpenacb"); 2387 2388 iob = (struct qeth_cmd_buffer *) data; 2389 memcpy(&card->token.ulp_filter_r, 2390 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data), 2391 QETH_MPC_TOKEN_LENGTH); 2392 if (IS_IQD(card)) { 2393 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2); 2394 mtu = qeth_get_mtu_outof_framesize(framesize); 2395 } else { 2396 mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data); 2397 } 2398 *(u16 *)reply->param = mtu; 2399 2400 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2); 2401 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) { 2402 memcpy(&link_type, 2403 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1); 2404 if (!qeth_is_supported_link_type(card, link_type)) 2405 return -EPROTONOSUPPORT; 2406 } 2407 2408 card->info.link_type = link_type; 2409 QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type); 2410 return 0; 2411 } 2412 2413 static u8 qeth_mpc_select_prot_type(struct qeth_card *card) 2414 { 2415 return IS_LAYER2(card) ? 
QETH_MPC_PROT_L2 : QETH_MPC_PROT_L3; 2416 } 2417 2418 static int qeth_ulp_enable(struct qeth_card *card) 2419 { 2420 u8 prot_type = qeth_mpc_select_prot_type(card); 2421 struct qeth_cmd_buffer *iob; 2422 u16 max_mtu; 2423 int rc; 2424 2425 QETH_CARD_TEXT(card, 2, "ulpenabl"); 2426 2427 iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE); 2428 if (!iob) 2429 return -ENOMEM; 2430 2431 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port; 2432 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1); 2433 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data), 2434 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2435 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data), 2436 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH); 2437 rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu); 2438 if (rc) 2439 return rc; 2440 return qeth_update_max_mtu(card, max_mtu); 2441 } 2442 2443 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, 2444 unsigned long data) 2445 { 2446 struct qeth_cmd_buffer *iob; 2447 2448 QETH_CARD_TEXT(card, 2, "ulpstpcb"); 2449 2450 iob = (struct qeth_cmd_buffer *) data; 2451 memcpy(&card->token.ulp_connection_r, 2452 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), 2453 QETH_MPC_TOKEN_LENGTH); 2454 if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), 2455 3)) { 2456 QETH_CARD_TEXT(card, 2, "olmlimit"); 2457 dev_err(&card->gdev->dev, "A connection could not be " 2458 "established because of an OLM limit\n"); 2459 return -EMLINK; 2460 } 2461 return 0; 2462 } 2463 2464 static int qeth_ulp_setup(struct qeth_card *card) 2465 { 2466 __u16 temp; 2467 struct qeth_cmd_buffer *iob; 2468 2469 QETH_CARD_TEXT(card, 2, "ulpsetup"); 2470 2471 iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE); 2472 if (!iob) 2473 return -ENOMEM; 2474 2475 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data), 2476 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2477 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data), 2478 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH); 2479 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data), 2480 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH); 2481 2482 memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2); 2483 temp = (card->info.cula << 8) + card->info.unit_addr2; 2484 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2); 2485 return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL); 2486 } 2487 2488 static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx, 2489 gfp_t gfp) 2490 { 2491 struct qeth_qdio_out_buffer *newbuf; 2492 2493 newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, gfp); 2494 if (!newbuf) 2495 return -ENOMEM; 2496 2497 newbuf->buffer = q->qdio_bufs[bidx]; 2498 skb_queue_head_init(&newbuf->skb_list); 2499 lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key); 2500 atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY); 2501 q->bufs[bidx] = newbuf; 2502 return 0; 2503 } 2504 2505 static void qeth_free_output_queue(struct qeth_qdio_out_q *q) 2506 { 2507 if (!q) 2508 return; 2509 2510 qeth_drain_output_queue(q, true); 2511 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2512 kfree(q); 2513 } 2514 2515 static struct qeth_qdio_out_q *qeth_alloc_output_queue(void) 2516 { 2517 struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL); 2518 unsigned int i; 2519 2520 if (!q) 2521 return NULL; 2522 2523 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) 2524 goto err_qdio_bufs; 2525 2526 for (i = 0; i < 
QDIO_MAX_BUFFERS_PER_Q; i++) { 2527 if (qeth_alloc_out_buf(q, i, GFP_KERNEL)) 2528 goto err_out_bufs; 2529 } 2530 2531 return q; 2532 2533 err_out_bufs: 2534 while (i > 0) 2535 qeth_free_out_buf(q->bufs[--i]); 2536 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2537 err_qdio_bufs: 2538 kfree(q); 2539 return NULL; 2540 } 2541 2542 static void qeth_tx_completion_timer(struct timer_list *timer) 2543 { 2544 struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer); 2545 2546 napi_schedule(&queue->napi); 2547 QETH_TXQ_STAT_INC(queue, completion_timer); 2548 } 2549 2550 static int qeth_alloc_qdio_queues(struct qeth_card *card) 2551 { 2552 unsigned int i; 2553 2554 QETH_CARD_TEXT(card, 2, "allcqdbf"); 2555 2556 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED, 2557 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED) 2558 return 0; 2559 2560 QETH_CARD_TEXT(card, 2, "inq"); 2561 card->qdio.in_q = qeth_alloc_qdio_queue(); 2562 if (!card->qdio.in_q) 2563 goto out_nomem; 2564 2565 /* inbound buffer pool */ 2566 if (qeth_alloc_buffer_pool(card)) 2567 goto out_freeinq; 2568 2569 /* outbound */ 2570 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2571 struct qeth_qdio_out_q *queue; 2572 2573 queue = qeth_alloc_output_queue(); 2574 if (!queue) 2575 goto out_freeoutq; 2576 QETH_CARD_TEXT_(card, 2, "outq %i", i); 2577 QETH_CARD_HEX(card, 2, &queue, sizeof(void *)); 2578 card->qdio.out_qs[i] = queue; 2579 queue->card = card; 2580 queue->queue_no = i; 2581 INIT_LIST_HEAD(&queue->pending_bufs); 2582 spin_lock_init(&queue->lock); 2583 timer_setup(&queue->timer, qeth_tx_completion_timer, 0); 2584 if (IS_IQD(card)) { 2585 queue->coalesce_usecs = QETH_TX_COALESCE_USECS; 2586 queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES; 2587 queue->rescan_usecs = QETH_TX_TIMER_USECS; 2588 } else { 2589 queue->coalesce_usecs = USEC_PER_SEC; 2590 queue->max_coalesced_frames = 0; 2591 queue->rescan_usecs = 10 * USEC_PER_SEC; 2592 } 2593 queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT; 2594 } 2595 2596 /* completion */ 2597 if (qeth_alloc_cq(card)) 2598 goto out_freeoutq; 2599 2600 return 0; 2601 2602 out_freeoutq: 2603 while (i > 0) { 2604 qeth_free_output_queue(card->qdio.out_qs[--i]); 2605 card->qdio.out_qs[i] = NULL; 2606 } 2607 qeth_free_buffer_pool(card); 2608 out_freeinq: 2609 qeth_free_qdio_queue(card->qdio.in_q); 2610 card->qdio.in_q = NULL; 2611 out_nomem: 2612 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); 2613 return -ENOMEM; 2614 } 2615 2616 static void qeth_free_qdio_queues(struct qeth_card *card) 2617 { 2618 int i, j; 2619 2620 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == 2621 QETH_QDIO_UNINITIALIZED) 2622 return; 2623 2624 qeth_free_cq(card); 2625 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { 2626 if (card->qdio.in_q->bufs[j].rx_skb) 2627 consume_skb(card->qdio.in_q->bufs[j].rx_skb); 2628 } 2629 qeth_free_qdio_queue(card->qdio.in_q); 2630 card->qdio.in_q = NULL; 2631 /* inbound buffer pool */ 2632 qeth_free_buffer_pool(card); 2633 /* free outbound qdio_qs */ 2634 for (i = 0; i < card->qdio.no_out_queues; i++) { 2635 qeth_free_output_queue(card->qdio.out_qs[i]); 2636 card->qdio.out_qs[i] = NULL; 2637 } 2638 } 2639 2640 static void qeth_fill_qib_parms(struct qeth_card *card, 2641 struct qeth_qib_parms *parms) 2642 { 2643 struct qeth_qdio_out_q *queue; 2644 unsigned int i; 2645 2646 parms->pcit_magic[0] = 'P'; 2647 parms->pcit_magic[1] = 'C'; 2648 parms->pcit_magic[2] = 'I'; 2649 parms->pcit_magic[3] = 'T'; 2650 ASCEBC(parms->pcit_magic, 
sizeof(parms->pcit_magic)); 2651 parms->pcit_a = QETH_PCI_THRESHOLD_A(card); 2652 parms->pcit_b = QETH_PCI_THRESHOLD_B(card); 2653 parms->pcit_c = QETH_PCI_TIMER_VALUE(card); 2654 2655 parms->blkt_magic[0] = 'B'; 2656 parms->blkt_magic[1] = 'L'; 2657 parms->blkt_magic[2] = 'K'; 2658 parms->blkt_magic[3] = 'T'; 2659 ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic)); 2660 parms->blkt_total = card->info.blkt.time_total; 2661 parms->blkt_inter_packet = card->info.blkt.inter_packet; 2662 parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo; 2663 2664 /* Prio-queueing implicitly uses the default priorities: */ 2665 if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1) 2666 return; 2667 2668 parms->pque_magic[0] = 'P'; 2669 parms->pque_magic[1] = 'Q'; 2670 parms->pque_magic[2] = 'U'; 2671 parms->pque_magic[3] = 'E'; 2672 ASCEBC(parms->pque_magic, sizeof(parms->pque_magic)); 2673 parms->pque_order = QETH_QIB_PQUE_ORDER_RR; 2674 parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL; 2675 2676 qeth_for_each_output_queue(card, queue, i) 2677 parms->pque_priority[i] = queue->priority; 2678 } 2679 2680 static int qeth_qdio_activate(struct qeth_card *card) 2681 { 2682 QETH_CARD_TEXT(card, 3, "qdioact"); 2683 return qdio_activate(CARD_DDEV(card)); 2684 } 2685 2686 static int qeth_dm_act(struct qeth_card *card) 2687 { 2688 struct qeth_cmd_buffer *iob; 2689 2690 QETH_CARD_TEXT(card, 2, "dmact"); 2691 2692 iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE); 2693 if (!iob) 2694 return -ENOMEM; 2695 2696 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data), 2697 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2698 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data), 2699 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); 2700 return qeth_send_control_data(card, iob, NULL, NULL); 2701 } 2702 2703 static int qeth_mpc_initialize(struct qeth_card *card) 2704 { 2705 int rc; 2706 2707 QETH_CARD_TEXT(card, 2, "mpcinit"); 2708 2709 rc = qeth_issue_next_read(card); 2710 if (rc) { 2711 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 2712 return rc; 2713 } 2714 rc = qeth_cm_enable(card); 2715 if (rc) { 2716 QETH_CARD_TEXT_(card, 2, "2err%d", rc); 2717 return rc; 2718 } 2719 rc = qeth_cm_setup(card); 2720 if (rc) { 2721 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 2722 return rc; 2723 } 2724 rc = qeth_ulp_enable(card); 2725 if (rc) { 2726 QETH_CARD_TEXT_(card, 2, "4err%d", rc); 2727 return rc; 2728 } 2729 rc = qeth_ulp_setup(card); 2730 if (rc) { 2731 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 2732 return rc; 2733 } 2734 rc = qeth_alloc_qdio_queues(card); 2735 if (rc) { 2736 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 2737 return rc; 2738 } 2739 rc = qeth_qdio_establish(card); 2740 if (rc) { 2741 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 2742 qeth_free_qdio_queues(card); 2743 return rc; 2744 } 2745 rc = qeth_qdio_activate(card); 2746 if (rc) { 2747 QETH_CARD_TEXT_(card, 2, "7err%d", rc); 2748 return rc; 2749 } 2750 rc = qeth_dm_act(card); 2751 if (rc) { 2752 QETH_CARD_TEXT_(card, 2, "8err%d", rc); 2753 return rc; 2754 } 2755 2756 return 0; 2757 } 2758 2759 static void qeth_print_status_message(struct qeth_card *card) 2760 { 2761 switch (card->info.type) { 2762 case QETH_CARD_TYPE_OSD: 2763 case QETH_CARD_TYPE_OSM: 2764 case QETH_CARD_TYPE_OSX: 2765 /* VM will use a non-zero first character to indicate 2766 * a HiperSockets-like reporting of the level; 2767 * OSA sets the first character to zero. 2768 */ 2769 if (!card->info.mcl_level[0]) { 2770 sprintf(card->info.mcl_level, "%02x%02x", 2771 card->info.mcl_level[2], 2772
card->info.mcl_level[3]); 2773 break; 2774 } 2775 fallthrough; 2776 case QETH_CARD_TYPE_IQD: 2777 if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) { 2778 card->info.mcl_level[0] = (char) _ebcasc[(__u8) 2779 card->info.mcl_level[0]]; 2780 card->info.mcl_level[1] = (char) _ebcasc[(__u8) 2781 card->info.mcl_level[1]]; 2782 card->info.mcl_level[2] = (char) _ebcasc[(__u8) 2783 card->info.mcl_level[2]]; 2784 card->info.mcl_level[3] = (char) _ebcasc[(__u8) 2785 card->info.mcl_level[3]]; 2786 card->info.mcl_level[QETH_MCL_LENGTH] = 0; 2787 } 2788 break; 2789 default: 2790 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1); 2791 } 2792 dev_info(&card->gdev->dev, 2793 "Device is a%s card%s%s%s\nwith link type %s.\n", 2794 qeth_get_cardname(card), 2795 (card->info.mcl_level[0]) ? " (level: " : "", 2796 (card->info.mcl_level[0]) ? card->info.mcl_level : "", 2797 (card->info.mcl_level[0]) ? ")" : "", 2798 qeth_get_cardname_short(card)); 2799 } 2800 2801 static void qeth_initialize_working_pool_list(struct qeth_card *card) 2802 { 2803 struct qeth_buffer_pool_entry *entry; 2804 2805 QETH_CARD_TEXT(card, 5, "inwrklst"); 2806 2807 list_for_each_entry(entry, 2808 &card->qdio.init_pool.entry_list, init_list) { 2809 qeth_put_buffer_pool_entry(card, entry); 2810 } 2811 } 2812 2813 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( 2814 struct qeth_card *card) 2815 { 2816 struct qeth_buffer_pool_entry *entry; 2817 int i, free; 2818 2819 if (list_empty(&card->qdio.in_buf_pool.entry_list)) 2820 return NULL; 2821 2822 list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) { 2823 free = 1; 2824 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2825 if (page_count(entry->elements[i]) > 1) { 2826 free = 0; 2827 break; 2828 } 2829 } 2830 if (free) { 2831 list_del_init(&entry->list); 2832 return entry; 2833 } 2834 } 2835 2836 /* no free buffer in the pool, so take the first one and swap pages */ 2837 entry = list_first_entry(&card->qdio.in_buf_pool.entry_list, 2838 struct qeth_buffer_pool_entry, list); 2839 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2840 if (page_count(entry->elements[i]) > 1) { 2841 struct page *page = dev_alloc_page(); 2842 2843 if (!page) 2844 return NULL; 2845 2846 __free_page(entry->elements[i]); 2847 entry->elements[i] = page; 2848 QETH_CARD_STAT_INC(card, rx_sg_alloc_page); 2849 } 2850 } 2851 list_del_init(&entry->list); 2852 return entry; 2853 } 2854 2855 static int qeth_init_input_buffer(struct qeth_card *card, 2856 struct qeth_qdio_buffer *buf) 2857 { 2858 struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry; 2859 int i; 2860 2861 if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) { 2862 buf->rx_skb = netdev_alloc_skb(card->dev, 2863 ETH_HLEN + 2864 sizeof(struct ipv6hdr)); 2865 if (!buf->rx_skb) 2866 return -ENOMEM; 2867 } 2868 2869 if (!pool_entry) { 2870 pool_entry = qeth_find_free_buffer_pool_entry(card); 2871 if (!pool_entry) 2872 return -ENOBUFS; 2873 2874 buf->pool_entry = pool_entry; 2875 } 2876 2877 /* 2878 * since the buffer is accessed only from the input_tasklet 2879 * there shouldn't be a need to synchronize; also, since we use 2880 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of 2881 * buffers 2882 */ 2883 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2884 buf->buffer->element[i].length = PAGE_SIZE; 2885 buf->buffer->element[i].addr = 2886 page_to_phys(pool_entry->elements[i]); 2887 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1) 2888 buf->buffer->element[i].eflags =
SBAL_EFLAGS_LAST_ENTRY; 2889 else 2890 buf->buffer->element[i].eflags = 0; 2891 buf->buffer->element[i].sflags = 0; 2892 } 2893 return 0; 2894 } 2895 2896 static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card, 2897 struct qeth_qdio_out_q *queue) 2898 { 2899 if (!IS_IQD(card) || 2900 qeth_iqd_is_mcast_queue(card, queue) || 2901 card->options.cq == QETH_CQ_ENABLED || 2902 qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd)) 2903 return 1; 2904 2905 return card->ssqd.mmwc ? card->ssqd.mmwc : 1; 2906 } 2907 2908 static int qeth_init_qdio_queues(struct qeth_card *card) 2909 { 2910 unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count; 2911 unsigned int i; 2912 int rc; 2913 2914 QETH_CARD_TEXT(card, 2, "initqdqs"); 2915 2916 /* inbound queue */ 2917 qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2918 memset(&card->rx, 0, sizeof(struct qeth_rx)); 2919 2920 qeth_initialize_working_pool_list(card); 2921 /* give only as many buffers to hardware as we have buffer pool entries */ 2922 for (i = 0; i < rx_bufs; i++) { 2923 rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); 2924 if (rc) 2925 return rc; 2926 } 2927 2928 card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs); 2929 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs, 2930 NULL); 2931 if (rc) { 2932 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 2933 return rc; 2934 } 2935 2936 /* completion */ 2937 rc = qeth_cq_init(card); 2938 if (rc) { 2939 return rc; 2940 } 2941 2942 /* outbound queue */ 2943 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2944 struct qeth_qdio_out_q *queue = card->qdio.out_qs[i]; 2945 2946 qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2947 queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card); 2948 queue->next_buf_to_fill = 0; 2949 queue->do_pack = 0; 2950 queue->prev_hdr = NULL; 2951 queue->coalesced_frames = 0; 2952 queue->bulk_start = 0; 2953 queue->bulk_count = 0; 2954 queue->bulk_max = qeth_tx_select_bulk_max(card, queue); 2955 atomic_set(&queue->used_buffers, 0); 2956 atomic_set(&queue->set_pci_flags_count, 0); 2957 netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i)); 2958 } 2959 return 0; 2960 } 2961 2962 static void qeth_ipa_finalize_cmd(struct qeth_card *card, 2963 struct qeth_cmd_buffer *iob) 2964 { 2965 qeth_mpc_finalize_cmd(card, iob); 2966 2967 /* override with IPA-specific values: */ 2968 __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++; 2969 } 2970 2971 static void qeth_prepare_ipa_cmd(struct qeth_card *card, 2972 struct qeth_cmd_buffer *iob, u16 cmd_length) 2973 { 2974 u8 prot_type = qeth_mpc_select_prot_type(card); 2975 u16 total_length = iob->length; 2976 2977 qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length, 2978 iob->data); 2979 iob->finalize = qeth_ipa_finalize_cmd; 2980 2981 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); 2982 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2); 2983 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1); 2984 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2); 2985 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2); 2986 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), 2987 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); 2988 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2); 2989 } 2990 2991 static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob, 2992 struct qeth_cmd_buffer *reply) 2993 { 2994 struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply); 2995 2996 return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno); 2997
} 2998 2999 struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card, 3000 enum qeth_ipa_cmds cmd_code, 3001 enum qeth_prot_versions prot, 3002 unsigned int data_length) 3003 { 3004 struct qeth_cmd_buffer *iob; 3005 struct qeth_ipacmd_hdr *hdr; 3006 3007 data_length += offsetof(struct qeth_ipa_cmd, data); 3008 iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1, 3009 QETH_IPA_TIMEOUT); 3010 if (!iob) 3011 return NULL; 3012 3013 qeth_prepare_ipa_cmd(card, iob, data_length); 3014 iob->match = qeth_ipa_match_reply; 3015 3016 hdr = &__ipa_cmd(iob)->hdr; 3017 hdr->command = cmd_code; 3018 hdr->initiator = IPA_CMD_INITIATOR_HOST; 3019 /* hdr->seqno is set by qeth_send_control_data() */ 3020 hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH; 3021 hdr->rel_adapter_no = (u8) card->dev->dev_port; 3022 hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1; 3023 hdr->param_count = 1; 3024 hdr->prot_version = prot; 3025 return iob; 3026 } 3027 EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd); 3028 3029 static int qeth_send_ipa_cmd_cb(struct qeth_card *card, 3030 struct qeth_reply *reply, unsigned long data) 3031 { 3032 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3033 3034 return (cmd->hdr.return_code) ? -EIO : 0; 3035 } 3036 3037 /* 3038 * qeth_send_ipa_cmd() - send an IPA command 3039 * 3040 * See qeth_send_control_data() for explanation of the arguments. 3041 */ 3042 3043 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, 3044 int (*reply_cb)(struct qeth_card *, struct qeth_reply*, 3045 unsigned long), 3046 void *reply_param) 3047 { 3048 int rc; 3049 3050 QETH_CARD_TEXT(card, 4, "sendipa"); 3051 3052 if (card->read_or_write_problem) { 3053 qeth_put_cmd(iob); 3054 return -EIO; 3055 } 3056 3057 if (reply_cb == NULL) 3058 reply_cb = qeth_send_ipa_cmd_cb; 3059 rc = qeth_send_control_data(card, iob, reply_cb, reply_param); 3060 if (rc == -ETIME) { 3061 qeth_clear_ipacmd_list(card); 3062 qeth_schedule_recovery(card); 3063 } 3064 return rc; 3065 } 3066 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); 3067 3068 static int qeth_send_startlan_cb(struct qeth_card *card, 3069 struct qeth_reply *reply, unsigned long data) 3070 { 3071 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3072 3073 if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE) 3074 return -ENETDOWN; 3075 3076 return (cmd->hdr.return_code) ? 
-EIO : 0; 3077 } 3078 3079 static int qeth_send_startlan(struct qeth_card *card) 3080 { 3081 struct qeth_cmd_buffer *iob; 3082 3083 QETH_CARD_TEXT(card, 2, "strtlan"); 3084 3085 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0); 3086 if (!iob) 3087 return -ENOMEM; 3088 return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL); 3089 } 3090 3091 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd) 3092 { 3093 if (!cmd->hdr.return_code) 3094 cmd->hdr.return_code = 3095 cmd->data.setadapterparms.hdr.return_code; 3096 return cmd->hdr.return_code; 3097 } 3098 3099 static int qeth_query_setadapterparms_cb(struct qeth_card *card, 3100 struct qeth_reply *reply, unsigned long data) 3101 { 3102 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3103 struct qeth_query_cmds_supp *query_cmd; 3104 3105 QETH_CARD_TEXT(card, 3, "quyadpcb"); 3106 if (qeth_setadpparms_inspect_rc(cmd)) 3107 return -EIO; 3108 3109 query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp; 3110 if (query_cmd->lan_type & 0x7f) { 3111 if (!qeth_is_supported_link_type(card, query_cmd->lan_type)) 3112 return -EPROTONOSUPPORT; 3113 3114 card->info.link_type = query_cmd->lan_type; 3115 QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type); 3116 } 3117 3118 card->options.adp.supported = query_cmd->supported_cmds; 3119 return 0; 3120 } 3121 3122 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, 3123 enum qeth_ipa_setadp_cmd adp_cmd, 3124 unsigned int data_length) 3125 { 3126 struct qeth_ipacmd_setadpparms_hdr *hdr; 3127 struct qeth_cmd_buffer *iob; 3128 3129 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4, 3130 data_length + 3131 offsetof(struct qeth_ipacmd_setadpparms, 3132 data)); 3133 if (!iob) 3134 return NULL; 3135 3136 hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr; 3137 hdr->cmdlength = sizeof(*hdr) + data_length; 3138 hdr->command_code = adp_cmd; 3139 hdr->used_total = 1; 3140 hdr->seq_no = 1; 3141 return iob; 3142 } 3143 3144 static int qeth_query_setadapterparms(struct qeth_card *card) 3145 { 3146 int rc; 3147 struct qeth_cmd_buffer *iob; 3148 3149 QETH_CARD_TEXT(card, 3, "queryadp"); 3150 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, 3151 SETADP_DATA_SIZEOF(query_cmds_supp)); 3152 if (!iob) 3153 return -ENOMEM; 3154 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); 3155 return rc; 3156 } 3157 3158 static int qeth_query_ipassists_cb(struct qeth_card *card, 3159 struct qeth_reply *reply, unsigned long data) 3160 { 3161 struct qeth_ipa_cmd *cmd; 3162 3163 QETH_CARD_TEXT(card, 2, "qipasscb"); 3164 3165 cmd = (struct qeth_ipa_cmd *) data; 3166 3167 switch (cmd->hdr.return_code) { 3168 case IPA_RC_SUCCESS: 3169 break; 3170 case IPA_RC_NOTSUPP: 3171 case IPA_RC_L2_UNSUPPORTED_CMD: 3172 QETH_CARD_TEXT(card, 2, "ipaunsup"); 3173 card->options.ipa4.supported |= IPA_SETADAPTERPARMS; 3174 card->options.ipa6.supported |= IPA_SETADAPTERPARMS; 3175 return -EOPNOTSUPP; 3176 default: 3177 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n", 3178 CARD_DEVID(card), cmd->hdr.return_code); 3179 return -EIO; 3180 } 3181 3182 if (cmd->hdr.prot_version == QETH_PROT_IPV4) 3183 card->options.ipa4 = cmd->hdr.assists; 3184 else if (cmd->hdr.prot_version == QETH_PROT_IPV6) 3185 card->options.ipa6 = cmd->hdr.assists; 3186 else 3187 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n", 3188 CARD_DEVID(card)); 3189 return 0; 3190 } 3191 3192 static int 
qeth_query_ipassists(struct qeth_card *card, 3193 enum qeth_prot_versions prot) 3194 { 3195 int rc; 3196 struct qeth_cmd_buffer *iob; 3197 3198 QETH_CARD_TEXT_(card, 2, "qipassi%i", prot); 3199 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0); 3200 if (!iob) 3201 return -ENOMEM; 3202 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); 3203 return rc; 3204 } 3205 3206 static int qeth_query_switch_attributes_cb(struct qeth_card *card, 3207 struct qeth_reply *reply, unsigned long data) 3208 { 3209 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3210 struct qeth_query_switch_attributes *attrs; 3211 struct qeth_switch_info *sw_info; 3212 3213 QETH_CARD_TEXT(card, 2, "qswiatcb"); 3214 if (qeth_setadpparms_inspect_rc(cmd)) 3215 return -EIO; 3216 3217 sw_info = (struct qeth_switch_info *)reply->param; 3218 attrs = &cmd->data.setadapterparms.data.query_switch_attributes; 3219 sw_info->capabilities = attrs->capabilities; 3220 sw_info->settings = attrs->settings; 3221 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities, 3222 sw_info->settings); 3223 return 0; 3224 } 3225 3226 int qeth_query_switch_attributes(struct qeth_card *card, 3227 struct qeth_switch_info *sw_info) 3228 { 3229 struct qeth_cmd_buffer *iob; 3230 3231 QETH_CARD_TEXT(card, 2, "qswiattr"); 3232 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES)) 3233 return -EOPNOTSUPP; 3234 if (!netif_carrier_ok(card->dev)) 3235 return -ENOMEDIUM; 3236 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0); 3237 if (!iob) 3238 return -ENOMEM; 3239 return qeth_send_ipa_cmd(card, iob, 3240 qeth_query_switch_attributes_cb, sw_info); 3241 } 3242 3243 struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card, 3244 enum qeth_diags_cmds sub_cmd, 3245 unsigned int data_length) 3246 { 3247 struct qeth_ipacmd_diagass *cmd; 3248 struct qeth_cmd_buffer *iob; 3249 3250 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE, 3251 DIAG_HDR_LEN + data_length); 3252 if (!iob) 3253 return NULL; 3254 3255 cmd = &__ipa_cmd(iob)->data.diagass; 3256 cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length; 3257 cmd->subcmd = sub_cmd; 3258 return iob; 3259 } 3260 EXPORT_SYMBOL_GPL(qeth_get_diag_cmd); 3261 3262 static int qeth_query_setdiagass_cb(struct qeth_card *card, 3263 struct qeth_reply *reply, unsigned long data) 3264 { 3265 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3266 u16 rc = cmd->hdr.return_code; 3267 3268 if (rc) { 3269 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc); 3270 return -EIO; 3271 } 3272 3273 card->info.diagass_support = cmd->data.diagass.ext; 3274 return 0; 3275 } 3276 3277 static int qeth_query_setdiagass(struct qeth_card *card) 3278 { 3279 struct qeth_cmd_buffer *iob; 3280 3281 QETH_CARD_TEXT(card, 2, "qdiagass"); 3282 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0); 3283 if (!iob) 3284 return -ENOMEM; 3285 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL); 3286 } 3287 3288 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid) 3289 { 3290 unsigned long info = get_zeroed_page(GFP_KERNEL); 3291 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info; 3292 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info; 3293 struct ccw_dev_id ccwid; 3294 int level; 3295 3296 tid->chpid = card->info.chpid; 3297 ccw_device_get_id(CARD_RDEV(card), &ccwid); 3298 tid->ssid = ccwid.ssid; 3299 tid->devno = ccwid.devno; 3300 if (!info) 3301 return; 3302 level = stsi(NULL, 0, 0, 0); 3303 if ((level >= 2) && 
(stsi(info222, 2, 2, 2) == 0)) 3304 tid->lparnr = info222->lpar_number; 3305 if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) { 3306 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name)); 3307 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname)); 3308 } 3309 free_page(info); 3310 } 3311 3312 static int qeth_hw_trap_cb(struct qeth_card *card, 3313 struct qeth_reply *reply, unsigned long data) 3314 { 3315 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3316 u16 rc = cmd->hdr.return_code; 3317 3318 if (rc) { 3319 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc); 3320 return -EIO; 3321 } 3322 return 0; 3323 } 3324 3325 int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action) 3326 { 3327 struct qeth_cmd_buffer *iob; 3328 struct qeth_ipa_cmd *cmd; 3329 3330 QETH_CARD_TEXT(card, 2, "diagtrap"); 3331 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64); 3332 if (!iob) 3333 return -ENOMEM; 3334 cmd = __ipa_cmd(iob); 3335 cmd->data.diagass.type = 1; 3336 cmd->data.diagass.action = action; 3337 switch (action) { 3338 case QETH_DIAGS_TRAP_ARM: 3339 cmd->data.diagass.options = 0x0003; 3340 cmd->data.diagass.ext = 0x00010000 + 3341 sizeof(struct qeth_trap_id); 3342 qeth_get_trap_id(card, 3343 (struct qeth_trap_id *)cmd->data.diagass.cdata); 3344 break; 3345 case QETH_DIAGS_TRAP_DISARM: 3346 cmd->data.diagass.options = 0x0001; 3347 break; 3348 case QETH_DIAGS_TRAP_CAPTURE: 3349 break; 3350 } 3351 return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL); 3352 } 3353 3354 static int qeth_check_qdio_errors(struct qeth_card *card, 3355 struct qdio_buffer *buf, 3356 unsigned int qdio_error, 3357 const char *dbftext) 3358 { 3359 if (qdio_error) { 3360 QETH_CARD_TEXT(card, 2, dbftext); 3361 QETH_CARD_TEXT_(card, 2, " F15=%02X", 3362 buf->element[15].sflags); 3363 QETH_CARD_TEXT_(card, 2, " F14=%02X", 3364 buf->element[14].sflags); 3365 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error); 3366 if ((buf->element[15].sflags) == 0x12) { 3367 QETH_CARD_STAT_INC(card, rx_fifo_errors); 3368 return 0; 3369 } else 3370 return 1; 3371 } 3372 return 0; 3373 } 3374 3375 static unsigned int qeth_rx_refill_queue(struct qeth_card *card, 3376 unsigned int count) 3377 { 3378 struct qeth_qdio_q *queue = card->qdio.in_q; 3379 struct list_head *lh; 3380 int i; 3381 int rc; 3382 int newcount = 0; 3383 3384 /* only requeue at a certain threshold to avoid SIGAs */ 3385 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) { 3386 for (i = queue->next_buf_to_init; 3387 i < queue->next_buf_to_init + count; ++i) { 3388 if (qeth_init_input_buffer(card, 3389 &queue->bufs[QDIO_BUFNR(i)])) { 3390 break; 3391 } else { 3392 newcount++; 3393 } 3394 } 3395 3396 if (newcount < count) { 3397 /* we are in a memory shortage, so we switch back to 3398 * traditional skb allocation and drop packets */ 3399 atomic_set(&card->force_alloc_skb, 3); 3400 count = newcount; 3401 } else { 3402 atomic_add_unless(&card->force_alloc_skb, -1, 0); 3403 } 3404 3405 if (!count) { 3406 i = 0; 3407 list_for_each(lh, &card->qdio.in_buf_pool.entry_list) 3408 i++; 3409 if (i == card->qdio.in_buf_pool.buf_count) { 3410 QETH_CARD_TEXT(card, 2, "qsarbw"); 3411 schedule_delayed_work( 3412 &card->buffer_reclaim_work, 3413 QETH_RECLAIM_WORK_TIME); 3414 } 3415 return 0; 3416 } 3417 3418 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 3419 queue->next_buf_to_init, count, NULL); 3420 if (rc) { 3421 QETH_CARD_TEXT(card, 2, "qinberr"); 3422 } 3423 queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init + 3424 count); 3425 return count; 3426 }
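/* Below the requeue threshold: let empty buffers accumulate, so that
 * a later refill can batch them into a single SIGA.
 */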
3427 3428 return 0; 3429 } 3430 3431 static void qeth_buffer_reclaim_work(struct work_struct *work) 3432 { 3433 struct qeth_card *card = container_of(to_delayed_work(work), 3434 struct qeth_card, 3435 buffer_reclaim_work); 3436 3437 local_bh_disable(); 3438 napi_schedule(&card->napi); 3439 /* kick-start the NAPI softirq: */ 3440 local_bh_enable(); 3441 } 3442 3443 static void qeth_handle_send_error(struct qeth_card *card, 3444 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err) 3445 { 3446 int sbalf15 = buffer->buffer->element[15].sflags; 3447 3448 QETH_CARD_TEXT(card, 6, "hdsnderr"); 3449 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr"); 3450 3451 if (!qdio_err) 3452 return; 3453 3454 if ((sbalf15 >= 15) && (sbalf15 <= 31)) 3455 return; 3456 3457 QETH_CARD_TEXT(card, 1, "lnkfail"); 3458 QETH_CARD_TEXT_(card, 1, "%04x %02x", 3459 (u16)qdio_err, (u8)sbalf15); 3460 } 3461 3462 /** 3463 * qeth_prep_flush_pack_buffer() - Prepares flushing of a packing buffer. 3464 * @queue: queue to check for packing buffer 3465 * 3466 * Returns the number of buffers that were prepared for flush. 3467 */ 3468 static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue) 3469 { 3470 struct qeth_qdio_out_buffer *buffer; 3471 3472 buffer = queue->bufs[queue->next_buf_to_fill]; 3473 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) && 3474 (buffer->next_element_to_fill > 0)) { 3475 /* it's a packing buffer */ 3476 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 3477 queue->next_buf_to_fill = 3478 QDIO_BUFNR(queue->next_buf_to_fill + 1); 3479 return 1; 3480 } 3481 return 0; 3482 } 3483 3484 /* 3485 * Switches to packing state if the number of used buffers on a queue 3486 * reaches a certain limit. 3487 */ 3488 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) 3489 { 3490 if (!queue->do_pack) { 3491 if (atomic_read(&queue->used_buffers) 3492 >= QETH_HIGH_WATERMARK_PACK) { 3493 /* switch non-PACKING -> PACKING */ 3494 QETH_CARD_TEXT(queue->card, 6, "np->pack"); 3495 QETH_TXQ_STAT_INC(queue, packing_mode_switch); 3496 queue->do_pack = 1; 3497 } 3498 } 3499 } 3500 3501 /* 3502 * Switches from packing to non-packing mode. If there is a packing 3503 * buffer on the queue, this buffer will be prepared to be flushed. 3504 * In that case 1 is returned to inform the caller. If no buffer 3505 * has to be flushed, zero is returned.
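* Together with qeth_switch_to_packing_if_needed() this provides
* hysteresis: the switch to packing happens at QETH_HIGH_WATERMARK_PACK,
* the switch back at QETH_LOW_WATERMARK_PACK, so the queue does not
* flip between the two modes on every send.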
3506 */ 3507 static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) 3508 { 3509 if (queue->do_pack) { 3510 if (atomic_read(&queue->used_buffers) 3511 <= QETH_LOW_WATERMARK_PACK) { 3512 /* switch PACKING -> non-PACKING */ 3513 QETH_CARD_TEXT(queue->card, 6, "pack->np"); 3514 QETH_TXQ_STAT_INC(queue, packing_mode_switch); 3515 queue->do_pack = 0; 3516 return qeth_prep_flush_pack_buffer(queue); 3517 } 3518 } 3519 return 0; 3520 } 3521 3522 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, 3523 int count) 3524 { 3525 struct qeth_qdio_out_buffer *buf = queue->bufs[index]; 3526 struct qeth_card *card = queue->card; 3527 unsigned int frames, usecs; 3528 struct qaob *aob = NULL; 3529 int rc; 3530 int i; 3531 3532 for (i = index; i < index + count; ++i) { 3533 unsigned int bidx = QDIO_BUFNR(i); 3534 struct sk_buff *skb; 3535 3536 buf = queue->bufs[bidx]; 3537 buf->buffer->element[buf->next_element_to_fill - 1].eflags |= 3538 SBAL_EFLAGS_LAST_ENTRY; 3539 queue->coalesced_frames += buf->frames; 3540 3541 if (IS_IQD(card)) { 3542 skb_queue_walk(&buf->skb_list, skb) 3543 skb_tx_timestamp(skb); 3544 } 3545 } 3546 3547 if (IS_IQD(card)) { 3548 if (card->options.cq == QETH_CQ_ENABLED && 3549 !qeth_iqd_is_mcast_queue(card, queue) && 3550 count == 1) { 3551 if (!buf->aob) 3552 buf->aob = kmem_cache_zalloc(qeth_qaob_cache, 3553 GFP_ATOMIC); 3554 if (buf->aob) { 3555 struct qeth_qaob_priv1 *priv; 3556 3557 aob = buf->aob; 3558 priv = (struct qeth_qaob_priv1 *)&aob->user1; 3559 priv->state = QETH_QAOB_ISSUED; 3560 priv->queue_no = queue->queue_no; 3561 } 3562 } 3563 } else { 3564 if (!queue->do_pack) { 3565 if ((atomic_read(&queue->used_buffers) >= 3566 (QETH_HIGH_WATERMARK_PACK - 3567 QETH_WATERMARK_PACK_FUZZ)) && 3568 !atomic_read(&queue->set_pci_flags_count)) { 3569 /* it's likely that we'll go to packing 3570 * mode soon */ 3571 atomic_inc(&queue->set_pci_flags_count); 3572 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ; 3573 } 3574 } else { 3575 if (!atomic_read(&queue->set_pci_flags_count)) { 3576 /* 3577 * there's no outstanding PCI any more, so we 3578 * have to request a PCI to be sure that the PCI 3579 * will fire at some time in the future, so that 3580 * we can flush packed buffers that might still be 3581 * hanging around, which can happen if no 3582 * further send was requested by the stack 3583 */ 3584 atomic_inc(&queue->set_pci_flags_count); 3585 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ; 3586 } 3587 } 3588 } 3589 3590 QETH_TXQ_STAT_INC(queue, doorbell); 3591 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_OUTPUT, queue->queue_no, 3592 index, count, aob); 3593 3594 switch (rc) { 3595 case 0: 3596 case -ENOBUFS: 3597 /* ignore temporary SIGA errors without busy condition */ 3598 3599 /* Fake the TX completion interrupt: */ 3600 frames = READ_ONCE(queue->max_coalesced_frames); 3601 usecs = READ_ONCE(queue->coalesce_usecs); 3602 3603 if (frames && queue->coalesced_frames >= frames) { 3604 napi_schedule(&queue->napi); 3605 queue->coalesced_frames = 0; 3606 QETH_TXQ_STAT_INC(queue, coal_frames); 3607 } else if (qeth_use_tx_irqs(card) && 3608 atomic_read(&queue->used_buffers) >= 32) { 3609 /* Old behaviour carried over from the qdio layer: */ 3610 napi_schedule(&queue->napi); 3611 QETH_TXQ_STAT_INC(queue, coal_frames); 3612 } else if (usecs) { 3613 qeth_tx_arm_timer(queue, usecs); 3614 } 3615 3616 break; 3617 default: 3618 QETH_CARD_TEXT(queue->card, 2, "flushbuf"); 3619 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no); 3620
QETH_CARD_TEXT_(queue->card, 2, " idx%d", index); 3621 QETH_CARD_TEXT_(queue->card, 2, " c%d", count); 3622 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc); 3623 3624 /* this must not happen under normal circumstances. If it 3625 * happens, something is really wrong -> recover */ 3626 qeth_schedule_recovery(queue->card); 3627 } 3628 } 3629 3630 static void qeth_flush_queue(struct qeth_qdio_out_q *queue) 3631 { 3632 qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count); 3633 3634 queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count); 3635 queue->prev_hdr = NULL; 3636 queue->bulk_count = 0; 3637 } 3638 3639 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) 3640 { 3641 /* 3642 * check if we have to switch to non-packing mode or if 3643 * we have to get a pci flag out on the queue 3644 */ 3645 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) || 3646 !atomic_read(&queue->set_pci_flags_count)) { 3647 unsigned int index, flush_cnt; 3648 bool q_was_packing; 3649 3650 spin_lock(&queue->lock); 3651 3652 index = queue->next_buf_to_fill; 3653 q_was_packing = queue->do_pack; 3654 3655 flush_cnt = qeth_switch_to_nonpacking_if_needed(queue); 3656 if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count)) 3657 flush_cnt = qeth_prep_flush_pack_buffer(queue); 3658 3659 if (flush_cnt) { 3660 qeth_flush_buffers(queue, index, flush_cnt); 3661 if (q_was_packing) 3662 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt); 3663 } 3664 3665 spin_unlock(&queue->lock); 3666 } 3667 } 3668 3669 static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr) 3670 { 3671 struct qeth_card *card = (struct qeth_card *)card_ptr; 3672 3673 napi_schedule_irqoff(&card->napi); 3674 } 3675 3676 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) 3677 { 3678 int rc; 3679 3680 if (card->options.cq == QETH_CQ_NOTAVAILABLE) { 3681 rc = -1; 3682 goto out; 3683 } else { 3684 if (card->options.cq == cq) { 3685 rc = 0; 3686 goto out; 3687 } 3688 3689 qeth_free_qdio_queues(card); 3690 card->options.cq = cq; 3691 rc = 0; 3692 } 3693 out: 3694 return rc; 3695 3696 } 3697 EXPORT_SYMBOL_GPL(qeth_configure_cq); 3698 3699 static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob) 3700 { 3701 struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1; 3702 unsigned int queue_no = priv->queue_no; 3703 3704 BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1)); 3705 3706 if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING && 3707 queue_no < card->qdio.no_out_queues) 3708 napi_schedule(&card->qdio.out_qs[queue_no]->napi); 3709 } 3710 3711 static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, 3712 unsigned int queue, int first_element, 3713 int count) 3714 { 3715 struct qeth_qdio_q *cq = card->qdio.c_q; 3716 int i; 3717 int rc; 3718 3719 QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element); 3720 QETH_CARD_TEXT_(card, 5, "qcqhc%d", count); 3721 QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err); 3722 3723 if (qdio_err) { 3724 netif_tx_stop_all_queues(card->dev); 3725 qeth_schedule_recovery(card); 3726 return; 3727 } 3728 3729 for (i = first_element; i < first_element + count; ++i) { 3730 struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)]; 3731 int e = 0; 3732 3733 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) && 3734 buffer->element[e].addr) { 3735 unsigned long phys_aob_addr = buffer->element[e].addr; 3736 3737 qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr)); 3738 ++e; 3739 } 3740
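/* All AOBs in this buffer have been processed, wipe it before it
 * is re-queued to the CQ below:
 */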
qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER); 3741 } 3742 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue, 3743 cq->next_buf_to_init, count, NULL); 3744 if (rc) { 3745 dev_warn(&card->gdev->dev, 3746 "QDIO reported an error, rc=%i\n", rc); 3747 QETH_CARD_TEXT(card, 2, "qcqherr"); 3748 } 3749 3750 cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count); 3751 } 3752 3753 static void qeth_qdio_input_handler(struct ccw_device *ccwdev, 3754 unsigned int qdio_err, int queue, 3755 int first_elem, int count, 3756 unsigned long card_ptr) 3757 { 3758 struct qeth_card *card = (struct qeth_card *)card_ptr; 3759 3760 QETH_CARD_TEXT_(card, 2, "qihq%d", queue); 3761 QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err); 3762 3763 if (qdio_err) 3764 qeth_schedule_recovery(card); 3765 } 3766 3767 static void qeth_qdio_output_handler(struct ccw_device *ccwdev, 3768 unsigned int qdio_error, int __queue, 3769 int first_element, int count, 3770 unsigned long card_ptr) 3771 { 3772 struct qeth_card *card = (struct qeth_card *) card_ptr; 3773 3774 QETH_CARD_TEXT(card, 2, "achkcond"); 3775 netif_tx_stop_all_queues(card->dev); 3776 qeth_schedule_recovery(card); 3777 } 3778 3779 /* 3780 * Note: Function assumes that we have 4 outbound queues. 3781 */ 3782 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb) 3783 { 3784 struct vlan_ethhdr *veth = vlan_eth_hdr(skb); 3785 u8 tos; 3786 3787 switch (card->qdio.do_prio_queueing) { 3788 case QETH_PRIO_Q_ING_TOS: 3789 case QETH_PRIO_Q_ING_PREC: 3790 switch (vlan_get_protocol(skb)) { 3791 case htons(ETH_P_IP): 3792 tos = ipv4_get_dsfield(ip_hdr(skb)); 3793 break; 3794 case htons(ETH_P_IPV6): 3795 tos = ipv6_get_dsfield(ipv6_hdr(skb)); 3796 break; 3797 default: 3798 return card->qdio.default_out_queue; 3799 } 3800 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC) 3801 return ~tos >> 6 & 3; 3802 if (tos & IPTOS_MINCOST) 3803 return 3; 3804 if (tos & IPTOS_RELIABILITY) 3805 return 2; 3806 if (tos & IPTOS_THROUGHPUT) 3807 return 1; 3808 if (tos & IPTOS_LOWDELAY) 3809 return 0; 3810 break; 3811 case QETH_PRIO_Q_ING_SKB: 3812 if (skb->priority > 5) 3813 return 0; 3814 return ~skb->priority >> 1 & 3; 3815 case QETH_PRIO_Q_ING_VLAN: 3816 if (veth->h_vlan_proto == htons(ETH_P_8021Q)) 3817 return ~ntohs(veth->h_vlan_TCI) >> 3818 (VLAN_PRIO_SHIFT + 1) & 3; 3819 break; 3820 case QETH_PRIO_Q_ING_FIXED: 3821 return card->qdio.default_out_queue; 3822 default: 3823 break; 3824 } 3825 return card->qdio.default_out_queue; 3826 } 3827 EXPORT_SYMBOL_GPL(qeth_get_priority_queue); 3828 3829 /** 3830 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags. 3831 * @skb: SKB address 3832 * 3833 * Returns the number of pages, and thus QDIO buffer elements, needed to cover 3834 * the fragmented part of the SKB. Returns zero for a linear SKB. 3835 */ 3836 static int qeth_get_elements_for_frags(struct sk_buff *skb) 3837 { 3838 int cnt, elements = 0; 3839 3840 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { 3841 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; 3842 3843 elements += qeth_get_elements_for_range( 3844 (addr_t)skb_frag_address(frag), 3845 (addr_t)skb_frag_address(frag) + skb_frag_size(frag)); 3846 } 3847 return elements; 3848 } 3849 3850 /** 3851 * qeth_count_elements() - Counts the number of QDIO buffer elements needed 3852 * to transmit an skb. 3853 * @skb: the skb to operate on.
* @data_offset: skip this part of the skb's linear data 3855 * 3856 * Returns the number of pages, and thus QDIO buffer elements, needed to map the 3857 * skb's data (both its linear part and paged fragments). 3858 */ 3859 static unsigned int qeth_count_elements(struct sk_buff *skb, 3860 unsigned int data_offset) 3861 { 3862 unsigned int elements = qeth_get_elements_for_frags(skb); 3863 addr_t end = (addr_t)skb->data + skb_headlen(skb); 3864 addr_t start = (addr_t)skb->data + data_offset; 3865 3866 if (start != end) 3867 elements += qeth_get_elements_for_range(start, end); 3868 return elements; 3869 } 3870 3871 #define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \ 3872 MAX_TCP_HEADER) 3873 3874 /** 3875 * qeth_add_hw_header() - add a HW header to an skb. 3876 * @queue: TX queue that the skb will be placed on. 3877 * @skb: skb that the HW header should be added to. 3878 * @hdr: double pointer to a qeth_hdr. When returning with >= 0, 3879 * it contains a valid pointer to a qeth_hdr. 3880 * @hdr_len: length of the HW header. 3881 * @proto_len: length of protocol headers that need to be in the same page as 3882 * the HW header. 3883 * @elements: returns the required number of buffer elements for this skb. 3884 * 3885 * Returns the pushed length. If the header can't be pushed on 3886 * (e.g. because it would cross a page boundary), it is allocated from 3887 * the cache instead and 0 is returned. 3888 * The number of needed buffer elements is returned in @elements. 3889 * An error while creating the hdr is indicated by a return value < 0. 3890 */ 3891 static int qeth_add_hw_header(struct qeth_qdio_out_q *queue, 3892 struct sk_buff *skb, struct qeth_hdr **hdr, 3893 unsigned int hdr_len, unsigned int proto_len, 3894 unsigned int *elements) 3895 { 3896 gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0); 3897 const unsigned int contiguous = proto_len ? proto_len : 1; 3898 const unsigned int max_elements = queue->max_elements; 3899 unsigned int __elements; 3900 addr_t start, end; 3901 bool push_ok; 3902 int rc; 3903 3904 check_layout: 3905 start = (addr_t)skb->data - hdr_len; 3906 end = (addr_t)skb->data; 3907 3908 if (qeth_get_elements_for_range(start, end + contiguous) == 1) { 3909 /* Push HW header into same page as first protocol header. */ 3910 push_ok = true; 3911 /* ... but TSO always needs a separate element for headers: */ 3912 if (skb_is_gso(skb)) 3913 __elements = 1 + qeth_count_elements(skb, proto_len); 3914 else 3915 __elements = qeth_count_elements(skb, 0); 3916 } else if (!proto_len && PAGE_ALIGNED(skb->data)) { 3917 /* Push HW header into preceding page, flush with skb->data. */ 3918 push_ok = true; 3919 __elements = 1 + qeth_count_elements(skb, 0); 3920 } else { 3921 /* Use header cache, copy protocol headers up. */ 3922 push_ok = false; 3923 __elements = 1 + qeth_count_elements(skb, proto_len); 3924 } 3925 3926 /* Compress skb to fit into one IO buffer: */ 3927 if (__elements > max_elements) { 3928 if (!skb_is_nonlinear(skb)) { 3929 /* Drop it, no easy way of shrinking it further.
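* The skb is already linear; there are no paged fragments that
* skb_linearize() could merge.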
*/ 3930 QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n", 3931 max_elements, __elements, skb->len); 3932 return -E2BIG; 3933 } 3934 3935 rc = skb_linearize(skb); 3936 if (rc) { 3937 QETH_TXQ_STAT_INC(queue, skbs_linearized_fail); 3938 return rc; 3939 } 3940 3941 QETH_TXQ_STAT_INC(queue, skbs_linearized); 3942 /* Linearization changed the layout, re-evaluate: */ 3943 goto check_layout; 3944 } 3945 3946 *elements = __elements; 3947 /* Add the header: */ 3948 if (push_ok) { 3949 *hdr = skb_push(skb, hdr_len); 3950 return hdr_len; 3951 } 3952 3953 /* Fall back to cache element with known-good alignment: */ 3954 if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE) 3955 return -E2BIG; 3956 *hdr = kmem_cache_alloc(qeth_core_header_cache, gfp); 3957 if (!*hdr) 3958 return -ENOMEM; 3959 /* Copy protocol headers behind HW header: */ 3960 skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len); 3961 return 0; 3962 } 3963 3964 static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue, 3965 struct sk_buff *curr_skb, 3966 struct qeth_hdr *curr_hdr) 3967 { 3968 struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start]; 3969 struct qeth_hdr *prev_hdr = queue->prev_hdr; 3970 3971 if (!prev_hdr) 3972 return true; 3973 3974 /* All packets must have the same target: */ 3975 if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { 3976 struct sk_buff *prev_skb = skb_peek(&buffer->skb_list); 3977 3978 return ether_addr_equal(eth_hdr(prev_skb)->h_dest, 3979 eth_hdr(curr_skb)->h_dest) && 3980 qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2); 3981 } 3982 3983 return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) && 3984 qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3); 3985 } 3986 3987 /** 3988 * qeth_fill_buffer() - map skb into an output buffer 3989 * @buf: buffer to transport the skb 3990 * @skb: skb to map into the buffer 3991 * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated 3992 * from qeth_core_header_cache. 
3993 * @offset: when mapping the skb, start at skb->data + offset 3994 * @hd_len: if > 0, build a dedicated header element of this size 3995 */ 3996 static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf, 3997 struct sk_buff *skb, struct qeth_hdr *hdr, 3998 unsigned int offset, unsigned int hd_len) 3999 { 4000 struct qdio_buffer *buffer = buf->buffer; 4001 int element = buf->next_element_to_fill; 4002 int length = skb_headlen(skb) - offset; 4003 char *data = skb->data + offset; 4004 unsigned int elem_length, cnt; 4005 bool is_first_elem = true; 4006 4007 __skb_queue_tail(&buf->skb_list, skb); 4008 4009 /* build dedicated element for HW Header */ 4010 if (hd_len) { 4011 is_first_elem = false; 4012 4013 buffer->element[element].addr = virt_to_phys(hdr); 4014 buffer->element[element].length = hd_len; 4015 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; 4016 4017 /* HW header is allocated from cache: */ 4018 if ((void *)hdr != skb->data) 4019 __set_bit(element, buf->from_kmem_cache); 4020 /* HW header was pushed and is contiguous with linear part: */ 4021 else if (length > 0 && !PAGE_ALIGNED(data) && 4022 (data == (char *)hdr + hd_len)) 4023 buffer->element[element].eflags |= 4024 SBAL_EFLAGS_CONTIGUOUS; 4025 4026 element++; 4027 } 4028 4029 /* map linear part into buffer element(s) */ 4030 while (length > 0) { 4031 elem_length = min_t(unsigned int, length, 4032 PAGE_SIZE - offset_in_page(data)); 4033 4034 buffer->element[element].addr = virt_to_phys(data); 4035 buffer->element[element].length = elem_length; 4036 length -= elem_length; 4037 if (is_first_elem) { 4038 is_first_elem = false; 4039 if (length || skb_is_nonlinear(skb)) 4040 /* skb needs additional elements */ 4041 buffer->element[element].eflags = 4042 SBAL_EFLAGS_FIRST_FRAG; 4043 else 4044 buffer->element[element].eflags = 0; 4045 } else { 4046 buffer->element[element].eflags = 4047 SBAL_EFLAGS_MIDDLE_FRAG; 4048 } 4049 4050 data += elem_length; 4051 element++; 4052 } 4053 4054 /* map page frags into buffer element(s) */ 4055 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { 4056 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; 4057 4058 data = skb_frag_address(frag); 4059 length = skb_frag_size(frag); 4060 while (length > 0) { 4061 elem_length = min_t(unsigned int, length, 4062 PAGE_SIZE - offset_in_page(data)); 4063 4064 buffer->element[element].addr = virt_to_phys(data); 4065 buffer->element[element].length = elem_length; 4066 buffer->element[element].eflags = 4067 SBAL_EFLAGS_MIDDLE_FRAG; 4068 4069 length -= elem_length; 4070 data += elem_length; 4071 element++; 4072 } 4073 } 4074 4075 if (buffer->element[element - 1].eflags) 4076 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG; 4077 buf->next_element_to_fill = element; 4078 return element; 4079 } 4080 4081 static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue, 4082 struct sk_buff *skb, unsigned int elements, 4083 struct qeth_hdr *hdr, unsigned int offset, 4084 unsigned int hd_len) 4085 { 4086 unsigned int bytes = qdisc_pkt_len(skb); 4087 struct qeth_qdio_out_buffer *buffer; 4088 unsigned int next_element; 4089 struct netdev_queue *txq; 4090 bool stopped = false; 4091 bool flush; 4092 4093 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)]; 4094 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb)); 4095 4096 /* Just a sanity check, the wake/stop logic should ensure that we always 4097 * get a free buffer. 
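* (The queue is stopped below once used_buffers reaches
* QDIO_MAX_BUFFERS_PER_Q, and woken again from the TX completion
* path once qeth_out_queue_is_full() no longer holds.)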
4098 */
4099 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4100 return -EBUSY;
4101
4102 flush = !qeth_iqd_may_bulk(queue, skb, hdr);
4103
4104 if (flush ||
4105 (buffer->next_element_to_fill + elements > queue->max_elements)) {
4106 if (buffer->next_element_to_fill > 0) {
4107 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4108 queue->bulk_count++;
4109 }
4110
4111 if (queue->bulk_count >= queue->bulk_max)
4112 flush = true;
4113
4114 if (flush)
4115 qeth_flush_queue(queue);
4116
4117 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
4118 queue->bulk_count)];
4119
4120 /* Sanity-check again: */
4121 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4122 return -EBUSY;
4123 }
4124
4125 if (buffer->next_element_to_fill == 0 &&
4126 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4127 /* If a TX completion happens right _here_ and fails to wake
4128 * the txq, then our re-check below will catch the race.
4129 */
4130 QETH_TXQ_STAT_INC(queue, stopped);
4131 netif_tx_stop_queue(txq);
4132 stopped = true;
4133 }
4134
4135 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4136 buffer->bytes += bytes;
4137 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4138 queue->prev_hdr = hdr;
4139
4140 flush = __netdev_tx_sent_queue(txq, bytes,
4141 !stopped && netdev_xmit_more());
4142
4143 if (flush || next_element >= queue->max_elements) {
4144 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4145 queue->bulk_count++;
4146
4147 if (queue->bulk_count >= queue->bulk_max)
4148 flush = true;
4149
4150 if (flush)
4151 qeth_flush_queue(queue);
4152 }
4153
4154 if (stopped && !qeth_out_queue_is_full(queue))
4155 netif_tx_start_queue(txq);
4156 return 0;
4157 }
4158
4159 static int qeth_do_send_packet(struct qeth_card *card,
4160 struct qeth_qdio_out_q *queue,
4161 struct sk_buff *skb, struct qeth_hdr *hdr,
4162 unsigned int offset, unsigned int hd_len,
4163 unsigned int elements_needed)
4164 {
4165 unsigned int start_index = queue->next_buf_to_fill;
4166 struct qeth_qdio_out_buffer *buffer;
4167 unsigned int next_element;
4168 struct netdev_queue *txq;
4169 bool stopped = false;
4170 int flush_count = 0;
4171 int do_pack = 0;
4172 int rc = 0;
4173
4174 buffer = queue->bufs[queue->next_buf_to_fill];
4175
4176 /* Just a sanity check, the wake/stop logic should ensure that we always
4177 * get a free buffer.
4178 */
4179 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4180 return -EBUSY;
4181
4182 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4183
4184 /* check if we need to switch packing state of this queue */
4185 qeth_switch_to_packing_if_needed(queue);
4186 if (queue->do_pack) {
4187 do_pack = 1;
4188 /* does packet fit in current buffer? */
4189 if (buffer->next_element_to_fill + elements_needed >
4190 queue->max_elements) {
4191 /* ...
no -> set state PRIMED */
4192 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4193 flush_count++;
4194 queue->next_buf_to_fill =
4195 QDIO_BUFNR(queue->next_buf_to_fill + 1);
4196 buffer = queue->bufs[queue->next_buf_to_fill];
4197
4198 /* We stepped forward, so sanity-check again: */
4199 if (atomic_read(&buffer->state) !=
4200 QETH_QDIO_BUF_EMPTY) {
4201 qeth_flush_buffers(queue, start_index,
4202 flush_count);
4203 rc = -EBUSY;
4204 goto out;
4205 }
4206 }
4207 }
4208
4209 if (buffer->next_element_to_fill == 0 &&
4210 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4211 /* If a TX completion happens right _here_ and fails to wake
4212 * the txq, then our re-check below will catch the race.
4213 */
4214 QETH_TXQ_STAT_INC(queue, stopped);
4215 netif_tx_stop_queue(txq);
4216 stopped = true;
4217 }
4218
4219 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4220 buffer->bytes += qdisc_pkt_len(skb);
4221 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4222
4223 if (queue->do_pack)
4224 QETH_TXQ_STAT_INC(queue, skbs_pack);
4225 if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
4226 flush_count++;
4227 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4228 queue->next_buf_to_fill =
4229 QDIO_BUFNR(queue->next_buf_to_fill + 1);
4230 }
4231
4232 if (flush_count)
4233 qeth_flush_buffers(queue, start_index, flush_count);
4234
4235 out:
4236 if (do_pack)
4237 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
4238
4239 if (stopped && !qeth_out_queue_is_full(queue))
4240 netif_tx_start_queue(txq);
4241 return rc;
4242 }
4243
4244 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
4245 unsigned int payload_len, struct sk_buff *skb,
4246 unsigned int proto_len)
4247 {
4248 struct qeth_hdr_ext_tso *ext = &hdr->ext;
4249
4250 ext->hdr_tot_len = sizeof(*ext);
4251 ext->imb_hdr_no = 1;
4252 ext->hdr_type = 1;
4253 ext->hdr_version = 1;
4254 ext->hdr_len = 28;
4255 ext->payload_len = payload_len;
4256 ext->mss = skb_shinfo(skb)->gso_size;
4257 ext->dg_hdr_len = proto_len;
4258 }
4259
4260 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
4261 struct qeth_qdio_out_q *queue, __be16 proto,
4262 void (*fill_header)(struct qeth_qdio_out_q *queue,
4263 struct qeth_hdr *hdr, struct sk_buff *skb,
4264 __be16 proto, unsigned int data_len))
4265 {
4266 unsigned int proto_len, hw_hdr_len;
4267 unsigned int frame_len = skb->len;
4268 bool is_tso = skb_is_gso(skb);
4269 unsigned int data_offset = 0;
4270 struct qeth_hdr *hdr = NULL;
4271 unsigned int hd_len = 0;
4272 unsigned int elements;
4273 int push_len, rc;
4274
4275 if (is_tso) {
4276 hw_hdr_len = sizeof(struct qeth_hdr_tso);
4277 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4278 } else {
4279 hw_hdr_len = sizeof(struct qeth_hdr);
4280 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
4281 }
4282
4283 rc = skb_cow_head(skb, hw_hdr_len);
4284 if (rc)
4285 return rc;
4286
4287 push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
4288 &elements);
4289 if (push_len < 0)
4290 return push_len;
4291 if (is_tso || !push_len) {
4292 /* HW header needs its own buffer element.
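* hd_len then covers the HW header plus the protocol headers
* (either pushed into the skb, or copied behind the cache-allocated
* header by qeth_add_hw_header()), and data_offset makes the
* mapping of the linear data skip those bytes.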
*/ 4293 hd_len = hw_hdr_len + proto_len; 4294 data_offset = push_len + proto_len; 4295 } 4296 memset(hdr, 0, hw_hdr_len); 4297 fill_header(queue, hdr, skb, proto, frame_len); 4298 if (is_tso) 4299 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr, 4300 frame_len - proto_len, skb, proto_len); 4301 4302 if (IS_IQD(card)) { 4303 rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset, 4304 hd_len); 4305 } else { 4306 /* TODO: drop skb_orphan() once TX completion is fast enough */ 4307 skb_orphan(skb); 4308 spin_lock(&queue->lock); 4309 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset, 4310 hd_len, elements); 4311 spin_unlock(&queue->lock); 4312 } 4313 4314 if (rc && !push_len) 4315 kmem_cache_free(qeth_core_header_cache, hdr); 4316 4317 return rc; 4318 } 4319 EXPORT_SYMBOL_GPL(qeth_xmit); 4320 4321 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, 4322 struct qeth_reply *reply, unsigned long data) 4323 { 4324 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4325 struct qeth_ipacmd_setadpparms *setparms; 4326 4327 QETH_CARD_TEXT(card, 4, "prmadpcb"); 4328 4329 setparms = &(cmd->data.setadapterparms); 4330 if (qeth_setadpparms_inspect_rc(cmd)) { 4331 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code); 4332 setparms->data.mode = SET_PROMISC_MODE_OFF; 4333 } 4334 card->info.promisc_mode = setparms->data.mode; 4335 return (cmd->hdr.return_code) ? -EIO : 0; 4336 } 4337 4338 void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable) 4339 { 4340 enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON : 4341 SET_PROMISC_MODE_OFF; 4342 struct qeth_cmd_buffer *iob; 4343 struct qeth_ipa_cmd *cmd; 4344 4345 QETH_CARD_TEXT(card, 4, "setprom"); 4346 QETH_CARD_TEXT_(card, 4, "mode:%x", mode); 4347 4348 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, 4349 SETADP_DATA_SIZEOF(mode)); 4350 if (!iob) 4351 return; 4352 cmd = __ipa_cmd(iob); 4353 cmd->data.setadapterparms.data.mode = mode; 4354 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); 4355 } 4356 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode); 4357 4358 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, 4359 struct qeth_reply *reply, unsigned long data) 4360 { 4361 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4362 struct qeth_ipacmd_setadpparms *adp_cmd; 4363 4364 QETH_CARD_TEXT(card, 4, "chgmaccb"); 4365 if (qeth_setadpparms_inspect_rc(cmd)) 4366 return -EIO; 4367 4368 adp_cmd = &cmd->data.setadapterparms; 4369 if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr)) 4370 return -EADDRNOTAVAIL; 4371 4372 if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) && 4373 !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC)) 4374 return -EADDRNOTAVAIL; 4375 4376 eth_hw_addr_set(card->dev, adp_cmd->data.change_addr.addr); 4377 return 0; 4378 } 4379 4380 int qeth_setadpparms_change_macaddr(struct qeth_card *card) 4381 { 4382 int rc; 4383 struct qeth_cmd_buffer *iob; 4384 struct qeth_ipa_cmd *cmd; 4385 4386 QETH_CARD_TEXT(card, 4, "chgmac"); 4387 4388 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, 4389 SETADP_DATA_SIZEOF(change_addr)); 4390 if (!iob) 4391 return -ENOMEM; 4392 cmd = __ipa_cmd(iob); 4393 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; 4394 cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN; 4395 ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr, 4396 card->dev->dev_addr); 4397 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb, 4398 NULL); 4399 return 
rc;
4400 }
4401 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4402
4403 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4404 struct qeth_reply *reply, unsigned long data)
4405 {
4406 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4407 struct qeth_set_access_ctrl *access_ctrl_req;
4408
4409 QETH_CARD_TEXT(card, 4, "setaccb");
4410
4411 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4412 QETH_CARD_TEXT_(card, 2, "rc=%d",
4413 cmd->data.setadapterparms.hdr.return_code);
4414 if (cmd->data.setadapterparms.hdr.return_code !=
4415 SET_ACCESS_CTRL_RC_SUCCESS)
4416 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4417 access_ctrl_req->subcmd_code, CARD_DEVID(card),
4418 cmd->data.setadapterparms.hdr.return_code);
4419 switch (qeth_setadpparms_inspect_rc(cmd)) {
4420 case SET_ACCESS_CTRL_RC_SUCCESS:
4421 if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
4422 dev_info(&card->gdev->dev,
4423 "QDIO data connection isolation is deactivated\n");
4424 else
4425 dev_info(&card->gdev->dev,
4426 "QDIO data connection isolation is activated\n");
4427 return 0;
4428 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4429 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4430 CARD_DEVID(card));
4431 return 0;
4432 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4433 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4434 CARD_DEVID(card));
4435 return 0;
4436 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
4437 dev_err(&card->gdev->dev, "Adapter does not "
4438 "support QDIO data connection isolation\n");
4439 return -EOPNOTSUPP;
4440 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
4441 dev_err(&card->gdev->dev,
4442 "Adapter is dedicated. "
4443 "QDIO data connection isolation not supported\n");
4444 return -EOPNOTSUPP;
4445 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
4446 dev_err(&card->gdev->dev,
4447 "TSO does not permit QDIO data connection isolation\n");
4448 return -EPERM;
4449 case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
4450 dev_err(&card->gdev->dev, "The adjacent switch port does not "
4451 "support reflective relay mode\n");
4452 return -EOPNOTSUPP;
4453 case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
4454 dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
4455 "enabled at the adjacent switch port\n");
4456 return -EREMOTEIO;
4457 case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
4458 dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
4459 "at the adjacent switch failed\n");
4460 /* benign error while disabling ISOLATION_MODE_FWD */
4461 return 0;
4462 default:
4463 return -EIO;
4464 }
4465 }
4466
4467 int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4468 enum qeth_ipa_isolation_modes mode)
4469 {
4470 int rc;
4471 struct qeth_cmd_buffer *iob;
4472 struct qeth_ipa_cmd *cmd;
4473 struct qeth_set_access_ctrl *access_ctrl_req;
4474
4475 QETH_CARD_TEXT(card, 4, "setacctl");
4476
4477 if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
4478 dev_err(&card->gdev->dev,
4479 "Adapter does not support QDIO data connection isolation\n");
4480 return -EOPNOTSUPP;
4481 }
4482
4483 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4484 SETADP_DATA_SIZEOF(set_access_ctrl));
4485 if (!iob)
4486 return -ENOMEM;
4487 cmd = __ipa_cmd(iob);
4488 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4489 access_ctrl_req->subcmd_code = mode;
4490
4491 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4492 NULL);
4493 if (rc) {
4494 QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
4495 QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d)) on device %x: sending failed\n",
4496 rc, CARD_DEVID(card));
4497 }
4498
4499 return rc;
4500 }
4501
4502 void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
4503 {
4504 struct qeth_card *card;
4505
4506 card = dev->ml_priv;
4507 QETH_CARD_TEXT(card, 4, "txtimeo");
4508 qeth_schedule_recovery(card);
4509 }
4510 EXPORT_SYMBOL_GPL(qeth_tx_timeout);
4511
4512 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4513 {
4514 struct qeth_card *card = dev->ml_priv;
4515 int rc = 0;
4516
4517 switch (regnum) {
4518 case MII_BMCR: /* Basic mode control register */
4519 rc = BMCR_FULLDPLX;
4520 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
4521 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
4522 (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
4523 rc |= BMCR_SPEED100;
4524 break;
4525 case MII_BMSR: /* Basic mode status register */
4526 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4527 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4528 BMSR_100BASE4;
4529 break;
4530 case MII_PHYSID1: /* PHYS ID 1 */
4531 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4532 dev->dev_addr[2];
4533 rc = (rc >> 5) & 0xFFFF;
4534 break;
4535 case MII_PHYSID2: /* PHYS ID 2 */
4536 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4537 break;
4538 case MII_ADVERTISE: /* Advertisement control reg */
4539 rc = ADVERTISE_ALL;
4540 break;
4541 case MII_LPA: /* Link partner ability reg */
4542 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4543 LPA_100BASE4 | LPA_LPACK;
4544 break;
4545 case MII_EXPANSION: /* Expansion register */
4546 break;
4547 case MII_DCOUNTER: /* disconnect
counter */ 4548 break; 4549 case MII_FCSCOUNTER: /* false carrier counter */ 4550 break; 4551 case MII_NWAYTEST: /* N-way auto-neg test register */ 4552 break; 4553 case MII_RERRCOUNTER: /* rx error counter */ 4554 rc = card->stats.rx_length_errors + 4555 card->stats.rx_frame_errors + 4556 card->stats.rx_fifo_errors; 4557 break; 4558 case MII_SREVISION: /* silicon revision */ 4559 break; 4560 case MII_RESV1: /* reserved 1 */ 4561 break; 4562 case MII_LBRERROR: /* loopback, rx, bypass error */ 4563 break; 4564 case MII_PHYADDR: /* physical address */ 4565 break; 4566 case MII_RESV2: /* reserved 2 */ 4567 break; 4568 case MII_TPISTATUS: /* TPI status for 10mbps */ 4569 break; 4570 case MII_NCONFIG: /* network interface config */ 4571 break; 4572 default: 4573 break; 4574 } 4575 return rc; 4576 } 4577 4578 static int qeth_snmp_command_cb(struct qeth_card *card, 4579 struct qeth_reply *reply, unsigned long data) 4580 { 4581 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4582 struct qeth_arp_query_info *qinfo = reply->param; 4583 struct qeth_ipacmd_setadpparms *adp_cmd; 4584 unsigned int data_len; 4585 void *snmp_data; 4586 4587 QETH_CARD_TEXT(card, 3, "snpcmdcb"); 4588 4589 if (cmd->hdr.return_code) { 4590 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code); 4591 return -EIO; 4592 } 4593 if (cmd->data.setadapterparms.hdr.return_code) { 4594 cmd->hdr.return_code = 4595 cmd->data.setadapterparms.hdr.return_code; 4596 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code); 4597 return -EIO; 4598 } 4599 4600 adp_cmd = &cmd->data.setadapterparms; 4601 data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr); 4602 if (adp_cmd->hdr.seq_no == 1) { 4603 snmp_data = &adp_cmd->data.snmp; 4604 } else { 4605 snmp_data = &adp_cmd->data.snmp.request; 4606 data_len -= offsetof(struct qeth_snmp_cmd, request); 4607 } 4608 4609 /* check if there is enough room in userspace */ 4610 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { 4611 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC); 4612 return -ENOSPC; 4613 } 4614 QETH_CARD_TEXT_(card, 4, "snore%i", 4615 cmd->data.setadapterparms.hdr.used_total); 4616 QETH_CARD_TEXT_(card, 4, "sseqn%i", 4617 cmd->data.setadapterparms.hdr.seq_no); 4618 /*copy entries to user buffer*/ 4619 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len); 4620 qinfo->udata_offset += data_len; 4621 4622 if (cmd->data.setadapterparms.hdr.seq_no < 4623 cmd->data.setadapterparms.hdr.used_total) 4624 return 1; 4625 return 0; 4626 } 4627 4628 static int qeth_snmp_command(struct qeth_card *card, char __user *udata) 4629 { 4630 struct qeth_snmp_ureq __user *ureq; 4631 struct qeth_cmd_buffer *iob; 4632 unsigned int req_len; 4633 struct qeth_arp_query_info qinfo = {0, }; 4634 int rc = 0; 4635 4636 QETH_CARD_TEXT(card, 3, "snmpcmd"); 4637 4638 if (IS_VM_NIC(card)) 4639 return -EOPNOTSUPP; 4640 4641 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && 4642 IS_LAYER3(card)) 4643 return -EOPNOTSUPP; 4644 4645 ureq = (struct qeth_snmp_ureq __user *) udata; 4646 if (get_user(qinfo.udata_len, &ureq->hdr.data_len) || 4647 get_user(req_len, &ureq->hdr.req_len)) 4648 return -EFAULT; 4649 4650 /* Sanitize user input, to avoid overflows in iob size calculation: */ 4651 if (req_len > QETH_BUFSIZE) 4652 return -EINVAL; 4653 4654 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len); 4655 if (!iob) 4656 return -ENOMEM; 4657 4658 if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp, 4659 &ureq->cmd, req_len)) { 4660 qeth_put_cmd(iob); 4661 
return -EFAULT; 4662 } 4663 4664 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); 4665 if (!qinfo.udata) { 4666 qeth_put_cmd(iob); 4667 return -ENOMEM; 4668 } 4669 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr); 4670 4671 rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo); 4672 if (rc) 4673 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n", 4674 CARD_DEVID(card), rc); 4675 else { 4676 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) 4677 rc = -EFAULT; 4678 } 4679 4680 kfree(qinfo.udata); 4681 return rc; 4682 } 4683 4684 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, 4685 struct qeth_reply *reply, 4686 unsigned long data) 4687 { 4688 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4689 struct qeth_qoat_priv *priv = reply->param; 4690 int resdatalen; 4691 4692 QETH_CARD_TEXT(card, 3, "qoatcb"); 4693 if (qeth_setadpparms_inspect_rc(cmd)) 4694 return -EIO; 4695 4696 resdatalen = cmd->data.setadapterparms.hdr.cmdlength; 4697 4698 if (resdatalen > (priv->buffer_len - priv->response_len)) 4699 return -ENOSPC; 4700 4701 memcpy(priv->buffer + priv->response_len, 4702 &cmd->data.setadapterparms.hdr, resdatalen); 4703 priv->response_len += resdatalen; 4704 4705 if (cmd->data.setadapterparms.hdr.seq_no < 4706 cmd->data.setadapterparms.hdr.used_total) 4707 return 1; 4708 return 0; 4709 } 4710 4711 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) 4712 { 4713 int rc = 0; 4714 struct qeth_cmd_buffer *iob; 4715 struct qeth_ipa_cmd *cmd; 4716 struct qeth_query_oat *oat_req; 4717 struct qeth_query_oat_data oat_data; 4718 struct qeth_qoat_priv priv; 4719 void __user *tmp; 4720 4721 QETH_CARD_TEXT(card, 3, "qoatcmd"); 4722 4723 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) 4724 return -EOPNOTSUPP; 4725 4726 if (copy_from_user(&oat_data, udata, sizeof(oat_data))) 4727 return -EFAULT; 4728 4729 priv.buffer_len = oat_data.buffer_len; 4730 priv.response_len = 0; 4731 priv.buffer = vzalloc(oat_data.buffer_len); 4732 if (!priv.buffer) 4733 return -ENOMEM; 4734 4735 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, 4736 SETADP_DATA_SIZEOF(query_oat)); 4737 if (!iob) { 4738 rc = -ENOMEM; 4739 goto out_free; 4740 } 4741 cmd = __ipa_cmd(iob); 4742 oat_req = &cmd->data.setadapterparms.data.query_oat; 4743 oat_req->subcmd_code = oat_data.command; 4744 4745 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv); 4746 if (!rc) { 4747 tmp = is_compat_task() ? 
compat_ptr(oat_data.ptr) : 4748 u64_to_user_ptr(oat_data.ptr); 4749 oat_data.response_len = priv.response_len; 4750 4751 if (copy_to_user(tmp, priv.buffer, priv.response_len) || 4752 copy_to_user(udata, &oat_data, sizeof(oat_data))) 4753 rc = -EFAULT; 4754 } 4755 4756 out_free: 4757 vfree(priv.buffer); 4758 return rc; 4759 } 4760 4761 static int qeth_query_card_info_cb(struct qeth_card *card, 4762 struct qeth_reply *reply, unsigned long data) 4763 { 4764 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4765 struct qeth_link_info *link_info = reply->param; 4766 struct qeth_query_card_info *card_info; 4767 4768 QETH_CARD_TEXT(card, 2, "qcrdincb"); 4769 if (qeth_setadpparms_inspect_rc(cmd)) 4770 return -EIO; 4771 4772 card_info = &cmd->data.setadapterparms.data.card_info; 4773 netdev_dbg(card->dev, 4774 "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n", 4775 card_info->card_type, card_info->port_mode, 4776 card_info->port_speed); 4777 4778 switch (card_info->port_mode) { 4779 case CARD_INFO_PORTM_FULLDUPLEX: 4780 link_info->duplex = DUPLEX_FULL; 4781 break; 4782 case CARD_INFO_PORTM_HALFDUPLEX: 4783 link_info->duplex = DUPLEX_HALF; 4784 break; 4785 default: 4786 link_info->duplex = DUPLEX_UNKNOWN; 4787 } 4788 4789 switch (card_info->card_type) { 4790 case CARD_INFO_TYPE_1G_COPPER_A: 4791 case CARD_INFO_TYPE_1G_COPPER_B: 4792 link_info->speed = SPEED_1000; 4793 link_info->port = PORT_TP; 4794 break; 4795 case CARD_INFO_TYPE_1G_FIBRE_A: 4796 case CARD_INFO_TYPE_1G_FIBRE_B: 4797 link_info->speed = SPEED_1000; 4798 link_info->port = PORT_FIBRE; 4799 break; 4800 case CARD_INFO_TYPE_10G_FIBRE_A: 4801 case CARD_INFO_TYPE_10G_FIBRE_B: 4802 link_info->speed = SPEED_10000; 4803 link_info->port = PORT_FIBRE; 4804 break; 4805 default: 4806 switch (card_info->port_speed) { 4807 case CARD_INFO_PORTS_10M: 4808 link_info->speed = SPEED_10; 4809 break; 4810 case CARD_INFO_PORTS_100M: 4811 link_info->speed = SPEED_100; 4812 break; 4813 case CARD_INFO_PORTS_1G: 4814 link_info->speed = SPEED_1000; 4815 break; 4816 case CARD_INFO_PORTS_10G: 4817 link_info->speed = SPEED_10000; 4818 break; 4819 case CARD_INFO_PORTS_25G: 4820 link_info->speed = SPEED_25000; 4821 break; 4822 default: 4823 link_info->speed = SPEED_UNKNOWN; 4824 } 4825 4826 link_info->port = PORT_OTHER; 4827 } 4828 4829 return 0; 4830 } 4831 4832 int qeth_query_card_info(struct qeth_card *card, 4833 struct qeth_link_info *link_info) 4834 { 4835 struct qeth_cmd_buffer *iob; 4836 4837 QETH_CARD_TEXT(card, 2, "qcrdinfo"); 4838 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO)) 4839 return -EOPNOTSUPP; 4840 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0); 4841 if (!iob) 4842 return -ENOMEM; 4843 4844 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info); 4845 } 4846 4847 static int qeth_init_link_info_oat_cb(struct qeth_card *card, 4848 struct qeth_reply *reply_priv, 4849 unsigned long data) 4850 { 4851 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4852 struct qeth_link_info *link_info = reply_priv->param; 4853 struct qeth_query_oat_physical_if *phys_if; 4854 struct qeth_query_oat_reply *reply; 4855 4856 if (qeth_setadpparms_inspect_rc(cmd)) 4857 return -EIO; 4858 4859 /* Multi-part reply is unexpected, don't bother: */ 4860 if (cmd->data.setadapterparms.hdr.used_total > 1) 4861 return -EINVAL; 4862 4863 /* Expect the reply to start with phys_if data: */ 4864 reply = &cmd->data.setadapterparms.data.query_oat.reply[0]; 4865 if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF || 4866 
reply->length < sizeof(*reply)) 4867 return -EINVAL; 4868 4869 phys_if = &reply->phys_if; 4870 4871 switch (phys_if->speed_duplex) { 4872 case QETH_QOAT_PHYS_SPEED_10M_HALF: 4873 link_info->speed = SPEED_10; 4874 link_info->duplex = DUPLEX_HALF; 4875 break; 4876 case QETH_QOAT_PHYS_SPEED_10M_FULL: 4877 link_info->speed = SPEED_10; 4878 link_info->duplex = DUPLEX_FULL; 4879 break; 4880 case QETH_QOAT_PHYS_SPEED_100M_HALF: 4881 link_info->speed = SPEED_100; 4882 link_info->duplex = DUPLEX_HALF; 4883 break; 4884 case QETH_QOAT_PHYS_SPEED_100M_FULL: 4885 link_info->speed = SPEED_100; 4886 link_info->duplex = DUPLEX_FULL; 4887 break; 4888 case QETH_QOAT_PHYS_SPEED_1000M_HALF: 4889 link_info->speed = SPEED_1000; 4890 link_info->duplex = DUPLEX_HALF; 4891 break; 4892 case QETH_QOAT_PHYS_SPEED_1000M_FULL: 4893 link_info->speed = SPEED_1000; 4894 link_info->duplex = DUPLEX_FULL; 4895 break; 4896 case QETH_QOAT_PHYS_SPEED_10G_FULL: 4897 link_info->speed = SPEED_10000; 4898 link_info->duplex = DUPLEX_FULL; 4899 break; 4900 case QETH_QOAT_PHYS_SPEED_25G_FULL: 4901 link_info->speed = SPEED_25000; 4902 link_info->duplex = DUPLEX_FULL; 4903 break; 4904 case QETH_QOAT_PHYS_SPEED_UNKNOWN: 4905 default: 4906 link_info->speed = SPEED_UNKNOWN; 4907 link_info->duplex = DUPLEX_UNKNOWN; 4908 break; 4909 } 4910 4911 switch (phys_if->media_type) { 4912 case QETH_QOAT_PHYS_MEDIA_COPPER: 4913 link_info->port = PORT_TP; 4914 link_info->link_mode = QETH_LINK_MODE_UNKNOWN; 4915 break; 4916 case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT: 4917 link_info->port = PORT_FIBRE; 4918 link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT; 4919 break; 4920 case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG: 4921 link_info->port = PORT_FIBRE; 4922 link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG; 4923 break; 4924 default: 4925 link_info->port = PORT_OTHER; 4926 link_info->link_mode = QETH_LINK_MODE_UNKNOWN; 4927 break; 4928 } 4929 4930 return 0; 4931 } 4932 4933 static void qeth_init_link_info(struct qeth_card *card) 4934 { 4935 card->info.link_info.duplex = DUPLEX_FULL; 4936 4937 if (IS_IQD(card) || IS_VM_NIC(card)) { 4938 card->info.link_info.speed = SPEED_10000; 4939 card->info.link_info.port = PORT_FIBRE; 4940 card->info.link_info.link_mode = QETH_LINK_MODE_FIBRE_SHORT; 4941 } else { 4942 switch (card->info.link_type) { 4943 case QETH_LINK_TYPE_FAST_ETH: 4944 case QETH_LINK_TYPE_LANE_ETH100: 4945 card->info.link_info.speed = SPEED_100; 4946 card->info.link_info.port = PORT_TP; 4947 break; 4948 case QETH_LINK_TYPE_GBIT_ETH: 4949 case QETH_LINK_TYPE_LANE_ETH1000: 4950 card->info.link_info.speed = SPEED_1000; 4951 card->info.link_info.port = PORT_FIBRE; 4952 break; 4953 case QETH_LINK_TYPE_10GBIT_ETH: 4954 card->info.link_info.speed = SPEED_10000; 4955 card->info.link_info.port = PORT_FIBRE; 4956 break; 4957 case QETH_LINK_TYPE_25GBIT_ETH: 4958 card->info.link_info.speed = SPEED_25000; 4959 card->info.link_info.port = PORT_FIBRE; 4960 break; 4961 default: 4962 dev_info(&card->gdev->dev, "Unknown link type %x\n", 4963 card->info.link_type); 4964 card->info.link_info.speed = SPEED_UNKNOWN; 4965 card->info.link_info.port = PORT_OTHER; 4966 } 4967 4968 card->info.link_info.link_mode = QETH_LINK_MODE_UNKNOWN; 4969 } 4970 4971 /* Get more accurate data via QUERY OAT: */ 4972 if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) { 4973 struct qeth_link_info link_info; 4974 struct qeth_cmd_buffer *iob; 4975 4976 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, 4977 SETADP_DATA_SIZEOF(query_oat)); 4978 if (iob) { 4979 struct qeth_ipa_cmd *cmd = __ipa_cmd(iob); 
4980 struct qeth_query_oat *oat_req; 4981 4982 oat_req = &cmd->data.setadapterparms.data.query_oat; 4983 oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE; 4984 4985 if (!qeth_send_ipa_cmd(card, iob, 4986 qeth_init_link_info_oat_cb, 4987 &link_info)) { 4988 if (link_info.speed != SPEED_UNKNOWN) 4989 card->info.link_info.speed = link_info.speed; 4990 if (link_info.duplex != DUPLEX_UNKNOWN) 4991 card->info.link_info.duplex = link_info.duplex; 4992 if (link_info.port != PORT_OTHER) 4993 card->info.link_info.port = link_info.port; 4994 if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN) 4995 card->info.link_info.link_mode = link_info.link_mode; 4996 } 4997 } 4998 } 4999 } 5000 5001 /** 5002 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address 5003 * @card: pointer to a qeth_card 5004 * 5005 * Returns 5006 * 0, if a MAC address has been set for the card's netdevice 5007 * a return code, for various error conditions 5008 */ 5009 int qeth_vm_request_mac(struct qeth_card *card) 5010 { 5011 struct diag26c_mac_resp *response; 5012 struct diag26c_mac_req *request; 5013 int rc; 5014 5015 QETH_CARD_TEXT(card, 2, "vmreqmac"); 5016 5017 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA); 5018 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA); 5019 if (!request || !response) { 5020 rc = -ENOMEM; 5021 goto out; 5022 } 5023 5024 request->resp_buf_len = sizeof(*response); 5025 request->resp_version = DIAG26C_VERSION2; 5026 request->op_code = DIAG26C_GET_MAC; 5027 request->devno = card->info.ddev_devno; 5028 5029 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 5030 rc = diag26c(request, response, DIAG26C_MAC_SERVICES); 5031 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 5032 if (rc) 5033 goto out; 5034 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response)); 5035 5036 if (request->resp_buf_len < sizeof(*response) || 5037 response->version != request->resp_version) { 5038 rc = -EIO; 5039 QETH_CARD_TEXT(card, 2, "badresp"); 5040 QETH_CARD_HEX(card, 2, &request->resp_buf_len, 5041 sizeof(request->resp_buf_len)); 5042 } else if (!is_valid_ether_addr(response->mac)) { 5043 rc = -EINVAL; 5044 QETH_CARD_TEXT(card, 2, "badmac"); 5045 QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN); 5046 } else { 5047 eth_hw_addr_set(card->dev, response->mac); 5048 } 5049 5050 out: 5051 kfree(response); 5052 kfree(request); 5053 return rc; 5054 } 5055 EXPORT_SYMBOL_GPL(qeth_vm_request_mac); 5056 5057 static void qeth_determine_capabilities(struct qeth_card *card) 5058 { 5059 struct qeth_channel *channel = &card->data; 5060 struct ccw_device *ddev = channel->ccwdev; 5061 int rc; 5062 int ddev_offline = 0; 5063 5064 QETH_CARD_TEXT(card, 2, "detcapab"); 5065 if (!ddev->online) { 5066 ddev_offline = 1; 5067 rc = qeth_start_channel(channel); 5068 if (rc) { 5069 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 5070 goto out; 5071 } 5072 } 5073 5074 rc = qeth_read_conf_data(card); 5075 if (rc) { 5076 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n", 5077 CARD_DEVID(card), rc); 5078 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 5079 goto out_offline; 5080 } 5081 5082 rc = qdio_get_ssqd_desc(ddev, &card->ssqd); 5083 if (rc) 5084 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 5085 5086 QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt); 5087 QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1); 5088 QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2); 5089 QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3); 5090 QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt); 5091 if (!((card->ssqd.qfmt != 
QDIO_IQDIO_QFMT) || 5092 ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) || 5093 ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) { 5094 dev_info(&card->gdev->dev, 5095 "Completion Queueing supported\n"); 5096 } else { 5097 card->options.cq = QETH_CQ_NOTAVAILABLE; 5098 } 5099 5100 out_offline: 5101 if (ddev_offline == 1) 5102 qeth_stop_channel(channel); 5103 out: 5104 return; 5105 } 5106 5107 static void qeth_read_ccw_conf_data(struct qeth_card *card) 5108 { 5109 struct qeth_card_info *info = &card->info; 5110 struct ccw_device *cdev = CARD_DDEV(card); 5111 struct ccw_dev_id dev_id; 5112 5113 QETH_CARD_TEXT(card, 2, "ccwconfd"); 5114 ccw_device_get_id(cdev, &dev_id); 5115 5116 info->ddev_devno = dev_id.devno; 5117 info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) && 5118 !ccw_device_get_iid(cdev, &info->iid) && 5119 !ccw_device_get_chid(cdev, 0, &info->chid); 5120 info->ssid = dev_id.ssid; 5121 5122 dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n", 5123 info->chid, info->chpid); 5124 5125 QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno); 5126 QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid); 5127 QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid); 5128 QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid); 5129 QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid); 5130 QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid); 5131 QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid); 5132 } 5133 5134 static int qeth_qdio_establish(struct qeth_card *card) 5135 { 5136 struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES]; 5137 struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES]; 5138 struct qeth_qib_parms *qib_parms = NULL; 5139 struct qdio_initialize init_data; 5140 unsigned int no_input_qs = 1; 5141 unsigned int i; 5142 int rc = 0; 5143 5144 QETH_CARD_TEXT(card, 2, "qdioest"); 5145 5146 if (!IS_IQD(card) && !IS_VM_NIC(card)) { 5147 qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL); 5148 if (!qib_parms) 5149 return -ENOMEM; 5150 5151 qeth_fill_qib_parms(card, qib_parms); 5152 } 5153 5154 in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs; 5155 if (card->options.cq == QETH_CQ_ENABLED) { 5156 in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs; 5157 no_input_qs++; 5158 } 5159 5160 for (i = 0; i < card->qdio.no_out_queues; i++) 5161 out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs; 5162 5163 memset(&init_data, 0, sizeof(struct qdio_initialize)); 5164 init_data.q_format = IS_IQD(card) ? 
QDIO_IQDIO_QFMT :
5165 QDIO_QETH_QFMT;
5166 init_data.qib_param_field_format = 0;
5167 init_data.qib_param_field = (void *)qib_parms;
5168 init_data.no_input_qs = no_input_qs;
5169 init_data.no_output_qs = card->qdio.no_out_queues;
5170 init_data.input_handler = qeth_qdio_input_handler;
5171 init_data.output_handler = qeth_qdio_output_handler;
5172 init_data.irq_poll = qeth_qdio_poll;
5173 init_data.int_parm = (unsigned long) card;
5174 init_data.input_sbal_addr_array = in_sbal_ptrs;
5175 init_data.output_sbal_addr_array = out_sbal_ptrs;
5176
5177 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
5178 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
5179 rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
5180 init_data.no_output_qs);
5181 if (rc) {
5182 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5183 goto out;
5184 }
5185 rc = qdio_establish(CARD_DDEV(card), &init_data);
5186 if (rc) {
5187 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5188 qdio_free(CARD_DDEV(card));
5189 }
5190 }
5191
5192 switch (card->options.cq) {
5193 case QETH_CQ_ENABLED:
5194 dev_info(&card->gdev->dev, "Completion Queue support enabled\n");
5195 break;
5196 case QETH_CQ_DISABLED:
5197 dev_info(&card->gdev->dev, "Completion Queue support disabled\n");
5198 break;
5199 default:
5200 break;
5201 }
5202
5203 out:
5204 kfree(qib_parms);
5205 return rc;
5206 }
5207
5208 static void qeth_core_free_card(struct qeth_card *card)
5209 {
5210 QETH_CARD_TEXT(card, 2, "freecrd");
5211
5212 unregister_service_level(&card->qeth_service_level);
5213 debugfs_remove_recursive(card->debugfs);
5214 qeth_put_cmd(card->read_cmd);
5215 destroy_workqueue(card->event_wq);
5216 dev_set_drvdata(&card->gdev->dev, NULL);
5217 kfree(card);
5218 }
5219
5220 static void qeth_trace_features(struct qeth_card *card)
5221 {
5222 QETH_CARD_TEXT(card, 2, "features");
5223 QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
5224 QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
5225 QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
5226 QETH_CARD_HEX(card, 2, &card->info.diagass_support,
5227 sizeof(card->info.diagass_support));
5228 }
5229
5230 static struct ccw_device_id qeth_ids[] = {
5231 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
5232 .driver_info = QETH_CARD_TYPE_OSD},
5233 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
5234 .driver_info = QETH_CARD_TYPE_IQD},
5235 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
5236 .driver_info = QETH_CARD_TYPE_OSM},
5237 #ifdef CONFIG_QETH_OSX
5238 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
5239 .driver_info = QETH_CARD_TYPE_OSX},
5240 #endif
5241 {},
5242 };
5243 MODULE_DEVICE_TABLE(ccw, qeth_ids);
5244
5245 static struct ccw_driver qeth_ccw_driver = {
5246 .driver = {
5247 .owner = THIS_MODULE,
5248 .name = "qeth",
5249 },
5250 .ids = qeth_ids,
5251 .probe = ccwgroup_probe_ccwdev,
5252 .remove = ccwgroup_remove_ccwdev,
5253 };
5254
5255 static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
5256 {
5257 int retries = 3;
5258 int rc;
5259
5260 QETH_CARD_TEXT(card, 2, "hrdsetup");
5261 atomic_set(&card->force_alloc_skb, 0);
5262 rc = qeth_update_from_chp_desc(card);
5263 if (rc)
5264 return rc;
5265 retry:
5266 if (retries < 3)
5267 QETH_DBF_MESSAGE(2, "Retrying IDX activation on device %x.\n",
5268 CARD_DEVID(card));
5269 rc = qeth_qdio_clear_card(card, !IS_IQD(card));
5270 qeth_stop_channel(&card->data);
5271 qeth_stop_channel(&card->write);
5272 qeth_stop_channel(&card->read);
5273
qdio_free(CARD_DDEV(card));
5274
5275 rc = qeth_start_channel(&card->read);
5276 if (rc)
5277 goto retriable;
5278 rc = qeth_start_channel(&card->write);
5279 if (rc)
5280 goto retriable;
5281 rc = qeth_start_channel(&card->data);
5282 if (rc)
5283 goto retriable;
5284 retriable:
5285 if (rc == -ERESTARTSYS) {
5286 QETH_CARD_TEXT(card, 2, "break1");
5287 return rc;
5288 } else if (rc) {
5289 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5290 if (--retries < 0)
5291 goto out;
5292 else
5293 goto retry;
5294 }
5295
5296 qeth_determine_capabilities(card);
5297 qeth_read_ccw_conf_data(card);
5298 qeth_idx_init(card);
5299
5300 rc = qeth_idx_activate_read_channel(card);
5301 if (rc == -EINTR) {
5302 QETH_CARD_TEXT(card, 2, "break2");
5303 return rc;
5304 } else if (rc) {
5305 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
5306 if (--retries < 0)
5307 goto out;
5308 else
5309 goto retry;
5310 }
5311
5312 rc = qeth_idx_activate_write_channel(card);
5313 if (rc == -EINTR) {
5314 QETH_CARD_TEXT(card, 2, "break3");
5315 return rc;
5316 } else if (rc) {
5317 QETH_CARD_TEXT_(card, 2, "4err%d", rc);
5318 if (--retries < 0)
5319 goto out;
5320 else
5321 goto retry;
5322 }
5323 card->read_or_write_problem = 0;
5324 rc = qeth_mpc_initialize(card);
5325 if (rc) {
5326 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
5327 goto out;
5328 }
5329
5330 rc = qeth_send_startlan(card);
5331 if (rc) {
5332 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5333 if (rc == -ENETDOWN) {
5334 dev_warn(&card->gdev->dev, "The LAN is offline\n");
5335 *carrier_ok = false;
5336 } else {
5337 goto out;
5338 }
5339 } else {
5340 *carrier_ok = true;
5341 }
5342
5343 card->options.ipa4.supported = 0;
5344 card->options.ipa6.supported = 0;
5345 card->options.adp.supported = 0;
5346 card->options.sbp.supported_funcs = 0;
5347 card->info.diagass_support = 0;
5348 rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
5349 if (rc == -ENOMEM)
5350 goto out;
5351 if (qeth_is_supported(card, IPA_IPV6)) {
5352 rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
5353 if (rc == -ENOMEM)
5354 goto out;
5355 }
5356 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
5357 rc = qeth_query_setadapterparms(card);
5358 if (rc < 0) {
5359 QETH_CARD_TEXT_(card, 2, "7err%d", rc);
5360 goto out;
5361 }
5362 }
5363 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
5364 rc = qeth_query_setdiagass(card);
5365 if (rc)
5366 QETH_CARD_TEXT_(card, 2, "8err%d", rc);
5367 }
5368
5369 qeth_trace_features(card);
5370
5371 if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
5372 (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
5373 card->info.hwtrap = 0;
5374
5375 if (card->options.isolation != ISOLATION_MODE_NONE) {
5376 rc = qeth_setadpparms_set_access_ctrl(card,
5377 card->options.isolation);
5378 if (rc)
5379 goto out;
5380 }
5381
5382 qeth_init_link_info(card);
5383
5384 rc = qeth_init_qdio_queues(card);
5385 if (rc) {
5386 QETH_CARD_TEXT_(card, 2, "9err%d", rc);
5387 goto out;
5388 }
5389
5390 return 0;
5391 out:
5392 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
5393 "from an error on the device\n");
5394 QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup!
rc=%d\n", 5395 CARD_DEVID(card), rc); 5396 return rc; 5397 } 5398 5399 static int qeth_set_online(struct qeth_card *card, 5400 const struct qeth_discipline *disc) 5401 { 5402 bool carrier_ok; 5403 int rc; 5404 5405 mutex_lock(&card->conf_mutex); 5406 QETH_CARD_TEXT(card, 2, "setonlin"); 5407 5408 rc = qeth_hardsetup_card(card, &carrier_ok); 5409 if (rc) { 5410 QETH_CARD_TEXT_(card, 2, "2err%04x", rc); 5411 rc = -ENODEV; 5412 goto err_hardsetup; 5413 } 5414 5415 qeth_print_status_message(card); 5416 5417 if (card->dev->reg_state != NETREG_REGISTERED) 5418 /* no need for locking / error handling at this early stage: */ 5419 qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card)); 5420 5421 rc = disc->set_online(card, carrier_ok); 5422 if (rc) 5423 goto err_online; 5424 5425 /* let user_space know that device is online */ 5426 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE); 5427 5428 mutex_unlock(&card->conf_mutex); 5429 return 0; 5430 5431 err_online: 5432 err_hardsetup: 5433 qeth_qdio_clear_card(card, 0); 5434 qeth_clear_working_pool_list(card); 5435 qeth_flush_local_addrs(card); 5436 5437 qeth_stop_channel(&card->data); 5438 qeth_stop_channel(&card->write); 5439 qeth_stop_channel(&card->read); 5440 qdio_free(CARD_DDEV(card)); 5441 5442 mutex_unlock(&card->conf_mutex); 5443 return rc; 5444 } 5445 5446 int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc, 5447 bool resetting) 5448 { 5449 int rc, rc2, rc3; 5450 5451 mutex_lock(&card->conf_mutex); 5452 QETH_CARD_TEXT(card, 3, "setoffl"); 5453 5454 if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) { 5455 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); 5456 card->info.hwtrap = 1; 5457 } 5458 5459 /* cancel any stalled cmd that might block the rtnl: */ 5460 qeth_clear_ipacmd_list(card); 5461 5462 rtnl_lock(); 5463 card->info.open_when_online = card->dev->flags & IFF_UP; 5464 dev_close(card->dev); 5465 netif_device_detach(card->dev); 5466 netif_carrier_off(card->dev); 5467 rtnl_unlock(); 5468 5469 cancel_work_sync(&card->rx_mode_work); 5470 5471 disc->set_offline(card); 5472 5473 qeth_qdio_clear_card(card, 0); 5474 qeth_drain_output_queues(card); 5475 qeth_clear_working_pool_list(card); 5476 qeth_flush_local_addrs(card); 5477 card->info.promisc_mode = 0; 5478 5479 rc = qeth_stop_channel(&card->data); 5480 rc2 = qeth_stop_channel(&card->write); 5481 rc3 = qeth_stop_channel(&card->read); 5482 if (!rc) 5483 rc = (rc2) ? rc2 : rc3; 5484 if (rc) 5485 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 5486 qdio_free(CARD_DDEV(card)); 5487 5488 /* let user_space know that device is offline */ 5489 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE); 5490 5491 mutex_unlock(&card->conf_mutex); 5492 return 0; 5493 } 5494 EXPORT_SYMBOL_GPL(qeth_set_offline); 5495 5496 static int qeth_do_reset(void *data) 5497 { 5498 const struct qeth_discipline *disc; 5499 struct qeth_card *card = data; 5500 int rc; 5501 5502 /* Lock-free, other users will block until we are done. 
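* (They synchronize on the QETH_RECOVER_THREAD bit that was set via
* qeth_schedule_recovery(), and which is cleared again at the end of
* this function.)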
*/
5503 disc = card->discipline;
5504
5505 QETH_CARD_TEXT(card, 2, "recover1");
5506 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
5507 return 0;
5508 QETH_CARD_TEXT(card, 2, "recover2");
5509 dev_warn(&card->gdev->dev,
5510 "A recovery process has been started for the device\n");
5511
5512 qeth_set_offline(card, disc, true);
5513 rc = qeth_set_online(card, disc);
5514 if (!rc) {
5515 dev_info(&card->gdev->dev,
5516 "Device successfully recovered!\n");
5517 } else {
5518 qeth_set_offline(card, disc, true);
5519 ccwgroup_set_offline(card->gdev, false);
5520 dev_warn(&card->gdev->dev,
5521 "The qeth device driver failed to recover from an error on the device\n");
5522 }
5523 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
5524 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
5525 return 0;
5526 }
5527
5528 #if IS_ENABLED(CONFIG_QETH_L3)
5529 static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
5530 struct qeth_hdr *hdr)
5531 {
5532 struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
5533 struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
5534 struct net_device *dev = skb->dev;
5535
5536 if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
5537 dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
5538 "FAKELL", skb->len);
5539 return;
5540 }
5541
5542 if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
5543 u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
5544 ETH_P_IP;
5545 unsigned char tg_addr[ETH_ALEN];
5546
5547 skb_reset_network_header(skb);
5548 switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
5549 case QETH_CAST_MULTICAST:
5550 if (prot == ETH_P_IP)
5551 ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
5552 else
5553 ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
5554 QETH_CARD_STAT_INC(card, rx_multicast);
5555 break;
5556 case QETH_CAST_BROADCAST:
5557 ether_addr_copy(tg_addr, dev->broadcast);
5558 QETH_CARD_STAT_INC(card, rx_multicast);
5559 break;
5560 default:
5561 if (card->options.sniffer)
5562 skb->pkt_type = PACKET_OTHERHOST;
5563 ether_addr_copy(tg_addr, dev->dev_addr);
5564 }
5565
5566 if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
5567 dev_hard_header(skb, dev, prot, tg_addr,
5568 &l3_hdr->next_hop.rx.src_mac, skb->len);
5569 else
5570 dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
5571 skb->len);
5572 }
5573
5574 /* copy VLAN tag from hdr into skb */
5575 if (!card->options.sniffer &&
5576 (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
5577 QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
5578 u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
5579 l3_hdr->vlan_id : 5580 l3_hdr->next_hop.rx.vlan_id; 5581 5582 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); 5583 } 5584 } 5585 #endif 5586 5587 static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb, 5588 struct qeth_hdr *hdr, bool uses_frags) 5589 { 5590 struct napi_struct *napi = &card->napi; 5591 bool is_cso; 5592 5593 switch (hdr->hdr.l2.id) { 5594 #if IS_ENABLED(CONFIG_QETH_L3) 5595 case QETH_HEADER_TYPE_LAYER3: 5596 qeth_l3_rebuild_skb(card, skb, hdr); 5597 is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ; 5598 break; 5599 #endif 5600 case QETH_HEADER_TYPE_LAYER2: 5601 is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ; 5602 break; 5603 default: 5604 /* never happens */ 5605 if (uses_frags) 5606 napi_free_frags(napi); 5607 else 5608 kfree_skb(skb); 5609 return; 5610 } 5611 5612 if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) { 5613 skb->ip_summed = CHECKSUM_UNNECESSARY; 5614 QETH_CARD_STAT_INC(card, rx_skb_csum); 5615 } else { 5616 skb->ip_summed = CHECKSUM_NONE; 5617 } 5618 5619 QETH_CARD_STAT_ADD(card, rx_bytes, skb->len); 5620 QETH_CARD_STAT_INC(card, rx_packets); 5621 if (skb_is_nonlinear(skb)) { 5622 QETH_CARD_STAT_INC(card, rx_sg_skbs); 5623 QETH_CARD_STAT_ADD(card, rx_sg_frags, 5624 skb_shinfo(skb)->nr_frags); 5625 } 5626 5627 if (uses_frags) { 5628 napi_gro_frags(napi); 5629 } else { 5630 skb->protocol = eth_type_trans(skb, skb->dev); 5631 napi_gro_receive(napi, skb); 5632 } 5633 } 5634 5635 static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len) 5636 { 5637 struct page *page = virt_to_page(data); 5638 unsigned int next_frag; 5639 5640 next_frag = skb_shinfo(skb)->nr_frags; 5641 get_page(page); 5642 skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len, 5643 data_len); 5644 } 5645 5646 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) 5647 { 5648 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY); 5649 } 5650 5651 static int qeth_extract_skb(struct qeth_card *card, 5652 struct qeth_qdio_buffer *qethbuffer, u8 *element_no, 5653 int *__offset) 5654 { 5655 struct qeth_priv *priv = netdev_priv(card->dev); 5656 struct qdio_buffer *buffer = qethbuffer->buffer; 5657 struct napi_struct *napi = &card->napi; 5658 struct qdio_buffer_element *element; 5659 unsigned int linear_len = 0; 5660 bool uses_frags = false; 5661 int offset = *__offset; 5662 bool use_rx_sg = false; 5663 unsigned int headroom; 5664 struct qeth_hdr *hdr; 5665 struct sk_buff *skb; 5666 int skb_len = 0; 5667 5668 element = &buffer->element[*element_no]; 5669 5670 next_packet: 5671 /* qeth_hdr must not cross element boundaries */ 5672 while (element->length < offset + sizeof(struct qeth_hdr)) { 5673 if (qeth_is_last_sbale(element)) 5674 return -ENODATA; 5675 element++; 5676 offset = 0; 5677 } 5678 5679 hdr = phys_to_virt(element->addr) + offset; 5680 offset += sizeof(*hdr); 5681 skb = NULL; 5682 5683 switch (hdr->hdr.l2.id) { 5684 case QETH_HEADER_TYPE_LAYER2: 5685 skb_len = hdr->hdr.l2.pkt_length; 5686 linear_len = ETH_HLEN; 5687 headroom = 0; 5688 break; 5689 case QETH_HEADER_TYPE_LAYER3: 5690 skb_len = hdr->hdr.l3.length; 5691 if (!IS_LAYER3(card)) { 5692 QETH_CARD_STAT_INC(card, rx_dropped_notsupp); 5693 goto walk_packet; 5694 } 5695 5696 if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) { 5697 linear_len = ETH_HLEN; 5698 headroom = 0; 5699 break; 5700 } 5701 5702 if (hdr->hdr.l3.flags & QETH_HDR_IPV6) 5703 linear_len = sizeof(struct ipv6hdr); 5704 else 5705 linear_len = sizeof(struct iphdr); 5706 headroom 
= ETH_HLEN; 5707 break; 5708 default: 5709 if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL) 5710 QETH_CARD_STAT_INC(card, rx_frame_errors); 5711 else 5712 QETH_CARD_STAT_INC(card, rx_dropped_notsupp); 5713 5714 /* Can't determine packet length, drop the whole buffer. */ 5715 return -EPROTONOSUPPORT; 5716 } 5717 5718 if (skb_len < linear_len) { 5719 QETH_CARD_STAT_INC(card, rx_dropped_runt); 5720 goto walk_packet; 5721 } 5722 5723 use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) || 5724 (skb_len > READ_ONCE(priv->rx_copybreak) && 5725 !atomic_read(&card->force_alloc_skb)); 5726 5727 if (use_rx_sg) { 5728 /* QETH_CQ_ENABLED only: */ 5729 if (qethbuffer->rx_skb && 5730 skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) { 5731 skb = qethbuffer->rx_skb; 5732 qethbuffer->rx_skb = NULL; 5733 goto use_skb; 5734 } 5735 5736 skb = napi_get_frags(napi); 5737 if (!skb) { 5738 /* -ENOMEM, no point in falling back further. */ 5739 QETH_CARD_STAT_INC(card, rx_dropped_nomem); 5740 goto walk_packet; 5741 } 5742 5743 if (skb_tailroom(skb) >= linear_len + headroom) { 5744 uses_frags = true; 5745 goto use_skb; 5746 } 5747 5748 netdev_info_once(card->dev, 5749 "Insufficient linear space in NAPI frags skb, need %u but have %u\n", 5750 linear_len + headroom, skb_tailroom(skb)); 5751 /* Shouldn't happen. Don't optimize, fall back to linear skb. */ 5752 } 5753 5754 linear_len = skb_len; 5755 skb = napi_alloc_skb(napi, linear_len + headroom); 5756 if (!skb) { 5757 QETH_CARD_STAT_INC(card, rx_dropped_nomem); 5758 goto walk_packet; 5759 } 5760 5761 use_skb: 5762 if (headroom) 5763 skb_reserve(skb, headroom); 5764 walk_packet: 5765 while (skb_len) { 5766 int data_len = min(skb_len, (int)(element->length - offset)); 5767 char *data = phys_to_virt(element->addr) + offset; 5768 5769 skb_len -= data_len; 5770 offset += data_len; 5771 5772 /* Extract data from current element: */ 5773 if (skb && data_len) { 5774 if (linear_len) { 5775 unsigned int copy_len; 5776 5777 copy_len = min_t(unsigned int, linear_len, 5778 data_len); 5779 5780 skb_put_data(skb, data, copy_len); 5781 linear_len -= copy_len; 5782 data_len -= copy_len; 5783 data += copy_len; 5784 } 5785 5786 if (data_len) 5787 qeth_create_skb_frag(skb, data, data_len); 5788 } 5789 5790 /* Step forward to next element: */ 5791 if (skb_len) { 5792 if (qeth_is_last_sbale(element)) { 5793 QETH_CARD_TEXT(card, 4, "unexeob"); 5794 QETH_CARD_HEX(card, 2, buffer, sizeof(void *)); 5795 if (skb) { 5796 if (uses_frags) 5797 napi_free_frags(napi); 5798 else 5799 kfree_skb(skb); 5800 QETH_CARD_STAT_INC(card, 5801 rx_length_errors); 5802 } 5803 return -EMSGSIZE; 5804 } 5805 element++; 5806 offset = 0; 5807 } 5808 } 5809 5810 /* This packet was skipped, go get another one: */ 5811 if (!skb) 5812 goto next_packet; 5813 5814 *element_no = element - &buffer->element[0]; 5815 *__offset = offset; 5816 5817 qeth_receive_skb(card, skb, hdr, uses_frags); 5818 return 0; 5819 } 5820 5821 static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget, 5822 struct qeth_qdio_buffer *buf, bool *done) 5823 { 5824 unsigned int work_done = 0; 5825 5826 while (budget) { 5827 if (qeth_extract_skb(card, buf, &card->rx.buf_element, 5828 &card->rx.e_offset)) { 5829 *done = true; 5830 break; 5831 } 5832 5833 work_done++; 5834 budget--; 5835 } 5836 5837 return work_done; 5838 } 5839 5840 static unsigned int qeth_rx_poll(struct qeth_card *card, int budget) 5841 { 5842 struct qeth_rx *ctx = &card->rx; 5843 unsigned int work_done = 0; 5844 5845 while (budget > 0) { 5846 struct qeth_qdio_buffer 
static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
{
	struct qeth_rx *ctx = &card->rx;
	unsigned int work_done = 0;

	while (budget > 0) {
		struct qeth_qdio_buffer *buffer;
		unsigned int skbs_done = 0;
		bool done = false;

		/* Fetch completed RX buffers: */
		if (!card->rx.b_count) {
			card->rx.qdio_err = 0;
			card->rx.b_count = qdio_inspect_queue(CARD_DDEV(card),
							      0, true,
							      &card->rx.b_index,
							      &card->rx.qdio_err);
			if (card->rx.b_count <= 0) {
				card->rx.b_count = 0;
				break;
			}
		}

		/* Process one completed RX buffer: */
		buffer = &card->qdio.in_q->bufs[card->rx.b_index];
		if (!(card->rx.qdio_err &&
		      qeth_check_qdio_errors(card, buffer->buffer,
					     card->rx.qdio_err, "qinerr")))
			skbs_done = qeth_extract_skbs(card, budget, buffer,
						      &done);
		else
			done = true;

		work_done += skbs_done;
		budget -= skbs_done;

		if (done) {
			QETH_CARD_STAT_INC(card, rx_bufs);
			qeth_put_buffer_pool_entry(card, buffer->pool_entry);
			buffer->pool_entry = NULL;
			card->rx.b_count--;
			ctx->bufs_refill++;
			ctx->bufs_refill -= qeth_rx_refill_queue(card,
								 ctx->bufs_refill);

			/* Step forward to next buffer: */
			card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
			card->rx.buf_element = 0;
			card->rx.e_offset = 0;
		}
	}

	return work_done;
}

static void qeth_cq_poll(struct qeth_card *card)
{
	unsigned int work_done = 0;

	while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
		unsigned int start, error;
		int completed;

		completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
					       &error);
		if (completed <= 0)
			return;

		qeth_qdio_cq_handler(card, error, 1, start, completed);
		work_done += completed;
	}
}

int qeth_poll(struct napi_struct *napi, int budget)
{
	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
	unsigned int work_done;

	work_done = qeth_rx_poll(card, budget);

	if (qeth_use_tx_irqs(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		qeth_for_each_output_queue(card, queue, i) {
			if (!qeth_out_queue_is_empty(queue))
				napi_schedule(&queue->napi);
		}
	}

	if (card->options.cq == QETH_CQ_ENABLED)
		qeth_cq_poll(card);

	if (budget) {
		struct qeth_rx *ctx = &card->rx;

		/* Process any substantial refill backlog: */
		ctx->bufs_refill -= qeth_rx_refill_queue(card,
							 ctx->bufs_refill);

		/* Exhausted the RX budget. Keep IRQ disabled, we get called
		 * again.
		 */
		if (work_done >= budget)
			return work_done;
	}

	if (napi_complete_done(napi, work_done) &&
	    qdio_start_irq(CARD_DDEV(card)))
		napi_schedule(napi);

	return work_done;
}
EXPORT_SYMBOL_GPL(qeth_poll);
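/* Complete one TX buffer on an IQD device. A buffer that completes with
 * QDIO_ERROR_SLSB_PENDING gets its final status delivered asynchronously
 * via its QAOB; the xchg() on the QAOB state resolves the race between
 * this path and the Completion Queue handler.
 */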
static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
				 unsigned int bidx, unsigned int qdio_error,
				 int budget)
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
	u8 sflags = buffer->buffer->element[15].sflags;
	struct qeth_card *card = queue->card;
	bool error = !!qdio_error;

	if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
		struct qaob *aob = buffer->aob;
		struct qeth_qaob_priv1 *priv;
		enum iucv_tx_notify notify;

		if (!aob) {
			netdev_WARN_ONCE(card->dev,
					 "Pending TX buffer %#x without QAOB on TX queue %u\n",
					 bidx, queue->queue_no);
			qeth_schedule_recovery(card);
			return;
		}

		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);

		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		/* QAOB hasn't completed yet: */
		if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) {
			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);

			/* Prepare the queue slot for immediate re-use: */
			qeth_scrub_qdio_buffer(buffer->buffer,
					       queue->max_elements);
			if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) {
				QETH_CARD_TEXT(card, 2, "outofbuf");
				qeth_schedule_recovery(card);
			}

			list_add(&buffer->list_entry, &queue->pending_bufs);
			/* Skip clearing the buffer: */
			return;
		}

		/* QAOB already completed: */
		notify = qeth_compute_cq_notification(aob->aorc, 0);
		qeth_notify_skbs(queue, buffer, notify);
		error = !!aob->aorc;
		memset(aob, 0, sizeof(*aob));
	} else if (card->options.cq == QETH_CQ_ENABLED) {
		qeth_notify_skbs(queue, buffer,
				 qeth_compute_cq_notification(sflags, 0));
	}

	qeth_clear_output_buffer(queue, buffer, error, budget);
}

static int qeth_tx_poll(struct napi_struct *napi, int budget)
{
	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
	unsigned int queue_no = queue->queue_no;
	struct qeth_card *card = queue->card;
	struct net_device *dev = card->dev;
	unsigned int work_done = 0;
	struct netdev_queue *txq;

	if (IS_IQD(card))
		txq = netdev_get_tx_queue(dev,
					  qeth_iqd_translate_txq(dev, queue_no));
	else
		txq = netdev_get_tx_queue(dev, queue_no);

	while (1) {
		unsigned int start, error, i;
		unsigned int packets = 0;
		unsigned int bytes = 0;
		int completed;

		qeth_tx_complete_pending_bufs(card, queue, false, budget);

		if (qeth_out_queue_is_empty(queue)) {
			napi_complete(napi);
			return 0;
		}

		/* Give the CPU a breather: */
		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
			QETH_TXQ_STAT_INC(queue, completion_yield);
			if (napi_complete_done(napi, 0))
				napi_schedule(napi);
			return 0;
		}

		completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
					       &start, &error);
		if (completed <= 0) {
			/* Ensure we see TX completion for pending work: */
			if (napi_complete_done(napi, 0) &&
			    !atomic_read(&queue->set_pci_flags_count))
				qeth_tx_arm_timer(queue, queue->rescan_usecs);
			return 0;
		}

		for (i = start; i < start + completed; i++) {
			struct qeth_qdio_out_buffer *buffer;
			unsigned int bidx = QDIO_BUFNR(i);

			buffer = queue->bufs[bidx];
			packets += buffer->frames;
			bytes += buffer->bytes;

			qeth_handle_send_error(card, buffer, error);
			if (IS_IQD(card))
				qeth_iqd_tx_complete(queue, bidx, error,
						     budget);
			else
				qeth_clear_output_buffer(queue, buffer, error,
							 budget);
		}

		atomic_sub(completed, &queue->used_buffers);
		work_done += completed;
		if (IS_IQD(card))
			netdev_tx_completed_queue(txq, packets, bytes);
		else
			qeth_check_outbound_queue(queue);

		/* xmit may have observed the full-condition, but not yet
		 * stopped the txq. In which case the code below won't trigger.
		 * So before returning, xmit will re-check the txq's fill level
		 * and wake it up if needed.
		 */
		if (netif_tx_queue_stopped(txq) &&
		    !qeth_out_queue_is_full(queue))
			netif_tx_wake_queue(txq);
	}
}
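/* For SETASSPARMS commands the return code may be reported in the
 * sub-command header rather than in the IPA header, so check both.
 */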
static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	return cmd->hdr.return_code;
}

static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
					struct qeth_reply *reply,
					unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipa_caps *caps = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	caps->supported = cmd->data.setassparms.data.caps.supported;
	caps->enabled = cmd->data.setassparms.data.caps.enabled;
	return 0;
}

int qeth_setassparms_cb(struct qeth_card *card,
			struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	QETH_CARD_TEXT(card, 4, "defadpcb");

	if (cmd->hdr.return_code)
		return -EIO;

	cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
		card->options.ipa4.enabled = cmd->hdr.assists.enabled;
	if (cmd->hdr.prot_version == QETH_PROT_IPV6)
		card->options.ipa6.enabled = cmd->hdr.assists.enabled;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);

struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
						 enum qeth_ipa_funcs ipa_func,
						 u16 cmd_code,
						 unsigned int data_length,
						 enum qeth_prot_versions prot)
{
	struct qeth_ipacmd_setassparms *setassparms;
	struct qeth_ipacmd_setassparms_hdr *hdr;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 4, "getasscm");
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
				 data_length +
				 offsetof(struct qeth_ipacmd_setassparms,
					  data));
	if (!iob)
		return NULL;

	setassparms = &__ipa_cmd(iob)->data.setassparms;
	setassparms->assist_no = ipa_func;

	hdr = &setassparms->hdr;
	hdr->length = sizeof(*hdr) + data_length;
	hdr->command_code = cmd_code;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
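/* Build and send a SETASSPARMS command that carries at most one 32-bit
 * data word. Callers use this for simple start/stop sequences, e.g.
 * (as in qeth_set_csum_off() below):
 *
 *	qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
 *					  NULL, prot);
 */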
int qeth_send_simple_setassparms_prot(struct qeth_card *card,
				      enum qeth_ipa_funcs ipa_func,
				      u16 cmd_code, u32 *data,
				      enum qeth_prot_versions prot)
{
	unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
	if (!iob)
		return -ENOMEM;

	if (data)
		__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);

static void qeth_unregister_dbf_views(void)
{
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		debug_unregister(qeth_dbf[x].id);
		qeth_dbf[x].id = NULL;
	}
}

void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
{
	char dbf_txt_buf[32];
	va_list args;

	if (!debug_level_enabled(id, level))
		return;
	va_start(args, fmt);
	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
	va_end(args);
	debug_text_event(id, level, dbf_txt_buf);
}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);

static int qeth_register_dbf_views(void)
{
	int ret;
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		/* register the areas */
		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
						qeth_dbf[x].pages,
						qeth_dbf[x].areas,
						qeth_dbf[x].len);
		if (qeth_dbf[x].id == NULL) {
			qeth_unregister_dbf_views();
			return -ENOMEM;
		}

		/* register a view */
		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
		if (ret) {
			qeth_unregister_dbf_views();
			return ret;
		}

		/* set a passing level */
		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
	}

	return 0;
}

static DEFINE_MUTEX(qeth_mod_mutex);	/* for synchronized module loading */

int qeth_setup_discipline(struct qeth_card *card,
			  enum qeth_discipline_id discipline)
{
	int rc;

	mutex_lock(&qeth_mod_mutex);
	switch (discipline) {
	case QETH_DISCIPLINE_LAYER3:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l3_discipline), "qeth_l3");
		break;
	case QETH_DISCIPLINE_LAYER2:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l2_discipline), "qeth_l2");
		break;
	default:
		break;
	}
	mutex_unlock(&qeth_mod_mutex);

	if (!card->discipline) {
		dev_err(&card->gdev->dev,
			"There is no kernel module to support discipline %d\n",
			discipline);
		return -EINVAL;
	}

	rc = card->discipline->setup(card->gdev);
	if (rc) {
		if (discipline == QETH_DISCIPLINE_LAYER2)
			symbol_put(qeth_l2_discipline);
		else
			symbol_put(qeth_l3_discipline);
		card->discipline = NULL;

		return rc;
	}

	card->options.layer = discipline;
	return 0;
}

void qeth_remove_discipline(struct qeth_card *card)
{
	card->discipline->remove(card->gdev);

	if (IS_LAYER2(card))
		symbol_put(qeth_l2_discipline);
	else
		symbol_put(qeth_l3_discipline);
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
	card->discipline = NULL;
}

static const struct device_type qeth_generic_devtype = {
	.name = "qeth_generic",
};

#define DBF_NAME_LEN	20

struct qeth_dbf_entry {
	char dbf_name[DBF_NAME_LEN];
	debug_info_t *dbf_info;
	struct list_head dbf_list;
};

static LIST_HEAD(qeth_dbf_list);
static DEFINE_MUTEX(qeth_dbf_list_mutex);
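/* Per-card dbf entries are kept on qeth_dbf_list so that a re-probed card
 * can re-use its previously registered debug area instead of registering
 * a new one; the list is only torn down at module exit.
 */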
static debug_info_t *qeth_get_dbf_entry(char *name)
{
	struct qeth_dbf_entry *entry;
	debug_info_t *rc = NULL;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
		if (strcmp(entry->dbf_name, name) == 0) {
			rc = entry->dbf_info;
			break;
		}
	}
	mutex_unlock(&qeth_dbf_list_mutex);
	return rc;
}

static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
{
	struct qeth_dbf_entry *new_entry;

	card->debug = debug_register(name, 2, 1, 8);
	if (!card->debug) {
		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
		goto err;
	}
	if (debug_register_view(card->debug, &debug_hex_ascii_view))
		goto err_dbg;
	new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
	if (!new_entry)
		goto err_dbg;
	strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
	new_entry->dbf_info = card->debug;
	mutex_lock(&qeth_dbf_list_mutex);
	list_add(&new_entry->dbf_list, &qeth_dbf_list);
	mutex_unlock(&qeth_dbf_list_mutex);

	return 0;

err_dbg:
	debug_unregister(card->debug);
err:
	return -ENOMEM;
}

static void qeth_clear_dbf_list(void)
{
	struct qeth_dbf_entry *entry, *tmp;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
		list_del(&entry->dbf_list);
		debug_unregister(entry->dbf_info);
		kfree(entry);
	}
	mutex_unlock(&qeth_dbf_list_mutex);
}
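/* Allocate the net_device that matches the card type: IQD devices get a
 * "hsi%d" name, OSM is limited to a single TX queue, everything else is
 * a standard multi-queue Ethernet device.
 */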
static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
{
	struct net_device *dev;
	struct qeth_priv *priv;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
				       ether_setup, QETH_MAX_OUT_QUEUES, 1);
		break;
	case QETH_CARD_TYPE_OSM:
		dev = alloc_etherdev(sizeof(*priv));
		break;
	default:
		dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
	}

	if (!dev)
		return NULL;

	priv = netdev_priv(dev);
	priv->rx_copybreak = QETH_RX_COPYBREAK;
	priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;

	dev->ml_priv = card;
	dev->watchdog_timeo = QETH_TX_TIMEOUT;
	dev->min_mtu = 576;
	/* initialized when device first goes online: */
	dev->max_mtu = 0;
	dev->mtu = 0;
	SET_NETDEV_DEV(dev, &card->gdev->dev);
	netif_carrier_off(dev);

	dev->ethtool_ops = &qeth_ethtool_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->hw_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_SG;
	if (IS_IQD(card))
		dev->features |= NETIF_F_SG;

	return dev;
}

struct net_device *qeth_clone_netdev(struct net_device *orig)
{
	struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);

	if (!clone)
		return NULL;

	clone->dev_port = orig->dev_port;
	return clone;
}

static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;
	struct device *dev;
	int rc;
	enum qeth_discipline_id enforced_disc;
	char dbf_name[DBF_NAME_LEN];

	QETH_DBF_TEXT(SETUP, 2, "probedev");

	dev = &gdev->dev;
	if (!get_device(dev))
		return -ENODEV;

	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));

	card = qeth_alloc_card(gdev);
	if (!card) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
		rc = -ENOMEM;
		goto err_dev;
	}

	snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
		 dev_name(&gdev->dev));
	card->debug = qeth_get_dbf_entry(dbf_name);
	if (!card->debug) {
		rc = qeth_add_dbf_entry(card, dbf_name);
		if (rc)
			goto err_card;
	}

	qeth_setup_card(card);
	card->dev = qeth_alloc_netdev(card);
	if (!card->dev) {
		rc = -ENOMEM;
		goto err_card;
	}

	qeth_determine_capabilities(card);
	qeth_set_blkt_defaults(card);

	card->qdio.no_out_queues = card->dev->num_tx_queues;
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		goto err_chp_desc;

	gdev->dev.groups = qeth_dev_groups;

	enforced_disc = qeth_enforce_discipline(card);
	switch (enforced_disc) {
	case QETH_DISCIPLINE_UNDETERMINED:
		gdev->dev.type = &qeth_generic_devtype;
		break;
	default:
		card->info.layer_enforced = true;
		/* It's so early that we don't need the discipline_mutex yet. */
		rc = qeth_setup_discipline(card, enforced_disc);
		if (rc)
			goto err_setup_disc;

		break;
	}

	return 0;

err_setup_disc:
err_chp_desc:
	free_netdev(card->dev);
err_card:
	qeth_core_free_card(card);
err_dev:
	put_device(dev);
	return rc;
}

static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 2, "removedv");

	mutex_lock(&card->discipline_mutex);
	if (card->discipline)
		qeth_remove_discipline(card);
	mutex_unlock(&card->discipline_mutex);

	qeth_free_qdio_queues(card);

	free_netdev(card->dev);
	qeth_core_free_card(card);
	put_device(&gdev->dev);
}

static int qeth_core_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
	enum qeth_discipline_id def_discipline;

	mutex_lock(&card->discipline_mutex);
	if (!card->discipline) {
		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
						QETH_DISCIPLINE_LAYER2;
		rc = qeth_setup_discipline(card, def_discipline);
		if (rc)
			goto err;
	}

	rc = qeth_set_online(card, card->discipline);

err:
	mutex_unlock(&card->discipline_mutex);
	return rc;
}

static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc;

	mutex_lock(&card->discipline_mutex);
	rc = qeth_set_offline(card, card->discipline, false);
	mutex_unlock(&card->discipline_mutex);

	return rc;
}

static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	qeth_set_allowed_threads(card, 0, 1);
	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qdio_free(CARD_DDEV(card));
}
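/* Writing the bus-IDs of a read/write/data CCW device triple to this
 * attribute creates the ccwgroup device, e.g. (device numbers are just
 * an example):
 *
 *	echo 0.0.a000,0.0.a001,0.0.a002 > /sys/bus/ccwgroup/drivers/qeth/group
 */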
static ssize_t group_store(struct device_driver *ddrv, const char *buf,
			   size_t count)
{
	int err;

	err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
				  buf);

	return err ? err : count;
}
static DRIVER_ATTR_WO(group);

static struct attribute *qeth_drv_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};
static struct attribute_group qeth_drv_attr_group = {
	.attrs = qeth_drv_attrs,
};
static const struct attribute_group *qeth_drv_attr_groups[] = {
	&qeth_drv_attr_group,
	NULL,
};

static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
	.driver = {
		.groups = qeth_drv_attr_groups,
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ccw_driver = &qeth_ccw_driver,
	.setup = qeth_core_probe_device,
	.remove = qeth_core_remove_device,
	.set_online = qeth_core_set_online,
	.set_offline = qeth_core_set_offline,
	.shutdown = qeth_core_shutdown,
};

int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq,
			void __user *data, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
		    !IS_VM_NIC(card))
			return 1;
		return 0;
	case SIOC_QETH_QUERY_OAT:
		rc = qeth_query_oat_command(card, data);
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_siocdevprivate);

int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_ioctl);
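/* Checksum offload is negotiated in two steps: IPA_CMD_ASS_START reports
 * which checksum types the assist supports, then IPA_CMD_ASS_ENABLE turns
 * on the required subset and the reply is checked to confirm that it is
 * both supported and enabled.
 */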
static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
			      unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u32 *features = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	*features = cmd->data.setassparms.data.flags_32bit;
	return 0;
}

static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			     enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
						 NULL, prot);
}

static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			    enum qeth_prot_versions prot, u8 *lp2lp)
{
	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	u32 features;
	int rc;

	/* some L3 HW requires combined L3+L4 csum offload: */
	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
		required_features |= QETH_IPA_CHECKSUM_IP_HDR;

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
				       prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
	if (rc)
		return rc;

	if ((required_features & features) != required_features) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(flags_32bit),
				       prot);
	if (!iob) {
		qeth_set_csum_off(card, cstype, prot);
		return -ENOMEM;
	}

	if (features & QETH_IPA_CHECKSUM_LP2LP)
		required_features |= QETH_IPA_CHECKSUM_LP2LP;
	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_csum_off(card, cstype, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, required_features) ||
	    !qeth_ipa_caps_enabled(&caps, required_features)) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);

	if (lp2lp)
		*lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);

	return 0;
}

static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
			     enum qeth_prot_versions prot, u8 *lp2lp)
{
	return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
		    qeth_set_csum_off(card, cstype, prot);
}
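/* TSO follows the same start-then-enable pattern as checksum offload:
 * IPA_CMD_ASS_START reports the supported MSS and send types,
 * IPA_CMD_ASS_ENABLE then activates QETH_IPA_LARGE_SEND_TCP and the
 * reply is verified.
 */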
static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_tso_start_data *tso_data = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	tso_data->mss = cmd->data.setassparms.data.tso.mss;
	tso_data->supported = cmd->data.setassparms.data.tso.supported;
	return 0;
}

static int qeth_set_tso_off(struct qeth_card *card,
			    enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
						 IPA_CMD_ASS_STOP, NULL, prot);
}

static int qeth_set_tso_on(struct qeth_card *card,
			   enum qeth_prot_versions prot)
{
	struct qeth_tso_start_data tso_data;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	int rc;

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_START, 0, prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
	if (rc)
		return rc;

	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(caps), prot);
	if (!iob) {
		qeth_set_tso_off(card, prot);
		return -ENOMEM;
	}

	/* enable TSO capability */
	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
		QETH_IPA_LARGE_SEND_TCP;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_tso_off(card, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
		 tso_data.mss);
	return 0;
}

static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
			    enum qeth_prot_versions prot)
{
	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
}

static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
	int rc_ipv6;

	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
					    QETH_PROT_IPV4, NULL);
	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		/* no/one Offload Assist available, so the rc is trivial */
		return rc_ipv4;

	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
				    QETH_PROT_IPV6, NULL);

	if (on)
		/* enable: success if any Assist is active */
		return (rc_ipv6) ? rc_ipv4 : 0;

	/* disable: failure if any Assist is still active */
	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}

/**
 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
 * @dev:	a net_device
 */
void qeth_enable_hw_features(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t features;

	features = dev->features;
	/* force-off any feature that might need an IPA sequence.
	 * netdev_update_features() will restart them.
	 */
	dev->features &= ~dev->hw_features;
	/* toggle VLAN filter, so that VIDs are re-programmed: */
	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}
	netdev_update_features(dev);
	if (features != dev->features)
		dev_warn(&card->gdev->dev,
			 "Device recovery failed to restore all offload features\n");
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
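/* The cached local addresses only matter while offloads that must be
 * restricted for local next-hops (TSO, and TX csum without lp2lp
 * support) are active; once such a feature goes off, flush the cache.
 */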
static void qeth_check_restricted_features(struct qeth_card *card,
					   netdev_features_t changed,
					   netdev_features_t actual)
{
	netdev_features_t ipv6_features = NETIF_F_TSO6;
	netdev_features_t ipv4_features = NETIF_F_TSO;

	if (!card->info.has_lp2lp_cso_v6)
		ipv6_features |= NETIF_F_IPV6_CSUM;
	if (!card->info.has_lp2lp_cso_v4)
		ipv4_features |= NETIF_F_IP_CSUM;

	if ((changed & ipv6_features) && !(actual & ipv6_features))
		qeth_flush_local_addrs6(card);
	if ((changed & ipv4_features) && !(actual & ipv4_features))
		qeth_flush_local_addrs4(card);
}

int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t changed = dev->features ^ features;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "setfeat");
	QETH_CARD_HEX(card, 2, &features, sizeof(features));

	if ((changed & NETIF_F_IP_CSUM)) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
				       &card->info.has_lp2lp_cso_v4);
		if (rc)
			changed ^= NETIF_F_IP_CSUM;
	}
	if (changed & NETIF_F_IPV6_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
				       &card->info.has_lp2lp_cso_v6);
		if (rc)
			changed ^= NETIF_F_IPV6_CSUM;
	}
	if (changed & NETIF_F_RXCSUM) {
		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
		if (rc)
			changed ^= NETIF_F_RXCSUM;
	}
	if (changed & NETIF_F_TSO) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
				      QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_TSO;
	}
	if (changed & NETIF_F_TSO6) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
				      QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_TSO6;
	}

	qeth_check_restricted_features(card, dev->features ^ features,
				       dev->features ^ changed);

	/* everything changed successfully? */
	if ((dev->features ^ features) == changed)
		return 0;
	/* something went wrong. save changed features and return error */
	dev->features ^= changed;
	return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_set_features);
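/* Mask out features for which the card lacks the corresponding HW assist,
 * so that the stack never tries to enable them.
 */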
netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 2, "fixfeat");
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_RXCSUM;
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;

	QETH_CARD_HEX(card, 2, &features, sizeof(features));
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);

netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	/* Traffic with local next-hop is not eligible for some offloads: */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
		netdev_features_t restricted = 0;

		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
			restricted |= NETIF_F_ALL_TSO;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			if (!card->info.has_lp2lp_cso_v4)
				restricted |= NETIF_F_IP_CSUM;

			if (restricted && qeth_next_hop_is_local_v4(card, skb))
				features &= ~restricted;
			break;
		case htons(ETH_P_IPV6):
			if (!card->info.has_lp2lp_cso_v6)
				restricted |= NETIF_F_IPV6_CSUM;

			if (restricted && qeth_next_hop_is_local_v6(card, skb))
				features &= ~restricted;
			break;
		default:
			break;
		}
	}

	/* GSO segmentation builds skbs with
	 *	a (small) linear part for the headers, and
	 *	page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* match skb_segment(): */
		unsigned int doffset = skb->data - skb_mac_header(skb);
		unsigned int hsize = skb_shinfo(skb)->gso_size;
		unsigned int hroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);
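/* RX counters are tracked per card, TX counters are summed up over all
 * output queues.
 */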
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "getstat");

	stats->rx_packets = card->stats.rx_packets;
	stats->rx_bytes = card->stats.rx_bytes;
	stats->rx_errors = card->stats.rx_length_errors +
			   card->stats.rx_frame_errors +
			   card->stats.rx_fifo_errors;
	stats->rx_dropped = card->stats.rx_dropped_nomem +
			    card->stats.rx_dropped_notsupp +
			    card->stats.rx_dropped_runt;
	stats->multicast = card->stats.rx_multicast;
	stats->rx_length_errors = card->stats.rx_length_errors;
	stats->rx_frame_errors = card->stats.rx_frame_errors;
	stats->rx_fifo_errors = card->stats.rx_fifo_errors;

	for (i = 0; i < card->qdio.no_out_queues; i++) {
		queue = card->qdio.out_qs[i];

		stats->tx_packets += queue->stats.tx_packets;
		stats->tx_bytes += queue->stats.tx_bytes;
		stats->tx_errors += queue->stats.tx_errors;
		stats->tx_dropped += queue->stats.tx_dropped;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);

#define TC_IQD_UCAST	0
static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
				     unsigned int ucast_txqs)
{
	unsigned int prio;

	/* IQD requires mcast traffic to be placed on a dedicated queue, and
	 * qeth_iqd_select_queue() deals with this.
	 * For unicast traffic, we defer the queue selection to the stack.
	 * By installing a trivial prio map that spans over only the unicast
	 * queues, we can encourage the stack to spread the ucast traffic evenly
	 * without selecting the mcast queue.
	 */

	/* One traffic class, spanning over all active ucast queues: */
	netdev_set_num_tc(dev, 1);
	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
			    QETH_IQD_MIN_UCAST_TXQ);

	/* Map all priorities to this traffic class: */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
}

int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
{
	struct net_device *dev = card->dev;
	int rc;

	/* Per netif_setup_tc(), adjust the mapping first: */
	if (IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, count - 1);

	rc = netif_set_real_num_tx_queues(dev, count);

	if (rc && IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);

u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
	u16 txq;

	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;
	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
		return QETH_IQD_MIN_UCAST_TXQ;

	txq = netdev_pick_tx(dev, skb, sb_dev);
	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
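/* Bring up the interface: start the TX queues first, then register and
 * kick one NAPI instance per TX queue plus the RX NAPI instance, all
 * under disabled bottom halves so the softirqs run right away.
 */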
int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethopen");

	card->data.state = CH_STATE_UP;
	netif_tx_start_all_queues(dev);

	local_bh_disable();
	qeth_for_each_output_queue(card, queue, i) {
		netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
				  QETH_NAPI_WEIGHT);
		napi_enable(&queue->napi);
		napi_schedule(&queue->napi);
	}

	napi_enable(&card->napi);
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);

int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethstop");

	napi_disable(&card->napi);
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	qdio_stop_irq(CARD_DDEV(card));

	/* Quiesce the NAPI instances: */
	qeth_for_each_output_queue(card, queue, i)
		napi_disable(&queue->napi);

	/* Stop .ndo_start_xmit, might still access queue->napi. */
	netif_tx_disable(dev);

	qeth_for_each_output_queue(card, queue, i) {
		del_timer_sync(&queue->timer);
		/* Queues may get re-allocated, so remove the NAPIs. */
		netif_napi_del(&queue->napi);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);

static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
						   sizeof(struct qeth_qdio_out_buffer),
						   0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}

	qeth_qaob_cache = kmem_cache_create("qeth_qaob",
					    sizeof(struct qaob),
					    sizeof(struct qaob),
					    0, NULL);
	if (!qeth_qaob_cache) {
		rc = -ENOMEM;
		goto qaob_err;
	}

	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qaob_cache);
qaob_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}
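/* Module exit: drop the per-card dbf entries, then tear everything down
 * in reverse order of qeth_core_init().
 */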
static void __exit qeth_core_exit(void)
{
	qeth_clear_dbf_list();
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qaob_cache);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_info("core functions removed\n");
}

module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");