1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright IBM Corp. 2007, 2009 4 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, 5 * Frank Pavlic <fpavlic@de.ibm.com>, 6 * Thomas Spatzier <tspat@de.ibm.com>, 7 * Frank Blaschka <frank.blaschka@de.ibm.com> 8 */ 9 10 #define KMSG_COMPONENT "qeth" 11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 13 #include <linux/compat.h> 14 #include <linux/module.h> 15 #include <linux/moduleparam.h> 16 #include <linux/string.h> 17 #include <linux/errno.h> 18 #include <linux/kernel.h> 19 #include <linux/log2.h> 20 #include <linux/io.h> 21 #include <linux/ip.h> 22 #include <linux/tcp.h> 23 #include <linux/mii.h> 24 #include <linux/mm.h> 25 #include <linux/kthread.h> 26 #include <linux/slab.h> 27 #include <linux/if_vlan.h> 28 #include <linux/netdevice.h> 29 #include <linux/netdev_features.h> 30 #include <linux/rcutree.h> 31 #include <linux/skbuff.h> 32 #include <linux/vmalloc.h> 33 34 #include <net/iucv/af_iucv.h> 35 #include <net/dsfield.h> 36 #include <net/sock.h> 37 38 #include <asm/ebcdic.h> 39 #include <asm/chpid.h> 40 #include <asm/sysinfo.h> 41 #include <asm/diag.h> 42 #include <asm/cio.h> 43 #include <asm/ccwdev.h> 44 #include <asm/cpcmd.h> 45 46 #include "qeth_core.h" 47 48 struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = { 49 /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */ 50 /* N P A M L V H */ 51 [QETH_DBF_SETUP] = {"qeth_setup", 52 8, 1, 8, 5, &debug_hex_ascii_view, NULL}, 53 [QETH_DBF_MSG] = {"qeth_msg", 8, 1, 11 * sizeof(long), 3, 54 &debug_sprintf_view, NULL}, 55 [QETH_DBF_CTRL] = {"qeth_control", 56 8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL}, 57 }; 58 EXPORT_SYMBOL_GPL(qeth_dbf); 59 60 static struct kmem_cache *qeth_core_header_cache; 61 static struct kmem_cache *qeth_qdio_outbuf_cache; 62 static struct kmem_cache *qeth_qaob_cache; 63 64 static struct device *qeth_core_root_dev; 65 static struct dentry *qeth_debugfs_root; 66 static struct lock_class_key qdio_out_skb_queue_key; 67 68 static void qeth_issue_next_read_cb(struct qeth_card *card, 69 struct qeth_cmd_buffer *iob, 70 unsigned int data_length); 71 static int qeth_qdio_establish(struct qeth_card *); 72 static void qeth_free_qdio_queues(struct qeth_card *card); 73 74 static const char *qeth_get_cardname(struct qeth_card *card) 75 { 76 if (IS_VM_NIC(card)) { 77 switch (card->info.type) { 78 case QETH_CARD_TYPE_OSD: 79 return " Virtual NIC QDIO"; 80 case QETH_CARD_TYPE_IQD: 81 return " Virtual NIC Hiper"; 82 case QETH_CARD_TYPE_OSM: 83 return " Virtual NIC QDIO - OSM"; 84 case QETH_CARD_TYPE_OSX: 85 return " Virtual NIC QDIO - OSX"; 86 default: 87 return " unknown"; 88 } 89 } else { 90 switch (card->info.type) { 91 case QETH_CARD_TYPE_OSD: 92 return " OSD Express"; 93 case QETH_CARD_TYPE_IQD: 94 return " HiperSockets"; 95 case QETH_CARD_TYPE_OSM: 96 return " OSM QDIO"; 97 case QETH_CARD_TYPE_OSX: 98 return " OSX QDIO"; 99 default: 100 return " unknown"; 101 } 102 } 103 return " n/a"; 104 } 105 106 /* max length to be returned: 14 */ 107 const char *qeth_get_cardname_short(struct qeth_card *card) 108 { 109 if (IS_VM_NIC(card)) { 110 switch (card->info.type) { 111 case QETH_CARD_TYPE_OSD: 112 return "Virt.NIC QDIO"; 113 case QETH_CARD_TYPE_IQD: 114 return "Virt.NIC Hiper"; 115 case QETH_CARD_TYPE_OSM: 116 return "Virt.NIC OSM"; 117 case QETH_CARD_TYPE_OSX: 118 return "Virt.NIC OSX"; 119 default: 120 return "unknown"; 121 } 122 } else { 123 switch (card->info.type) { 124 case QETH_CARD_TYPE_OSD: 125 switch (card->info.link_type) { 126 case QETH_LINK_TYPE_FAST_ETH: 127 return "OSD_100"; 128 case QETH_LINK_TYPE_HSTR: 129 return "HSTR"; 130 case QETH_LINK_TYPE_GBIT_ETH: 131 return "OSD_1000"; 132 case QETH_LINK_TYPE_10GBIT_ETH: 133 return "OSD_10GIG"; 134 case QETH_LINK_TYPE_25GBIT_ETH: 135 return "OSD_25GIG"; 136 case QETH_LINK_TYPE_LANE_ETH100: 137 return "OSD_FE_LANE"; 138 case QETH_LINK_TYPE_LANE_TR: 139 return "OSD_TR_LANE"; 140 case QETH_LINK_TYPE_LANE_ETH1000: 141 return "OSD_GbE_LANE"; 142 case QETH_LINK_TYPE_LANE: 143 return "OSD_ATM_LANE"; 144 default: 145 return "OSD_Express"; 146 } 147 case QETH_CARD_TYPE_IQD: 148 return "HiperSockets"; 149 case QETH_CARD_TYPE_OSM: 150 return "OSM_1000"; 151 case QETH_CARD_TYPE_OSX: 152 return "OSX_10GIG"; 153 default: 154 return "unknown"; 155 } 156 } 157 return "n/a"; 158 } 159 160 void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, 161 int clear_start_mask) 162 { 163 unsigned long flags; 164 165 spin_lock_irqsave(&card->thread_mask_lock, flags); 166 card->thread_allowed_mask = threads; 167 if (clear_start_mask) 168 card->thread_start_mask &= threads; 169 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 170 wake_up(&card->wait_q); 171 } 172 EXPORT_SYMBOL_GPL(qeth_set_allowed_threads); 173 174 int qeth_threads_running(struct qeth_card *card, unsigned long threads) 175 { 176 unsigned long flags; 177 int rc = 0; 178 179 spin_lock_irqsave(&card->thread_mask_lock, flags); 180 rc = (card->thread_running_mask & threads); 181 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 182 return rc; 183 } 184 EXPORT_SYMBOL_GPL(qeth_threads_running); 185 186 static void qeth_clear_working_pool_list(struct qeth_card *card) 187 { 188 struct qeth_buffer_pool_entry *pool_entry, *tmp; 189 struct qeth_qdio_q *queue = card->qdio.in_q; 190 unsigned int i; 191 192 QETH_CARD_TEXT(card, 5, "clwrklst"); 193 list_for_each_entry_safe(pool_entry, tmp, 194 &card->qdio.in_buf_pool.entry_list, list) 195 list_del(&pool_entry->list); 196 197 for (i = 0; i < ARRAY_SIZE(queue->bufs); i++) 198 queue->bufs[i].pool_entry = NULL; 199 } 200 201 static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry) 202 { 203 unsigned int i; 204 205 for (i = 0; i < ARRAY_SIZE(entry->elements); i++) { 206 if (entry->elements[i]) 207 __free_page(entry->elements[i]); 208 } 209 210 kfree(entry); 211 } 212 213 static void qeth_free_buffer_pool(struct qeth_card *card) 214 { 215 struct qeth_buffer_pool_entry *entry, *tmp; 216 217 list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list, 218 init_list) { 219 list_del(&entry->init_list); 220 qeth_free_pool_entry(entry); 221 } 222 } 223 224 static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages) 225 { 226 struct qeth_buffer_pool_entry *entry; 227 unsigned int i; 228 229 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 230 if (!entry) 231 return NULL; 232 233 for (i = 0; i < pages; i++) { 234 entry->elements[i] = __dev_alloc_page(GFP_KERNEL); 235 236 if (!entry->elements[i]) { 237 qeth_free_pool_entry(entry); 238 return NULL; 239 } 240 } 241 242 return entry; 243 } 244 245 static int qeth_alloc_buffer_pool(struct qeth_card *card) 246 { 247 unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card); 248 unsigned int i; 249 250 QETH_CARD_TEXT(card, 5, "alocpool"); 251 for (i = 0; i < card->qdio.init_pool.buf_count; ++i) { 252 struct qeth_buffer_pool_entry *entry; 253 254 entry = qeth_alloc_pool_entry(buf_elements); 255 if (!entry) { 256 qeth_free_buffer_pool(card); 257 return -ENOMEM; 258 } 259 260 list_add(&entry->init_list, &card->qdio.init_pool.entry_list); 261 } 262 return 0; 263 } 264 265 int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count) 266 { 267 unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card); 268 struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool; 269 struct qeth_buffer_pool_entry *entry, *tmp; 270 int delta = count - pool->buf_count; 271 LIST_HEAD(entries); 272 273 QETH_CARD_TEXT(card, 2, "realcbp"); 274 275 /* Defer until pool is allocated: */ 276 if (list_empty(&pool->entry_list)) 277 goto out; 278 279 /* Remove entries from the pool: */ 280 while (delta < 0) { 281 entry = list_first_entry(&pool->entry_list, 282 struct qeth_buffer_pool_entry, 283 init_list); 284 list_del(&entry->init_list); 285 qeth_free_pool_entry(entry); 286 287 delta++; 288 } 289 290 /* Allocate additional entries: */ 291 while (delta > 0) { 292 entry = qeth_alloc_pool_entry(buf_elements); 293 if (!entry) { 294 list_for_each_entry_safe(entry, tmp, &entries, 295 init_list) { 296 list_del(&entry->init_list); 297 qeth_free_pool_entry(entry); 298 } 299 300 return -ENOMEM; 301 } 302 303 list_add(&entry->init_list, &entries); 304 305 delta--; 306 } 307 308 list_splice(&entries, &pool->entry_list); 309 310 out: 311 card->qdio.in_buf_pool.buf_count = count; 312 pool->buf_count = count; 313 return 0; 314 } 315 EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool); 316 317 static void qeth_free_qdio_queue(struct qeth_qdio_q *q) 318 { 319 if (!q) 320 return; 321 322 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 323 kfree(q); 324 } 325 326 static struct qeth_qdio_q *qeth_alloc_qdio_queue(void) 327 { 328 struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL); 329 int i; 330 331 if (!q) 332 return NULL; 333 334 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) { 335 kfree(q); 336 return NULL; 337 } 338 339 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) 340 q->bufs[i].buffer = q->qdio_bufs[i]; 341 342 QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *)); 343 return q; 344 } 345 346 static int qeth_cq_init(struct qeth_card *card) 347 { 348 int rc; 349 350 if (card->options.cq == QETH_CQ_ENABLED) { 351 QETH_CARD_TEXT(card, 2, "cqinit"); 352 qdio_reset_buffers(card->qdio.c_q->qdio_bufs, 353 QDIO_MAX_BUFFERS_PER_Q); 354 card->qdio.c_q->next_buf_to_init = 127; 355 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 1, 0, 127, 356 NULL); 357 if (rc) { 358 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 359 goto out; 360 } 361 } 362 rc = 0; 363 out: 364 return rc; 365 } 366 367 static int qeth_alloc_cq(struct qeth_card *card) 368 { 369 if (card->options.cq == QETH_CQ_ENABLED) { 370 QETH_CARD_TEXT(card, 2, "cqon"); 371 card->qdio.c_q = qeth_alloc_qdio_queue(); 372 if (!card->qdio.c_q) { 373 dev_err(&card->gdev->dev, "Failed to create completion queue\n"); 374 return -ENOMEM; 375 } 376 } else { 377 QETH_CARD_TEXT(card, 2, "nocq"); 378 card->qdio.c_q = NULL; 379 } 380 return 0; 381 } 382 383 static void qeth_free_cq(struct qeth_card *card) 384 { 385 if (card->qdio.c_q) { 386 qeth_free_qdio_queue(card->qdio.c_q); 387 card->qdio.c_q = NULL; 388 } 389 } 390 391 static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, 392 int delayed) 393 { 394 enum iucv_tx_notify n; 395 396 switch (sbalf15) { 397 case 0: 398 n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK; 399 break; 400 case 4: 401 case 16: 402 case 17: 403 case 18: 404 n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE : 405 TX_NOTIFY_UNREACHABLE; 406 break; 407 default: 408 n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR : 409 TX_NOTIFY_GENERALERROR; 410 break; 411 } 412 413 return n; 414 } 415 416 static void qeth_put_cmd(struct qeth_cmd_buffer *iob) 417 { 418 if (refcount_dec_and_test(&iob->ref_count)) { 419 kfree(iob->data); 420 kfree(iob); 421 } 422 } 423 static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len, 424 void *data) 425 { 426 ccw->cmd_code = cmd_code; 427 ccw->flags = flags | CCW_FLAG_SLI; 428 ccw->count = len; 429 ccw->cda = (__u32) __pa(data); 430 } 431 432 static int __qeth_issue_next_read(struct qeth_card *card) 433 { 434 struct qeth_cmd_buffer *iob = card->read_cmd; 435 struct qeth_channel *channel = iob->channel; 436 struct ccw1 *ccw = __ccw_from_cmd(iob); 437 int rc; 438 439 QETH_CARD_TEXT(card, 5, "issnxrd"); 440 if (channel->state != CH_STATE_UP) 441 return -EIO; 442 443 memset(iob->data, 0, iob->length); 444 qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data); 445 iob->callback = qeth_issue_next_read_cb; 446 /* keep the cmd alive after completion: */ 447 qeth_get_cmd(iob); 448 449 QETH_CARD_TEXT(card, 6, "noirqpnd"); 450 rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0); 451 if (!rc) { 452 channel->active_cmd = iob; 453 } else { 454 QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n", 455 rc, CARD_DEVID(card)); 456 qeth_unlock_channel(card, channel); 457 qeth_put_cmd(iob); 458 card->read_or_write_problem = 1; 459 qeth_schedule_recovery(card); 460 } 461 return rc; 462 } 463 464 static int qeth_issue_next_read(struct qeth_card *card) 465 { 466 int ret; 467 468 spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card))); 469 ret = __qeth_issue_next_read(card); 470 spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card))); 471 472 return ret; 473 } 474 475 static void qeth_enqueue_cmd(struct qeth_card *card, 476 struct qeth_cmd_buffer *iob) 477 { 478 spin_lock_irq(&card->lock); 479 list_add_tail(&iob->list_entry, &card->cmd_waiter_list); 480 spin_unlock_irq(&card->lock); 481 } 482 483 static void qeth_dequeue_cmd(struct qeth_card *card, 484 struct qeth_cmd_buffer *iob) 485 { 486 spin_lock_irq(&card->lock); 487 list_del(&iob->list_entry); 488 spin_unlock_irq(&card->lock); 489 } 490 491 static void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason) 492 { 493 iob->rc = reason; 494 complete(&iob->done); 495 } 496 497 static void qeth_flush_local_addrs4(struct qeth_card *card) 498 { 499 struct qeth_local_addr *addr; 500 struct hlist_node *tmp; 501 unsigned int i; 502 503 spin_lock_irq(&card->local_addrs4_lock); 504 hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) { 505 hash_del_rcu(&addr->hnode); 506 kfree_rcu(addr, rcu); 507 } 508 spin_unlock_irq(&card->local_addrs4_lock); 509 } 510 511 static void qeth_flush_local_addrs6(struct qeth_card *card) 512 { 513 struct qeth_local_addr *addr; 514 struct hlist_node *tmp; 515 unsigned int i; 516 517 spin_lock_irq(&card->local_addrs6_lock); 518 hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) { 519 hash_del_rcu(&addr->hnode); 520 kfree_rcu(addr, rcu); 521 } 522 spin_unlock_irq(&card->local_addrs6_lock); 523 } 524 525 static void qeth_flush_local_addrs(struct qeth_card *card) 526 { 527 qeth_flush_local_addrs4(card); 528 qeth_flush_local_addrs6(card); 529 } 530 531 static void qeth_add_local_addrs4(struct qeth_card *card, 532 struct qeth_ipacmd_local_addrs4 *cmd) 533 { 534 unsigned int i; 535 536 if (cmd->addr_length != 537 sizeof_field(struct qeth_ipacmd_local_addr4, addr)) { 538 dev_err_ratelimited(&card->gdev->dev, 539 "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n", 540 cmd->addr_length); 541 return; 542 } 543 544 spin_lock(&card->local_addrs4_lock); 545 for (i = 0; i < cmd->count; i++) { 546 unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr); 547 struct qeth_local_addr *addr; 548 bool duplicate = false; 549 550 hash_for_each_possible(card->local_addrs4, addr, hnode, key) { 551 if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) { 552 duplicate = true; 553 break; 554 } 555 } 556 557 if (duplicate) 558 continue; 559 560 addr = kmalloc(sizeof(*addr), GFP_ATOMIC); 561 if (!addr) { 562 dev_err(&card->gdev->dev, 563 "Failed to allocate local addr object. Traffic to %pI4 might suffer.\n", 564 &cmd->addrs[i].addr); 565 continue; 566 } 567 568 ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr); 569 hash_add_rcu(card->local_addrs4, &addr->hnode, key); 570 } 571 spin_unlock(&card->local_addrs4_lock); 572 } 573 574 static void qeth_add_local_addrs6(struct qeth_card *card, 575 struct qeth_ipacmd_local_addrs6 *cmd) 576 { 577 unsigned int i; 578 579 if (cmd->addr_length != 580 sizeof_field(struct qeth_ipacmd_local_addr6, addr)) { 581 dev_err_ratelimited(&card->gdev->dev, 582 "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n", 583 cmd->addr_length); 584 return; 585 } 586 587 spin_lock(&card->local_addrs6_lock); 588 for (i = 0; i < cmd->count; i++) { 589 u32 key = ipv6_addr_hash(&cmd->addrs[i].addr); 590 struct qeth_local_addr *addr; 591 bool duplicate = false; 592 593 hash_for_each_possible(card->local_addrs6, addr, hnode, key) { 594 if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) { 595 duplicate = true; 596 break; 597 } 598 } 599 600 if (duplicate) 601 continue; 602 603 addr = kmalloc(sizeof(*addr), GFP_ATOMIC); 604 if (!addr) { 605 dev_err(&card->gdev->dev, 606 "Failed to allocate local addr object. Traffic to %pI6c might suffer.\n", 607 &cmd->addrs[i].addr); 608 continue; 609 } 610 611 addr->addr = cmd->addrs[i].addr; 612 hash_add_rcu(card->local_addrs6, &addr->hnode, key); 613 } 614 spin_unlock(&card->local_addrs6_lock); 615 } 616 617 static void qeth_del_local_addrs4(struct qeth_card *card, 618 struct qeth_ipacmd_local_addrs4 *cmd) 619 { 620 unsigned int i; 621 622 if (cmd->addr_length != 623 sizeof_field(struct qeth_ipacmd_local_addr4, addr)) { 624 dev_err_ratelimited(&card->gdev->dev, 625 "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n", 626 cmd->addr_length); 627 return; 628 } 629 630 spin_lock(&card->local_addrs4_lock); 631 for (i = 0; i < cmd->count; i++) { 632 struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i]; 633 unsigned int key = ipv4_addr_hash(addr->addr); 634 struct qeth_local_addr *tmp; 635 636 hash_for_each_possible(card->local_addrs4, tmp, hnode, key) { 637 if (tmp->addr.s6_addr32[3] == addr->addr) { 638 hash_del_rcu(&tmp->hnode); 639 kfree_rcu(tmp, rcu); 640 break; 641 } 642 } 643 } 644 spin_unlock(&card->local_addrs4_lock); 645 } 646 647 static void qeth_del_local_addrs6(struct qeth_card *card, 648 struct qeth_ipacmd_local_addrs6 *cmd) 649 { 650 unsigned int i; 651 652 if (cmd->addr_length != 653 sizeof_field(struct qeth_ipacmd_local_addr6, addr)) { 654 dev_err_ratelimited(&card->gdev->dev, 655 "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n", 656 cmd->addr_length); 657 return; 658 } 659 660 spin_lock(&card->local_addrs6_lock); 661 for (i = 0; i < cmd->count; i++) { 662 struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i]; 663 u32 key = ipv6_addr_hash(&addr->addr); 664 struct qeth_local_addr *tmp; 665 666 hash_for_each_possible(card->local_addrs6, tmp, hnode, key) { 667 if (ipv6_addr_equal(&tmp->addr, &addr->addr)) { 668 hash_del_rcu(&tmp->hnode); 669 kfree_rcu(tmp, rcu); 670 break; 671 } 672 } 673 } 674 spin_unlock(&card->local_addrs6_lock); 675 } 676 677 static bool qeth_next_hop_is_local_v4(struct qeth_card *card, 678 struct sk_buff *skb) 679 { 680 struct qeth_local_addr *tmp; 681 bool is_local = false; 682 unsigned int key; 683 __be32 next_hop; 684 685 if (hash_empty(card->local_addrs4)) 686 return false; 687 688 rcu_read_lock(); 689 next_hop = qeth_next_hop_v4_rcu(skb, 690 qeth_dst_check_rcu(skb, htons(ETH_P_IP))); 691 key = ipv4_addr_hash(next_hop); 692 693 hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) { 694 if (tmp->addr.s6_addr32[3] == next_hop) { 695 is_local = true; 696 break; 697 } 698 } 699 rcu_read_unlock(); 700 701 return is_local; 702 } 703 704 static bool qeth_next_hop_is_local_v6(struct qeth_card *card, 705 struct sk_buff *skb) 706 { 707 struct qeth_local_addr *tmp; 708 struct in6_addr *next_hop; 709 bool is_local = false; 710 u32 key; 711 712 if (hash_empty(card->local_addrs6)) 713 return false; 714 715 rcu_read_lock(); 716 next_hop = qeth_next_hop_v6_rcu(skb, 717 qeth_dst_check_rcu(skb, htons(ETH_P_IPV6))); 718 key = ipv6_addr_hash(next_hop); 719 720 hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) { 721 if (ipv6_addr_equal(&tmp->addr, next_hop)) { 722 is_local = true; 723 break; 724 } 725 } 726 rcu_read_unlock(); 727 728 return is_local; 729 } 730 731 static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v) 732 { 733 struct qeth_card *card = m->private; 734 struct qeth_local_addr *tmp; 735 unsigned int i; 736 737 rcu_read_lock(); 738 hash_for_each_rcu(card->local_addrs4, i, tmp, hnode) 739 seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]); 740 hash_for_each_rcu(card->local_addrs6, i, tmp, hnode) 741 seq_printf(m, "%pI6c\n", &tmp->addr); 742 rcu_read_unlock(); 743 744 return 0; 745 } 746 747 DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr); 748 749 static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, 750 struct qeth_card *card) 751 { 752 const char *ipa_name; 753 int com = cmd->hdr.command; 754 755 ipa_name = qeth_get_ipa_cmd_name(com); 756 757 if (rc) 758 QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n", 759 ipa_name, com, CARD_DEVID(card), rc, 760 qeth_get_ipa_msg(rc)); 761 else 762 QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n", 763 ipa_name, com, CARD_DEVID(card)); 764 } 765 766 static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, 767 struct qeth_ipa_cmd *cmd) 768 { 769 QETH_CARD_TEXT(card, 5, "chkipad"); 770 771 if (IS_IPA_REPLY(cmd)) { 772 if (cmd->hdr.command != IPA_CMD_SET_DIAG_ASS) 773 qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card); 774 return cmd; 775 } 776 777 /* handle unsolicited event: */ 778 switch (cmd->hdr.command) { 779 case IPA_CMD_STOPLAN: 780 if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) { 781 dev_err(&card->gdev->dev, 782 "Adjacent port of interface %s is no longer in reflective relay mode, trigger recovery\n", 783 netdev_name(card->dev)); 784 /* Set offline, then probably fail to set online: */ 785 qeth_schedule_recovery(card); 786 } else { 787 /* stay online for subsequent STARTLAN */ 788 dev_warn(&card->gdev->dev, 789 "The link for interface %s on CHPID 0x%X failed\n", 790 netdev_name(card->dev), card->info.chpid); 791 qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card); 792 netif_carrier_off(card->dev); 793 } 794 return NULL; 795 case IPA_CMD_STARTLAN: 796 dev_info(&card->gdev->dev, 797 "The link for %s on CHPID 0x%X has been restored\n", 798 netdev_name(card->dev), card->info.chpid); 799 if (card->info.hwtrap) 800 card->info.hwtrap = 2; 801 qeth_schedule_recovery(card); 802 return NULL; 803 case IPA_CMD_SETBRIDGEPORT_IQD: 804 case IPA_CMD_SETBRIDGEPORT_OSA: 805 case IPA_CMD_ADDRESS_CHANGE_NOTIF: 806 if (card->discipline->control_event_handler(card, cmd)) 807 return cmd; 808 return NULL; 809 case IPA_CMD_REGISTER_LOCAL_ADDR: 810 if (cmd->hdr.prot_version == QETH_PROT_IPV4) 811 qeth_add_local_addrs4(card, &cmd->data.local_addrs4); 812 else if (cmd->hdr.prot_version == QETH_PROT_IPV6) 813 qeth_add_local_addrs6(card, &cmd->data.local_addrs6); 814 815 QETH_CARD_TEXT(card, 3, "irla"); 816 return NULL; 817 case IPA_CMD_UNREGISTER_LOCAL_ADDR: 818 if (cmd->hdr.prot_version == QETH_PROT_IPV4) 819 qeth_del_local_addrs4(card, &cmd->data.local_addrs4); 820 else if (cmd->hdr.prot_version == QETH_PROT_IPV6) 821 qeth_del_local_addrs6(card, &cmd->data.local_addrs6); 822 823 QETH_CARD_TEXT(card, 3, "urla"); 824 return NULL; 825 default: 826 QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n"); 827 return cmd; 828 } 829 } 830 831 static void qeth_clear_ipacmd_list(struct qeth_card *card) 832 { 833 struct qeth_cmd_buffer *iob; 834 unsigned long flags; 835 836 QETH_CARD_TEXT(card, 4, "clipalst"); 837 838 spin_lock_irqsave(&card->lock, flags); 839 list_for_each_entry(iob, &card->cmd_waiter_list, list_entry) 840 qeth_notify_cmd(iob, -ECANCELED); 841 spin_unlock_irqrestore(&card->lock, flags); 842 } 843 844 static int qeth_check_idx_response(struct qeth_card *card, 845 unsigned char *buffer) 846 { 847 QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN); 848 if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) { 849 QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n", 850 buffer[4]); 851 QETH_CARD_TEXT(card, 2, "ckidxres"); 852 QETH_CARD_TEXT(card, 2, " idxterm"); 853 QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]); 854 if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT || 855 buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) { 856 dev_err(&card->gdev->dev, 857 "The device does not support the configured transport mode\n"); 858 return -EPROTONOSUPPORT; 859 } 860 return -EIO; 861 } 862 return 0; 863 } 864 865 static void qeth_release_buffer_cb(struct qeth_card *card, 866 struct qeth_cmd_buffer *iob, 867 unsigned int data_length) 868 { 869 qeth_put_cmd(iob); 870 } 871 872 static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc) 873 { 874 qeth_notify_cmd(iob, rc); 875 qeth_put_cmd(iob); 876 } 877 878 static struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel, 879 unsigned int length, 880 unsigned int ccws, long timeout) 881 { 882 struct qeth_cmd_buffer *iob; 883 884 if (length > QETH_BUFSIZE) 885 return NULL; 886 887 iob = kzalloc(sizeof(*iob), GFP_KERNEL); 888 if (!iob) 889 return NULL; 890 891 iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1), 892 GFP_KERNEL | GFP_DMA); 893 if (!iob->data) { 894 kfree(iob); 895 return NULL; 896 } 897 898 init_completion(&iob->done); 899 spin_lock_init(&iob->lock); 900 refcount_set(&iob->ref_count, 1); 901 iob->channel = channel; 902 iob->timeout = timeout; 903 iob->length = length; 904 return iob; 905 } 906 907 static void qeth_issue_next_read_cb(struct qeth_card *card, 908 struct qeth_cmd_buffer *iob, 909 unsigned int data_length) 910 { 911 struct qeth_cmd_buffer *request = NULL; 912 struct qeth_ipa_cmd *cmd = NULL; 913 struct qeth_reply *reply = NULL; 914 struct qeth_cmd_buffer *tmp; 915 unsigned long flags; 916 int rc = 0; 917 918 QETH_CARD_TEXT(card, 4, "sndctlcb"); 919 rc = qeth_check_idx_response(card, iob->data); 920 switch (rc) { 921 case 0: 922 break; 923 case -EIO: 924 qeth_schedule_recovery(card); 925 fallthrough; 926 default: 927 qeth_clear_ipacmd_list(card); 928 goto err_idx; 929 } 930 931 cmd = __ipa_reply(iob); 932 if (cmd) { 933 cmd = qeth_check_ipa_data(card, cmd); 934 if (!cmd) 935 goto out; 936 } 937 938 /* match against pending cmd requests */ 939 spin_lock_irqsave(&card->lock, flags); 940 list_for_each_entry(tmp, &card->cmd_waiter_list, list_entry) { 941 if (tmp->match && tmp->match(tmp, iob)) { 942 request = tmp; 943 /* take the object outside the lock */ 944 qeth_get_cmd(request); 945 break; 946 } 947 } 948 spin_unlock_irqrestore(&card->lock, flags); 949 950 if (!request) 951 goto out; 952 953 reply = &request->reply; 954 if (!reply->callback) { 955 rc = 0; 956 goto no_callback; 957 } 958 959 spin_lock_irqsave(&request->lock, flags); 960 if (request->rc) 961 /* Bail out when the requestor has already left: */ 962 rc = request->rc; 963 else 964 rc = reply->callback(card, reply, cmd ? (unsigned long)cmd : 965 (unsigned long)iob); 966 spin_unlock_irqrestore(&request->lock, flags); 967 968 no_callback: 969 if (rc <= 0) 970 qeth_notify_cmd(request, rc); 971 qeth_put_cmd(request); 972 out: 973 memcpy(&card->seqno.pdu_hdr_ack, 974 QETH_PDU_HEADER_SEQ_NO(iob->data), 975 QETH_SEQ_NO_LENGTH); 976 __qeth_issue_next_read(card); 977 err_idx: 978 qeth_put_cmd(iob); 979 } 980 981 static int qeth_set_thread_start_bit(struct qeth_card *card, 982 unsigned long thread) 983 { 984 unsigned long flags; 985 int rc = 0; 986 987 spin_lock_irqsave(&card->thread_mask_lock, flags); 988 if (!(card->thread_allowed_mask & thread)) 989 rc = -EPERM; 990 else if (card->thread_start_mask & thread) 991 rc = -EBUSY; 992 else 993 card->thread_start_mask |= thread; 994 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 995 996 return rc; 997 } 998 999 static void qeth_clear_thread_start_bit(struct qeth_card *card, 1000 unsigned long thread) 1001 { 1002 unsigned long flags; 1003 1004 spin_lock_irqsave(&card->thread_mask_lock, flags); 1005 card->thread_start_mask &= ~thread; 1006 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 1007 wake_up(&card->wait_q); 1008 } 1009 1010 static void qeth_clear_thread_running_bit(struct qeth_card *card, 1011 unsigned long thread) 1012 { 1013 unsigned long flags; 1014 1015 spin_lock_irqsave(&card->thread_mask_lock, flags); 1016 card->thread_running_mask &= ~thread; 1017 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 1018 wake_up_all(&card->wait_q); 1019 } 1020 1021 static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread) 1022 { 1023 unsigned long flags; 1024 int rc = 0; 1025 1026 spin_lock_irqsave(&card->thread_mask_lock, flags); 1027 if (card->thread_start_mask & thread) { 1028 if ((card->thread_allowed_mask & thread) && 1029 !(card->thread_running_mask & thread)) { 1030 rc = 1; 1031 card->thread_start_mask &= ~thread; 1032 card->thread_running_mask |= thread; 1033 } else 1034 rc = -EPERM; 1035 } 1036 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 1037 return rc; 1038 } 1039 1040 static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread) 1041 { 1042 int rc = 0; 1043 1044 wait_event(card->wait_q, 1045 (rc = __qeth_do_run_thread(card, thread)) >= 0); 1046 return rc; 1047 } 1048 1049 int qeth_schedule_recovery(struct qeth_card *card) 1050 { 1051 int rc; 1052 1053 QETH_CARD_TEXT(card, 2, "startrec"); 1054 1055 rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD); 1056 if (!rc) 1057 schedule_work(&card->kernel_thread_starter); 1058 1059 return rc; 1060 } 1061 1062 static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev, 1063 struct irb *irb) 1064 { 1065 int dstat, cstat; 1066 char *sense; 1067 1068 sense = (char *) irb->ecw; 1069 cstat = irb->scsw.cmd.cstat; 1070 dstat = irb->scsw.cmd.dstat; 1071 1072 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | 1073 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | 1074 SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { 1075 QETH_CARD_TEXT(card, 2, "CGENCHK"); 1076 dev_warn(&cdev->dev, "The qeth device driver " 1077 "failed to recover an error on the device\n"); 1078 QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n", 1079 CCW_DEVID(cdev), dstat, cstat); 1080 print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, 1081 16, 1, irb, 64, 1); 1082 return -EIO; 1083 } 1084 1085 if (dstat & DEV_STAT_UNIT_CHECK) { 1086 if (sense[SENSE_RESETTING_EVENT_BYTE] & 1087 SENSE_RESETTING_EVENT_FLAG) { 1088 QETH_CARD_TEXT(card, 2, "REVIND"); 1089 return -EIO; 1090 } 1091 if (sense[SENSE_COMMAND_REJECT_BYTE] & 1092 SENSE_COMMAND_REJECT_FLAG) { 1093 QETH_CARD_TEXT(card, 2, "CMDREJi"); 1094 return -EIO; 1095 } 1096 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) { 1097 QETH_CARD_TEXT(card, 2, "AFFE"); 1098 return -EIO; 1099 } 1100 if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) { 1101 QETH_CARD_TEXT(card, 2, "ZEROSEN"); 1102 return 0; 1103 } 1104 QETH_CARD_TEXT(card, 2, "DGENCHK"); 1105 return -EIO; 1106 } 1107 return 0; 1108 } 1109 1110 static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev, 1111 struct irb *irb) 1112 { 1113 if (!IS_ERR(irb)) 1114 return 0; 1115 1116 switch (PTR_ERR(irb)) { 1117 case -EIO: 1118 QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n", 1119 CCW_DEVID(cdev)); 1120 QETH_CARD_TEXT(card, 2, "ckirberr"); 1121 QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); 1122 return -EIO; 1123 case -ETIMEDOUT: 1124 dev_warn(&cdev->dev, "A hardware operation timed out" 1125 " on the device\n"); 1126 QETH_CARD_TEXT(card, 2, "ckirberr"); 1127 QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT); 1128 return -ETIMEDOUT; 1129 default: 1130 QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n", 1131 PTR_ERR(irb), CCW_DEVID(cdev)); 1132 QETH_CARD_TEXT(card, 2, "ckirberr"); 1133 QETH_CARD_TEXT(card, 2, " rc???"); 1134 return PTR_ERR(irb); 1135 } 1136 } 1137 1138 static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, 1139 struct irb *irb) 1140 { 1141 int rc; 1142 int cstat, dstat; 1143 struct qeth_cmd_buffer *iob = NULL; 1144 struct ccwgroup_device *gdev; 1145 struct qeth_channel *channel; 1146 struct qeth_card *card; 1147 1148 /* while we hold the ccwdev lock, this stays valid: */ 1149 gdev = dev_get_drvdata(&cdev->dev); 1150 card = dev_get_drvdata(&gdev->dev); 1151 1152 QETH_CARD_TEXT(card, 5, "irq"); 1153 1154 if (card->read.ccwdev == cdev) { 1155 channel = &card->read; 1156 QETH_CARD_TEXT(card, 5, "read"); 1157 } else if (card->write.ccwdev == cdev) { 1158 channel = &card->write; 1159 QETH_CARD_TEXT(card, 5, "write"); 1160 } else { 1161 channel = &card->data; 1162 QETH_CARD_TEXT(card, 5, "data"); 1163 } 1164 1165 if (intparm == 0) { 1166 QETH_CARD_TEXT(card, 5, "irqunsol"); 1167 } else if ((addr_t)intparm != (addr_t)channel->active_cmd) { 1168 QETH_CARD_TEXT(card, 5, "irqunexp"); 1169 1170 dev_err(&cdev->dev, 1171 "Received IRQ with intparm %lx, expected %px\n", 1172 intparm, channel->active_cmd); 1173 if (channel->active_cmd) 1174 qeth_cancel_cmd(channel->active_cmd, -EIO); 1175 } else { 1176 iob = (struct qeth_cmd_buffer *) (addr_t)intparm; 1177 } 1178 1179 qeth_unlock_channel(card, channel); 1180 1181 rc = qeth_check_irb_error(card, cdev, irb); 1182 if (rc) { 1183 /* IO was terminated, free its resources. */ 1184 if (iob) 1185 qeth_cancel_cmd(iob, rc); 1186 return; 1187 } 1188 1189 if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) { 1190 channel->state = CH_STATE_STOPPED; 1191 wake_up(&card->wait_q); 1192 } 1193 1194 if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) { 1195 channel->state = CH_STATE_HALTED; 1196 wake_up(&card->wait_q); 1197 } 1198 1199 if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC | 1200 SCSW_FCTL_HALT_FUNC))) { 1201 qeth_cancel_cmd(iob, -ECANCELED); 1202 iob = NULL; 1203 } 1204 1205 cstat = irb->scsw.cmd.cstat; 1206 dstat = irb->scsw.cmd.dstat; 1207 1208 if ((dstat & DEV_STAT_UNIT_EXCEP) || 1209 (dstat & DEV_STAT_UNIT_CHECK) || 1210 (cstat)) { 1211 if (irb->esw.esw0.erw.cons) { 1212 dev_warn(&channel->ccwdev->dev, 1213 "The qeth device driver failed to recover " 1214 "an error on the device\n"); 1215 QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n", 1216 CCW_DEVID(channel->ccwdev), cstat, 1217 dstat); 1218 print_hex_dump(KERN_WARNING, "qeth: irb ", 1219 DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1); 1220 print_hex_dump(KERN_WARNING, "qeth: sense data ", 1221 DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1); 1222 } 1223 1224 rc = qeth_get_problem(card, cdev, irb); 1225 if (rc) { 1226 card->read_or_write_problem = 1; 1227 if (iob) 1228 qeth_cancel_cmd(iob, rc); 1229 qeth_clear_ipacmd_list(card); 1230 qeth_schedule_recovery(card); 1231 return; 1232 } 1233 } 1234 1235 if (iob) { 1236 /* sanity check: */ 1237 if (irb->scsw.cmd.count > iob->length) { 1238 qeth_cancel_cmd(iob, -EIO); 1239 return; 1240 } 1241 if (iob->callback) 1242 iob->callback(card, iob, 1243 iob->length - irb->scsw.cmd.count); 1244 } 1245 } 1246 1247 static void qeth_notify_skbs(struct qeth_qdio_out_q *q, 1248 struct qeth_qdio_out_buffer *buf, 1249 enum iucv_tx_notify notification) 1250 { 1251 struct sk_buff *skb; 1252 1253 skb_queue_walk(&buf->skb_list, skb) { 1254 struct sock *sk = skb->sk; 1255 1256 QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification); 1257 QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb); 1258 if (sk && sk->sk_family == PF_IUCV) 1259 iucv_sk(sk)->sk_txnotify(sk, notification); 1260 } 1261 } 1262 1263 static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue, 1264 struct qeth_qdio_out_buffer *buf, bool error, 1265 int budget) 1266 { 1267 struct sk_buff *skb; 1268 1269 /* Empty buffer? */ 1270 if (buf->next_element_to_fill == 0) 1271 return; 1272 1273 QETH_TXQ_STAT_INC(queue, bufs); 1274 QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill); 1275 if (error) { 1276 QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames); 1277 } else { 1278 QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames); 1279 QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes); 1280 } 1281 1282 while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) { 1283 unsigned int bytes = qdisc_pkt_len(skb); 1284 bool is_tso = skb_is_gso(skb); 1285 unsigned int packets; 1286 1287 packets = is_tso ? skb_shinfo(skb)->gso_segs : 1; 1288 if (!error) { 1289 if (skb->ip_summed == CHECKSUM_PARTIAL) 1290 QETH_TXQ_STAT_ADD(queue, skbs_csum, packets); 1291 if (skb_is_nonlinear(skb)) 1292 QETH_TXQ_STAT_INC(queue, skbs_sg); 1293 if (is_tso) { 1294 QETH_TXQ_STAT_INC(queue, skbs_tso); 1295 QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes); 1296 } 1297 } 1298 1299 napi_consume_skb(skb, budget); 1300 } 1301 } 1302 1303 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, 1304 struct qeth_qdio_out_buffer *buf, 1305 bool error, int budget) 1306 { 1307 int i; 1308 1309 /* is PCI flag set on buffer? */ 1310 if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) { 1311 atomic_dec(&queue->set_pci_flags_count); 1312 QETH_TXQ_STAT_INC(queue, completion_irq); 1313 } 1314 1315 qeth_tx_complete_buf(queue, buf, error, budget); 1316 1317 for (i = 0; i < queue->max_elements; ++i) { 1318 void *data = phys_to_virt(buf->buffer->element[i].addr); 1319 1320 if (__test_and_clear_bit(i, buf->from_kmem_cache) && data) 1321 kmem_cache_free(qeth_core_header_cache, data); 1322 } 1323 1324 qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements); 1325 buf->next_element_to_fill = 0; 1326 buf->frames = 0; 1327 buf->bytes = 0; 1328 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); 1329 } 1330 1331 static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf) 1332 { 1333 if (buf->aob) 1334 kmem_cache_free(qeth_qaob_cache, buf->aob); 1335 kmem_cache_free(qeth_qdio_outbuf_cache, buf); 1336 } 1337 1338 static void qeth_tx_complete_pending_bufs(struct qeth_card *card, 1339 struct qeth_qdio_out_q *queue, 1340 bool drain, int budget) 1341 { 1342 struct qeth_qdio_out_buffer *buf, *tmp; 1343 1344 list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) { 1345 struct qeth_qaob_priv1 *priv; 1346 struct qaob *aob = buf->aob; 1347 enum iucv_tx_notify notify; 1348 unsigned int i; 1349 1350 priv = (struct qeth_qaob_priv1 *)&aob->user1; 1351 if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) { 1352 QETH_CARD_TEXT(card, 5, "fp"); 1353 QETH_CARD_TEXT_(card, 5, "%lx", (long) buf); 1354 1355 notify = drain ? TX_NOTIFY_GENERALERROR : 1356 qeth_compute_cq_notification(aob->aorc, 1); 1357 qeth_notify_skbs(queue, buf, notify); 1358 qeth_tx_complete_buf(queue, buf, drain, budget); 1359 1360 for (i = 0; 1361 i < aob->sb_count && i < queue->max_elements; 1362 i++) { 1363 void *data = phys_to_virt(aob->sba[i]); 1364 1365 if (test_bit(i, buf->from_kmem_cache) && data) 1366 kmem_cache_free(qeth_core_header_cache, 1367 data); 1368 } 1369 1370 list_del(&buf->list_entry); 1371 qeth_free_out_buf(buf); 1372 } 1373 } 1374 } 1375 1376 static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free) 1377 { 1378 int j; 1379 1380 qeth_tx_complete_pending_bufs(q->card, q, true, 0); 1381 1382 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { 1383 if (!q->bufs[j]) 1384 continue; 1385 1386 qeth_clear_output_buffer(q, q->bufs[j], true, 0); 1387 if (free) { 1388 qeth_free_out_buf(q->bufs[j]); 1389 q->bufs[j] = NULL; 1390 } 1391 } 1392 } 1393 1394 static void qeth_drain_output_queues(struct qeth_card *card) 1395 { 1396 int i; 1397 1398 QETH_CARD_TEXT(card, 2, "clearqdbf"); 1399 /* clear outbound buffers to free skbs */ 1400 for (i = 0; i < card->qdio.no_out_queues; ++i) { 1401 if (card->qdio.out_qs[i]) 1402 qeth_drain_output_queue(card->qdio.out_qs[i], false); 1403 } 1404 } 1405 1406 static void qeth_osa_set_output_queues(struct qeth_card *card, bool single) 1407 { 1408 unsigned int max = single ? 1 : card->dev->num_tx_queues; 1409 1410 if (card->qdio.no_out_queues == max) 1411 return; 1412 1413 if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) 1414 qeth_free_qdio_queues(card); 1415 1416 if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT) 1417 dev_info(&card->gdev->dev, "Priority Queueing not supported\n"); 1418 1419 card->qdio.no_out_queues = max; 1420 } 1421 1422 static int qeth_update_from_chp_desc(struct qeth_card *card) 1423 { 1424 struct ccw_device *ccwdev; 1425 struct channel_path_desc_fmt0 *chp_dsc; 1426 1427 QETH_CARD_TEXT(card, 2, "chp_desc"); 1428 1429 ccwdev = card->data.ccwdev; 1430 chp_dsc = ccw_device_get_chp_desc(ccwdev, 0); 1431 if (!chp_dsc) 1432 return -ENOMEM; 1433 1434 card->info.func_level = 0x4100 + chp_dsc->desc; 1435 1436 if (IS_OSD(card) || IS_OSX(card)) 1437 /* CHPP field bit 6 == 1 -> single queue */ 1438 qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02); 1439 1440 kfree(chp_dsc); 1441 QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues); 1442 QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level); 1443 return 0; 1444 } 1445 1446 static void qeth_init_qdio_info(struct qeth_card *card) 1447 { 1448 QETH_CARD_TEXT(card, 4, "intqdinf"); 1449 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); 1450 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; 1451 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; 1452 1453 /* inbound */ 1454 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; 1455 if (IS_IQD(card)) 1456 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT; 1457 else 1458 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT; 1459 card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count; 1460 INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list); 1461 INIT_LIST_HEAD(&card->qdio.init_pool.entry_list); 1462 } 1463 1464 static void qeth_set_initial_options(struct qeth_card *card) 1465 { 1466 card->options.route4.type = NO_ROUTER; 1467 card->options.route6.type = NO_ROUTER; 1468 card->options.isolation = ISOLATION_MODE_NONE; 1469 card->options.cq = QETH_CQ_DISABLED; 1470 card->options.layer = QETH_DISCIPLINE_UNDETERMINED; 1471 } 1472 1473 static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) 1474 { 1475 unsigned long flags; 1476 int rc = 0; 1477 1478 spin_lock_irqsave(&card->thread_mask_lock, flags); 1479 QETH_CARD_TEXT_(card, 4, " %02x%02x%02x", 1480 (u8) card->thread_start_mask, 1481 (u8) card->thread_allowed_mask, 1482 (u8) card->thread_running_mask); 1483 rc = (card->thread_start_mask & thread); 1484 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 1485 return rc; 1486 } 1487 1488 static int qeth_do_reset(void *data); 1489 static void qeth_start_kernel_thread(struct work_struct *work) 1490 { 1491 struct task_struct *ts; 1492 struct qeth_card *card = container_of(work, struct qeth_card, 1493 kernel_thread_starter); 1494 QETH_CARD_TEXT(card, 2, "strthrd"); 1495 1496 if (card->read.state != CH_STATE_UP && 1497 card->write.state != CH_STATE_UP) 1498 return; 1499 if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) { 1500 ts = kthread_run(qeth_do_reset, card, "qeth_recover"); 1501 if (IS_ERR(ts)) { 1502 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); 1503 qeth_clear_thread_running_bit(card, 1504 QETH_RECOVER_THREAD); 1505 } 1506 } 1507 } 1508 1509 static void qeth_buffer_reclaim_work(struct work_struct *); 1510 static void qeth_setup_card(struct qeth_card *card) 1511 { 1512 QETH_CARD_TEXT(card, 2, "setupcrd"); 1513 1514 card->info.type = CARD_RDEV(card)->id.driver_info; 1515 card->state = CARD_STATE_DOWN; 1516 spin_lock_init(&card->lock); 1517 spin_lock_init(&card->thread_mask_lock); 1518 mutex_init(&card->conf_mutex); 1519 mutex_init(&card->discipline_mutex); 1520 INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread); 1521 INIT_LIST_HEAD(&card->cmd_waiter_list); 1522 init_waitqueue_head(&card->wait_q); 1523 qeth_set_initial_options(card); 1524 /* IP address takeover */ 1525 INIT_LIST_HEAD(&card->ipato.entries); 1526 qeth_init_qdio_info(card); 1527 INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work); 1528 hash_init(card->rx_mode_addrs); 1529 hash_init(card->local_addrs4); 1530 hash_init(card->local_addrs6); 1531 spin_lock_init(&card->local_addrs4_lock); 1532 spin_lock_init(&card->local_addrs6_lock); 1533 } 1534 1535 static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr) 1536 { 1537 struct qeth_card *card = container_of(slr, struct qeth_card, 1538 qeth_service_level); 1539 if (card->info.mcl_level[0]) 1540 seq_printf(m, "qeth: %s firmware level %s\n", 1541 CARD_BUS_ID(card), card->info.mcl_level); 1542 } 1543 1544 static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev) 1545 { 1546 struct qeth_card *card; 1547 1548 QETH_DBF_TEXT(SETUP, 2, "alloccrd"); 1549 card = kzalloc(sizeof(*card), GFP_KERNEL); 1550 if (!card) 1551 goto out; 1552 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 1553 1554 card->gdev = gdev; 1555 dev_set_drvdata(&gdev->dev, card); 1556 CARD_RDEV(card) = gdev->cdev[0]; 1557 CARD_WDEV(card) = gdev->cdev[1]; 1558 CARD_DDEV(card) = gdev->cdev[2]; 1559 1560 card->event_wq = alloc_ordered_workqueue("%s_event", 0, 1561 dev_name(&gdev->dev)); 1562 if (!card->event_wq) 1563 goto out_wq; 1564 1565 card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0); 1566 if (!card->read_cmd) 1567 goto out_read_cmd; 1568 1569 card->debugfs = debugfs_create_dir(dev_name(&gdev->dev), 1570 qeth_debugfs_root); 1571 debugfs_create_file("local_addrs", 0400, card->debugfs, card, 1572 &qeth_debugfs_local_addr_fops); 1573 1574 card->qeth_service_level.seq_print = qeth_core_sl_print; 1575 register_service_level(&card->qeth_service_level); 1576 return card; 1577 1578 out_read_cmd: 1579 destroy_workqueue(card->event_wq); 1580 out_wq: 1581 dev_set_drvdata(&gdev->dev, NULL); 1582 kfree(card); 1583 out: 1584 return NULL; 1585 } 1586 1587 static int qeth_clear_channel(struct qeth_card *card, 1588 struct qeth_channel *channel) 1589 { 1590 int rc; 1591 1592 QETH_CARD_TEXT(card, 3, "clearch"); 1593 spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); 1594 rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd); 1595 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); 1596 1597 if (rc) 1598 return rc; 1599 rc = wait_event_interruptible_timeout(card->wait_q, 1600 channel->state == CH_STATE_STOPPED, QETH_TIMEOUT); 1601 if (rc == -ERESTARTSYS) 1602 return rc; 1603 if (channel->state != CH_STATE_STOPPED) 1604 return -ETIME; 1605 channel->state = CH_STATE_DOWN; 1606 return 0; 1607 } 1608 1609 static int qeth_halt_channel(struct qeth_card *card, 1610 struct qeth_channel *channel) 1611 { 1612 int rc; 1613 1614 QETH_CARD_TEXT(card, 3, "haltch"); 1615 spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); 1616 rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd); 1617 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); 1618 1619 if (rc) 1620 return rc; 1621 rc = wait_event_interruptible_timeout(card->wait_q, 1622 channel->state == CH_STATE_HALTED, QETH_TIMEOUT); 1623 if (rc == -ERESTARTSYS) 1624 return rc; 1625 if (channel->state != CH_STATE_HALTED) 1626 return -ETIME; 1627 return 0; 1628 } 1629 1630 static int qeth_stop_channel(struct qeth_channel *channel) 1631 { 1632 struct ccw_device *cdev = channel->ccwdev; 1633 int rc; 1634 1635 rc = ccw_device_set_offline(cdev); 1636 1637 spin_lock_irq(get_ccwdev_lock(cdev)); 1638 if (channel->active_cmd) 1639 dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n", 1640 channel->active_cmd); 1641 1642 cdev->handler = NULL; 1643 spin_unlock_irq(get_ccwdev_lock(cdev)); 1644 1645 return rc; 1646 } 1647 1648 static int qeth_start_channel(struct qeth_channel *channel) 1649 { 1650 struct ccw_device *cdev = channel->ccwdev; 1651 int rc; 1652 1653 channel->state = CH_STATE_DOWN; 1654 xchg(&channel->active_cmd, NULL); 1655 1656 spin_lock_irq(get_ccwdev_lock(cdev)); 1657 cdev->handler = qeth_irq; 1658 spin_unlock_irq(get_ccwdev_lock(cdev)); 1659 1660 rc = ccw_device_set_online(cdev); 1661 if (rc) 1662 goto err; 1663 1664 return 0; 1665 1666 err: 1667 spin_lock_irq(get_ccwdev_lock(cdev)); 1668 cdev->handler = NULL; 1669 spin_unlock_irq(get_ccwdev_lock(cdev)); 1670 return rc; 1671 } 1672 1673 static int qeth_halt_channels(struct qeth_card *card) 1674 { 1675 int rc1 = 0, rc2 = 0, rc3 = 0; 1676 1677 QETH_CARD_TEXT(card, 3, "haltchs"); 1678 rc1 = qeth_halt_channel(card, &card->read); 1679 rc2 = qeth_halt_channel(card, &card->write); 1680 rc3 = qeth_halt_channel(card, &card->data); 1681 if (rc1) 1682 return rc1; 1683 if (rc2) 1684 return rc2; 1685 return rc3; 1686 } 1687 1688 static int qeth_clear_channels(struct qeth_card *card) 1689 { 1690 int rc1 = 0, rc2 = 0, rc3 = 0; 1691 1692 QETH_CARD_TEXT(card, 3, "clearchs"); 1693 rc1 = qeth_clear_channel(card, &card->read); 1694 rc2 = qeth_clear_channel(card, &card->write); 1695 rc3 = qeth_clear_channel(card, &card->data); 1696 if (rc1) 1697 return rc1; 1698 if (rc2) 1699 return rc2; 1700 return rc3; 1701 } 1702 1703 static int qeth_clear_halt_card(struct qeth_card *card, int halt) 1704 { 1705 int rc = 0; 1706 1707 QETH_CARD_TEXT(card, 3, "clhacrd"); 1708 1709 if (halt) 1710 rc = qeth_halt_channels(card); 1711 if (rc) 1712 return rc; 1713 return qeth_clear_channels(card); 1714 } 1715 1716 static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) 1717 { 1718 int rc = 0; 1719 1720 QETH_CARD_TEXT(card, 3, "qdioclr"); 1721 switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED, 1722 QETH_QDIO_CLEANING)) { 1723 case QETH_QDIO_ESTABLISHED: 1724 if (IS_IQD(card)) 1725 rc = qdio_shutdown(CARD_DDEV(card), 1726 QDIO_FLAG_CLEANUP_USING_HALT); 1727 else 1728 rc = qdio_shutdown(CARD_DDEV(card), 1729 QDIO_FLAG_CLEANUP_USING_CLEAR); 1730 if (rc) 1731 QETH_CARD_TEXT_(card, 3, "1err%d", rc); 1732 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 1733 break; 1734 case QETH_QDIO_CLEANING: 1735 return rc; 1736 default: 1737 break; 1738 } 1739 rc = qeth_clear_halt_card(card, use_halt); 1740 if (rc) 1741 QETH_CARD_TEXT_(card, 3, "2err%d", rc); 1742 return rc; 1743 } 1744 1745 static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card) 1746 { 1747 enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED; 1748 struct diag26c_vnic_resp *response = NULL; 1749 struct diag26c_vnic_req *request = NULL; 1750 struct ccw_dev_id id; 1751 char userid[80]; 1752 int rc = 0; 1753 1754 QETH_CARD_TEXT(card, 2, "vmlayer"); 1755 1756 cpcmd("QUERY USERID", userid, sizeof(userid), &rc); 1757 if (rc) 1758 goto out; 1759 1760 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA); 1761 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA); 1762 if (!request || !response) { 1763 rc = -ENOMEM; 1764 goto out; 1765 } 1766 1767 ccw_device_get_id(CARD_RDEV(card), &id); 1768 request->resp_buf_len = sizeof(*response); 1769 request->resp_version = DIAG26C_VERSION6_VM65918; 1770 request->req_format = DIAG26C_VNIC_INFO; 1771 ASCEBC(userid, 8); 1772 memcpy(&request->sys_name, userid, 8); 1773 request->devno = id.devno; 1774 1775 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 1776 rc = diag26c(request, response, DIAG26C_PORT_VNIC); 1777 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 1778 if (rc) 1779 goto out; 1780 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response)); 1781 1782 if (request->resp_buf_len < sizeof(*response) || 1783 response->version != request->resp_version) { 1784 rc = -EIO; 1785 goto out; 1786 } 1787 1788 if (response->protocol == VNIC_INFO_PROT_L2) 1789 disc = QETH_DISCIPLINE_LAYER2; 1790 else if (response->protocol == VNIC_INFO_PROT_L3) 1791 disc = QETH_DISCIPLINE_LAYER3; 1792 1793 out: 1794 kfree(response); 1795 kfree(request); 1796 if (rc) 1797 QETH_CARD_TEXT_(card, 2, "err%x", rc); 1798 return disc; 1799 } 1800 1801 /* Determine whether the device requires a specific layer discipline */ 1802 static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card) 1803 { 1804 enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED; 1805 1806 if (IS_OSM(card)) 1807 disc = QETH_DISCIPLINE_LAYER2; 1808 else if (IS_VM_NIC(card)) 1809 disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 : 1810 qeth_vm_detect_layer(card); 1811 1812 switch (disc) { 1813 case QETH_DISCIPLINE_LAYER2: 1814 QETH_CARD_TEXT(card, 3, "force l2"); 1815 break; 1816 case QETH_DISCIPLINE_LAYER3: 1817 QETH_CARD_TEXT(card, 3, "force l3"); 1818 break; 1819 default: 1820 QETH_CARD_TEXT(card, 3, "force no"); 1821 } 1822 1823 return disc; 1824 } 1825 1826 static void qeth_set_blkt_defaults(struct qeth_card *card) 1827 { 1828 QETH_CARD_TEXT(card, 2, "cfgblkt"); 1829 1830 if (card->info.use_v1_blkt) { 1831 card->info.blkt.time_total = 0; 1832 card->info.blkt.inter_packet = 0; 1833 card->info.blkt.inter_packet_jumbo = 0; 1834 } else { 1835 card->info.blkt.time_total = 250; 1836 card->info.blkt.inter_packet = 5; 1837 card->info.blkt.inter_packet_jumbo = 15; 1838 } 1839 } 1840 1841 static void qeth_idx_init(struct qeth_card *card) 1842 { 1843 memset(&card->seqno, 0, sizeof(card->seqno)); 1844 1845 card->token.issuer_rm_w = 0x00010103UL; 1846 card->token.cm_filter_w = 0x00010108UL; 1847 card->token.cm_connection_w = 0x0001010aUL; 1848 card->token.ulp_filter_w = 0x0001010bUL; 1849 card->token.ulp_connection_w = 0x0001010dUL; 1850 1851 switch (card->info.type) { 1852 case QETH_CARD_TYPE_IQD: 1853 card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD; 1854 break; 1855 case QETH_CARD_TYPE_OSD: 1856 card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD; 1857 break; 1858 default: 1859 break; 1860 } 1861 } 1862 1863 static void qeth_idx_finalize_cmd(struct qeth_card *card, 1864 struct qeth_cmd_buffer *iob) 1865 { 1866 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr, 1867 QETH_SEQ_NO_LENGTH); 1868 if (iob->channel == &card->write) 1869 card->seqno.trans_hdr++; 1870 } 1871 1872 static int qeth_peer_func_level(int level) 1873 { 1874 if ((level & 0xff) == 8) 1875 return (level & 0xff) + 0x400; 1876 if (((level >> 8) & 3) == 1) 1877 return (level & 0xff) + 0x200; 1878 return level; 1879 } 1880 1881 static void qeth_mpc_finalize_cmd(struct qeth_card *card, 1882 struct qeth_cmd_buffer *iob) 1883 { 1884 qeth_idx_finalize_cmd(card, iob); 1885 1886 memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data), 1887 &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH); 1888 card->seqno.pdu_hdr++; 1889 memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data), 1890 &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH); 1891 1892 iob->callback = qeth_release_buffer_cb; 1893 } 1894 1895 static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob, 1896 struct qeth_cmd_buffer *reply) 1897 { 1898 /* MPC cmds are issued strictly in sequence. */ 1899 return !IS_IPA(reply->data); 1900 } 1901 1902 static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card, 1903 const void *data, 1904 unsigned int data_length) 1905 { 1906 struct qeth_cmd_buffer *iob; 1907 1908 iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT); 1909 if (!iob) 1910 return NULL; 1911 1912 memcpy(iob->data, data, data_length); 1913 qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length, 1914 iob->data); 1915 iob->finalize = qeth_mpc_finalize_cmd; 1916 iob->match = qeth_mpc_match_reply; 1917 return iob; 1918 } 1919 1920 /** 1921 * qeth_send_control_data() - send control command to the card 1922 * @card: qeth_card structure pointer 1923 * @iob: qeth_cmd_buffer pointer 1924 * @reply_cb: callback function pointer 1925 * cb_card: pointer to the qeth_card structure 1926 * cb_reply: pointer to the qeth_reply structure 1927 * cb_cmd: pointer to the original iob for non-IPA 1928 * commands, or to the qeth_ipa_cmd structure 1929 * for the IPA commands. 1930 * @reply_param: private pointer passed to the callback 1931 * 1932 * Callback function gets called one or more times, with cb_cmd 1933 * pointing to the response returned by the hardware. Callback 1934 * function must return 1935 * > 0 if more reply blocks are expected, 1936 * 0 if the last or only reply block is received, and 1937 * < 0 on error. 1938 * Callback function can get the value of the reply_param pointer from the 1939 * field 'param' of the structure qeth_reply. 1940 */ 1941 1942 static int qeth_send_control_data(struct qeth_card *card, 1943 struct qeth_cmd_buffer *iob, 1944 int (*reply_cb)(struct qeth_card *cb_card, 1945 struct qeth_reply *cb_reply, 1946 unsigned long cb_cmd), 1947 void *reply_param) 1948 { 1949 struct qeth_channel *channel = iob->channel; 1950 struct qeth_reply *reply = &iob->reply; 1951 long timeout = iob->timeout; 1952 int rc; 1953 1954 QETH_CARD_TEXT(card, 2, "sendctl"); 1955 1956 reply->callback = reply_cb; 1957 reply->param = reply_param; 1958 1959 timeout = wait_event_interruptible_timeout(card->wait_q, 1960 qeth_trylock_channel(channel, iob), 1961 timeout); 1962 if (timeout <= 0) { 1963 qeth_put_cmd(iob); 1964 return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME; 1965 } 1966 1967 if (iob->finalize) 1968 iob->finalize(card, iob); 1969 QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN)); 1970 1971 qeth_enqueue_cmd(card, iob); 1972 1973 /* This pairs with iob->callback, and keeps the iob alive after IO: */ 1974 qeth_get_cmd(iob); 1975 1976 QETH_CARD_TEXT(card, 6, "noirqpnd"); 1977 spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); 1978 rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob), 1979 (addr_t) iob, 0, 0, timeout); 1980 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); 1981 if (rc) { 1982 QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n", 1983 CARD_DEVID(card), rc); 1984 QETH_CARD_TEXT_(card, 2, " err%d", rc); 1985 qeth_dequeue_cmd(card, iob); 1986 qeth_put_cmd(iob); 1987 qeth_unlock_channel(card, channel); 1988 goto out; 1989 } 1990 1991 timeout = wait_for_completion_interruptible_timeout(&iob->done, 1992 timeout); 1993 if (timeout <= 0) 1994 rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME; 1995 1996 qeth_dequeue_cmd(card, iob); 1997 1998 if (reply_cb) { 1999 /* Wait until the callback for a late reply has completed: */ 2000 spin_lock_irq(&iob->lock); 2001 if (rc) 2002 /* Zap any callback that's still pending: */ 2003 iob->rc = rc; 2004 spin_unlock_irq(&iob->lock); 2005 } 2006 2007 if (!rc) 2008 rc = iob->rc; 2009 2010 out: 2011 qeth_put_cmd(iob); 2012 return rc; 2013 } 2014 2015 struct qeth_node_desc { 2016 struct node_descriptor nd1; 2017 struct node_descriptor nd2; 2018 struct node_descriptor nd3; 2019 }; 2020 2021 static void qeth_read_conf_data_cb(struct qeth_card *card, 2022 struct qeth_cmd_buffer *iob, 2023 unsigned int data_length) 2024 { 2025 struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data; 2026 int rc = 0; 2027 u8 *tag; 2028 2029 QETH_CARD_TEXT(card, 2, "cfgunit"); 2030 2031 if (data_length < sizeof(*nd)) { 2032 rc = -EINVAL; 2033 goto out; 2034 } 2035 2036 card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] && 2037 nd->nd1.plant[1] == _ascebc['M']; 2038 tag = (u8 *)&nd->nd1.tag; 2039 card->info.chpid = tag[0]; 2040 card->info.unit_addr2 = tag[1]; 2041 2042 tag = (u8 *)&nd->nd2.tag; 2043 card->info.cula = tag[1]; 2044 2045 card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 && 2046 nd->nd3.model[1] == 0xF0 && 2047 nd->nd3.model[2] >= 0xF1 && 2048 nd->nd3.model[2] <= 0xF4; 2049 2050 out: 2051 qeth_notify_cmd(iob, rc); 2052 qeth_put_cmd(iob); 2053 } 2054 2055 static int qeth_read_conf_data(struct qeth_card *card) 2056 { 2057 struct qeth_channel *channel = &card->data; 2058 struct qeth_cmd_buffer *iob; 2059 struct ciw *ciw; 2060 2061 /* scan for RCD command in extended SenseID data */ 2062 ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD); 2063 if (!ciw || ciw->cmd == 0) 2064 return -EOPNOTSUPP; 2065 if (ciw->count < sizeof(struct qeth_node_desc)) 2066 return -EINVAL; 2067 2068 iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT); 2069 if (!iob) 2070 return -ENOMEM; 2071 2072 iob->callback = qeth_read_conf_data_cb; 2073 qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length, 2074 iob->data); 2075 2076 return qeth_send_control_data(card, iob, NULL, NULL); 2077 } 2078 2079 static int qeth_idx_check_activate_response(struct qeth_card *card, 2080 struct qeth_channel *channel, 2081 struct qeth_cmd_buffer *iob) 2082 { 2083 int rc; 2084 2085 rc = qeth_check_idx_response(card, iob->data); 2086 if (rc) 2087 return rc; 2088 2089 if (QETH_IS_IDX_ACT_POS_REPLY(iob->data)) 2090 return 0; 2091 2092 /* negative reply: */ 2093 QETH_CARD_TEXT_(card, 2, "idxneg%c", 2094 QETH_IDX_ACT_CAUSE_CODE(iob->data)); 2095 2096 switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) { 2097 case QETH_IDX_ACT_ERR_EXCL: 2098 dev_err(&channel->ccwdev->dev, 2099 "The adapter is used exclusively by another host\n"); 2100 return -EBUSY; 2101 case QETH_IDX_ACT_ERR_AUTH: 2102 case QETH_IDX_ACT_ERR_AUTH_USER: 2103 dev_err(&channel->ccwdev->dev, 2104 "Setting the device online failed because of insufficient authorization\n"); 2105 return -EPERM; 2106 default: 2107 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n", 2108 CCW_DEVID(channel->ccwdev)); 2109 return -EIO; 2110 } 2111 } 2112 2113 static void qeth_idx_activate_read_channel_cb(struct qeth_card *card, 2114 struct qeth_cmd_buffer *iob, 2115 unsigned int data_length) 2116 { 2117 struct qeth_channel *channel = iob->channel; 2118 u16 peer_level; 2119 int rc; 2120 2121 QETH_CARD_TEXT(card, 2, "idxrdcb"); 2122 2123 rc = qeth_idx_check_activate_response(card, channel, iob); 2124 if (rc) 2125 goto out; 2126 2127 memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); 2128 if (peer_level != qeth_peer_func_level(card->info.func_level)) { 2129 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", 2130 CCW_DEVID(channel->ccwdev), 2131 card->info.func_level, peer_level); 2132 rc = -EINVAL; 2133 goto out; 2134 } 2135 2136 memcpy(&card->token.issuer_rm_r, 2137 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), 2138 QETH_MPC_TOKEN_LENGTH); 2139 memcpy(&card->info.mcl_level[0], 2140 QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH); 2141 2142 out: 2143 qeth_notify_cmd(iob, rc); 2144 qeth_put_cmd(iob); 2145 } 2146 2147 static void qeth_idx_activate_write_channel_cb(struct qeth_card *card, 2148 struct qeth_cmd_buffer *iob, 2149 unsigned int data_length) 2150 { 2151 struct qeth_channel *channel = iob->channel; 2152 u16 peer_level; 2153 int rc; 2154 2155 QETH_CARD_TEXT(card, 2, "idxwrcb"); 2156 2157 rc = qeth_idx_check_activate_response(card, channel, iob); 2158 if (rc) 2159 goto out; 2160 2161 memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); 2162 if ((peer_level & ~0x0100) != 2163 qeth_peer_func_level(card->info.func_level)) { 2164 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", 2165 CCW_DEVID(channel->ccwdev), 2166 card->info.func_level, peer_level); 2167 rc = -EINVAL; 2168 } 2169 2170 out: 2171 qeth_notify_cmd(iob, rc); 2172 qeth_put_cmd(iob); 2173 } 2174 2175 static void qeth_idx_setup_activate_cmd(struct qeth_card *card, 2176 struct qeth_cmd_buffer *iob) 2177 { 2178 u16 addr = (card->info.cula << 8) + card->info.unit_addr2; 2179 u8 port = ((u8)card->dev->dev_port) | 0x80; 2180 struct ccw1 *ccw = __ccw_from_cmd(iob); 2181 2182 qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE, 2183 iob->data); 2184 qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data); 2185 iob->finalize = qeth_idx_finalize_cmd; 2186 2187 port |= QETH_IDX_ACT_INVAL_FRAME; 2188 memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1); 2189 memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), 2190 &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH); 2191 memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2192 &card->info.func_level, 2); 2193 memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2); 2194 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2); 2195 } 2196 2197 static int qeth_idx_activate_read_channel(struct qeth_card *card) 2198 { 2199 struct qeth_channel *channel = &card->read; 2200 struct qeth_cmd_buffer *iob; 2201 int rc; 2202 2203 QETH_CARD_TEXT(card, 2, "idxread"); 2204 2205 iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT); 2206 if (!iob) 2207 return -ENOMEM; 2208 2209 memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE); 2210 qeth_idx_setup_activate_cmd(card, iob); 2211 iob->callback = qeth_idx_activate_read_channel_cb; 2212 2213 rc = qeth_send_control_data(card, iob, NULL, NULL); 2214 if (rc) 2215 return rc; 2216 2217 channel->state = CH_STATE_UP; 2218 return 0; 2219 } 2220 2221 static int qeth_idx_activate_write_channel(struct qeth_card *card) 2222 { 2223 struct qeth_channel *channel = &card->write; 2224 struct qeth_cmd_buffer *iob; 2225 int rc; 2226 2227 QETH_CARD_TEXT(card, 2, "idxwrite"); 2228 2229 iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT); 2230 if (!iob) 2231 return -ENOMEM; 2232 2233 memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE); 2234 qeth_idx_setup_activate_cmd(card, iob); 2235 iob->callback = qeth_idx_activate_write_channel_cb; 2236 2237 rc = qeth_send_control_data(card, iob, NULL, NULL); 2238 if (rc) 2239 return rc; 2240 2241 channel->state = CH_STATE_UP; 2242 return 0; 2243 } 2244 2245 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply, 2246 unsigned long data) 2247 { 2248 struct qeth_cmd_buffer *iob; 2249 2250 QETH_CARD_TEXT(card, 2, "cmenblcb"); 2251 2252 iob = (struct qeth_cmd_buffer *) data; 2253 memcpy(&card->token.cm_filter_r, 2254 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data), 2255 QETH_MPC_TOKEN_LENGTH); 2256 return 0; 2257 } 2258 2259 static int qeth_cm_enable(struct qeth_card *card) 2260 { 2261 struct qeth_cmd_buffer *iob; 2262 2263 QETH_CARD_TEXT(card, 2, "cmenable"); 2264 2265 iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE); 2266 if (!iob) 2267 return -ENOMEM; 2268 2269 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data), 2270 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); 2271 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data), 2272 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH); 2273 2274 return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL); 2275 } 2276 2277 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply, 2278 unsigned long data) 2279 { 2280 struct qeth_cmd_buffer *iob; 2281 2282 QETH_CARD_TEXT(card, 2, "cmsetpcb"); 2283 2284 iob = (struct qeth_cmd_buffer *) data; 2285 memcpy(&card->token.cm_connection_r, 2286 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data), 2287 QETH_MPC_TOKEN_LENGTH); 2288 return 0; 2289 } 2290 2291 static int qeth_cm_setup(struct qeth_card *card) 2292 { 2293 struct qeth_cmd_buffer *iob; 2294 2295 QETH_CARD_TEXT(card, 2, "cmsetup"); 2296 2297 iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE); 2298 if (!iob) 2299 return -ENOMEM; 2300 2301 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data), 2302 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); 2303 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data), 2304 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH); 2305 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data), 2306 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH); 2307 return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL); 2308 } 2309 2310 static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type) 2311 { 2312 if (link_type == QETH_LINK_TYPE_LANE_TR || 2313 link_type == QETH_LINK_TYPE_HSTR) { 2314 dev_err(&card->gdev->dev, "Unsupported Token Ring device\n"); 2315 return false; 2316 } 2317 2318 return true; 2319 } 2320 2321 static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu) 2322 { 2323 struct net_device *dev = card->dev; 2324 unsigned int new_mtu; 2325 2326 if (!max_mtu) { 2327 /* IQD needs accurate max MTU to set up its RX buffers: */ 2328 if (IS_IQD(card)) 2329 return -EINVAL; 2330 /* tolerate quirky HW: */ 2331 max_mtu = ETH_MAX_MTU; 2332 } 2333 2334 rtnl_lock(); 2335 if (IS_IQD(card)) { 2336 /* move any device with default MTU to new max MTU: */ 2337 new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu; 2338 2339 /* adjust RX buffer size to new max MTU: */ 2340 card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE; 2341 if (dev->max_mtu && dev->max_mtu != max_mtu) 2342 qeth_free_qdio_queues(card); 2343 } else { 2344 if (dev->mtu) 2345 new_mtu = dev->mtu; 2346 /* default MTUs for first setup: */ 2347 else if (IS_LAYER2(card)) 2348 new_mtu = ETH_DATA_LEN; 2349 else 2350 new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */ 2351 } 2352 2353 dev->max_mtu = max_mtu; 2354 dev->mtu = min(new_mtu, max_mtu); 2355 rtnl_unlock(); 2356 return 0; 2357 } 2358 2359 static int qeth_get_mtu_outof_framesize(int framesize) 2360 { 2361 switch (framesize) { 2362 case 0x4000: 2363 return 8192; 2364 case 0x6000: 2365 return 16384; 2366 case 0xa000: 2367 return 32768; 2368 case 0xffff: 2369 return 57344; 2370 default: 2371 return 0; 2372 } 2373 } 2374 2375 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, 2376 unsigned long data) 2377 { 2378 __u16 mtu, framesize; 2379 __u16 len; 2380 struct qeth_cmd_buffer *iob; 2381 u8 link_type = 0; 2382 2383 QETH_CARD_TEXT(card, 2, "ulpenacb"); 2384 2385 iob = (struct qeth_cmd_buffer *) data; 2386 memcpy(&card->token.ulp_filter_r, 2387 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data), 2388 QETH_MPC_TOKEN_LENGTH); 2389 if (IS_IQD(card)) { 2390 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2); 2391 mtu = qeth_get_mtu_outof_framesize(framesize); 2392 } else { 2393 mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data); 2394 } 2395 *(u16 *)reply->param = mtu; 2396 2397 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2); 2398 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) { 2399 memcpy(&link_type, 2400 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1); 2401 if (!qeth_is_supported_link_type(card, link_type)) 2402 return -EPROTONOSUPPORT; 2403 } 2404 2405 card->info.link_type = link_type; 2406 QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type); 2407 return 0; 2408 } 2409 2410 static u8 qeth_mpc_select_prot_type(struct qeth_card *card) 2411 { 2412 return IS_LAYER2(card) ? QETH_MPC_PROT_L2 : QETH_MPC_PROT_L3; 2413 } 2414 2415 static int qeth_ulp_enable(struct qeth_card *card) 2416 { 2417 u8 prot_type = qeth_mpc_select_prot_type(card); 2418 struct qeth_cmd_buffer *iob; 2419 u16 max_mtu; 2420 int rc; 2421 2422 QETH_CARD_TEXT(card, 2, "ulpenabl"); 2423 2424 iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE); 2425 if (!iob) 2426 return -ENOMEM; 2427 2428 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port; 2429 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1); 2430 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data), 2431 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2432 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data), 2433 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH); 2434 rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu); 2435 if (rc) 2436 return rc; 2437 return qeth_update_max_mtu(card, max_mtu); 2438 } 2439 2440 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, 2441 unsigned long data) 2442 { 2443 struct qeth_cmd_buffer *iob; 2444 2445 QETH_CARD_TEXT(card, 2, "ulpstpcb"); 2446 2447 iob = (struct qeth_cmd_buffer *) data; 2448 memcpy(&card->token.ulp_connection_r, 2449 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), 2450 QETH_MPC_TOKEN_LENGTH); 2451 if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), 2452 3)) { 2453 QETH_CARD_TEXT(card, 2, "olmlimit"); 2454 dev_err(&card->gdev->dev, "A connection could not be " 2455 "established because of an OLM limit\n"); 2456 return -EMLINK; 2457 } 2458 return 0; 2459 } 2460 2461 static int qeth_ulp_setup(struct qeth_card *card) 2462 { 2463 __u16 temp; 2464 struct qeth_cmd_buffer *iob; 2465 2466 QETH_CARD_TEXT(card, 2, "ulpsetup"); 2467 2468 iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE); 2469 if (!iob) 2470 return -ENOMEM; 2471 2472 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data), 2473 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2474 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data), 2475 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH); 2476 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data), 2477 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH); 2478 2479 memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2); 2480 temp = (card->info.cula << 8) + card->info.unit_addr2; 2481 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2); 2482 return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL); 2483 } 2484 2485 static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx, 2486 gfp_t gfp) 2487 { 2488 struct qeth_qdio_out_buffer *newbuf; 2489 2490 newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, gfp); 2491 if (!newbuf) 2492 return -ENOMEM; 2493 2494 newbuf->buffer = q->qdio_bufs[bidx]; 2495 skb_queue_head_init(&newbuf->skb_list); 2496 lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key); 2497 atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY); 2498 q->bufs[bidx] = newbuf; 2499 return 0; 2500 } 2501 2502 static void qeth_free_output_queue(struct qeth_qdio_out_q *q) 2503 { 2504 if (!q) 2505 return; 2506 2507 qeth_drain_output_queue(q, true); 2508 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2509 kfree(q); 2510 } 2511 2512 static struct qeth_qdio_out_q *qeth_alloc_output_queue(void) 2513 { 2514 struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL); 2515 unsigned int i; 2516 2517 if (!q) 2518 return NULL; 2519 2520 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) 2521 goto err_qdio_bufs; 2522 2523 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { 2524 if (qeth_alloc_out_buf(q, i, GFP_KERNEL)) 2525 goto err_out_bufs; 2526 } 2527 2528 return q; 2529 2530 err_out_bufs: 2531 while (i > 0) 2532 qeth_free_out_buf(q->bufs[--i]); 2533 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2534 err_qdio_bufs: 2535 kfree(q); 2536 return NULL; 2537 } 2538 2539 static void qeth_tx_completion_timer(struct timer_list *timer) 2540 { 2541 struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer); 2542 2543 napi_schedule(&queue->napi); 2544 QETH_TXQ_STAT_INC(queue, completion_timer); 2545 } 2546 2547 static int qeth_alloc_qdio_queues(struct qeth_card *card) 2548 { 2549 unsigned int i; 2550 2551 QETH_CARD_TEXT(card, 2, "allcqdbf"); 2552 2553 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED, 2554 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED) 2555 return 0; 2556 2557 /* inbound buffer pool */ 2558 if (qeth_alloc_buffer_pool(card)) 2559 goto out_buffer_pool; 2560 2561 /* outbound */ 2562 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2563 struct qeth_qdio_out_q *queue; 2564 2565 queue = qeth_alloc_output_queue(); 2566 if (!queue) 2567 goto out_freeoutq; 2568 QETH_CARD_TEXT_(card, 2, "outq %i", i); 2569 QETH_CARD_HEX(card, 2, &queue, sizeof(void *)); 2570 card->qdio.out_qs[i] = queue; 2571 queue->card = card; 2572 queue->queue_no = i; 2573 INIT_LIST_HEAD(&queue->pending_bufs); 2574 spin_lock_init(&queue->lock); 2575 timer_setup(&queue->timer, qeth_tx_completion_timer, 0); 2576 if (IS_IQD(card)) { 2577 queue->coalesce_usecs = QETH_TX_COALESCE_USECS; 2578 queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES; 2579 queue->rescan_usecs = QETH_TX_TIMER_USECS; 2580 } else { 2581 queue->coalesce_usecs = USEC_PER_SEC; 2582 queue->max_coalesced_frames = 0; 2583 queue->rescan_usecs = 10 * USEC_PER_SEC; 2584 } 2585 queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT; 2586 } 2587 2588 /* completion */ 2589 if (qeth_alloc_cq(card)) 2590 goto out_freeoutq; 2591 2592 return 0; 2593 2594 out_freeoutq: 2595 while (i > 0) { 2596 qeth_free_output_queue(card->qdio.out_qs[--i]); 2597 card->qdio.out_qs[i] = NULL; 2598 } 2599 qeth_free_buffer_pool(card); 2600 out_buffer_pool: 2601 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); 2602 return -ENOMEM; 2603 } 2604 2605 static void qeth_free_qdio_queues(struct qeth_card *card) 2606 { 2607 int i, j; 2608 2609 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == 2610 QETH_QDIO_UNINITIALIZED) 2611 return; 2612 2613 qeth_free_cq(card); 2614 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { 2615 if (card->qdio.in_q->bufs[j].rx_skb) { 2616 consume_skb(card->qdio.in_q->bufs[j].rx_skb); 2617 card->qdio.in_q->bufs[j].rx_skb = NULL; 2618 } 2619 } 2620 2621 /* inbound buffer pool */ 2622 qeth_free_buffer_pool(card); 2623 /* free outbound qdio_qs */ 2624 for (i = 0; i < card->qdio.no_out_queues; i++) { 2625 qeth_free_output_queue(card->qdio.out_qs[i]); 2626 card->qdio.out_qs[i] = NULL; 2627 } 2628 } 2629 2630 static void qeth_fill_qib_parms(struct qeth_card *card, 2631 struct qeth_qib_parms *parms) 2632 { 2633 struct qeth_qdio_out_q *queue; 2634 unsigned int i; 2635 2636 parms->pcit_magic[0] = 'P'; 2637 parms->pcit_magic[1] = 'C'; 2638 parms->pcit_magic[2] = 'I'; 2639 parms->pcit_magic[3] = 'T'; 2640 ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic)); 2641 parms->pcit_a = QETH_PCI_THRESHOLD_A(card); 2642 parms->pcit_b = QETH_PCI_THRESHOLD_B(card); 2643 parms->pcit_c = QETH_PCI_TIMER_VALUE(card); 2644 2645 parms->blkt_magic[0] = 'B'; 2646 parms->blkt_magic[1] = 'L'; 2647 parms->blkt_magic[2] = 'K'; 2648 parms->blkt_magic[3] = 'T'; 2649 ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic)); 2650 parms->blkt_total = card->info.blkt.time_total; 2651 parms->blkt_inter_packet = card->info.blkt.inter_packet; 2652 parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo; 2653 2654 /* Prio-queueing implicitly uses the default priorities: */ 2655 if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1) 2656 return; 2657 2658 parms->pque_magic[0] = 'P'; 2659 parms->pque_magic[1] = 'Q'; 2660 parms->pque_magic[2] = 'U'; 2661 parms->pque_magic[3] = 'E'; 2662 ASCEBC(parms->pque_magic, sizeof(parms->pque_magic)); 2663 parms->pque_order = QETH_QIB_PQUE_ORDER_RR; 2664 parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL; 2665 2666 qeth_for_each_output_queue(card, queue, i) 2667 parms->pque_priority[i] = queue->priority; 2668 } 2669 2670 static int qeth_qdio_activate(struct qeth_card *card) 2671 { 2672 QETH_CARD_TEXT(card, 3, "qdioact"); 2673 return qdio_activate(CARD_DDEV(card)); 2674 } 2675 2676 static int qeth_dm_act(struct qeth_card *card) 2677 { 2678 struct qeth_cmd_buffer *iob; 2679 2680 QETH_CARD_TEXT(card, 2, "dmact"); 2681 2682 iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE); 2683 if (!iob) 2684 return -ENOMEM; 2685 2686 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data), 2687 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2688 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data), 2689 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); 2690 return qeth_send_control_data(card, iob, NULL, NULL); 2691 } 2692 2693 static int qeth_mpc_initialize(struct qeth_card *card) 2694 { 2695 int rc; 2696 2697 QETH_CARD_TEXT(card, 2, "mpcinit"); 2698 2699 rc = qeth_issue_next_read(card); 2700 if (rc) { 2701 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 2702 return rc; 2703 } 2704 rc = qeth_cm_enable(card); 2705 if (rc) { 2706 QETH_CARD_TEXT_(card, 2, "2err%d", rc); 2707 return rc; 2708 } 2709 rc = qeth_cm_setup(card); 2710 if (rc) { 2711 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 2712 return rc; 2713 } 2714 rc = qeth_ulp_enable(card); 2715 if (rc) { 2716 QETH_CARD_TEXT_(card, 2, "4err%d", rc); 2717 return rc; 2718 } 2719 rc = qeth_ulp_setup(card); 2720 if (rc) { 2721 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 2722 return rc; 2723 } 2724 rc = qeth_alloc_qdio_queues(card); 2725 if (rc) { 2726 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 2727 return rc; 2728 } 2729 rc = qeth_qdio_establish(card); 2730 if (rc) { 2731 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 2732 qeth_free_qdio_queues(card); 2733 return rc; 2734 } 2735 rc = qeth_qdio_activate(card); 2736 if (rc) { 2737 QETH_CARD_TEXT_(card, 2, "7err%d", rc); 2738 return rc; 2739 } 2740 rc = qeth_dm_act(card); 2741 if (rc) { 2742 QETH_CARD_TEXT_(card, 2, "8err%d", rc); 2743 return rc; 2744 } 2745 2746 return 0; 2747 } 2748 2749 static void qeth_print_status_message(struct qeth_card *card) 2750 { 2751 switch (card->info.type) { 2752 case QETH_CARD_TYPE_OSD: 2753 case QETH_CARD_TYPE_OSM: 2754 case QETH_CARD_TYPE_OSX: 2755 /* VM will use a non-zero first character 2756 * to indicate a HiperSockets like reporting 2757 * of the level OSA sets the first character to zero 2758 * */ 2759 if (!card->info.mcl_level[0]) { 2760 sprintf(card->info.mcl_level, "%02x%02x", 2761 card->info.mcl_level[2], 2762 card->info.mcl_level[3]); 2763 break; 2764 } 2765 fallthrough; 2766 case QETH_CARD_TYPE_IQD: 2767 if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) { 2768 card->info.mcl_level[0] = (char) _ebcasc[(__u8) 2769 card->info.mcl_level[0]]; 2770 card->info.mcl_level[1] = (char) _ebcasc[(__u8) 2771 card->info.mcl_level[1]]; 2772 card->info.mcl_level[2] = (char) _ebcasc[(__u8) 2773 card->info.mcl_level[2]]; 2774 card->info.mcl_level[3] = (char) _ebcasc[(__u8) 2775 card->info.mcl_level[3]]; 2776 card->info.mcl_level[QETH_MCL_LENGTH] = 0; 2777 } 2778 break; 2779 default: 2780 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1); 2781 } 2782 dev_info(&card->gdev->dev, 2783 "Device is a%s card%s%s%s\nwith link type %s.\n", 2784 qeth_get_cardname(card), 2785 (card->info.mcl_level[0]) ? " (level: " : "", 2786 (card->info.mcl_level[0]) ? card->info.mcl_level : "", 2787 (card->info.mcl_level[0]) ? ")" : "", 2788 qeth_get_cardname_short(card)); 2789 } 2790 2791 static void qeth_initialize_working_pool_list(struct qeth_card *card) 2792 { 2793 struct qeth_buffer_pool_entry *entry; 2794 2795 QETH_CARD_TEXT(card, 5, "inwrklst"); 2796 2797 list_for_each_entry(entry, 2798 &card->qdio.init_pool.entry_list, init_list) { 2799 qeth_put_buffer_pool_entry(card, entry); 2800 } 2801 } 2802 2803 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( 2804 struct qeth_card *card) 2805 { 2806 struct qeth_buffer_pool_entry *entry; 2807 int i, free; 2808 2809 if (list_empty(&card->qdio.in_buf_pool.entry_list)) 2810 return NULL; 2811 2812 list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) { 2813 free = 1; 2814 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2815 if (page_count(entry->elements[i]) > 1) { 2816 free = 0; 2817 break; 2818 } 2819 } 2820 if (free) { 2821 list_del_init(&entry->list); 2822 return entry; 2823 } 2824 } 2825 2826 /* no free buffer in pool so take first one and swap pages */ 2827 entry = list_first_entry(&card->qdio.in_buf_pool.entry_list, 2828 struct qeth_buffer_pool_entry, list); 2829 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2830 if (page_count(entry->elements[i]) > 1) { 2831 struct page *page = dev_alloc_page(); 2832 2833 if (!page) 2834 return NULL; 2835 2836 __free_page(entry->elements[i]); 2837 entry->elements[i] = page; 2838 QETH_CARD_STAT_INC(card, rx_sg_alloc_page); 2839 } 2840 } 2841 list_del_init(&entry->list); 2842 return entry; 2843 } 2844 2845 static int qeth_init_input_buffer(struct qeth_card *card, 2846 struct qeth_qdio_buffer *buf) 2847 { 2848 struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry; 2849 int i; 2850 2851 if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) { 2852 buf->rx_skb = netdev_alloc_skb(card->dev, 2853 ETH_HLEN + 2854 sizeof(struct ipv6hdr)); 2855 if (!buf->rx_skb) 2856 return -ENOMEM; 2857 } 2858 2859 if (!pool_entry) { 2860 pool_entry = qeth_find_free_buffer_pool_entry(card); 2861 if (!pool_entry) 2862 return -ENOBUFS; 2863 2864 buf->pool_entry = pool_entry; 2865 } 2866 2867 /* 2868 * since the buffer is accessed only from the input_tasklet 2869 * there shouldn't be a need to synchronize; also, since we use 2870 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off 2871 * buffers 2872 */ 2873 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2874 buf->buffer->element[i].length = PAGE_SIZE; 2875 buf->buffer->element[i].addr = 2876 page_to_phys(pool_entry->elements[i]); 2877 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1) 2878 buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY; 2879 else 2880 buf->buffer->element[i].eflags = 0; 2881 buf->buffer->element[i].sflags = 0; 2882 } 2883 return 0; 2884 } 2885 2886 static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card, 2887 struct qeth_qdio_out_q *queue) 2888 { 2889 if (!IS_IQD(card) || 2890 qeth_iqd_is_mcast_queue(card, queue) || 2891 card->options.cq == QETH_CQ_ENABLED || 2892 qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd)) 2893 return 1; 2894 2895 return card->ssqd.mmwc ? card->ssqd.mmwc : 1; 2896 } 2897 2898 static int qeth_init_qdio_queues(struct qeth_card *card) 2899 { 2900 unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count; 2901 unsigned int i; 2902 int rc; 2903 2904 QETH_CARD_TEXT(card, 2, "initqdqs"); 2905 2906 /* inbound queue */ 2907 qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2908 memset(&card->rx, 0, sizeof(struct qeth_rx)); 2909 2910 qeth_initialize_working_pool_list(card); 2911 /*give only as many buffers to hardware as we have buffer pool entries*/ 2912 for (i = 0; i < rx_bufs; i++) { 2913 rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); 2914 if (rc) 2915 return rc; 2916 } 2917 2918 card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs); 2919 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs, 2920 NULL); 2921 if (rc) { 2922 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 2923 return rc; 2924 } 2925 2926 /* completion */ 2927 rc = qeth_cq_init(card); 2928 if (rc) { 2929 return rc; 2930 } 2931 2932 /* outbound queue */ 2933 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2934 struct qeth_qdio_out_q *queue = card->qdio.out_qs[i]; 2935 2936 qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2937 queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card); 2938 queue->next_buf_to_fill = 0; 2939 queue->do_pack = 0; 2940 queue->prev_hdr = NULL; 2941 queue->coalesced_frames = 0; 2942 queue->bulk_start = 0; 2943 queue->bulk_count = 0; 2944 queue->bulk_max = qeth_tx_select_bulk_max(card, queue); 2945 atomic_set(&queue->used_buffers, 0); 2946 atomic_set(&queue->set_pci_flags_count, 0); 2947 netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i)); 2948 } 2949 return 0; 2950 } 2951 2952 static void qeth_ipa_finalize_cmd(struct qeth_card *card, 2953 struct qeth_cmd_buffer *iob) 2954 { 2955 qeth_mpc_finalize_cmd(card, iob); 2956 2957 /* override with IPA-specific values: */ 2958 __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++; 2959 } 2960 2961 static void qeth_prepare_ipa_cmd(struct qeth_card *card, 2962 struct qeth_cmd_buffer *iob, u16 cmd_length) 2963 { 2964 u8 prot_type = qeth_mpc_select_prot_type(card); 2965 u16 total_length = iob->length; 2966 2967 qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length, 2968 iob->data); 2969 iob->finalize = qeth_ipa_finalize_cmd; 2970 2971 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); 2972 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2); 2973 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1); 2974 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2); 2975 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2); 2976 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), 2977 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); 2978 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2); 2979 } 2980 2981 static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob, 2982 struct qeth_cmd_buffer *reply) 2983 { 2984 struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply); 2985 2986 return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno); 2987 } 2988 2989 struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card, 2990 enum qeth_ipa_cmds cmd_code, 2991 enum qeth_prot_versions prot, 2992 unsigned int data_length) 2993 { 2994 struct qeth_cmd_buffer *iob; 2995 struct qeth_ipacmd_hdr *hdr; 2996 2997 data_length += offsetof(struct qeth_ipa_cmd, data); 2998 iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1, 2999 QETH_IPA_TIMEOUT); 3000 if (!iob) 3001 return NULL; 3002 3003 qeth_prepare_ipa_cmd(card, iob, data_length); 3004 iob->match = qeth_ipa_match_reply; 3005 3006 hdr = &__ipa_cmd(iob)->hdr; 3007 hdr->command = cmd_code; 3008 hdr->initiator = IPA_CMD_INITIATOR_HOST; 3009 /* hdr->seqno is set by qeth_send_control_data() */ 3010 hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH; 3011 hdr->rel_adapter_no = (u8) card->dev->dev_port; 3012 hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1; 3013 hdr->param_count = 1; 3014 hdr->prot_version = prot; 3015 return iob; 3016 } 3017 EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd); 3018 3019 static int qeth_send_ipa_cmd_cb(struct qeth_card *card, 3020 struct qeth_reply *reply, unsigned long data) 3021 { 3022 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3023 3024 return (cmd->hdr.return_code) ? -EIO : 0; 3025 } 3026 3027 /* 3028 * qeth_send_ipa_cmd() - send an IPA command 3029 * 3030 * See qeth_send_control_data() for explanation of the arguments. 3031 */ 3032 3033 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, 3034 int (*reply_cb)(struct qeth_card *, struct qeth_reply*, 3035 unsigned long), 3036 void *reply_param) 3037 { 3038 int rc; 3039 3040 QETH_CARD_TEXT(card, 4, "sendipa"); 3041 3042 if (card->read_or_write_problem) { 3043 qeth_put_cmd(iob); 3044 return -EIO; 3045 } 3046 3047 if (reply_cb == NULL) 3048 reply_cb = qeth_send_ipa_cmd_cb; 3049 rc = qeth_send_control_data(card, iob, reply_cb, reply_param); 3050 if (rc == -ETIME) { 3051 qeth_clear_ipacmd_list(card); 3052 qeth_schedule_recovery(card); 3053 } 3054 return rc; 3055 } 3056 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); 3057 3058 static int qeth_send_startlan_cb(struct qeth_card *card, 3059 struct qeth_reply *reply, unsigned long data) 3060 { 3061 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3062 3063 if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE) 3064 return -ENETDOWN; 3065 3066 return (cmd->hdr.return_code) ? -EIO : 0; 3067 } 3068 3069 static int qeth_send_startlan(struct qeth_card *card) 3070 { 3071 struct qeth_cmd_buffer *iob; 3072 3073 QETH_CARD_TEXT(card, 2, "strtlan"); 3074 3075 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0); 3076 if (!iob) 3077 return -ENOMEM; 3078 return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL); 3079 } 3080 3081 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd) 3082 { 3083 if (!cmd->hdr.return_code) 3084 cmd->hdr.return_code = 3085 cmd->data.setadapterparms.hdr.return_code; 3086 return cmd->hdr.return_code; 3087 } 3088 3089 static int qeth_query_setadapterparms_cb(struct qeth_card *card, 3090 struct qeth_reply *reply, unsigned long data) 3091 { 3092 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3093 struct qeth_query_cmds_supp *query_cmd; 3094 3095 QETH_CARD_TEXT(card, 3, "quyadpcb"); 3096 if (qeth_setadpparms_inspect_rc(cmd)) 3097 return -EIO; 3098 3099 query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp; 3100 if (query_cmd->lan_type & 0x7f) { 3101 if (!qeth_is_supported_link_type(card, query_cmd->lan_type)) 3102 return -EPROTONOSUPPORT; 3103 3104 card->info.link_type = query_cmd->lan_type; 3105 QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type); 3106 } 3107 3108 card->options.adp.supported = query_cmd->supported_cmds; 3109 return 0; 3110 } 3111 3112 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, 3113 enum qeth_ipa_setadp_cmd adp_cmd, 3114 unsigned int data_length) 3115 { 3116 struct qeth_ipacmd_setadpparms_hdr *hdr; 3117 struct qeth_cmd_buffer *iob; 3118 3119 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4, 3120 data_length + 3121 offsetof(struct qeth_ipacmd_setadpparms, 3122 data)); 3123 if (!iob) 3124 return NULL; 3125 3126 hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr; 3127 hdr->cmdlength = sizeof(*hdr) + data_length; 3128 hdr->command_code = adp_cmd; 3129 hdr->used_total = 1; 3130 hdr->seq_no = 1; 3131 return iob; 3132 } 3133 3134 static int qeth_query_setadapterparms(struct qeth_card *card) 3135 { 3136 int rc; 3137 struct qeth_cmd_buffer *iob; 3138 3139 QETH_CARD_TEXT(card, 3, "queryadp"); 3140 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, 3141 SETADP_DATA_SIZEOF(query_cmds_supp)); 3142 if (!iob) 3143 return -ENOMEM; 3144 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); 3145 return rc; 3146 } 3147 3148 static int qeth_query_ipassists_cb(struct qeth_card *card, 3149 struct qeth_reply *reply, unsigned long data) 3150 { 3151 struct qeth_ipa_cmd *cmd; 3152 3153 QETH_CARD_TEXT(card, 2, "qipasscb"); 3154 3155 cmd = (struct qeth_ipa_cmd *) data; 3156 3157 switch (cmd->hdr.return_code) { 3158 case IPA_RC_SUCCESS: 3159 break; 3160 case IPA_RC_NOTSUPP: 3161 case IPA_RC_L2_UNSUPPORTED_CMD: 3162 QETH_CARD_TEXT(card, 2, "ipaunsup"); 3163 card->options.ipa4.supported |= IPA_SETADAPTERPARMS; 3164 card->options.ipa6.supported |= IPA_SETADAPTERPARMS; 3165 return -EOPNOTSUPP; 3166 default: 3167 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n", 3168 CARD_DEVID(card), cmd->hdr.return_code); 3169 return -EIO; 3170 } 3171 3172 if (cmd->hdr.prot_version == QETH_PROT_IPV4) 3173 card->options.ipa4 = cmd->hdr.assists; 3174 else if (cmd->hdr.prot_version == QETH_PROT_IPV6) 3175 card->options.ipa6 = cmd->hdr.assists; 3176 else 3177 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n", 3178 CARD_DEVID(card)); 3179 return 0; 3180 } 3181 3182 static int qeth_query_ipassists(struct qeth_card *card, 3183 enum qeth_prot_versions prot) 3184 { 3185 int rc; 3186 struct qeth_cmd_buffer *iob; 3187 3188 QETH_CARD_TEXT_(card, 2, "qipassi%i", prot); 3189 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0); 3190 if (!iob) 3191 return -ENOMEM; 3192 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); 3193 return rc; 3194 } 3195 3196 static int qeth_query_switch_attributes_cb(struct qeth_card *card, 3197 struct qeth_reply *reply, unsigned long data) 3198 { 3199 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3200 struct qeth_query_switch_attributes *attrs; 3201 struct qeth_switch_info *sw_info; 3202 3203 QETH_CARD_TEXT(card, 2, "qswiatcb"); 3204 if (qeth_setadpparms_inspect_rc(cmd)) 3205 return -EIO; 3206 3207 sw_info = (struct qeth_switch_info *)reply->param; 3208 attrs = &cmd->data.setadapterparms.data.query_switch_attributes; 3209 sw_info->capabilities = attrs->capabilities; 3210 sw_info->settings = attrs->settings; 3211 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities, 3212 sw_info->settings); 3213 return 0; 3214 } 3215 3216 int qeth_query_switch_attributes(struct qeth_card *card, 3217 struct qeth_switch_info *sw_info) 3218 { 3219 struct qeth_cmd_buffer *iob; 3220 3221 QETH_CARD_TEXT(card, 2, "qswiattr"); 3222 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES)) 3223 return -EOPNOTSUPP; 3224 if (!netif_carrier_ok(card->dev)) 3225 return -ENOMEDIUM; 3226 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0); 3227 if (!iob) 3228 return -ENOMEM; 3229 return qeth_send_ipa_cmd(card, iob, 3230 qeth_query_switch_attributes_cb, sw_info); 3231 } 3232 3233 struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card, 3234 enum qeth_diags_cmds sub_cmd, 3235 unsigned int data_length) 3236 { 3237 struct qeth_ipacmd_diagass *cmd; 3238 struct qeth_cmd_buffer *iob; 3239 3240 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE, 3241 DIAG_HDR_LEN + data_length); 3242 if (!iob) 3243 return NULL; 3244 3245 cmd = &__ipa_cmd(iob)->data.diagass; 3246 cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length; 3247 cmd->subcmd = sub_cmd; 3248 return iob; 3249 } 3250 EXPORT_SYMBOL_GPL(qeth_get_diag_cmd); 3251 3252 static int qeth_query_setdiagass_cb(struct qeth_card *card, 3253 struct qeth_reply *reply, unsigned long data) 3254 { 3255 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3256 u16 rc = cmd->hdr.return_code; 3257 3258 if (rc) { 3259 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc); 3260 return -EIO; 3261 } 3262 3263 card->info.diagass_support = cmd->data.diagass.ext; 3264 return 0; 3265 } 3266 3267 static int qeth_query_setdiagass(struct qeth_card *card) 3268 { 3269 struct qeth_cmd_buffer *iob; 3270 3271 QETH_CARD_TEXT(card, 2, "qdiagass"); 3272 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0); 3273 if (!iob) 3274 return -ENOMEM; 3275 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL); 3276 } 3277 3278 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid) 3279 { 3280 unsigned long info = get_zeroed_page(GFP_KERNEL); 3281 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info; 3282 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info; 3283 struct ccw_dev_id ccwid; 3284 int level; 3285 3286 tid->chpid = card->info.chpid; 3287 ccw_device_get_id(CARD_RDEV(card), &ccwid); 3288 tid->ssid = ccwid.ssid; 3289 tid->devno = ccwid.devno; 3290 if (!info) 3291 return; 3292 level = stsi(NULL, 0, 0, 0); 3293 if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0)) 3294 tid->lparnr = info222->lpar_number; 3295 if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) { 3296 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name)); 3297 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname)); 3298 } 3299 free_page(info); 3300 } 3301 3302 static int qeth_hw_trap_cb(struct qeth_card *card, 3303 struct qeth_reply *reply, unsigned long data) 3304 { 3305 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3306 u16 rc = cmd->hdr.return_code; 3307 3308 if (rc) { 3309 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc); 3310 return -EIO; 3311 } 3312 return 0; 3313 } 3314 3315 int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action) 3316 { 3317 struct qeth_cmd_buffer *iob; 3318 struct qeth_ipa_cmd *cmd; 3319 3320 QETH_CARD_TEXT(card, 2, "diagtrap"); 3321 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64); 3322 if (!iob) 3323 return -ENOMEM; 3324 cmd = __ipa_cmd(iob); 3325 cmd->data.diagass.type = 1; 3326 cmd->data.diagass.action = action; 3327 switch (action) { 3328 case QETH_DIAGS_TRAP_ARM: 3329 cmd->data.diagass.options = 0x0003; 3330 cmd->data.diagass.ext = 0x00010000 + 3331 sizeof(struct qeth_trap_id); 3332 qeth_get_trap_id(card, 3333 (struct qeth_trap_id *)cmd->data.diagass.cdata); 3334 break; 3335 case QETH_DIAGS_TRAP_DISARM: 3336 cmd->data.diagass.options = 0x0001; 3337 break; 3338 case QETH_DIAGS_TRAP_CAPTURE: 3339 break; 3340 } 3341 return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL); 3342 } 3343 3344 static int qeth_check_qdio_errors(struct qeth_card *card, 3345 struct qdio_buffer *buf, 3346 unsigned int qdio_error, 3347 const char *dbftext) 3348 { 3349 if (qdio_error) { 3350 QETH_CARD_TEXT(card, 2, dbftext); 3351 QETH_CARD_TEXT_(card, 2, " F15=%02X", 3352 buf->element[15].sflags); 3353 QETH_CARD_TEXT_(card, 2, " F14=%02X", 3354 buf->element[14].sflags); 3355 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error); 3356 if ((buf->element[15].sflags) == 0x12) { 3357 QETH_CARD_STAT_INC(card, rx_fifo_errors); 3358 return 0; 3359 } else 3360 return 1; 3361 } 3362 return 0; 3363 } 3364 3365 static unsigned int qeth_rx_refill_queue(struct qeth_card *card, 3366 unsigned int count) 3367 { 3368 struct qeth_qdio_q *queue = card->qdio.in_q; 3369 struct list_head *lh; 3370 int i; 3371 int rc; 3372 int newcount = 0; 3373 3374 /* only requeue at a certain threshold to avoid SIGAs */ 3375 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) { 3376 for (i = queue->next_buf_to_init; 3377 i < queue->next_buf_to_init + count; ++i) { 3378 if (qeth_init_input_buffer(card, 3379 &queue->bufs[QDIO_BUFNR(i)])) { 3380 break; 3381 } else { 3382 newcount++; 3383 } 3384 } 3385 3386 if (newcount < count) { 3387 /* we are in memory shortage so we switch back to 3388 traditional skb allocation and drop packages */ 3389 atomic_set(&card->force_alloc_skb, 3); 3390 count = newcount; 3391 } else { 3392 atomic_add_unless(&card->force_alloc_skb, -1, 0); 3393 } 3394 3395 if (!count) { 3396 i = 0; 3397 list_for_each(lh, &card->qdio.in_buf_pool.entry_list) 3398 i++; 3399 if (i == card->qdio.in_buf_pool.buf_count) { 3400 QETH_CARD_TEXT(card, 2, "qsarbw"); 3401 schedule_delayed_work( 3402 &card->buffer_reclaim_work, 3403 QETH_RECLAIM_WORK_TIME); 3404 } 3405 return 0; 3406 } 3407 3408 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 3409 queue->next_buf_to_init, count, NULL); 3410 if (rc) { 3411 QETH_CARD_TEXT(card, 2, "qinberr"); 3412 } 3413 queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init + 3414 count); 3415 return count; 3416 } 3417 3418 return 0; 3419 } 3420 3421 static void qeth_buffer_reclaim_work(struct work_struct *work) 3422 { 3423 struct qeth_card *card = container_of(to_delayed_work(work), 3424 struct qeth_card, 3425 buffer_reclaim_work); 3426 3427 local_bh_disable(); 3428 napi_schedule(&card->napi); 3429 /* kick-start the NAPI softirq: */ 3430 local_bh_enable(); 3431 } 3432 3433 static void qeth_handle_send_error(struct qeth_card *card, 3434 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err) 3435 { 3436 int sbalf15 = buffer->buffer->element[15].sflags; 3437 3438 QETH_CARD_TEXT(card, 6, "hdsnderr"); 3439 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr"); 3440 3441 if (!qdio_err) 3442 return; 3443 3444 if ((sbalf15 >= 15) && (sbalf15 <= 31)) 3445 return; 3446 3447 QETH_CARD_TEXT(card, 1, "lnkfail"); 3448 QETH_CARD_TEXT_(card, 1, "%04x %02x", 3449 (u16)qdio_err, (u8)sbalf15); 3450 } 3451 3452 /** 3453 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer. 3454 * @queue: queue to check for packing buffer 3455 * 3456 * Returns number of buffers that were prepared for flush. 3457 */ 3458 static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue) 3459 { 3460 struct qeth_qdio_out_buffer *buffer; 3461 3462 buffer = queue->bufs[queue->next_buf_to_fill]; 3463 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) && 3464 (buffer->next_element_to_fill > 0)) { 3465 /* it's a packing buffer */ 3466 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 3467 queue->next_buf_to_fill = 3468 QDIO_BUFNR(queue->next_buf_to_fill + 1); 3469 return 1; 3470 } 3471 return 0; 3472 } 3473 3474 /* 3475 * Switched to packing state if the number of used buffers on a queue 3476 * reaches a certain limit. 3477 */ 3478 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) 3479 { 3480 if (!queue->do_pack) { 3481 if (atomic_read(&queue->used_buffers) 3482 >= QETH_HIGH_WATERMARK_PACK){ 3483 /* switch non-PACKING -> PACKING */ 3484 QETH_CARD_TEXT(queue->card, 6, "np->pack"); 3485 QETH_TXQ_STAT_INC(queue, packing_mode_switch); 3486 queue->do_pack = 1; 3487 } 3488 } 3489 } 3490 3491 /* 3492 * Switches from packing to non-packing mode. If there is a packing 3493 * buffer on the queue this buffer will be prepared to be flushed. 3494 * In that case 1 is returned to inform the caller. If no buffer 3495 * has to be flushed, zero is returned. 3496 */ 3497 static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) 3498 { 3499 if (queue->do_pack) { 3500 if (atomic_read(&queue->used_buffers) 3501 <= QETH_LOW_WATERMARK_PACK) { 3502 /* switch PACKING -> non-PACKING */ 3503 QETH_CARD_TEXT(queue->card, 6, "pack->np"); 3504 QETH_TXQ_STAT_INC(queue, packing_mode_switch); 3505 queue->do_pack = 0; 3506 return qeth_prep_flush_pack_buffer(queue); 3507 } 3508 } 3509 return 0; 3510 } 3511 3512 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, 3513 int count) 3514 { 3515 struct qeth_qdio_out_buffer *buf = queue->bufs[index]; 3516 struct qeth_card *card = queue->card; 3517 unsigned int frames, usecs; 3518 struct qaob *aob = NULL; 3519 int rc; 3520 int i; 3521 3522 for (i = index; i < index + count; ++i) { 3523 unsigned int bidx = QDIO_BUFNR(i); 3524 struct sk_buff *skb; 3525 3526 buf = queue->bufs[bidx]; 3527 buf->buffer->element[buf->next_element_to_fill - 1].eflags |= 3528 SBAL_EFLAGS_LAST_ENTRY; 3529 queue->coalesced_frames += buf->frames; 3530 3531 if (IS_IQD(card)) { 3532 skb_queue_walk(&buf->skb_list, skb) 3533 skb_tx_timestamp(skb); 3534 } 3535 } 3536 3537 if (IS_IQD(card)) { 3538 if (card->options.cq == QETH_CQ_ENABLED && 3539 !qeth_iqd_is_mcast_queue(card, queue) && 3540 count == 1) { 3541 if (!buf->aob) 3542 buf->aob = kmem_cache_zalloc(qeth_qaob_cache, 3543 GFP_ATOMIC); 3544 if (buf->aob) { 3545 struct qeth_qaob_priv1 *priv; 3546 3547 aob = buf->aob; 3548 priv = (struct qeth_qaob_priv1 *)&aob->user1; 3549 priv->state = QETH_QAOB_ISSUED; 3550 priv->queue_no = queue->queue_no; 3551 } 3552 } 3553 } else { 3554 if (!queue->do_pack) { 3555 if ((atomic_read(&queue->used_buffers) >= 3556 (QETH_HIGH_WATERMARK_PACK - 3557 QETH_WATERMARK_PACK_FUZZ)) && 3558 !atomic_read(&queue->set_pci_flags_count)) { 3559 /* it's likely that we'll go to packing 3560 * mode soon */ 3561 atomic_inc(&queue->set_pci_flags_count); 3562 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ; 3563 } 3564 } else { 3565 if (!atomic_read(&queue->set_pci_flags_count)) { 3566 /* 3567 * there's no outstanding PCI any more, so we 3568 * have to request a PCI to be sure the the PCI 3569 * will wake at some time in the future then we 3570 * can flush packed buffers that might still be 3571 * hanging around, which can happen if no 3572 * further send was requested by the stack 3573 */ 3574 atomic_inc(&queue->set_pci_flags_count); 3575 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ; 3576 } 3577 } 3578 } 3579 3580 QETH_TXQ_STAT_INC(queue, doorbell); 3581 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_OUTPUT, queue->queue_no, 3582 index, count, aob); 3583 3584 switch (rc) { 3585 case 0: 3586 case -ENOBUFS: 3587 /* ignore temporary SIGA errors without busy condition */ 3588 3589 /* Fake the TX completion interrupt: */ 3590 frames = READ_ONCE(queue->max_coalesced_frames); 3591 usecs = READ_ONCE(queue->coalesce_usecs); 3592 3593 if (frames && queue->coalesced_frames >= frames) { 3594 napi_schedule(&queue->napi); 3595 queue->coalesced_frames = 0; 3596 QETH_TXQ_STAT_INC(queue, coal_frames); 3597 } else if (qeth_use_tx_irqs(card) && 3598 atomic_read(&queue->used_buffers) >= 32) { 3599 /* Old behaviour carried over from the qdio layer: */ 3600 napi_schedule(&queue->napi); 3601 QETH_TXQ_STAT_INC(queue, coal_frames); 3602 } else if (usecs) { 3603 qeth_tx_arm_timer(queue, usecs); 3604 } 3605 3606 break; 3607 default: 3608 QETH_CARD_TEXT(queue->card, 2, "flushbuf"); 3609 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no); 3610 QETH_CARD_TEXT_(queue->card, 2, " idx%d", index); 3611 QETH_CARD_TEXT_(queue->card, 2, " c%d", count); 3612 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc); 3613 3614 /* this must not happen under normal circumstances. if it 3615 * happens something is really wrong -> recover */ 3616 qeth_schedule_recovery(queue->card); 3617 } 3618 } 3619 3620 static void qeth_flush_queue(struct qeth_qdio_out_q *queue) 3621 { 3622 qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count); 3623 3624 queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count); 3625 queue->prev_hdr = NULL; 3626 queue->bulk_count = 0; 3627 } 3628 3629 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) 3630 { 3631 /* 3632 * check if weed have to switch to non-packing mode or if 3633 * we have to get a pci flag out on the queue 3634 */ 3635 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) || 3636 !atomic_read(&queue->set_pci_flags_count)) { 3637 unsigned int index, flush_cnt; 3638 bool q_was_packing; 3639 3640 spin_lock(&queue->lock); 3641 3642 index = queue->next_buf_to_fill; 3643 q_was_packing = queue->do_pack; 3644 3645 flush_cnt = qeth_switch_to_nonpacking_if_needed(queue); 3646 if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count)) 3647 flush_cnt = qeth_prep_flush_pack_buffer(queue); 3648 3649 if (flush_cnt) { 3650 qeth_flush_buffers(queue, index, flush_cnt); 3651 if (q_was_packing) 3652 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt); 3653 } 3654 3655 spin_unlock(&queue->lock); 3656 } 3657 } 3658 3659 static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr) 3660 { 3661 struct qeth_card *card = (struct qeth_card *)card_ptr; 3662 3663 napi_schedule_irqoff(&card->napi); 3664 } 3665 3666 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) 3667 { 3668 int rc; 3669 3670 if (card->options.cq == QETH_CQ_NOTAVAILABLE) { 3671 rc = -1; 3672 goto out; 3673 } else { 3674 if (card->options.cq == cq) { 3675 rc = 0; 3676 goto out; 3677 } 3678 3679 qeth_free_qdio_queues(card); 3680 card->options.cq = cq; 3681 rc = 0; 3682 } 3683 out: 3684 return rc; 3685 3686 } 3687 EXPORT_SYMBOL_GPL(qeth_configure_cq); 3688 3689 static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob) 3690 { 3691 struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1; 3692 unsigned int queue_no = priv->queue_no; 3693 3694 BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1)); 3695 3696 if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING && 3697 queue_no < card->qdio.no_out_queues) 3698 napi_schedule(&card->qdio.out_qs[queue_no]->napi); 3699 } 3700 3701 static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, 3702 unsigned int queue, int first_element, 3703 int count) 3704 { 3705 struct qeth_qdio_q *cq = card->qdio.c_q; 3706 int i; 3707 int rc; 3708 3709 QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element); 3710 QETH_CARD_TEXT_(card, 5, "qcqhc%d", count); 3711 QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err); 3712 3713 if (qdio_err) { 3714 netif_tx_stop_all_queues(card->dev); 3715 qeth_schedule_recovery(card); 3716 return; 3717 } 3718 3719 for (i = first_element; i < first_element + count; ++i) { 3720 struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)]; 3721 int e = 0; 3722 3723 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) && 3724 buffer->element[e].addr) { 3725 unsigned long phys_aob_addr = buffer->element[e].addr; 3726 3727 qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr)); 3728 ++e; 3729 } 3730 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER); 3731 } 3732 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue, 3733 cq->next_buf_to_init, count, NULL); 3734 if (rc) { 3735 dev_warn(&card->gdev->dev, 3736 "QDIO reported an error, rc=%i\n", rc); 3737 QETH_CARD_TEXT(card, 2, "qcqherr"); 3738 } 3739 3740 cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count); 3741 } 3742 3743 static void qeth_qdio_input_handler(struct ccw_device *ccwdev, 3744 unsigned int qdio_err, int queue, 3745 int first_elem, int count, 3746 unsigned long card_ptr) 3747 { 3748 struct qeth_card *card = (struct qeth_card *)card_ptr; 3749 3750 QETH_CARD_TEXT_(card, 2, "qihq%d", queue); 3751 QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err); 3752 3753 if (qdio_err) 3754 qeth_schedule_recovery(card); 3755 } 3756 3757 static void qeth_qdio_output_handler(struct ccw_device *ccwdev, 3758 unsigned int qdio_error, int __queue, 3759 int first_element, int count, 3760 unsigned long card_ptr) 3761 { 3762 struct qeth_card *card = (struct qeth_card *) card_ptr; 3763 3764 QETH_CARD_TEXT(card, 2, "achkcond"); 3765 netif_tx_stop_all_queues(card->dev); 3766 qeth_schedule_recovery(card); 3767 } 3768 3769 /* 3770 * Note: Function assumes that we have 4 outbound queues. 3771 */ 3772 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb) 3773 { 3774 struct vlan_ethhdr *veth = vlan_eth_hdr(skb); 3775 u8 tos; 3776 3777 switch (card->qdio.do_prio_queueing) { 3778 case QETH_PRIO_Q_ING_TOS: 3779 case QETH_PRIO_Q_ING_PREC: 3780 switch (vlan_get_protocol(skb)) { 3781 case htons(ETH_P_IP): 3782 tos = ipv4_get_dsfield(ip_hdr(skb)); 3783 break; 3784 case htons(ETH_P_IPV6): 3785 tos = ipv6_get_dsfield(ipv6_hdr(skb)); 3786 break; 3787 default: 3788 return card->qdio.default_out_queue; 3789 } 3790 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC) 3791 return ~tos >> 6 & 3; 3792 if (tos & IPTOS_MINCOST) 3793 return 3; 3794 if (tos & IPTOS_RELIABILITY) 3795 return 2; 3796 if (tos & IPTOS_THROUGHPUT) 3797 return 1; 3798 if (tos & IPTOS_LOWDELAY) 3799 return 0; 3800 break; 3801 case QETH_PRIO_Q_ING_SKB: 3802 if (skb->priority > 5) 3803 return 0; 3804 return ~skb->priority >> 1 & 3; 3805 case QETH_PRIO_Q_ING_VLAN: 3806 if (veth->h_vlan_proto == htons(ETH_P_8021Q)) 3807 return ~ntohs(veth->h_vlan_TCI) >> 3808 (VLAN_PRIO_SHIFT + 1) & 3; 3809 break; 3810 case QETH_PRIO_Q_ING_FIXED: 3811 return card->qdio.default_out_queue; 3812 default: 3813 break; 3814 } 3815 return card->qdio.default_out_queue; 3816 } 3817 EXPORT_SYMBOL_GPL(qeth_get_priority_queue); 3818 3819 /** 3820 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags. 3821 * @skb: SKB address 3822 * 3823 * Returns the number of pages, and thus QDIO buffer elements, needed to cover 3824 * fragmented part of the SKB. Returns zero for linear SKB. 3825 */ 3826 static int qeth_get_elements_for_frags(struct sk_buff *skb) 3827 { 3828 int cnt, elements = 0; 3829 3830 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { 3831 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; 3832 3833 elements += qeth_get_elements_for_range( 3834 (addr_t)skb_frag_address(frag), 3835 (addr_t)skb_frag_address(frag) + skb_frag_size(frag)); 3836 } 3837 return elements; 3838 } 3839 3840 /** 3841 * qeth_count_elements() - Counts the number of QDIO buffer elements needed 3842 * to transmit an skb. 3843 * @skb: the skb to operate on. 3844 * @data_offset: skip this part of the skb's linear data 3845 * 3846 * Returns the number of pages, and thus QDIO buffer elements, needed to map the 3847 * skb's data (both its linear part and paged fragments). 3848 */ 3849 static unsigned int qeth_count_elements(struct sk_buff *skb, 3850 unsigned int data_offset) 3851 { 3852 unsigned int elements = qeth_get_elements_for_frags(skb); 3853 addr_t end = (addr_t)skb->data + skb_headlen(skb); 3854 addr_t start = (addr_t)skb->data + data_offset; 3855 3856 if (start != end) 3857 elements += qeth_get_elements_for_range(start, end); 3858 return elements; 3859 } 3860 3861 #define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \ 3862 MAX_TCP_HEADER) 3863 3864 /** 3865 * qeth_add_hw_header() - add a HW header to an skb. 3866 * @queue: TX queue that the skb will be placed on. 3867 * @skb: skb that the HW header should be added to. 3868 * @hdr: double pointer to a qeth_hdr. When returning with >= 0, 3869 * it contains a valid pointer to a qeth_hdr. 3870 * @hdr_len: length of the HW header. 3871 * @proto_len: length of protocol headers that need to be in same page as the 3872 * HW header. 3873 * @elements: returns the required number of buffer elements for this skb. 3874 * 3875 * Returns the pushed length. If the header can't be pushed on 3876 * (eg. because it would cross a page boundary), it is allocated from 3877 * the cache instead and 0 is returned. 3878 * The number of needed buffer elements is returned in @elements. 3879 * Error to create the hdr is indicated by returning with < 0. 3880 */ 3881 static int qeth_add_hw_header(struct qeth_qdio_out_q *queue, 3882 struct sk_buff *skb, struct qeth_hdr **hdr, 3883 unsigned int hdr_len, unsigned int proto_len, 3884 unsigned int *elements) 3885 { 3886 gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0); 3887 const unsigned int contiguous = proto_len ? proto_len : 1; 3888 const unsigned int max_elements = queue->max_elements; 3889 unsigned int __elements; 3890 addr_t start, end; 3891 bool push_ok; 3892 int rc; 3893 3894 check_layout: 3895 start = (addr_t)skb->data - hdr_len; 3896 end = (addr_t)skb->data; 3897 3898 if (qeth_get_elements_for_range(start, end + contiguous) == 1) { 3899 /* Push HW header into same page as first protocol header. */ 3900 push_ok = true; 3901 /* ... but TSO always needs a separate element for headers: */ 3902 if (skb_is_gso(skb)) 3903 __elements = 1 + qeth_count_elements(skb, proto_len); 3904 else 3905 __elements = qeth_count_elements(skb, 0); 3906 } else if (!proto_len && PAGE_ALIGNED(skb->data)) { 3907 /* Push HW header into preceding page, flush with skb->data. */ 3908 push_ok = true; 3909 __elements = 1 + qeth_count_elements(skb, 0); 3910 } else { 3911 /* Use header cache, copy protocol headers up. */ 3912 push_ok = false; 3913 __elements = 1 + qeth_count_elements(skb, proto_len); 3914 } 3915 3916 /* Compress skb to fit into one IO buffer: */ 3917 if (__elements > max_elements) { 3918 if (!skb_is_nonlinear(skb)) { 3919 /* Drop it, no easy way of shrinking it further. */ 3920 QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n", 3921 max_elements, __elements, skb->len); 3922 return -E2BIG; 3923 } 3924 3925 rc = skb_linearize(skb); 3926 if (rc) { 3927 QETH_TXQ_STAT_INC(queue, skbs_linearized_fail); 3928 return rc; 3929 } 3930 3931 QETH_TXQ_STAT_INC(queue, skbs_linearized); 3932 /* Linearization changed the layout, re-evaluate: */ 3933 goto check_layout; 3934 } 3935 3936 *elements = __elements; 3937 /* Add the header: */ 3938 if (push_ok) { 3939 *hdr = skb_push(skb, hdr_len); 3940 return hdr_len; 3941 } 3942 3943 /* Fall back to cache element with known-good alignment: */ 3944 if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE) 3945 return -E2BIG; 3946 *hdr = kmem_cache_alloc(qeth_core_header_cache, gfp); 3947 if (!*hdr) 3948 return -ENOMEM; 3949 /* Copy protocol headers behind HW header: */ 3950 skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len); 3951 return 0; 3952 } 3953 3954 static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue, 3955 struct sk_buff *curr_skb, 3956 struct qeth_hdr *curr_hdr) 3957 { 3958 struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start]; 3959 struct qeth_hdr *prev_hdr = queue->prev_hdr; 3960 3961 if (!prev_hdr) 3962 return true; 3963 3964 /* All packets must have the same target: */ 3965 if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { 3966 struct sk_buff *prev_skb = skb_peek(&buffer->skb_list); 3967 3968 return ether_addr_equal(eth_hdr(prev_skb)->h_dest, 3969 eth_hdr(curr_skb)->h_dest) && 3970 qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2); 3971 } 3972 3973 return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) && 3974 qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3); 3975 } 3976 3977 /** 3978 * qeth_fill_buffer() - map skb into an output buffer 3979 * @buf: buffer to transport the skb 3980 * @skb: skb to map into the buffer 3981 * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated 3982 * from qeth_core_header_cache. 3983 * @offset: when mapping the skb, start at skb->data + offset 3984 * @hd_len: if > 0, build a dedicated header element of this size 3985 */ 3986 static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf, 3987 struct sk_buff *skb, struct qeth_hdr *hdr, 3988 unsigned int offset, unsigned int hd_len) 3989 { 3990 struct qdio_buffer *buffer = buf->buffer; 3991 int element = buf->next_element_to_fill; 3992 int length = skb_headlen(skb) - offset; 3993 char *data = skb->data + offset; 3994 unsigned int elem_length, cnt; 3995 bool is_first_elem = true; 3996 3997 __skb_queue_tail(&buf->skb_list, skb); 3998 3999 /* build dedicated element for HW Header */ 4000 if (hd_len) { 4001 is_first_elem = false; 4002 4003 buffer->element[element].addr = virt_to_phys(hdr); 4004 buffer->element[element].length = hd_len; 4005 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; 4006 4007 /* HW header is allocated from cache: */ 4008 if ((void *)hdr != skb->data) 4009 __set_bit(element, buf->from_kmem_cache); 4010 /* HW header was pushed and is contiguous with linear part: */ 4011 else if (length > 0 && !PAGE_ALIGNED(data) && 4012 (data == (char *)hdr + hd_len)) 4013 buffer->element[element].eflags |= 4014 SBAL_EFLAGS_CONTIGUOUS; 4015 4016 element++; 4017 } 4018 4019 /* map linear part into buffer element(s) */ 4020 while (length > 0) { 4021 elem_length = min_t(unsigned int, length, 4022 PAGE_SIZE - offset_in_page(data)); 4023 4024 buffer->element[element].addr = virt_to_phys(data); 4025 buffer->element[element].length = elem_length; 4026 length -= elem_length; 4027 if (is_first_elem) { 4028 is_first_elem = false; 4029 if (length || skb_is_nonlinear(skb)) 4030 /* skb needs additional elements */ 4031 buffer->element[element].eflags = 4032 SBAL_EFLAGS_FIRST_FRAG; 4033 else 4034 buffer->element[element].eflags = 0; 4035 } else { 4036 buffer->element[element].eflags = 4037 SBAL_EFLAGS_MIDDLE_FRAG; 4038 } 4039 4040 data += elem_length; 4041 element++; 4042 } 4043 4044 /* map page frags into buffer element(s) */ 4045 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { 4046 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; 4047 4048 data = skb_frag_address(frag); 4049 length = skb_frag_size(frag); 4050 while (length > 0) { 4051 elem_length = min_t(unsigned int, length, 4052 PAGE_SIZE - offset_in_page(data)); 4053 4054 buffer->element[element].addr = virt_to_phys(data); 4055 buffer->element[element].length = elem_length; 4056 buffer->element[element].eflags = 4057 SBAL_EFLAGS_MIDDLE_FRAG; 4058 4059 length -= elem_length; 4060 data += elem_length; 4061 element++; 4062 } 4063 } 4064 4065 if (buffer->element[element - 1].eflags) 4066 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG; 4067 buf->next_element_to_fill = element; 4068 return element; 4069 } 4070 4071 static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue, 4072 struct sk_buff *skb, unsigned int elements, 4073 struct qeth_hdr *hdr, unsigned int offset, 4074 unsigned int hd_len) 4075 { 4076 unsigned int bytes = qdisc_pkt_len(skb); 4077 struct qeth_qdio_out_buffer *buffer; 4078 unsigned int next_element; 4079 struct netdev_queue *txq; 4080 bool stopped = false; 4081 bool flush; 4082 4083 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)]; 4084 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb)); 4085 4086 /* Just a sanity check, the wake/stop logic should ensure that we always 4087 * get a free buffer. 4088 */ 4089 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) 4090 return -EBUSY; 4091 4092 flush = !qeth_iqd_may_bulk(queue, skb, hdr); 4093 4094 if (flush || 4095 (buffer->next_element_to_fill + elements > queue->max_elements)) { 4096 if (buffer->next_element_to_fill > 0) { 4097 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4098 queue->bulk_count++; 4099 } 4100 4101 if (queue->bulk_count >= queue->bulk_max) 4102 flush = true; 4103 4104 if (flush) 4105 qeth_flush_queue(queue); 4106 4107 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + 4108 queue->bulk_count)]; 4109 4110 /* Sanity-check again: */ 4111 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) 4112 return -EBUSY; 4113 } 4114 4115 if (buffer->next_element_to_fill == 0 && 4116 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { 4117 /* If a TX completion happens right _here_ and misses to wake 4118 * the txq, then our re-check below will catch the race. 4119 */ 4120 QETH_TXQ_STAT_INC(queue, stopped); 4121 netif_tx_stop_queue(txq); 4122 stopped = true; 4123 } 4124 4125 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len); 4126 buffer->bytes += bytes; 4127 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; 4128 queue->prev_hdr = hdr; 4129 4130 flush = __netdev_tx_sent_queue(txq, bytes, 4131 !stopped && netdev_xmit_more()); 4132 4133 if (flush || next_element >= queue->max_elements) { 4134 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4135 queue->bulk_count++; 4136 4137 if (queue->bulk_count >= queue->bulk_max) 4138 flush = true; 4139 4140 if (flush) 4141 qeth_flush_queue(queue); 4142 } 4143 4144 if (stopped && !qeth_out_queue_is_full(queue)) 4145 netif_tx_start_queue(txq); 4146 return 0; 4147 } 4148 4149 static int qeth_do_send_packet(struct qeth_card *card, 4150 struct qeth_qdio_out_q *queue, 4151 struct sk_buff *skb, struct qeth_hdr *hdr, 4152 unsigned int offset, unsigned int hd_len, 4153 unsigned int elements_needed) 4154 { 4155 unsigned int start_index = queue->next_buf_to_fill; 4156 struct qeth_qdio_out_buffer *buffer; 4157 unsigned int next_element; 4158 struct netdev_queue *txq; 4159 bool stopped = false; 4160 int flush_count = 0; 4161 int do_pack = 0; 4162 int rc = 0; 4163 4164 buffer = queue->bufs[queue->next_buf_to_fill]; 4165 4166 /* Just a sanity check, the wake/stop logic should ensure that we always 4167 * get a free buffer. 4168 */ 4169 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) 4170 return -EBUSY; 4171 4172 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb)); 4173 4174 /* check if we need to switch packing state of this queue */ 4175 qeth_switch_to_packing_if_needed(queue); 4176 if (queue->do_pack) { 4177 do_pack = 1; 4178 /* does packet fit in current buffer? */ 4179 if (buffer->next_element_to_fill + elements_needed > 4180 queue->max_elements) { 4181 /* ... no -> set state PRIMED */ 4182 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4183 flush_count++; 4184 queue->next_buf_to_fill = 4185 QDIO_BUFNR(queue->next_buf_to_fill + 1); 4186 buffer = queue->bufs[queue->next_buf_to_fill]; 4187 4188 /* We stepped forward, so sanity-check again: */ 4189 if (atomic_read(&buffer->state) != 4190 QETH_QDIO_BUF_EMPTY) { 4191 qeth_flush_buffers(queue, start_index, 4192 flush_count); 4193 rc = -EBUSY; 4194 goto out; 4195 } 4196 } 4197 } 4198 4199 if (buffer->next_element_to_fill == 0 && 4200 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { 4201 /* If a TX completion happens right _here_ and misses to wake 4202 * the txq, then our re-check below will catch the race. 4203 */ 4204 QETH_TXQ_STAT_INC(queue, stopped); 4205 netif_tx_stop_queue(txq); 4206 stopped = true; 4207 } 4208 4209 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len); 4210 buffer->bytes += qdisc_pkt_len(skb); 4211 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; 4212 4213 if (queue->do_pack) 4214 QETH_TXQ_STAT_INC(queue, skbs_pack); 4215 if (!queue->do_pack || stopped || next_element >= queue->max_elements) { 4216 flush_count++; 4217 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4218 queue->next_buf_to_fill = 4219 QDIO_BUFNR(queue->next_buf_to_fill + 1); 4220 } 4221 4222 if (flush_count) 4223 qeth_flush_buffers(queue, start_index, flush_count); 4224 4225 out: 4226 if (do_pack) 4227 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count); 4228 4229 if (stopped && !qeth_out_queue_is_full(queue)) 4230 netif_tx_start_queue(txq); 4231 return rc; 4232 } 4233 4234 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, 4235 unsigned int payload_len, struct sk_buff *skb, 4236 unsigned int proto_len) 4237 { 4238 struct qeth_hdr_ext_tso *ext = &hdr->ext; 4239 4240 ext->hdr_tot_len = sizeof(*ext); 4241 ext->imb_hdr_no = 1; 4242 ext->hdr_type = 1; 4243 ext->hdr_version = 1; 4244 ext->hdr_len = 28; 4245 ext->payload_len = payload_len; 4246 ext->mss = skb_shinfo(skb)->gso_size; 4247 ext->dg_hdr_len = proto_len; 4248 } 4249 4250 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, 4251 struct qeth_qdio_out_q *queue, __be16 proto, 4252 void (*fill_header)(struct qeth_qdio_out_q *queue, 4253 struct qeth_hdr *hdr, struct sk_buff *skb, 4254 __be16 proto, unsigned int data_len)) 4255 { 4256 unsigned int proto_len, hw_hdr_len; 4257 unsigned int frame_len = skb->len; 4258 bool is_tso = skb_is_gso(skb); 4259 unsigned int data_offset = 0; 4260 struct qeth_hdr *hdr = NULL; 4261 unsigned int hd_len = 0; 4262 unsigned int elements; 4263 int push_len, rc; 4264 4265 if (is_tso) { 4266 hw_hdr_len = sizeof(struct qeth_hdr_tso); 4267 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 4268 } else { 4269 hw_hdr_len = sizeof(struct qeth_hdr); 4270 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0; 4271 } 4272 4273 rc = skb_cow_head(skb, hw_hdr_len); 4274 if (rc) 4275 return rc; 4276 4277 push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len, 4278 &elements); 4279 if (push_len < 0) 4280 return push_len; 4281 if (is_tso || !push_len) { 4282 /* HW header needs its own buffer element. */ 4283 hd_len = hw_hdr_len + proto_len; 4284 data_offset = push_len + proto_len; 4285 } 4286 memset(hdr, 0, hw_hdr_len); 4287 fill_header(queue, hdr, skb, proto, frame_len); 4288 if (is_tso) 4289 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr, 4290 frame_len - proto_len, skb, proto_len); 4291 4292 if (IS_IQD(card)) { 4293 rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset, 4294 hd_len); 4295 } else { 4296 /* TODO: drop skb_orphan() once TX completion is fast enough */ 4297 skb_orphan(skb); 4298 spin_lock(&queue->lock); 4299 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset, 4300 hd_len, elements); 4301 spin_unlock(&queue->lock); 4302 } 4303 4304 if (rc && !push_len) 4305 kmem_cache_free(qeth_core_header_cache, hdr); 4306 4307 return rc; 4308 } 4309 EXPORT_SYMBOL_GPL(qeth_xmit); 4310 4311 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, 4312 struct qeth_reply *reply, unsigned long data) 4313 { 4314 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4315 struct qeth_ipacmd_setadpparms *setparms; 4316 4317 QETH_CARD_TEXT(card, 4, "prmadpcb"); 4318 4319 setparms = &(cmd->data.setadapterparms); 4320 if (qeth_setadpparms_inspect_rc(cmd)) { 4321 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code); 4322 setparms->data.mode = SET_PROMISC_MODE_OFF; 4323 } 4324 card->info.promisc_mode = setparms->data.mode; 4325 return (cmd->hdr.return_code) ? -EIO : 0; 4326 } 4327 4328 void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable) 4329 { 4330 enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON : 4331 SET_PROMISC_MODE_OFF; 4332 struct qeth_cmd_buffer *iob; 4333 struct qeth_ipa_cmd *cmd; 4334 4335 QETH_CARD_TEXT(card, 4, "setprom"); 4336 QETH_CARD_TEXT_(card, 4, "mode:%x", mode); 4337 4338 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, 4339 SETADP_DATA_SIZEOF(mode)); 4340 if (!iob) 4341 return; 4342 cmd = __ipa_cmd(iob); 4343 cmd->data.setadapterparms.data.mode = mode; 4344 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); 4345 } 4346 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode); 4347 4348 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, 4349 struct qeth_reply *reply, unsigned long data) 4350 { 4351 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4352 struct qeth_ipacmd_setadpparms *adp_cmd; 4353 4354 QETH_CARD_TEXT(card, 4, "chgmaccb"); 4355 if (qeth_setadpparms_inspect_rc(cmd)) 4356 return -EIO; 4357 4358 adp_cmd = &cmd->data.setadapterparms; 4359 if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr)) 4360 return -EADDRNOTAVAIL; 4361 4362 if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) && 4363 !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC)) 4364 return -EADDRNOTAVAIL; 4365 4366 eth_hw_addr_set(card->dev, adp_cmd->data.change_addr.addr); 4367 return 0; 4368 } 4369 4370 int qeth_setadpparms_change_macaddr(struct qeth_card *card) 4371 { 4372 int rc; 4373 struct qeth_cmd_buffer *iob; 4374 struct qeth_ipa_cmd *cmd; 4375 4376 QETH_CARD_TEXT(card, 4, "chgmac"); 4377 4378 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, 4379 SETADP_DATA_SIZEOF(change_addr)); 4380 if (!iob) 4381 return -ENOMEM; 4382 cmd = __ipa_cmd(iob); 4383 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; 4384 cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN; 4385 ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr, 4386 card->dev->dev_addr); 4387 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb, 4388 NULL); 4389 return rc; 4390 } 4391 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); 4392 4393 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, 4394 struct qeth_reply *reply, unsigned long data) 4395 { 4396 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4397 struct qeth_set_access_ctrl *access_ctrl_req; 4398 4399 QETH_CARD_TEXT(card, 4, "setaccb"); 4400 4401 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; 4402 QETH_CARD_TEXT_(card, 2, "rc=%d", 4403 cmd->data.setadapterparms.hdr.return_code); 4404 if (cmd->data.setadapterparms.hdr.return_code != 4405 SET_ACCESS_CTRL_RC_SUCCESS) 4406 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n", 4407 access_ctrl_req->subcmd_code, CARD_DEVID(card), 4408 cmd->data.setadapterparms.hdr.return_code); 4409 switch (qeth_setadpparms_inspect_rc(cmd)) { 4410 case SET_ACCESS_CTRL_RC_SUCCESS: 4411 if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE) 4412 dev_info(&card->gdev->dev, 4413 "QDIO data connection isolation is deactivated\n"); 4414 else 4415 dev_info(&card->gdev->dev, 4416 "QDIO data connection isolation is activated\n"); 4417 return 0; 4418 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED: 4419 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n", 4420 CARD_DEVID(card)); 4421 return 0; 4422 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED: 4423 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n", 4424 CARD_DEVID(card)); 4425 return 0; 4426 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED: 4427 dev_err(&card->gdev->dev, "Adapter does not " 4428 "support QDIO data connection isolation\n"); 4429 return -EOPNOTSUPP; 4430 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER: 4431 dev_err(&card->gdev->dev, 4432 "Adapter is dedicated. " 4433 "QDIO data connection isolation not supported\n"); 4434 return -EOPNOTSUPP; 4435 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF: 4436 dev_err(&card->gdev->dev, 4437 "TSO does not permit QDIO data connection isolation\n"); 4438 return -EPERM; 4439 case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED: 4440 dev_err(&card->gdev->dev, "The adjacent switch port does not " 4441 "support reflective relay mode\n"); 4442 return -EOPNOTSUPP; 4443 case SET_ACCESS_CTRL_RC_REFLREL_FAILED: 4444 dev_err(&card->gdev->dev, "The reflective relay mode cannot be " 4445 "enabled at the adjacent switch port"); 4446 return -EREMOTEIO; 4447 case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED: 4448 dev_warn(&card->gdev->dev, "Turning off reflective relay mode " 4449 "at the adjacent switch failed\n"); 4450 /* benign error while disabling ISOLATION_MODE_FWD */ 4451 return 0; 4452 default: 4453 return -EIO; 4454 } 4455 } 4456 4457 int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, 4458 enum qeth_ipa_isolation_modes mode) 4459 { 4460 int rc; 4461 struct qeth_cmd_buffer *iob; 4462 struct qeth_ipa_cmd *cmd; 4463 struct qeth_set_access_ctrl *access_ctrl_req; 4464 4465 QETH_CARD_TEXT(card, 4, "setacctl"); 4466 4467 if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { 4468 dev_err(&card->gdev->dev, 4469 "Adapter does not support QDIO data connection isolation\n"); 4470 return -EOPNOTSUPP; 4471 } 4472 4473 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, 4474 SETADP_DATA_SIZEOF(set_access_ctrl)); 4475 if (!iob) 4476 return -ENOMEM; 4477 cmd = __ipa_cmd(iob); 4478 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; 4479 access_ctrl_req->subcmd_code = mode; 4480 4481 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb, 4482 NULL); 4483 if (rc) { 4484 QETH_CARD_TEXT_(card, 2, "rc=%d", rc); 4485 QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n", 4486 rc, CARD_DEVID(card)); 4487 } 4488 4489 return rc; 4490 } 4491 4492 void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue) 4493 { 4494 struct qeth_card *card; 4495 4496 card = dev->ml_priv; 4497 QETH_CARD_TEXT(card, 4, "txtimeo"); 4498 qeth_schedule_recovery(card); 4499 } 4500 EXPORT_SYMBOL_GPL(qeth_tx_timeout); 4501 4502 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) 4503 { 4504 struct qeth_card *card = dev->ml_priv; 4505 int rc = 0; 4506 4507 switch (regnum) { 4508 case MII_BMCR: /* Basic mode control register */ 4509 rc = BMCR_FULLDPLX; 4510 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) && 4511 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) && 4512 (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH)) 4513 rc |= BMCR_SPEED100; 4514 break; 4515 case MII_BMSR: /* Basic mode status register */ 4516 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS | 4517 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL | 4518 BMSR_100BASE4; 4519 break; 4520 case MII_PHYSID1: /* PHYS ID 1 */ 4521 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) | 4522 dev->dev_addr[2]; 4523 rc = (rc >> 5) & 0xFFFF; 4524 break; 4525 case MII_PHYSID2: /* PHYS ID 2 */ 4526 rc = (dev->dev_addr[2] << 10) & 0xFFFF; 4527 break; 4528 case MII_ADVERTISE: /* Advertisement control reg */ 4529 rc = ADVERTISE_ALL; 4530 break; 4531 case MII_LPA: /* Link partner ability reg */ 4532 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL | 4533 LPA_100BASE4 | LPA_LPACK; 4534 break; 4535 case MII_EXPANSION: /* Expansion register */ 4536 break; 4537 case MII_DCOUNTER: /* disconnect counter */ 4538 break; 4539 case MII_FCSCOUNTER: /* false carrier counter */ 4540 break; 4541 case MII_NWAYTEST: /* N-way auto-neg test register */ 4542 break; 4543 case MII_RERRCOUNTER: /* rx error counter */ 4544 rc = card->stats.rx_length_errors + 4545 card->stats.rx_frame_errors + 4546 card->stats.rx_fifo_errors; 4547 break; 4548 case MII_SREVISION: /* silicon revision */ 4549 break; 4550 case MII_RESV1: /* reserved 1 */ 4551 break; 4552 case MII_LBRERROR: /* loopback, rx, bypass error */ 4553 break; 4554 case MII_PHYADDR: /* physical address */ 4555 break; 4556 case MII_RESV2: /* reserved 2 */ 4557 break; 4558 case MII_TPISTATUS: /* TPI status for 10mbps */ 4559 break; 4560 case MII_NCONFIG: /* network interface config */ 4561 break; 4562 default: 4563 break; 4564 } 4565 return rc; 4566 } 4567 4568 static int qeth_snmp_command_cb(struct qeth_card *card, 4569 struct qeth_reply *reply, unsigned long data) 4570 { 4571 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4572 struct qeth_arp_query_info *qinfo = reply->param; 4573 struct qeth_ipacmd_setadpparms *adp_cmd; 4574 unsigned int data_len; 4575 void *snmp_data; 4576 4577 QETH_CARD_TEXT(card, 3, "snpcmdcb"); 4578 4579 if (cmd->hdr.return_code) { 4580 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code); 4581 return -EIO; 4582 } 4583 if (cmd->data.setadapterparms.hdr.return_code) { 4584 cmd->hdr.return_code = 4585 cmd->data.setadapterparms.hdr.return_code; 4586 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code); 4587 return -EIO; 4588 } 4589 4590 adp_cmd = &cmd->data.setadapterparms; 4591 data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr); 4592 if (adp_cmd->hdr.seq_no == 1) { 4593 snmp_data = &adp_cmd->data.snmp; 4594 } else { 4595 snmp_data = &adp_cmd->data.snmp.request; 4596 data_len -= offsetof(struct qeth_snmp_cmd, request); 4597 } 4598 4599 /* check if there is enough room in userspace */ 4600 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { 4601 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC); 4602 return -ENOSPC; 4603 } 4604 QETH_CARD_TEXT_(card, 4, "snore%i", 4605 cmd->data.setadapterparms.hdr.used_total); 4606 QETH_CARD_TEXT_(card, 4, "sseqn%i", 4607 cmd->data.setadapterparms.hdr.seq_no); 4608 /*copy entries to user buffer*/ 4609 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len); 4610 qinfo->udata_offset += data_len; 4611 4612 if (cmd->data.setadapterparms.hdr.seq_no < 4613 cmd->data.setadapterparms.hdr.used_total) 4614 return 1; 4615 return 0; 4616 } 4617 4618 static int qeth_snmp_command(struct qeth_card *card, char __user *udata) 4619 { 4620 struct qeth_snmp_ureq __user *ureq; 4621 struct qeth_cmd_buffer *iob; 4622 unsigned int req_len; 4623 struct qeth_arp_query_info qinfo = {0, }; 4624 int rc = 0; 4625 4626 QETH_CARD_TEXT(card, 3, "snmpcmd"); 4627 4628 if (IS_VM_NIC(card)) 4629 return -EOPNOTSUPP; 4630 4631 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && 4632 IS_LAYER3(card)) 4633 return -EOPNOTSUPP; 4634 4635 ureq = (struct qeth_snmp_ureq __user *) udata; 4636 if (get_user(qinfo.udata_len, &ureq->hdr.data_len) || 4637 get_user(req_len, &ureq->hdr.req_len)) 4638 return -EFAULT; 4639 4640 /* Sanitize user input, to avoid overflows in iob size calculation: */ 4641 if (req_len > QETH_BUFSIZE) 4642 return -EINVAL; 4643 4644 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len); 4645 if (!iob) 4646 return -ENOMEM; 4647 4648 if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp, 4649 &ureq->cmd, req_len)) { 4650 qeth_put_cmd(iob); 4651 return -EFAULT; 4652 } 4653 4654 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); 4655 if (!qinfo.udata) { 4656 qeth_put_cmd(iob); 4657 return -ENOMEM; 4658 } 4659 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr); 4660 4661 rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo); 4662 if (rc) 4663 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n", 4664 CARD_DEVID(card), rc); 4665 else { 4666 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) 4667 rc = -EFAULT; 4668 } 4669 4670 kfree(qinfo.udata); 4671 return rc; 4672 } 4673 4674 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, 4675 struct qeth_reply *reply, 4676 unsigned long data) 4677 { 4678 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4679 struct qeth_qoat_priv *priv = reply->param; 4680 int resdatalen; 4681 4682 QETH_CARD_TEXT(card, 3, "qoatcb"); 4683 if (qeth_setadpparms_inspect_rc(cmd)) 4684 return -EIO; 4685 4686 resdatalen = cmd->data.setadapterparms.hdr.cmdlength; 4687 4688 if (resdatalen > (priv->buffer_len - priv->response_len)) 4689 return -ENOSPC; 4690 4691 memcpy(priv->buffer + priv->response_len, 4692 &cmd->data.setadapterparms.hdr, resdatalen); 4693 priv->response_len += resdatalen; 4694 4695 if (cmd->data.setadapterparms.hdr.seq_no < 4696 cmd->data.setadapterparms.hdr.used_total) 4697 return 1; 4698 return 0; 4699 } 4700 4701 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) 4702 { 4703 int rc = 0; 4704 struct qeth_cmd_buffer *iob; 4705 struct qeth_ipa_cmd *cmd; 4706 struct qeth_query_oat *oat_req; 4707 struct qeth_query_oat_data oat_data; 4708 struct qeth_qoat_priv priv; 4709 void __user *tmp; 4710 4711 QETH_CARD_TEXT(card, 3, "qoatcmd"); 4712 4713 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) 4714 return -EOPNOTSUPP; 4715 4716 if (copy_from_user(&oat_data, udata, sizeof(oat_data))) 4717 return -EFAULT; 4718 4719 priv.buffer_len = oat_data.buffer_len; 4720 priv.response_len = 0; 4721 priv.buffer = vzalloc(oat_data.buffer_len); 4722 if (!priv.buffer) 4723 return -ENOMEM; 4724 4725 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, 4726 SETADP_DATA_SIZEOF(query_oat)); 4727 if (!iob) { 4728 rc = -ENOMEM; 4729 goto out_free; 4730 } 4731 cmd = __ipa_cmd(iob); 4732 oat_req = &cmd->data.setadapterparms.data.query_oat; 4733 oat_req->subcmd_code = oat_data.command; 4734 4735 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv); 4736 if (!rc) { 4737 tmp = is_compat_task() ? compat_ptr(oat_data.ptr) : 4738 u64_to_user_ptr(oat_data.ptr); 4739 oat_data.response_len = priv.response_len; 4740 4741 if (copy_to_user(tmp, priv.buffer, priv.response_len) || 4742 copy_to_user(udata, &oat_data, sizeof(oat_data))) 4743 rc = -EFAULT; 4744 } 4745 4746 out_free: 4747 vfree(priv.buffer); 4748 return rc; 4749 } 4750 4751 static int qeth_query_card_info_cb(struct qeth_card *card, 4752 struct qeth_reply *reply, unsigned long data) 4753 { 4754 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4755 struct qeth_link_info *link_info = reply->param; 4756 struct qeth_query_card_info *card_info; 4757 4758 QETH_CARD_TEXT(card, 2, "qcrdincb"); 4759 if (qeth_setadpparms_inspect_rc(cmd)) 4760 return -EIO; 4761 4762 card_info = &cmd->data.setadapterparms.data.card_info; 4763 netdev_dbg(card->dev, 4764 "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n", 4765 card_info->card_type, card_info->port_mode, 4766 card_info->port_speed); 4767 4768 switch (card_info->port_mode) { 4769 case CARD_INFO_PORTM_FULLDUPLEX: 4770 link_info->duplex = DUPLEX_FULL; 4771 break; 4772 case CARD_INFO_PORTM_HALFDUPLEX: 4773 link_info->duplex = DUPLEX_HALF; 4774 break; 4775 default: 4776 link_info->duplex = DUPLEX_UNKNOWN; 4777 } 4778 4779 switch (card_info->card_type) { 4780 case CARD_INFO_TYPE_1G_COPPER_A: 4781 case CARD_INFO_TYPE_1G_COPPER_B: 4782 link_info->speed = SPEED_1000; 4783 link_info->port = PORT_TP; 4784 break; 4785 case CARD_INFO_TYPE_1G_FIBRE_A: 4786 case CARD_INFO_TYPE_1G_FIBRE_B: 4787 link_info->speed = SPEED_1000; 4788 link_info->port = PORT_FIBRE; 4789 break; 4790 case CARD_INFO_TYPE_10G_FIBRE_A: 4791 case CARD_INFO_TYPE_10G_FIBRE_B: 4792 link_info->speed = SPEED_10000; 4793 link_info->port = PORT_FIBRE; 4794 break; 4795 default: 4796 switch (card_info->port_speed) { 4797 case CARD_INFO_PORTS_10M: 4798 link_info->speed = SPEED_10; 4799 break; 4800 case CARD_INFO_PORTS_100M: 4801 link_info->speed = SPEED_100; 4802 break; 4803 case CARD_INFO_PORTS_1G: 4804 link_info->speed = SPEED_1000; 4805 break; 4806 case CARD_INFO_PORTS_10G: 4807 link_info->speed = SPEED_10000; 4808 break; 4809 case CARD_INFO_PORTS_25G: 4810 link_info->speed = SPEED_25000; 4811 break; 4812 default: 4813 link_info->speed = SPEED_UNKNOWN; 4814 } 4815 4816 link_info->port = PORT_OTHER; 4817 } 4818 4819 return 0; 4820 } 4821 4822 int qeth_query_card_info(struct qeth_card *card, 4823 struct qeth_link_info *link_info) 4824 { 4825 struct qeth_cmd_buffer *iob; 4826 4827 QETH_CARD_TEXT(card, 2, "qcrdinfo"); 4828 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO)) 4829 return -EOPNOTSUPP; 4830 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0); 4831 if (!iob) 4832 return -ENOMEM; 4833 4834 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info); 4835 } 4836 4837 static int qeth_init_link_info_oat_cb(struct qeth_card *card, 4838 struct qeth_reply *reply_priv, 4839 unsigned long data) 4840 { 4841 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4842 struct qeth_link_info *link_info = reply_priv->param; 4843 struct qeth_query_oat_physical_if *phys_if; 4844 struct qeth_query_oat_reply *reply; 4845 4846 if (qeth_setadpparms_inspect_rc(cmd)) 4847 return -EIO; 4848 4849 /* Multi-part reply is unexpected, don't bother: */ 4850 if (cmd->data.setadapterparms.hdr.used_total > 1) 4851 return -EINVAL; 4852 4853 /* Expect the reply to start with phys_if data: */ 4854 reply = &cmd->data.setadapterparms.data.query_oat.reply[0]; 4855 if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF || 4856 reply->length < sizeof(*reply)) 4857 return -EINVAL; 4858 4859 phys_if = &reply->phys_if; 4860 4861 switch (phys_if->speed_duplex) { 4862 case QETH_QOAT_PHYS_SPEED_10M_HALF: 4863 link_info->speed = SPEED_10; 4864 link_info->duplex = DUPLEX_HALF; 4865 break; 4866 case QETH_QOAT_PHYS_SPEED_10M_FULL: 4867 link_info->speed = SPEED_10; 4868 link_info->duplex = DUPLEX_FULL; 4869 break; 4870 case QETH_QOAT_PHYS_SPEED_100M_HALF: 4871 link_info->speed = SPEED_100; 4872 link_info->duplex = DUPLEX_HALF; 4873 break; 4874 case QETH_QOAT_PHYS_SPEED_100M_FULL: 4875 link_info->speed = SPEED_100; 4876 link_info->duplex = DUPLEX_FULL; 4877 break; 4878 case QETH_QOAT_PHYS_SPEED_1000M_HALF: 4879 link_info->speed = SPEED_1000; 4880 link_info->duplex = DUPLEX_HALF; 4881 break; 4882 case QETH_QOAT_PHYS_SPEED_1000M_FULL: 4883 link_info->speed = SPEED_1000; 4884 link_info->duplex = DUPLEX_FULL; 4885 break; 4886 case QETH_QOAT_PHYS_SPEED_10G_FULL: 4887 link_info->speed = SPEED_10000; 4888 link_info->duplex = DUPLEX_FULL; 4889 break; 4890 case QETH_QOAT_PHYS_SPEED_25G_FULL: 4891 link_info->speed = SPEED_25000; 4892 link_info->duplex = DUPLEX_FULL; 4893 break; 4894 case QETH_QOAT_PHYS_SPEED_UNKNOWN: 4895 default: 4896 link_info->speed = SPEED_UNKNOWN; 4897 link_info->duplex = DUPLEX_UNKNOWN; 4898 break; 4899 } 4900 4901 switch (phys_if->media_type) { 4902 case QETH_QOAT_PHYS_MEDIA_COPPER: 4903 link_info->port = PORT_TP; 4904 link_info->link_mode = QETH_LINK_MODE_UNKNOWN; 4905 break; 4906 case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT: 4907 link_info->port = PORT_FIBRE; 4908 link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT; 4909 break; 4910 case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG: 4911 link_info->port = PORT_FIBRE; 4912 link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG; 4913 break; 4914 default: 4915 link_info->port = PORT_OTHER; 4916 link_info->link_mode = QETH_LINK_MODE_UNKNOWN; 4917 break; 4918 } 4919 4920 return 0; 4921 } 4922 4923 static void qeth_init_link_info(struct qeth_card *card) 4924 { 4925 card->info.link_info.duplex = DUPLEX_FULL; 4926 4927 if (IS_IQD(card) || IS_VM_NIC(card)) { 4928 card->info.link_info.speed = SPEED_10000; 4929 card->info.link_info.port = PORT_FIBRE; 4930 card->info.link_info.link_mode = QETH_LINK_MODE_FIBRE_SHORT; 4931 } else { 4932 switch (card->info.link_type) { 4933 case QETH_LINK_TYPE_FAST_ETH: 4934 case QETH_LINK_TYPE_LANE_ETH100: 4935 card->info.link_info.speed = SPEED_100; 4936 card->info.link_info.port = PORT_TP; 4937 break; 4938 case QETH_LINK_TYPE_GBIT_ETH: 4939 case QETH_LINK_TYPE_LANE_ETH1000: 4940 card->info.link_info.speed = SPEED_1000; 4941 card->info.link_info.port = PORT_FIBRE; 4942 break; 4943 case QETH_LINK_TYPE_10GBIT_ETH: 4944 card->info.link_info.speed = SPEED_10000; 4945 card->info.link_info.port = PORT_FIBRE; 4946 break; 4947 case QETH_LINK_TYPE_25GBIT_ETH: 4948 card->info.link_info.speed = SPEED_25000; 4949 card->info.link_info.port = PORT_FIBRE; 4950 break; 4951 default: 4952 dev_info(&card->gdev->dev, "Unknown link type %x\n", 4953 card->info.link_type); 4954 card->info.link_info.speed = SPEED_UNKNOWN; 4955 card->info.link_info.port = PORT_OTHER; 4956 } 4957 4958 card->info.link_info.link_mode = QETH_LINK_MODE_UNKNOWN; 4959 } 4960 4961 /* Get more accurate data via QUERY OAT: */ 4962 if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) { 4963 struct qeth_link_info link_info; 4964 struct qeth_cmd_buffer *iob; 4965 4966 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, 4967 SETADP_DATA_SIZEOF(query_oat)); 4968 if (iob) { 4969 struct qeth_ipa_cmd *cmd = __ipa_cmd(iob); 4970 struct qeth_query_oat *oat_req; 4971 4972 oat_req = &cmd->data.setadapterparms.data.query_oat; 4973 oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE; 4974 4975 if (!qeth_send_ipa_cmd(card, iob, 4976 qeth_init_link_info_oat_cb, 4977 &link_info)) { 4978 if (link_info.speed != SPEED_UNKNOWN) 4979 card->info.link_info.speed = link_info.speed; 4980 if (link_info.duplex != DUPLEX_UNKNOWN) 4981 card->info.link_info.duplex = link_info.duplex; 4982 if (link_info.port != PORT_OTHER) 4983 card->info.link_info.port = link_info.port; 4984 if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN) 4985 card->info.link_info.link_mode = link_info.link_mode; 4986 } 4987 } 4988 } 4989 } 4990 4991 /** 4992 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address 4993 * @card: pointer to a qeth_card 4994 * 4995 * Returns 4996 * 0, if a MAC address has been set for the card's netdevice 4997 * a return code, for various error conditions 4998 */ 4999 int qeth_vm_request_mac(struct qeth_card *card) 5000 { 5001 struct diag26c_mac_resp *response; 5002 struct diag26c_mac_req *request; 5003 int rc; 5004 5005 QETH_CARD_TEXT(card, 2, "vmreqmac"); 5006 5007 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA); 5008 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA); 5009 if (!request || !response) { 5010 rc = -ENOMEM; 5011 goto out; 5012 } 5013 5014 request->resp_buf_len = sizeof(*response); 5015 request->resp_version = DIAG26C_VERSION2; 5016 request->op_code = DIAG26C_GET_MAC; 5017 request->devno = card->info.ddev_devno; 5018 5019 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 5020 rc = diag26c(request, response, DIAG26C_MAC_SERVICES); 5021 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 5022 if (rc) 5023 goto out; 5024 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response)); 5025 5026 if (request->resp_buf_len < sizeof(*response) || 5027 response->version != request->resp_version) { 5028 rc = -EIO; 5029 QETH_CARD_TEXT(card, 2, "badresp"); 5030 QETH_CARD_HEX(card, 2, &request->resp_buf_len, 5031 sizeof(request->resp_buf_len)); 5032 } else if (!is_valid_ether_addr(response->mac)) { 5033 rc = -EINVAL; 5034 QETH_CARD_TEXT(card, 2, "badmac"); 5035 QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN); 5036 } else { 5037 eth_hw_addr_set(card->dev, response->mac); 5038 } 5039 5040 out: 5041 kfree(response); 5042 kfree(request); 5043 return rc; 5044 } 5045 EXPORT_SYMBOL_GPL(qeth_vm_request_mac); 5046 5047 static void qeth_determine_capabilities(struct qeth_card *card) 5048 { 5049 struct qeth_channel *channel = &card->data; 5050 struct ccw_device *ddev = channel->ccwdev; 5051 int rc; 5052 int ddev_offline = 0; 5053 5054 QETH_CARD_TEXT(card, 2, "detcapab"); 5055 if (!ddev->online) { 5056 ddev_offline = 1; 5057 rc = qeth_start_channel(channel); 5058 if (rc) { 5059 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 5060 goto out; 5061 } 5062 } 5063 5064 rc = qeth_read_conf_data(card); 5065 if (rc) { 5066 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n", 5067 CARD_DEVID(card), rc); 5068 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 5069 goto out_offline; 5070 } 5071 5072 rc = qdio_get_ssqd_desc(ddev, &card->ssqd); 5073 if (rc) 5074 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 5075 5076 QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt); 5077 QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1); 5078 QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2); 5079 QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3); 5080 QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt); 5081 if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) || 5082 ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) || 5083 ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) { 5084 dev_info(&card->gdev->dev, 5085 "Completion Queueing supported\n"); 5086 } else { 5087 card->options.cq = QETH_CQ_NOTAVAILABLE; 5088 } 5089 5090 out_offline: 5091 if (ddev_offline == 1) 5092 qeth_stop_channel(channel); 5093 out: 5094 return; 5095 } 5096 5097 static void qeth_read_ccw_conf_data(struct qeth_card *card) 5098 { 5099 struct qeth_card_info *info = &card->info; 5100 struct ccw_device *cdev = CARD_DDEV(card); 5101 struct ccw_dev_id dev_id; 5102 5103 QETH_CARD_TEXT(card, 2, "ccwconfd"); 5104 ccw_device_get_id(cdev, &dev_id); 5105 5106 info->ddev_devno = dev_id.devno; 5107 info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) && 5108 !ccw_device_get_iid(cdev, &info->iid) && 5109 !ccw_device_get_chid(cdev, 0, &info->chid); 5110 info->ssid = dev_id.ssid; 5111 5112 dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n", 5113 info->chid, info->chpid); 5114 5115 QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno); 5116 QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid); 5117 QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid); 5118 QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid); 5119 QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid); 5120 QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid); 5121 QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid); 5122 } 5123 5124 static int qeth_qdio_establish(struct qeth_card *card) 5125 { 5126 struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES]; 5127 struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES]; 5128 struct qeth_qib_parms *qib_parms = NULL; 5129 struct qdio_initialize init_data; 5130 unsigned int no_input_qs = 1; 5131 unsigned int i; 5132 int rc = 0; 5133 5134 QETH_CARD_TEXT(card, 2, "qdioest"); 5135 5136 if (!IS_IQD(card) && !IS_VM_NIC(card)) { 5137 qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL); 5138 if (!qib_parms) 5139 return -ENOMEM; 5140 5141 qeth_fill_qib_parms(card, qib_parms); 5142 } 5143 5144 in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs; 5145 if (card->options.cq == QETH_CQ_ENABLED) { 5146 in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs; 5147 no_input_qs++; 5148 } 5149 5150 for (i = 0; i < card->qdio.no_out_queues; i++) 5151 out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs; 5152 5153 memset(&init_data, 0, sizeof(struct qdio_initialize)); 5154 init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT : 5155 QDIO_QETH_QFMT; 5156 init_data.qib_param_field_format = 0; 5157 init_data.qib_param_field = (void *)qib_parms; 5158 init_data.no_input_qs = no_input_qs; 5159 init_data.no_output_qs = card->qdio.no_out_queues; 5160 init_data.input_handler = qeth_qdio_input_handler; 5161 init_data.output_handler = qeth_qdio_output_handler; 5162 init_data.irq_poll = qeth_qdio_poll; 5163 init_data.int_parm = (unsigned long) card; 5164 init_data.input_sbal_addr_array = in_sbal_ptrs; 5165 init_data.output_sbal_addr_array = out_sbal_ptrs; 5166 5167 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, 5168 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) { 5169 rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs, 5170 init_data.no_output_qs); 5171 if (rc) { 5172 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 5173 goto out; 5174 } 5175 rc = qdio_establish(CARD_DDEV(card), &init_data); 5176 if (rc) { 5177 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 5178 qdio_free(CARD_DDEV(card)); 5179 } 5180 } 5181 5182 switch (card->options.cq) { 5183 case QETH_CQ_ENABLED: 5184 dev_info(&card->gdev->dev, "Completion Queue support enabled"); 5185 break; 5186 case QETH_CQ_DISABLED: 5187 dev_info(&card->gdev->dev, "Completion Queue support disabled"); 5188 break; 5189 default: 5190 break; 5191 } 5192 5193 out: 5194 kfree(qib_parms); 5195 return rc; 5196 } 5197 5198 static void qeth_core_free_card(struct qeth_card *card) 5199 { 5200 QETH_CARD_TEXT(card, 2, "freecrd"); 5201 5202 unregister_service_level(&card->qeth_service_level); 5203 debugfs_remove_recursive(card->debugfs); 5204 qeth_put_cmd(card->read_cmd); 5205 destroy_workqueue(card->event_wq); 5206 dev_set_drvdata(&card->gdev->dev, NULL); 5207 kfree(card); 5208 } 5209 5210 static void qeth_trace_features(struct qeth_card *card) 5211 { 5212 QETH_CARD_TEXT(card, 2, "features"); 5213 QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4)); 5214 QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6)); 5215 QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp)); 5216 QETH_CARD_HEX(card, 2, &card->info.diagass_support, 5217 sizeof(card->info.diagass_support)); 5218 } 5219 5220 static struct ccw_device_id qeth_ids[] = { 5221 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01), 5222 .driver_info = QETH_CARD_TYPE_OSD}, 5223 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05), 5224 .driver_info = QETH_CARD_TYPE_IQD}, 5225 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03), 5226 .driver_info = QETH_CARD_TYPE_OSM}, 5227 #ifdef CONFIG_QETH_OSX 5228 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02), 5229 .driver_info = QETH_CARD_TYPE_OSX}, 5230 #endif 5231 {}, 5232 }; 5233 MODULE_DEVICE_TABLE(ccw, qeth_ids); 5234 5235 static struct ccw_driver qeth_ccw_driver = { 5236 .driver = { 5237 .owner = THIS_MODULE, 5238 .name = "qeth", 5239 }, 5240 .ids = qeth_ids, 5241 .probe = ccwgroup_probe_ccwdev, 5242 .remove = ccwgroup_remove_ccwdev, 5243 }; 5244 5245 static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok) 5246 { 5247 int retries = 3; 5248 int rc; 5249 5250 QETH_CARD_TEXT(card, 2, "hrdsetup"); 5251 atomic_set(&card->force_alloc_skb, 0); 5252 rc = qeth_update_from_chp_desc(card); 5253 if (rc) 5254 return rc; 5255 retry: 5256 if (retries < 3) 5257 QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n", 5258 CARD_DEVID(card)); 5259 rc = qeth_qdio_clear_card(card, !IS_IQD(card)); 5260 qeth_stop_channel(&card->data); 5261 qeth_stop_channel(&card->write); 5262 qeth_stop_channel(&card->read); 5263 qdio_free(CARD_DDEV(card)); 5264 5265 rc = qeth_start_channel(&card->read); 5266 if (rc) 5267 goto retriable; 5268 rc = qeth_start_channel(&card->write); 5269 if (rc) 5270 goto retriable; 5271 rc = qeth_start_channel(&card->data); 5272 if (rc) 5273 goto retriable; 5274 retriable: 5275 if (rc == -ERESTARTSYS) { 5276 QETH_CARD_TEXT(card, 2, "break1"); 5277 return rc; 5278 } else if (rc) { 5279 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 5280 if (--retries < 0) 5281 goto out; 5282 else 5283 goto retry; 5284 } 5285 5286 qeth_determine_capabilities(card); 5287 qeth_read_ccw_conf_data(card); 5288 qeth_idx_init(card); 5289 5290 rc = qeth_idx_activate_read_channel(card); 5291 if (rc == -EINTR) { 5292 QETH_CARD_TEXT(card, 2, "break2"); 5293 return rc; 5294 } else if (rc) { 5295 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 5296 if (--retries < 0) 5297 goto out; 5298 else 5299 goto retry; 5300 } 5301 5302 rc = qeth_idx_activate_write_channel(card); 5303 if (rc == -EINTR) { 5304 QETH_CARD_TEXT(card, 2, "break3"); 5305 return rc; 5306 } else if (rc) { 5307 QETH_CARD_TEXT_(card, 2, "4err%d", rc); 5308 if (--retries < 0) 5309 goto out; 5310 else 5311 goto retry; 5312 } 5313 card->read_or_write_problem = 0; 5314 rc = qeth_mpc_initialize(card); 5315 if (rc) { 5316 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 5317 goto out; 5318 } 5319 5320 rc = qeth_send_startlan(card); 5321 if (rc) { 5322 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 5323 if (rc == -ENETDOWN) { 5324 dev_warn(&card->gdev->dev, "The LAN is offline\n"); 5325 *carrier_ok = false; 5326 } else { 5327 goto out; 5328 } 5329 } else { 5330 *carrier_ok = true; 5331 } 5332 5333 card->options.ipa4.supported = 0; 5334 card->options.ipa6.supported = 0; 5335 card->options.adp.supported = 0; 5336 card->options.sbp.supported_funcs = 0; 5337 card->info.diagass_support = 0; 5338 rc = qeth_query_ipassists(card, QETH_PROT_IPV4); 5339 if (rc == -ENOMEM) 5340 goto out; 5341 if (qeth_is_supported(card, IPA_IPV6)) { 5342 rc = qeth_query_ipassists(card, QETH_PROT_IPV6); 5343 if (rc == -ENOMEM) 5344 goto out; 5345 } 5346 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { 5347 rc = qeth_query_setadapterparms(card); 5348 if (rc < 0) { 5349 QETH_CARD_TEXT_(card, 2, "7err%d", rc); 5350 goto out; 5351 } 5352 } 5353 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { 5354 rc = qeth_query_setdiagass(card); 5355 if (rc) 5356 QETH_CARD_TEXT_(card, 2, "8err%d", rc); 5357 } 5358 5359 qeth_trace_features(card); 5360 5361 if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) || 5362 (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))) 5363 card->info.hwtrap = 0; 5364 5365 if (card->options.isolation != ISOLATION_MODE_NONE) { 5366 rc = qeth_setadpparms_set_access_ctrl(card, 5367 card->options.isolation); 5368 if (rc) 5369 goto out; 5370 } 5371 5372 qeth_init_link_info(card); 5373 5374 rc = qeth_init_qdio_queues(card); 5375 if (rc) { 5376 QETH_CARD_TEXT_(card, 2, "9err%d", rc); 5377 goto out; 5378 } 5379 5380 return 0; 5381 out: 5382 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " 5383 "an error on the device\n"); 5384 QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n", 5385 CARD_DEVID(card), rc); 5386 return rc; 5387 } 5388 5389 static int qeth_set_online(struct qeth_card *card, 5390 const struct qeth_discipline *disc) 5391 { 5392 bool carrier_ok; 5393 int rc; 5394 5395 mutex_lock(&card->conf_mutex); 5396 QETH_CARD_TEXT(card, 2, "setonlin"); 5397 5398 rc = qeth_hardsetup_card(card, &carrier_ok); 5399 if (rc) { 5400 QETH_CARD_TEXT_(card, 2, "2err%04x", rc); 5401 rc = -ENODEV; 5402 goto err_hardsetup; 5403 } 5404 5405 qeth_print_status_message(card); 5406 5407 if (card->dev->reg_state != NETREG_REGISTERED) 5408 /* no need for locking / error handling at this early stage: */ 5409 qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card)); 5410 5411 rc = disc->set_online(card, carrier_ok); 5412 if (rc) 5413 goto err_online; 5414 5415 /* let user_space know that device is online */ 5416 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE); 5417 5418 mutex_unlock(&card->conf_mutex); 5419 return 0; 5420 5421 err_online: 5422 err_hardsetup: 5423 qeth_qdio_clear_card(card, 0); 5424 qeth_clear_working_pool_list(card); 5425 qeth_flush_local_addrs(card); 5426 5427 qeth_stop_channel(&card->data); 5428 qeth_stop_channel(&card->write); 5429 qeth_stop_channel(&card->read); 5430 qdio_free(CARD_DDEV(card)); 5431 5432 mutex_unlock(&card->conf_mutex); 5433 return rc; 5434 } 5435 5436 int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc, 5437 bool resetting) 5438 { 5439 int rc, rc2, rc3; 5440 5441 mutex_lock(&card->conf_mutex); 5442 QETH_CARD_TEXT(card, 3, "setoffl"); 5443 5444 if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) { 5445 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); 5446 card->info.hwtrap = 1; 5447 } 5448 5449 /* cancel any stalled cmd that might block the rtnl: */ 5450 qeth_clear_ipacmd_list(card); 5451 5452 rtnl_lock(); 5453 card->info.open_when_online = card->dev->flags & IFF_UP; 5454 dev_close(card->dev); 5455 netif_device_detach(card->dev); 5456 netif_carrier_off(card->dev); 5457 rtnl_unlock(); 5458 5459 cancel_work_sync(&card->rx_mode_work); 5460 5461 disc->set_offline(card); 5462 5463 qeth_qdio_clear_card(card, 0); 5464 qeth_drain_output_queues(card); 5465 qeth_clear_working_pool_list(card); 5466 qeth_flush_local_addrs(card); 5467 card->info.promisc_mode = 0; 5468 5469 rc = qeth_stop_channel(&card->data); 5470 rc2 = qeth_stop_channel(&card->write); 5471 rc3 = qeth_stop_channel(&card->read); 5472 if (!rc) 5473 rc = (rc2) ? rc2 : rc3; 5474 if (rc) 5475 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 5476 qdio_free(CARD_DDEV(card)); 5477 5478 /* let user_space know that device is offline */ 5479 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE); 5480 5481 mutex_unlock(&card->conf_mutex); 5482 return 0; 5483 } 5484 EXPORT_SYMBOL_GPL(qeth_set_offline); 5485 5486 static int qeth_do_reset(void *data) 5487 { 5488 const struct qeth_discipline *disc; 5489 struct qeth_card *card = data; 5490 int rc; 5491 5492 /* Lock-free, other users will block until we are done. */ 5493 disc = card->discipline; 5494 5495 QETH_CARD_TEXT(card, 2, "recover1"); 5496 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) 5497 return 0; 5498 QETH_CARD_TEXT(card, 2, "recover2"); 5499 dev_warn(&card->gdev->dev, 5500 "A recovery process has been started for the device\n"); 5501 5502 qeth_set_offline(card, disc, true); 5503 rc = qeth_set_online(card, disc); 5504 if (!rc) { 5505 dev_info(&card->gdev->dev, 5506 "Device successfully recovered!\n"); 5507 } else { 5508 qeth_set_offline(card, disc, true); 5509 ccwgroup_set_offline(card->gdev, false); 5510 dev_warn(&card->gdev->dev, 5511 "The qeth device driver failed to recover an error on the device\n"); 5512 } 5513 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); 5514 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); 5515 return 0; 5516 } 5517 5518 #if IS_ENABLED(CONFIG_QETH_L3) 5519 static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, 5520 struct qeth_hdr *hdr) 5521 { 5522 struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data; 5523 struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3; 5524 struct net_device *dev = skb->dev; 5525 5526 if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) { 5527 dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr, 5528 "FAKELL", skb->len); 5529 return; 5530 } 5531 5532 if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) { 5533 u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 : 5534 ETH_P_IP; 5535 unsigned char tg_addr[ETH_ALEN]; 5536 5537 skb_reset_network_header(skb); 5538 switch (l3_hdr->flags & QETH_HDR_CAST_MASK) { 5539 case QETH_CAST_MULTICAST: 5540 if (prot == ETH_P_IP) 5541 ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr); 5542 else 5543 ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr); 5544 QETH_CARD_STAT_INC(card, rx_multicast); 5545 break; 5546 case QETH_CAST_BROADCAST: 5547 ether_addr_copy(tg_addr, dev->broadcast); 5548 QETH_CARD_STAT_INC(card, rx_multicast); 5549 break; 5550 default: 5551 if (card->options.sniffer) 5552 skb->pkt_type = PACKET_OTHERHOST; 5553 ether_addr_copy(tg_addr, dev->dev_addr); 5554 } 5555 5556 if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR) 5557 dev_hard_header(skb, dev, prot, tg_addr, 5558 &l3_hdr->next_hop.rx.src_mac, skb->len); 5559 else 5560 dev_hard_header(skb, dev, prot, tg_addr, "FAKELL", 5561 skb->len); 5562 } 5563 5564 /* copy VLAN tag from hdr into skb */ 5565 if (!card->options.sniffer && 5566 (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME | 5567 QETH_HDR_EXT_INCLUDE_VLAN_TAG))) { 5568 u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ? 5569 l3_hdr->vlan_id : 5570 l3_hdr->next_hop.rx.vlan_id; 5571 5572 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); 5573 } 5574 } 5575 #endif 5576 5577 static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb, 5578 struct qeth_hdr *hdr, bool uses_frags) 5579 { 5580 struct napi_struct *napi = &card->napi; 5581 bool is_cso; 5582 5583 switch (hdr->hdr.l2.id) { 5584 #if IS_ENABLED(CONFIG_QETH_L3) 5585 case QETH_HEADER_TYPE_LAYER3: 5586 qeth_l3_rebuild_skb(card, skb, hdr); 5587 is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ; 5588 break; 5589 #endif 5590 case QETH_HEADER_TYPE_LAYER2: 5591 is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ; 5592 break; 5593 default: 5594 /* never happens */ 5595 if (uses_frags) 5596 napi_free_frags(napi); 5597 else 5598 kfree_skb(skb); 5599 return; 5600 } 5601 5602 if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) { 5603 skb->ip_summed = CHECKSUM_UNNECESSARY; 5604 QETH_CARD_STAT_INC(card, rx_skb_csum); 5605 } else { 5606 skb->ip_summed = CHECKSUM_NONE; 5607 } 5608 5609 QETH_CARD_STAT_ADD(card, rx_bytes, skb->len); 5610 QETH_CARD_STAT_INC(card, rx_packets); 5611 if (skb_is_nonlinear(skb)) { 5612 QETH_CARD_STAT_INC(card, rx_sg_skbs); 5613 QETH_CARD_STAT_ADD(card, rx_sg_frags, 5614 skb_shinfo(skb)->nr_frags); 5615 } 5616 5617 if (uses_frags) { 5618 napi_gro_frags(napi); 5619 } else { 5620 skb->protocol = eth_type_trans(skb, skb->dev); 5621 napi_gro_receive(napi, skb); 5622 } 5623 } 5624 5625 static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len) 5626 { 5627 struct page *page = virt_to_page(data); 5628 unsigned int next_frag; 5629 5630 next_frag = skb_shinfo(skb)->nr_frags; 5631 get_page(page); 5632 skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len, 5633 data_len); 5634 } 5635 5636 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) 5637 { 5638 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY); 5639 } 5640 5641 static int qeth_extract_skb(struct qeth_card *card, 5642 struct qeth_qdio_buffer *qethbuffer, u8 *element_no, 5643 int *__offset) 5644 { 5645 struct qeth_priv *priv = netdev_priv(card->dev); 5646 struct qdio_buffer *buffer = qethbuffer->buffer; 5647 struct napi_struct *napi = &card->napi; 5648 struct qdio_buffer_element *element; 5649 unsigned int linear_len = 0; 5650 bool uses_frags = false; 5651 int offset = *__offset; 5652 bool use_rx_sg = false; 5653 unsigned int headroom; 5654 struct qeth_hdr *hdr; 5655 struct sk_buff *skb; 5656 int skb_len = 0; 5657 5658 element = &buffer->element[*element_no]; 5659 5660 next_packet: 5661 /* qeth_hdr must not cross element boundaries */ 5662 while (element->length < offset + sizeof(struct qeth_hdr)) { 5663 if (qeth_is_last_sbale(element)) 5664 return -ENODATA; 5665 element++; 5666 offset = 0; 5667 } 5668 5669 hdr = phys_to_virt(element->addr) + offset; 5670 offset += sizeof(*hdr); 5671 skb = NULL; 5672 5673 switch (hdr->hdr.l2.id) { 5674 case QETH_HEADER_TYPE_LAYER2: 5675 skb_len = hdr->hdr.l2.pkt_length; 5676 linear_len = ETH_HLEN; 5677 headroom = 0; 5678 break; 5679 case QETH_HEADER_TYPE_LAYER3: 5680 skb_len = hdr->hdr.l3.length; 5681 if (!IS_LAYER3(card)) { 5682 QETH_CARD_STAT_INC(card, rx_dropped_notsupp); 5683 goto walk_packet; 5684 } 5685 5686 if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) { 5687 linear_len = ETH_HLEN; 5688 headroom = 0; 5689 break; 5690 } 5691 5692 if (hdr->hdr.l3.flags & QETH_HDR_IPV6) 5693 linear_len = sizeof(struct ipv6hdr); 5694 else 5695 linear_len = sizeof(struct iphdr); 5696 headroom = ETH_HLEN; 5697 break; 5698 default: 5699 if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL) 5700 QETH_CARD_STAT_INC(card, rx_frame_errors); 5701 else 5702 QETH_CARD_STAT_INC(card, rx_dropped_notsupp); 5703 5704 /* Can't determine packet length, drop the whole buffer. */ 5705 return -EPROTONOSUPPORT; 5706 } 5707 5708 if (skb_len < linear_len) { 5709 QETH_CARD_STAT_INC(card, rx_dropped_runt); 5710 goto walk_packet; 5711 } 5712 5713 use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) || 5714 (skb_len > READ_ONCE(priv->rx_copybreak) && 5715 !atomic_read(&card->force_alloc_skb)); 5716 5717 if (use_rx_sg) { 5718 /* QETH_CQ_ENABLED only: */ 5719 if (qethbuffer->rx_skb && 5720 skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) { 5721 skb = qethbuffer->rx_skb; 5722 qethbuffer->rx_skb = NULL; 5723 goto use_skb; 5724 } 5725 5726 skb = napi_get_frags(napi); 5727 if (!skb) { 5728 /* -ENOMEM, no point in falling back further. */ 5729 QETH_CARD_STAT_INC(card, rx_dropped_nomem); 5730 goto walk_packet; 5731 } 5732 5733 if (skb_tailroom(skb) >= linear_len + headroom) { 5734 uses_frags = true; 5735 goto use_skb; 5736 } 5737 5738 netdev_info_once(card->dev, 5739 "Insufficient linear space in NAPI frags skb, need %u but have %u\n", 5740 linear_len + headroom, skb_tailroom(skb)); 5741 /* Shouldn't happen. Don't optimize, fall back to linear skb. */ 5742 } 5743 5744 linear_len = skb_len; 5745 skb = napi_alloc_skb(napi, linear_len + headroom); 5746 if (!skb) { 5747 QETH_CARD_STAT_INC(card, rx_dropped_nomem); 5748 goto walk_packet; 5749 } 5750 5751 use_skb: 5752 if (headroom) 5753 skb_reserve(skb, headroom); 5754 walk_packet: 5755 while (skb_len) { 5756 int data_len = min(skb_len, (int)(element->length - offset)); 5757 char *data = phys_to_virt(element->addr) + offset; 5758 5759 skb_len -= data_len; 5760 offset += data_len; 5761 5762 /* Extract data from current element: */ 5763 if (skb && data_len) { 5764 if (linear_len) { 5765 unsigned int copy_len; 5766 5767 copy_len = min_t(unsigned int, linear_len, 5768 data_len); 5769 5770 skb_put_data(skb, data, copy_len); 5771 linear_len -= copy_len; 5772 data_len -= copy_len; 5773 data += copy_len; 5774 } 5775 5776 if (data_len) 5777 qeth_create_skb_frag(skb, data, data_len); 5778 } 5779 5780 /* Step forward to next element: */ 5781 if (skb_len) { 5782 if (qeth_is_last_sbale(element)) { 5783 QETH_CARD_TEXT(card, 4, "unexeob"); 5784 QETH_CARD_HEX(card, 2, buffer, sizeof(void *)); 5785 if (skb) { 5786 if (uses_frags) 5787 napi_free_frags(napi); 5788 else 5789 kfree_skb(skb); 5790 QETH_CARD_STAT_INC(card, 5791 rx_length_errors); 5792 } 5793 return -EMSGSIZE; 5794 } 5795 element++; 5796 offset = 0; 5797 } 5798 } 5799 5800 /* This packet was skipped, go get another one: */ 5801 if (!skb) 5802 goto next_packet; 5803 5804 *element_no = element - &buffer->element[0]; 5805 *__offset = offset; 5806 5807 qeth_receive_skb(card, skb, hdr, uses_frags); 5808 return 0; 5809 } 5810 5811 static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget, 5812 struct qeth_qdio_buffer *buf, bool *done) 5813 { 5814 unsigned int work_done = 0; 5815 5816 while (budget) { 5817 if (qeth_extract_skb(card, buf, &card->rx.buf_element, 5818 &card->rx.e_offset)) { 5819 *done = true; 5820 break; 5821 } 5822 5823 work_done++; 5824 budget--; 5825 } 5826 5827 return work_done; 5828 } 5829 5830 static unsigned int qeth_rx_poll(struct qeth_card *card, int budget) 5831 { 5832 struct qeth_rx *ctx = &card->rx; 5833 unsigned int work_done = 0; 5834 5835 while (budget > 0) { 5836 struct qeth_qdio_buffer *buffer; 5837 unsigned int skbs_done = 0; 5838 bool done = false; 5839 5840 /* Fetch completed RX buffers: */ 5841 if (!card->rx.b_count) { 5842 card->rx.qdio_err = 0; 5843 card->rx.b_count = qdio_inspect_queue(CARD_DDEV(card), 5844 0, true, 5845 &card->rx.b_index, 5846 &card->rx.qdio_err); 5847 if (card->rx.b_count <= 0) { 5848 card->rx.b_count = 0; 5849 break; 5850 } 5851 } 5852 5853 /* Process one completed RX buffer: */ 5854 buffer = &card->qdio.in_q->bufs[card->rx.b_index]; 5855 if (!(card->rx.qdio_err && 5856 qeth_check_qdio_errors(card, buffer->buffer, 5857 card->rx.qdio_err, "qinerr"))) 5858 skbs_done = qeth_extract_skbs(card, budget, buffer, 5859 &done); 5860 else 5861 done = true; 5862 5863 work_done += skbs_done; 5864 budget -= skbs_done; 5865 5866 if (done) { 5867 QETH_CARD_STAT_INC(card, rx_bufs); 5868 qeth_put_buffer_pool_entry(card, buffer->pool_entry); 5869 buffer->pool_entry = NULL; 5870 card->rx.b_count--; 5871 ctx->bufs_refill++; 5872 ctx->bufs_refill -= qeth_rx_refill_queue(card, 5873 ctx->bufs_refill); 5874 5875 /* Step forward to next buffer: */ 5876 card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1); 5877 card->rx.buf_element = 0; 5878 card->rx.e_offset = 0; 5879 } 5880 } 5881 5882 return work_done; 5883 } 5884 5885 static void qeth_cq_poll(struct qeth_card *card) 5886 { 5887 unsigned int work_done = 0; 5888 5889 while (work_done < QDIO_MAX_BUFFERS_PER_Q) { 5890 unsigned int start, error; 5891 int completed; 5892 5893 completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start, 5894 &error); 5895 if (completed <= 0) 5896 return; 5897 5898 qeth_qdio_cq_handler(card, error, 1, start, completed); 5899 work_done += completed; 5900 } 5901 } 5902 5903 int qeth_poll(struct napi_struct *napi, int budget) 5904 { 5905 struct qeth_card *card = container_of(napi, struct qeth_card, napi); 5906 unsigned int work_done; 5907 5908 work_done = qeth_rx_poll(card, budget); 5909 5910 if (qeth_use_tx_irqs(card)) { 5911 struct qeth_qdio_out_q *queue; 5912 unsigned int i; 5913 5914 qeth_for_each_output_queue(card, queue, i) { 5915 if (!qeth_out_queue_is_empty(queue)) 5916 napi_schedule(&queue->napi); 5917 } 5918 } 5919 5920 if (card->options.cq == QETH_CQ_ENABLED) 5921 qeth_cq_poll(card); 5922 5923 if (budget) { 5924 struct qeth_rx *ctx = &card->rx; 5925 5926 /* Process any substantial refill backlog: */ 5927 ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill); 5928 5929 /* Exhausted the RX budget. Keep IRQ disabled, we get called again. */ 5930 if (work_done >= budget) 5931 return work_done; 5932 } 5933 5934 if (napi_complete_done(napi, work_done) && 5935 qdio_start_irq(CARD_DDEV(card))) 5936 napi_schedule(napi); 5937 5938 return work_done; 5939 } 5940 EXPORT_SYMBOL_GPL(qeth_poll); 5941 5942 static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue, 5943 unsigned int bidx, unsigned int qdio_error, 5944 int budget) 5945 { 5946 struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx]; 5947 u8 sflags = buffer->buffer->element[15].sflags; 5948 struct qeth_card *card = queue->card; 5949 bool error = !!qdio_error; 5950 5951 if (qdio_error == QDIO_ERROR_SLSB_PENDING) { 5952 struct qaob *aob = buffer->aob; 5953 struct qeth_qaob_priv1 *priv; 5954 enum iucv_tx_notify notify; 5955 5956 if (!aob) { 5957 netdev_WARN_ONCE(card->dev, 5958 "Pending TX buffer %#x without QAOB on TX queue %u\n", 5959 bidx, queue->queue_no); 5960 qeth_schedule_recovery(card); 5961 return; 5962 } 5963 5964 QETH_CARD_TEXT_(card, 5, "pel%u", bidx); 5965 5966 priv = (struct qeth_qaob_priv1 *)&aob->user1; 5967 /* QAOB hasn't completed yet: */ 5968 if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) { 5969 qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING); 5970 5971 /* Prepare the queue slot for immediate re-use: */ 5972 qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements); 5973 if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) { 5974 QETH_CARD_TEXT(card, 2, "outofbuf"); 5975 qeth_schedule_recovery(card); 5976 } 5977 5978 list_add(&buffer->list_entry, &queue->pending_bufs); 5979 /* Skip clearing the buffer: */ 5980 return; 5981 } 5982 5983 /* QAOB already completed: */ 5984 notify = qeth_compute_cq_notification(aob->aorc, 0); 5985 qeth_notify_skbs(queue, buffer, notify); 5986 error = !!aob->aorc; 5987 memset(aob, 0, sizeof(*aob)); 5988 } else if (card->options.cq == QETH_CQ_ENABLED) { 5989 qeth_notify_skbs(queue, buffer, 5990 qeth_compute_cq_notification(sflags, 0)); 5991 } 5992 5993 qeth_clear_output_buffer(queue, buffer, error, budget); 5994 } 5995 5996 static int qeth_tx_poll(struct napi_struct *napi, int budget) 5997 { 5998 struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi); 5999 unsigned int queue_no = queue->queue_no; 6000 struct qeth_card *card = queue->card; 6001 struct net_device *dev = card->dev; 6002 unsigned int work_done = 0; 6003 struct netdev_queue *txq; 6004 6005 if (IS_IQD(card)) 6006 txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no)); 6007 else 6008 txq = netdev_get_tx_queue(dev, queue_no); 6009 6010 while (1) { 6011 unsigned int start, error, i; 6012 unsigned int packets = 0; 6013 unsigned int bytes = 0; 6014 int completed; 6015 6016 qeth_tx_complete_pending_bufs(card, queue, false, budget); 6017 6018 if (qeth_out_queue_is_empty(queue)) { 6019 napi_complete(napi); 6020 return 0; 6021 } 6022 6023 /* Give the CPU a breather: */ 6024 if (work_done >= QDIO_MAX_BUFFERS_PER_Q) { 6025 QETH_TXQ_STAT_INC(queue, completion_yield); 6026 if (napi_complete_done(napi, 0)) 6027 napi_schedule(napi); 6028 return 0; 6029 } 6030 6031 completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false, 6032 &start, &error); 6033 if (completed <= 0) { 6034 /* Ensure we see TX completion for pending work: */ 6035 if (napi_complete_done(napi, 0) && 6036 !atomic_read(&queue->set_pci_flags_count)) 6037 qeth_tx_arm_timer(queue, queue->rescan_usecs); 6038 return 0; 6039 } 6040 6041 for (i = start; i < start + completed; i++) { 6042 struct qeth_qdio_out_buffer *buffer; 6043 unsigned int bidx = QDIO_BUFNR(i); 6044 6045 buffer = queue->bufs[bidx]; 6046 packets += buffer->frames; 6047 bytes += buffer->bytes; 6048 6049 qeth_handle_send_error(card, buffer, error); 6050 if (IS_IQD(card)) 6051 qeth_iqd_tx_complete(queue, bidx, error, budget); 6052 else 6053 qeth_clear_output_buffer(queue, buffer, error, 6054 budget); 6055 } 6056 6057 atomic_sub(completed, &queue->used_buffers); 6058 work_done += completed; 6059 if (IS_IQD(card)) 6060 netdev_tx_completed_queue(txq, packets, bytes); 6061 else 6062 qeth_check_outbound_queue(queue); 6063 6064 /* xmit may have observed the full-condition, but not yet 6065 * stopped the txq. In which case the code below won't trigger. 6066 * So before returning, xmit will re-check the txq's fill level 6067 * and wake it up if needed. 6068 */ 6069 if (netif_tx_queue_stopped(txq) && 6070 !qeth_out_queue_is_full(queue)) 6071 netif_tx_wake_queue(txq); 6072 } 6073 } 6074 6075 static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd) 6076 { 6077 if (!cmd->hdr.return_code) 6078 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 6079 return cmd->hdr.return_code; 6080 } 6081 6082 static int qeth_setassparms_get_caps_cb(struct qeth_card *card, 6083 struct qeth_reply *reply, 6084 unsigned long data) 6085 { 6086 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 6087 struct qeth_ipa_caps *caps = reply->param; 6088 6089 if (qeth_setassparms_inspect_rc(cmd)) 6090 return -EIO; 6091 6092 caps->supported = cmd->data.setassparms.data.caps.supported; 6093 caps->enabled = cmd->data.setassparms.data.caps.enabled; 6094 return 0; 6095 } 6096 6097 int qeth_setassparms_cb(struct qeth_card *card, 6098 struct qeth_reply *reply, unsigned long data) 6099 { 6100 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 6101 6102 QETH_CARD_TEXT(card, 4, "defadpcb"); 6103 6104 if (cmd->hdr.return_code) 6105 return -EIO; 6106 6107 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 6108 if (cmd->hdr.prot_version == QETH_PROT_IPV4) 6109 card->options.ipa4.enabled = cmd->hdr.assists.enabled; 6110 if (cmd->hdr.prot_version == QETH_PROT_IPV6) 6111 card->options.ipa6.enabled = cmd->hdr.assists.enabled; 6112 return 0; 6113 } 6114 EXPORT_SYMBOL_GPL(qeth_setassparms_cb); 6115 6116 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card, 6117 enum qeth_ipa_funcs ipa_func, 6118 u16 cmd_code, 6119 unsigned int data_length, 6120 enum qeth_prot_versions prot) 6121 { 6122 struct qeth_ipacmd_setassparms *setassparms; 6123 struct qeth_ipacmd_setassparms_hdr *hdr; 6124 struct qeth_cmd_buffer *iob; 6125 6126 QETH_CARD_TEXT(card, 4, "getasscm"); 6127 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot, 6128 data_length + 6129 offsetof(struct qeth_ipacmd_setassparms, 6130 data)); 6131 if (!iob) 6132 return NULL; 6133 6134 setassparms = &__ipa_cmd(iob)->data.setassparms; 6135 setassparms->assist_no = ipa_func; 6136 6137 hdr = &setassparms->hdr; 6138 hdr->length = sizeof(*hdr) + data_length; 6139 hdr->command_code = cmd_code; 6140 return iob; 6141 } 6142 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd); 6143 6144 int qeth_send_simple_setassparms_prot(struct qeth_card *card, 6145 enum qeth_ipa_funcs ipa_func, 6146 u16 cmd_code, u32 *data, 6147 enum qeth_prot_versions prot) 6148 { 6149 unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0; 6150 struct qeth_cmd_buffer *iob; 6151 6152 QETH_CARD_TEXT_(card, 4, "simassp%i", prot); 6153 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot); 6154 if (!iob) 6155 return -ENOMEM; 6156 6157 if (data) 6158 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data; 6159 return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL); 6160 } 6161 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot); 6162 6163 static void qeth_unregister_dbf_views(void) 6164 { 6165 int x; 6166 6167 for (x = 0; x < QETH_DBF_INFOS; x++) { 6168 debug_unregister(qeth_dbf[x].id); 6169 qeth_dbf[x].id = NULL; 6170 } 6171 } 6172 6173 void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...) 6174 { 6175 char dbf_txt_buf[32]; 6176 va_list args; 6177 6178 if (!debug_level_enabled(id, level)) 6179 return; 6180 va_start(args, fmt); 6181 vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args); 6182 va_end(args); 6183 debug_text_event(id, level, dbf_txt_buf); 6184 } 6185 EXPORT_SYMBOL_GPL(qeth_dbf_longtext); 6186 6187 static int qeth_register_dbf_views(void) 6188 { 6189 int ret; 6190 int x; 6191 6192 for (x = 0; x < QETH_DBF_INFOS; x++) { 6193 /* register the areas */ 6194 qeth_dbf[x].id = debug_register(qeth_dbf[x].name, 6195 qeth_dbf[x].pages, 6196 qeth_dbf[x].areas, 6197 qeth_dbf[x].len); 6198 if (qeth_dbf[x].id == NULL) { 6199 qeth_unregister_dbf_views(); 6200 return -ENOMEM; 6201 } 6202 6203 /* register a view */ 6204 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view); 6205 if (ret) { 6206 qeth_unregister_dbf_views(); 6207 return ret; 6208 } 6209 6210 /* set a passing level */ 6211 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level); 6212 } 6213 6214 return 0; 6215 } 6216 6217 static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */ 6218 6219 int qeth_setup_discipline(struct qeth_card *card, 6220 enum qeth_discipline_id discipline) 6221 { 6222 int rc; 6223 6224 mutex_lock(&qeth_mod_mutex); 6225 switch (discipline) { 6226 case QETH_DISCIPLINE_LAYER3: 6227 card->discipline = try_then_request_module( 6228 symbol_get(qeth_l3_discipline), "qeth_l3"); 6229 break; 6230 case QETH_DISCIPLINE_LAYER2: 6231 card->discipline = try_then_request_module( 6232 symbol_get(qeth_l2_discipline), "qeth_l2"); 6233 break; 6234 default: 6235 break; 6236 } 6237 mutex_unlock(&qeth_mod_mutex); 6238 6239 if (!card->discipline) { 6240 dev_err(&card->gdev->dev, "There is no kernel module to " 6241 "support discipline %d\n", discipline); 6242 return -EINVAL; 6243 } 6244 6245 rc = card->discipline->setup(card->gdev); 6246 if (rc) { 6247 if (discipline == QETH_DISCIPLINE_LAYER2) 6248 symbol_put(qeth_l2_discipline); 6249 else 6250 symbol_put(qeth_l3_discipline); 6251 card->discipline = NULL; 6252 6253 return rc; 6254 } 6255 6256 card->options.layer = discipline; 6257 return 0; 6258 } 6259 6260 void qeth_remove_discipline(struct qeth_card *card) 6261 { 6262 card->discipline->remove(card->gdev); 6263 6264 if (IS_LAYER2(card)) 6265 symbol_put(qeth_l2_discipline); 6266 else 6267 symbol_put(qeth_l3_discipline); 6268 card->options.layer = QETH_DISCIPLINE_UNDETERMINED; 6269 card->discipline = NULL; 6270 } 6271 6272 static const struct device_type qeth_generic_devtype = { 6273 .name = "qeth_generic", 6274 }; 6275 6276 #define DBF_NAME_LEN 20 6277 6278 struct qeth_dbf_entry { 6279 char dbf_name[DBF_NAME_LEN]; 6280 debug_info_t *dbf_info; 6281 struct list_head dbf_list; 6282 }; 6283 6284 static LIST_HEAD(qeth_dbf_list); 6285 static DEFINE_MUTEX(qeth_dbf_list_mutex); 6286 6287 static debug_info_t *qeth_get_dbf_entry(char *name) 6288 { 6289 struct qeth_dbf_entry *entry; 6290 debug_info_t *rc = NULL; 6291 6292 mutex_lock(&qeth_dbf_list_mutex); 6293 list_for_each_entry(entry, &qeth_dbf_list, dbf_list) { 6294 if (strcmp(entry->dbf_name, name) == 0) { 6295 rc = entry->dbf_info; 6296 break; 6297 } 6298 } 6299 mutex_unlock(&qeth_dbf_list_mutex); 6300 return rc; 6301 } 6302 6303 static int qeth_add_dbf_entry(struct qeth_card *card, char *name) 6304 { 6305 struct qeth_dbf_entry *new_entry; 6306 6307 card->debug = debug_register(name, 2, 1, 8); 6308 if (!card->debug) { 6309 QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf"); 6310 goto err; 6311 } 6312 if (debug_register_view(card->debug, &debug_hex_ascii_view)) 6313 goto err_dbg; 6314 new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL); 6315 if (!new_entry) 6316 goto err_dbg; 6317 strncpy(new_entry->dbf_name, name, DBF_NAME_LEN); 6318 new_entry->dbf_info = card->debug; 6319 mutex_lock(&qeth_dbf_list_mutex); 6320 list_add(&new_entry->dbf_list, &qeth_dbf_list); 6321 mutex_unlock(&qeth_dbf_list_mutex); 6322 6323 return 0; 6324 6325 err_dbg: 6326 debug_unregister(card->debug); 6327 err: 6328 return -ENOMEM; 6329 } 6330 6331 static void qeth_clear_dbf_list(void) 6332 { 6333 struct qeth_dbf_entry *entry, *tmp; 6334 6335 mutex_lock(&qeth_dbf_list_mutex); 6336 list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) { 6337 list_del(&entry->dbf_list); 6338 debug_unregister(entry->dbf_info); 6339 kfree(entry); 6340 } 6341 mutex_unlock(&qeth_dbf_list_mutex); 6342 } 6343 6344 static struct net_device *qeth_alloc_netdev(struct qeth_card *card) 6345 { 6346 struct net_device *dev; 6347 struct qeth_priv *priv; 6348 6349 switch (card->info.type) { 6350 case QETH_CARD_TYPE_IQD: 6351 dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN, 6352 ether_setup, QETH_MAX_OUT_QUEUES, 1); 6353 break; 6354 case QETH_CARD_TYPE_OSM: 6355 dev = alloc_etherdev(sizeof(*priv)); 6356 break; 6357 default: 6358 dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1); 6359 } 6360 6361 if (!dev) 6362 return NULL; 6363 6364 priv = netdev_priv(dev); 6365 priv->rx_copybreak = QETH_RX_COPYBREAK; 6366 priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1; 6367 6368 dev->ml_priv = card; 6369 dev->watchdog_timeo = QETH_TX_TIMEOUT; 6370 dev->min_mtu = 576; 6371 /* initialized when device first goes online: */ 6372 dev->max_mtu = 0; 6373 dev->mtu = 0; 6374 SET_NETDEV_DEV(dev, &card->gdev->dev); 6375 netif_carrier_off(dev); 6376 6377 dev->ethtool_ops = &qeth_ethtool_ops; 6378 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 6379 dev->hw_features |= NETIF_F_SG; 6380 dev->vlan_features |= NETIF_F_SG; 6381 if (IS_IQD(card)) 6382 dev->features |= NETIF_F_SG; 6383 6384 return dev; 6385 } 6386 6387 struct net_device *qeth_clone_netdev(struct net_device *orig) 6388 { 6389 struct net_device *clone = qeth_alloc_netdev(orig->ml_priv); 6390 6391 if (!clone) 6392 return NULL; 6393 6394 clone->dev_port = orig->dev_port; 6395 return clone; 6396 } 6397 6398 static int qeth_core_probe_device(struct ccwgroup_device *gdev) 6399 { 6400 struct qeth_card *card; 6401 struct device *dev; 6402 int rc; 6403 enum qeth_discipline_id enforced_disc; 6404 char dbf_name[DBF_NAME_LEN]; 6405 6406 QETH_DBF_TEXT(SETUP, 2, "probedev"); 6407 6408 dev = &gdev->dev; 6409 if (!get_device(dev)) 6410 return -ENODEV; 6411 6412 QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev)); 6413 6414 card = qeth_alloc_card(gdev); 6415 if (!card) { 6416 QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM); 6417 rc = -ENOMEM; 6418 goto err_dev; 6419 } 6420 6421 snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s", 6422 dev_name(&gdev->dev)); 6423 card->debug = qeth_get_dbf_entry(dbf_name); 6424 if (!card->debug) { 6425 rc = qeth_add_dbf_entry(card, dbf_name); 6426 if (rc) 6427 goto err_card; 6428 } 6429 6430 qeth_setup_card(card); 6431 card->dev = qeth_alloc_netdev(card); 6432 if (!card->dev) { 6433 rc = -ENOMEM; 6434 goto err_card; 6435 } 6436 6437 qeth_determine_capabilities(card); 6438 qeth_set_blkt_defaults(card); 6439 6440 card->qdio.in_q = qeth_alloc_qdio_queue(); 6441 if (!card->qdio.in_q) { 6442 rc = -ENOMEM; 6443 goto err_rx_queue; 6444 } 6445 6446 card->qdio.no_out_queues = card->dev->num_tx_queues; 6447 rc = qeth_update_from_chp_desc(card); 6448 if (rc) 6449 goto err_chp_desc; 6450 6451 gdev->dev.groups = qeth_dev_groups; 6452 6453 enforced_disc = qeth_enforce_discipline(card); 6454 switch (enforced_disc) { 6455 case QETH_DISCIPLINE_UNDETERMINED: 6456 gdev->dev.type = &qeth_generic_devtype; 6457 break; 6458 default: 6459 card->info.layer_enforced = true; 6460 /* It's so early that we don't need the discipline_mutex yet. */ 6461 rc = qeth_setup_discipline(card, enforced_disc); 6462 if (rc) 6463 goto err_setup_disc; 6464 6465 break; 6466 } 6467 6468 return 0; 6469 6470 err_setup_disc: 6471 err_chp_desc: 6472 qeth_free_qdio_queue(card->qdio.in_q); 6473 err_rx_queue: 6474 free_netdev(card->dev); 6475 err_card: 6476 qeth_core_free_card(card); 6477 err_dev: 6478 put_device(dev); 6479 return rc; 6480 } 6481 6482 static void qeth_core_remove_device(struct ccwgroup_device *gdev) 6483 { 6484 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 6485 6486 QETH_CARD_TEXT(card, 2, "removedv"); 6487 6488 mutex_lock(&card->discipline_mutex); 6489 if (card->discipline) 6490 qeth_remove_discipline(card); 6491 mutex_unlock(&card->discipline_mutex); 6492 6493 qeth_free_qdio_queues(card); 6494 6495 qeth_free_qdio_queue(card->qdio.in_q); 6496 free_netdev(card->dev); 6497 qeth_core_free_card(card); 6498 put_device(&gdev->dev); 6499 } 6500 6501 static int qeth_core_set_online(struct ccwgroup_device *gdev) 6502 { 6503 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 6504 int rc = 0; 6505 enum qeth_discipline_id def_discipline; 6506 6507 mutex_lock(&card->discipline_mutex); 6508 if (!card->discipline) { 6509 def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 : 6510 QETH_DISCIPLINE_LAYER2; 6511 rc = qeth_setup_discipline(card, def_discipline); 6512 if (rc) 6513 goto err; 6514 } 6515 6516 rc = qeth_set_online(card, card->discipline); 6517 6518 err: 6519 mutex_unlock(&card->discipline_mutex); 6520 return rc; 6521 } 6522 6523 static int qeth_core_set_offline(struct ccwgroup_device *gdev) 6524 { 6525 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 6526 int rc; 6527 6528 mutex_lock(&card->discipline_mutex); 6529 rc = qeth_set_offline(card, card->discipline, false); 6530 mutex_unlock(&card->discipline_mutex); 6531 6532 return rc; 6533 } 6534 6535 static void qeth_core_shutdown(struct ccwgroup_device *gdev) 6536 { 6537 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 6538 6539 qeth_set_allowed_threads(card, 0, 1); 6540 if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) 6541 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); 6542 qeth_qdio_clear_card(card, 0); 6543 qeth_drain_output_queues(card); 6544 qdio_free(CARD_DDEV(card)); 6545 } 6546 6547 static ssize_t group_store(struct device_driver *ddrv, const char *buf, 6548 size_t count) 6549 { 6550 int err; 6551 6552 err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3, 6553 buf); 6554 6555 return err ? err : count; 6556 } 6557 static DRIVER_ATTR_WO(group); 6558 6559 static struct attribute *qeth_drv_attrs[] = { 6560 &driver_attr_group.attr, 6561 NULL, 6562 }; 6563 static struct attribute_group qeth_drv_attr_group = { 6564 .attrs = qeth_drv_attrs, 6565 }; 6566 static const struct attribute_group *qeth_drv_attr_groups[] = { 6567 &qeth_drv_attr_group, 6568 NULL, 6569 }; 6570 6571 static struct ccwgroup_driver qeth_core_ccwgroup_driver = { 6572 .driver = { 6573 .groups = qeth_drv_attr_groups, 6574 .owner = THIS_MODULE, 6575 .name = "qeth", 6576 }, 6577 .ccw_driver = &qeth_ccw_driver, 6578 .setup = qeth_core_probe_device, 6579 .remove = qeth_core_remove_device, 6580 .set_online = qeth_core_set_online, 6581 .set_offline = qeth_core_set_offline, 6582 .shutdown = qeth_core_shutdown, 6583 }; 6584 6585 int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd) 6586 { 6587 struct qeth_card *card = dev->ml_priv; 6588 int rc = 0; 6589 6590 switch (cmd) { 6591 case SIOC_QETH_ADP_SET_SNMP_CONTROL: 6592 rc = qeth_snmp_command(card, data); 6593 break; 6594 case SIOC_QETH_GET_CARD_TYPE: 6595 if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) && 6596 !IS_VM_NIC(card)) 6597 return 1; 6598 return 0; 6599 case SIOC_QETH_QUERY_OAT: 6600 rc = qeth_query_oat_command(card, data); 6601 break; 6602 default: 6603 rc = -EOPNOTSUPP; 6604 } 6605 if (rc) 6606 QETH_CARD_TEXT_(card, 2, "ioce%x", rc); 6607 return rc; 6608 } 6609 EXPORT_SYMBOL_GPL(qeth_siocdevprivate); 6610 6611 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 6612 { 6613 struct qeth_card *card = dev->ml_priv; 6614 struct mii_ioctl_data *mii_data; 6615 int rc = 0; 6616 6617 switch (cmd) { 6618 case SIOCGMIIPHY: 6619 mii_data = if_mii(rq); 6620 mii_data->phy_id = 0; 6621 break; 6622 case SIOCGMIIREG: 6623 mii_data = if_mii(rq); 6624 if (mii_data->phy_id != 0) 6625 rc = -EINVAL; 6626 else 6627 mii_data->val_out = qeth_mdio_read(dev, 6628 mii_data->phy_id, mii_data->reg_num); 6629 break; 6630 default: 6631 return -EOPNOTSUPP; 6632 } 6633 if (rc) 6634 QETH_CARD_TEXT_(card, 2, "ioce%x", rc); 6635 return rc; 6636 } 6637 EXPORT_SYMBOL_GPL(qeth_do_ioctl); 6638 6639 static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply, 6640 unsigned long data) 6641 { 6642 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 6643 u32 *features = reply->param; 6644 6645 if (qeth_setassparms_inspect_rc(cmd)) 6646 return -EIO; 6647 6648 *features = cmd->data.setassparms.data.flags_32bit; 6649 return 0; 6650 } 6651 6652 static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype, 6653 enum qeth_prot_versions prot) 6654 { 6655 return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP, 6656 NULL, prot); 6657 } 6658 6659 static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype, 6660 enum qeth_prot_versions prot, u8 *lp2lp) 6661 { 6662 u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP; 6663 struct qeth_cmd_buffer *iob; 6664 struct qeth_ipa_caps caps; 6665 u32 features; 6666 int rc; 6667 6668 /* some L3 HW requires combined L3+L4 csum offload: */ 6669 if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 && 6670 cstype == IPA_OUTBOUND_CHECKSUM) 6671 required_features |= QETH_IPA_CHECKSUM_IP_HDR; 6672 6673 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0, 6674 prot); 6675 if (!iob) 6676 return -ENOMEM; 6677 6678 rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features); 6679 if (rc) 6680 return rc; 6681 6682 if ((required_features & features) != required_features) { 6683 qeth_set_csum_off(card, cstype, prot); 6684 return -EOPNOTSUPP; 6685 } 6686 6687 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE, 6688 SETASS_DATA_SIZEOF(flags_32bit), 6689 prot); 6690 if (!iob) { 6691 qeth_set_csum_off(card, cstype, prot); 6692 return -ENOMEM; 6693 } 6694 6695 if (features & QETH_IPA_CHECKSUM_LP2LP) 6696 required_features |= QETH_IPA_CHECKSUM_LP2LP; 6697 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features; 6698 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps); 6699 if (rc) { 6700 qeth_set_csum_off(card, cstype, prot); 6701 return rc; 6702 } 6703 6704 if (!qeth_ipa_caps_supported(&caps, required_features) || 6705 !qeth_ipa_caps_enabled(&caps, required_features)) { 6706 qeth_set_csum_off(card, cstype, prot); 6707 return -EOPNOTSUPP; 6708 } 6709 6710 dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n", 6711 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot); 6712 6713 if (lp2lp) 6714 *lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP); 6715 6716 return 0; 6717 } 6718 6719 static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype, 6720 enum qeth_prot_versions prot, u8 *lp2lp) 6721 { 6722 return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) : 6723 qeth_set_csum_off(card, cstype, prot); 6724 } 6725 6726 static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply, 6727 unsigned long data) 6728 { 6729 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 6730 struct qeth_tso_start_data *tso_data = reply->param; 6731 6732 if (qeth_setassparms_inspect_rc(cmd)) 6733 return -EIO; 6734 6735 tso_data->mss = cmd->data.setassparms.data.tso.mss; 6736 tso_data->supported = cmd->data.setassparms.data.tso.supported; 6737 return 0; 6738 } 6739 6740 static int qeth_set_tso_off(struct qeth_card *card, 6741 enum qeth_prot_versions prot) 6742 { 6743 return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO, 6744 IPA_CMD_ASS_STOP, NULL, prot); 6745 } 6746 6747 static int qeth_set_tso_on(struct qeth_card *card, 6748 enum qeth_prot_versions prot) 6749 { 6750 struct qeth_tso_start_data tso_data; 6751 struct qeth_cmd_buffer *iob; 6752 struct qeth_ipa_caps caps; 6753 int rc; 6754 6755 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO, 6756 IPA_CMD_ASS_START, 0, prot); 6757 if (!iob) 6758 return -ENOMEM; 6759 6760 rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data); 6761 if (rc) 6762 return rc; 6763 6764 if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) { 6765 qeth_set_tso_off(card, prot); 6766 return -EOPNOTSUPP; 6767 } 6768 6769 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO, 6770 IPA_CMD_ASS_ENABLE, 6771 SETASS_DATA_SIZEOF(caps), prot); 6772 if (!iob) { 6773 qeth_set_tso_off(card, prot); 6774 return -ENOMEM; 6775 } 6776 6777 /* enable TSO capability */ 6778 __ipa_cmd(iob)->data.setassparms.data.caps.enabled = 6779 QETH_IPA_LARGE_SEND_TCP; 6780 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps); 6781 if (rc) { 6782 qeth_set_tso_off(card, prot); 6783 return rc; 6784 } 6785 6786 if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) || 6787 !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) { 6788 qeth_set_tso_off(card, prot); 6789 return -EOPNOTSUPP; 6790 } 6791 6792 dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot, 6793 tso_data.mss); 6794 return 0; 6795 } 6796 6797 static int qeth_set_ipa_tso(struct qeth_card *card, bool on, 6798 enum qeth_prot_versions prot) 6799 { 6800 return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot); 6801 } 6802 6803 static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on) 6804 { 6805 int rc_ipv4 = (on) ? -EOPNOTSUPP : 0; 6806 int rc_ipv6; 6807 6808 if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) 6809 rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM, 6810 QETH_PROT_IPV4, NULL); 6811 if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) 6812 /* no/one Offload Assist available, so the rc is trivial */ 6813 return rc_ipv4; 6814 6815 rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM, 6816 QETH_PROT_IPV6, NULL); 6817 6818 if (on) 6819 /* enable: success if any Assist is active */ 6820 return (rc_ipv6) ? rc_ipv4 : 0; 6821 6822 /* disable: failure if any Assist is still active */ 6823 return (rc_ipv6) ? rc_ipv6 : rc_ipv4; 6824 } 6825 6826 /** 6827 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features 6828 * @dev: a net_device 6829 */ 6830 void qeth_enable_hw_features(struct net_device *dev) 6831 { 6832 struct qeth_card *card = dev->ml_priv; 6833 netdev_features_t features; 6834 6835 features = dev->features; 6836 /* force-off any feature that might need an IPA sequence. 6837 * netdev_update_features() will restart them. 6838 */ 6839 dev->features &= ~dev->hw_features; 6840 /* toggle VLAN filter, so that VIDs are re-programmed: */ 6841 if (IS_LAYER2(card) && IS_VM_NIC(card)) { 6842 dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 6843 dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 6844 } 6845 netdev_update_features(dev); 6846 if (features != dev->features) 6847 dev_warn(&card->gdev->dev, 6848 "Device recovery failed to restore all offload features\n"); 6849 } 6850 EXPORT_SYMBOL_GPL(qeth_enable_hw_features); 6851 6852 static void qeth_check_restricted_features(struct qeth_card *card, 6853 netdev_features_t changed, 6854 netdev_features_t actual) 6855 { 6856 netdev_features_t ipv6_features = NETIF_F_TSO6; 6857 netdev_features_t ipv4_features = NETIF_F_TSO; 6858 6859 if (!card->info.has_lp2lp_cso_v6) 6860 ipv6_features |= NETIF_F_IPV6_CSUM; 6861 if (!card->info.has_lp2lp_cso_v4) 6862 ipv4_features |= NETIF_F_IP_CSUM; 6863 6864 if ((changed & ipv6_features) && !(actual & ipv6_features)) 6865 qeth_flush_local_addrs6(card); 6866 if ((changed & ipv4_features) && !(actual & ipv4_features)) 6867 qeth_flush_local_addrs4(card); 6868 } 6869 6870 int qeth_set_features(struct net_device *dev, netdev_features_t features) 6871 { 6872 struct qeth_card *card = dev->ml_priv; 6873 netdev_features_t changed = dev->features ^ features; 6874 int rc = 0; 6875 6876 QETH_CARD_TEXT(card, 2, "setfeat"); 6877 QETH_CARD_HEX(card, 2, &features, sizeof(features)); 6878 6879 if ((changed & NETIF_F_IP_CSUM)) { 6880 rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM, 6881 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4, 6882 &card->info.has_lp2lp_cso_v4); 6883 if (rc) 6884 changed ^= NETIF_F_IP_CSUM; 6885 } 6886 if (changed & NETIF_F_IPV6_CSUM) { 6887 rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM, 6888 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6, 6889 &card->info.has_lp2lp_cso_v6); 6890 if (rc) 6891 changed ^= NETIF_F_IPV6_CSUM; 6892 } 6893 if (changed & NETIF_F_RXCSUM) { 6894 rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM); 6895 if (rc) 6896 changed ^= NETIF_F_RXCSUM; 6897 } 6898 if (changed & NETIF_F_TSO) { 6899 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO, 6900 QETH_PROT_IPV4); 6901 if (rc) 6902 changed ^= NETIF_F_TSO; 6903 } 6904 if (changed & NETIF_F_TSO6) { 6905 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6, 6906 QETH_PROT_IPV6); 6907 if (rc) 6908 changed ^= NETIF_F_TSO6; 6909 } 6910 6911 qeth_check_restricted_features(card, dev->features ^ features, 6912 dev->features ^ changed); 6913 6914 /* everything changed successfully? */ 6915 if ((dev->features ^ features) == changed) 6916 return 0; 6917 /* something went wrong. save changed features and return error */ 6918 dev->features ^= changed; 6919 return -EIO; 6920 } 6921 EXPORT_SYMBOL_GPL(qeth_set_features); 6922 6923 netdev_features_t qeth_fix_features(struct net_device *dev, 6924 netdev_features_t features) 6925 { 6926 struct qeth_card *card = dev->ml_priv; 6927 6928 QETH_CARD_TEXT(card, 2, "fixfeat"); 6929 if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) 6930 features &= ~NETIF_F_IP_CSUM; 6931 if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) 6932 features &= ~NETIF_F_IPV6_CSUM; 6933 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) && 6934 !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) 6935 features &= ~NETIF_F_RXCSUM; 6936 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) 6937 features &= ~NETIF_F_TSO; 6938 if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO)) 6939 features &= ~NETIF_F_TSO6; 6940 6941 QETH_CARD_HEX(card, 2, &features, sizeof(features)); 6942 return features; 6943 } 6944 EXPORT_SYMBOL_GPL(qeth_fix_features); 6945 6946 netdev_features_t qeth_features_check(struct sk_buff *skb, 6947 struct net_device *dev, 6948 netdev_features_t features) 6949 { 6950 struct qeth_card *card = dev->ml_priv; 6951 6952 /* Traffic with local next-hop is not eligible for some offloads: */ 6953 if (skb->ip_summed == CHECKSUM_PARTIAL && 6954 READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) { 6955 netdev_features_t restricted = 0; 6956 6957 if (skb_is_gso(skb) && !netif_needs_gso(skb, features)) 6958 restricted |= NETIF_F_ALL_TSO; 6959 6960 switch (vlan_get_protocol(skb)) { 6961 case htons(ETH_P_IP): 6962 if (!card->info.has_lp2lp_cso_v4) 6963 restricted |= NETIF_F_IP_CSUM; 6964 6965 if (restricted && qeth_next_hop_is_local_v4(card, skb)) 6966 features &= ~restricted; 6967 break; 6968 case htons(ETH_P_IPV6): 6969 if (!card->info.has_lp2lp_cso_v6) 6970 restricted |= NETIF_F_IPV6_CSUM; 6971 6972 if (restricted && qeth_next_hop_is_local_v6(card, skb)) 6973 features &= ~restricted; 6974 break; 6975 default: 6976 break; 6977 } 6978 } 6979 6980 /* GSO segmentation builds skbs with 6981 * a (small) linear part for the headers, and 6982 * page frags for the data. 6983 * Compared to a linear skb, the header-only part consumes an 6984 * additional buffer element. This reduces buffer utilization, and 6985 * hurts throughput. So compress small segments into one element. 6986 */ 6987 if (netif_needs_gso(skb, features)) { 6988 /* match skb_segment(): */ 6989 unsigned int doffset = skb->data - skb_mac_header(skb); 6990 unsigned int hsize = skb_shinfo(skb)->gso_size; 6991 unsigned int hroom = skb_headroom(skb); 6992 6993 /* linearize only if resulting skb allocations are order-0: */ 6994 if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0)) 6995 features &= ~NETIF_F_SG; 6996 } 6997 6998 return vlan_features_check(skb, features); 6999 } 7000 EXPORT_SYMBOL_GPL(qeth_features_check); 7001 7002 void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 7003 { 7004 struct qeth_card *card = dev->ml_priv; 7005 struct qeth_qdio_out_q *queue; 7006 unsigned int i; 7007 7008 QETH_CARD_TEXT(card, 5, "getstat"); 7009 7010 stats->rx_packets = card->stats.rx_packets; 7011 stats->rx_bytes = card->stats.rx_bytes; 7012 stats->rx_errors = card->stats.rx_length_errors + 7013 card->stats.rx_frame_errors + 7014 card->stats.rx_fifo_errors; 7015 stats->rx_dropped = card->stats.rx_dropped_nomem + 7016 card->stats.rx_dropped_notsupp + 7017 card->stats.rx_dropped_runt; 7018 stats->multicast = card->stats.rx_multicast; 7019 stats->rx_length_errors = card->stats.rx_length_errors; 7020 stats->rx_frame_errors = card->stats.rx_frame_errors; 7021 stats->rx_fifo_errors = card->stats.rx_fifo_errors; 7022 7023 for (i = 0; i < card->qdio.no_out_queues; i++) { 7024 queue = card->qdio.out_qs[i]; 7025 7026 stats->tx_packets += queue->stats.tx_packets; 7027 stats->tx_bytes += queue->stats.tx_bytes; 7028 stats->tx_errors += queue->stats.tx_errors; 7029 stats->tx_dropped += queue->stats.tx_dropped; 7030 } 7031 } 7032 EXPORT_SYMBOL_GPL(qeth_get_stats64); 7033 7034 #define TC_IQD_UCAST 0 7035 static void qeth_iqd_set_prio_tc_map(struct net_device *dev, 7036 unsigned int ucast_txqs) 7037 { 7038 unsigned int prio; 7039 7040 /* IQD requires mcast traffic to be placed on a dedicated queue, and 7041 * qeth_iqd_select_queue() deals with this. 7042 * For unicast traffic, we defer the queue selection to the stack. 7043 * By installing a trivial prio map that spans over only the unicast 7044 * queues, we can encourage the stack to spread the ucast traffic evenly 7045 * without selecting the mcast queue. 7046 */ 7047 7048 /* One traffic class, spanning over all active ucast queues: */ 7049 netdev_set_num_tc(dev, 1); 7050 netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs, 7051 QETH_IQD_MIN_UCAST_TXQ); 7052 7053 /* Map all priorities to this traffic class: */ 7054 for (prio = 0; prio <= TC_BITMASK; prio++) 7055 netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST); 7056 } 7057 7058 int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count) 7059 { 7060 struct net_device *dev = card->dev; 7061 int rc; 7062 7063 /* Per netif_setup_tc(), adjust the mapping first: */ 7064 if (IS_IQD(card)) 7065 qeth_iqd_set_prio_tc_map(dev, count - 1); 7066 7067 rc = netif_set_real_num_tx_queues(dev, count); 7068 7069 if (rc && IS_IQD(card)) 7070 qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1); 7071 7072 return rc; 7073 } 7074 EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues); 7075 7076 u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb, 7077 u8 cast_type, struct net_device *sb_dev) 7078 { 7079 u16 txq; 7080 7081 if (cast_type != RTN_UNICAST) 7082 return QETH_IQD_MCAST_TXQ; 7083 if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ) 7084 return QETH_IQD_MIN_UCAST_TXQ; 7085 7086 txq = netdev_pick_tx(dev, skb, sb_dev); 7087 return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq; 7088 } 7089 EXPORT_SYMBOL_GPL(qeth_iqd_select_queue); 7090 7091 int qeth_open(struct net_device *dev) 7092 { 7093 struct qeth_card *card = dev->ml_priv; 7094 struct qeth_qdio_out_q *queue; 7095 unsigned int i; 7096 7097 QETH_CARD_TEXT(card, 4, "qethopen"); 7098 7099 card->data.state = CH_STATE_UP; 7100 netif_tx_start_all_queues(dev); 7101 7102 local_bh_disable(); 7103 qeth_for_each_output_queue(card, queue, i) { 7104 netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll, 7105 QETH_NAPI_WEIGHT); 7106 napi_enable(&queue->napi); 7107 napi_schedule(&queue->napi); 7108 } 7109 7110 napi_enable(&card->napi); 7111 napi_schedule(&card->napi); 7112 /* kick-start the NAPI softirq: */ 7113 local_bh_enable(); 7114 7115 return 0; 7116 } 7117 EXPORT_SYMBOL_GPL(qeth_open); 7118 7119 int qeth_stop(struct net_device *dev) 7120 { 7121 struct qeth_card *card = dev->ml_priv; 7122 struct qeth_qdio_out_q *queue; 7123 unsigned int i; 7124 7125 QETH_CARD_TEXT(card, 4, "qethstop"); 7126 7127 napi_disable(&card->napi); 7128 cancel_delayed_work_sync(&card->buffer_reclaim_work); 7129 qdio_stop_irq(CARD_DDEV(card)); 7130 7131 /* Quiesce the NAPI instances: */ 7132 qeth_for_each_output_queue(card, queue, i) 7133 napi_disable(&queue->napi); 7134 7135 /* Stop .ndo_start_xmit, might still access queue->napi. */ 7136 netif_tx_disable(dev); 7137 7138 qeth_for_each_output_queue(card, queue, i) { 7139 del_timer_sync(&queue->timer); 7140 /* Queues may get re-allocated, so remove the NAPIs. */ 7141 netif_napi_del(&queue->napi); 7142 } 7143 7144 return 0; 7145 } 7146 EXPORT_SYMBOL_GPL(qeth_stop); 7147 7148 static int __init qeth_core_init(void) 7149 { 7150 int rc; 7151 7152 pr_info("loading core functions\n"); 7153 7154 qeth_debugfs_root = debugfs_create_dir("qeth", NULL); 7155 7156 rc = qeth_register_dbf_views(); 7157 if (rc) 7158 goto dbf_err; 7159 qeth_core_root_dev = root_device_register("qeth"); 7160 rc = PTR_ERR_OR_ZERO(qeth_core_root_dev); 7161 if (rc) 7162 goto register_err; 7163 qeth_core_header_cache = 7164 kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE, 7165 roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE), 7166 0, NULL); 7167 if (!qeth_core_header_cache) { 7168 rc = -ENOMEM; 7169 goto slab_err; 7170 } 7171 qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf", 7172 sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL); 7173 if (!qeth_qdio_outbuf_cache) { 7174 rc = -ENOMEM; 7175 goto cqslab_err; 7176 } 7177 7178 qeth_qaob_cache = kmem_cache_create("qeth_qaob", 7179 sizeof(struct qaob), 7180 sizeof(struct qaob), 7181 0, NULL); 7182 if (!qeth_qaob_cache) { 7183 rc = -ENOMEM; 7184 goto qaob_err; 7185 } 7186 7187 rc = ccw_driver_register(&qeth_ccw_driver); 7188 if (rc) 7189 goto ccw_err; 7190 rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver); 7191 if (rc) 7192 goto ccwgroup_err; 7193 7194 return 0; 7195 7196 ccwgroup_err: 7197 ccw_driver_unregister(&qeth_ccw_driver); 7198 ccw_err: 7199 kmem_cache_destroy(qeth_qaob_cache); 7200 qaob_err: 7201 kmem_cache_destroy(qeth_qdio_outbuf_cache); 7202 cqslab_err: 7203 kmem_cache_destroy(qeth_core_header_cache); 7204 slab_err: 7205 root_device_unregister(qeth_core_root_dev); 7206 register_err: 7207 qeth_unregister_dbf_views(); 7208 dbf_err: 7209 debugfs_remove_recursive(qeth_debugfs_root); 7210 pr_err("Initializing the qeth device driver failed\n"); 7211 return rc; 7212 } 7213 7214 static void __exit qeth_core_exit(void) 7215 { 7216 qeth_clear_dbf_list(); 7217 ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver); 7218 ccw_driver_unregister(&qeth_ccw_driver); 7219 kmem_cache_destroy(qeth_qaob_cache); 7220 kmem_cache_destroy(qeth_qdio_outbuf_cache); 7221 kmem_cache_destroy(qeth_core_header_cache); 7222 root_device_unregister(qeth_core_root_dev); 7223 qeth_unregister_dbf_views(); 7224 debugfs_remove_recursive(qeth_debugfs_root); 7225 pr_info("core functions removed\n"); 7226 } 7227 7228 module_init(qeth_core_init); 7229 module_exit(qeth_core_exit); 7230 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>"); 7231 MODULE_DESCRIPTION("qeth core functions"); 7232 MODULE_LICENSE("GPL"); 7233