// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2009
 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *		 Frank Pavlic <fpavlic@de.ibm.com>,
 *		 Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
#include <net/sock.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	/*                   N  P  A    M  L  V                      H  */
	[QETH_DBF_SETUP] = {"qeth_setup",
				8, 1,   8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]  = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

static struct kmem_cache *qeth_core_header_cache;
static struct kmem_cache *qeth_qdio_outbuf_cache;

static struct device *qeth_core_root_dev;
static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);

static void qeth_close_dev_handler(struct work_struct *work)
{
	struct qeth_card *card;

	card = container_of(work, struct qeth_card, close_dev_work);
	QETH_CARD_TEXT(card, 2, "cldevhdl");
	ccwgroup_set_offline(card->gdev);
}

static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
	return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "Virt.NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return "Virt.NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return "Virt.NIC OSM";
		case QETH_CARD_TYPE_OSX:
			return "Virt.NIC OSX";
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			return "unknown";
		}
	}
	return "n/a";
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			      int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

static void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	struct qeth_qdio_q *queue = card->qdio.in_q;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.in_buf_pool.entry_list, list)
		list_del(&pool_entry->list);

	for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
		queue->bufs[i].pool_entry = NULL;
}

static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
		if (entry->elements[i])
			__free_page(entry->elements[i]);
	}

	kfree(entry);
}

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
				 init_list) {
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);
	}
}

static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
{
	struct qeth_buffer_pool_entry *entry;
	unsigned int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	for (i = 0; i < pages; i++) {
		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);

		if (!entry->elements[i]) {
			qeth_free_pool_entry(entry);
			return NULL;
		}
	}

	return entry;
}

static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		struct qeth_buffer_pool_entry *entry;

		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}

		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
	}
	return 0;
}

int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
	struct qeth_buffer_pool_entry *entry, *tmp;
	int delta = count - pool->buf_count;
	LIST_HEAD(entries);

	QETH_CARD_TEXT(card, 2, "realcbp");

	/* Defer until queue is allocated: */
	if (!card->qdio.in_q)
		goto out;

	/* Remove entries from the pool: */
	while (delta < 0) {
		entry = list_first_entry(&pool->entry_list,
					 struct qeth_buffer_pool_entry,
					 init_list);
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);

		delta++;
	}

	/* Allocate additional entries: */
	while (delta > 0) {
		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			list_for_each_entry_safe(entry, tmp, &entries,
						 init_list) {
				list_del(&entry->init_list);
				qeth_free_pool_entry(entry);
			}

			return -ENOMEM;
		}

		list_add(&entry->init_list, &entries);

		delta--;
	}

	list_splice(&entries, &pool->entry_list);

out:
	card->qdio.in_buf_pool.buf_count = count;
	pool->buf_count = count;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
			     card->qdio.no_in_queues - 1, 0, 127, NULL);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
			goto out;
		}
	}
	rc = 0;
out:
	return rc;
}

static int qeth_alloc_cq(struct qeth_card *card)
{
	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqon");
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
			dev_err(&card->gdev->dev, "Failed to create completion queue\n");
			return -ENOMEM;
		}

		card->qdio.no_in_queues = 2;
	} else {
		QETH_CARD_TEXT(card, 2, "nocq");
		card->qdio.c_q = NULL;
		card->qdio.no_in_queues = 1;
	}
	QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
	return 0;
}

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		--card->qdio.no_in_queues;
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
}

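/* Map the SBALF 15 error code of a completed CQ buffer to the IUCV TX
 * notification that gets delivered to the originating AF_IUCV socket.
 */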
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							 int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}

static void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}
static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = (__u32) __pa(data);
}

static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (!rc) {
		channel->active_cmd = iob;
	} else {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		qeth_unlock_channel(card, channel);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
	}
	return rc;
}

static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list_entry, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list_entry);
	spin_unlock_irq(&card->lock);
}

static void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}

static void qeth_flush_local_addrs4(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs4_lock);
	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs4_lock);
}

static void qeth_flush_local_addrs6(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs6_lock);
	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs6_lock);
}

static void qeth_flush_local_addrs(struct qeth_card *card)
{
	qeth_flush_local_addrs4(card);
	qeth_flush_local_addrs6(card);
}

static void qeth_add_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_add_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		addr->addr = cmd->addrs[i].addr;
		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs6_lock);
}

static void qeth_del_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
		unsigned int key = ipv4_addr_hash(addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
			if (tmp->addr.s6_addr32[3] == addr->addr) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_del_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
		u32 key = ipv6_addr_hash(&addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs6_lock);
}

static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	bool is_local = false;
	unsigned int key;
	__be32 next_hop;

	if (hash_empty(card->local_addrs4))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v4_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IP)));
	key = ipv4_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
		if (tmp->addr.s6_addr32[3] == next_hop) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	struct in6_addr *next_hop;
	bool is_local = false;
	u32 key;

	if (hash_empty(card->local_addrs6))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v6_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IPV6)));
	key = ipv6_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

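/* debugfs read handler: dump the IPv4 and IPv6 addresses that the adapter
 * has registered as local for this card.
 */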
static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
{
	struct qeth_card *card = m->private;
	struct qeth_local_addr *tmp;
	unsigned int i;

	rcu_read_lock();
	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
		seq_printf(m, "%pI6c\n", &tmp->addr);
	rcu_read_unlock();

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
			       struct qeth_card *card)
{
	const char *ipa_name;
	int com = cmd->hdr.command;

	ipa_name = qeth_get_ipa_cmd_name(com);

	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
}

static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
				netdev_name(card->dev));
			schedule_work(&card->close_dev_work);
		} else {
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 netdev_name(card->dev), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 netdev_name(card->dev), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}

static void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list_entry)
		qeth_notify_cmd(iob, -ECANCELED);
	spin_unlock_irqrestore(&card->lock, flags);
}

static int qeth_check_idx_response(struct qeth_card *card,
				   unsigned char *buffer)
{
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
QETH_CARD_TEXT(card, 2, " idxterm"); 864 QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]); 865 if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT || 866 buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) { 867 dev_err(&card->gdev->dev, 868 "The device does not support the configured transport mode\n"); 869 return -EPROTONOSUPPORT; 870 } 871 return -EIO; 872 } 873 return 0; 874 } 875 876 static void qeth_release_buffer_cb(struct qeth_card *card, 877 struct qeth_cmd_buffer *iob, 878 unsigned int data_length) 879 { 880 qeth_put_cmd(iob); 881 } 882 883 static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc) 884 { 885 qeth_notify_cmd(iob, rc); 886 qeth_put_cmd(iob); 887 } 888 889 static struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel, 890 unsigned int length, 891 unsigned int ccws, long timeout) 892 { 893 struct qeth_cmd_buffer *iob; 894 895 if (length > QETH_BUFSIZE) 896 return NULL; 897 898 iob = kzalloc(sizeof(*iob), GFP_KERNEL); 899 if (!iob) 900 return NULL; 901 902 iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1), 903 GFP_KERNEL | GFP_DMA); 904 if (!iob->data) { 905 kfree(iob); 906 return NULL; 907 } 908 909 init_completion(&iob->done); 910 spin_lock_init(&iob->lock); 911 refcount_set(&iob->ref_count, 1); 912 iob->channel = channel; 913 iob->timeout = timeout; 914 iob->length = length; 915 return iob; 916 } 917 918 static void qeth_issue_next_read_cb(struct qeth_card *card, 919 struct qeth_cmd_buffer *iob, 920 unsigned int data_length) 921 { 922 struct qeth_cmd_buffer *request = NULL; 923 struct qeth_ipa_cmd *cmd = NULL; 924 struct qeth_reply *reply = NULL; 925 struct qeth_cmd_buffer *tmp; 926 unsigned long flags; 927 int rc = 0; 928 929 QETH_CARD_TEXT(card, 4, "sndctlcb"); 930 rc = qeth_check_idx_response(card, iob->data); 931 switch (rc) { 932 case 0: 933 break; 934 case -EIO: 935 qeth_schedule_recovery(card); 936 fallthrough; 937 default: 938 qeth_clear_ipacmd_list(card); 939 goto err_idx; 940 } 941 942 cmd = __ipa_reply(iob); 943 if (cmd) { 944 cmd = qeth_check_ipa_data(card, cmd); 945 if (!cmd) 946 goto out; 947 } 948 949 /* match against pending cmd requests */ 950 spin_lock_irqsave(&card->lock, flags); 951 list_for_each_entry(tmp, &card->cmd_waiter_list, list_entry) { 952 if (tmp->match && tmp->match(tmp, iob)) { 953 request = tmp; 954 /* take the object outside the lock */ 955 qeth_get_cmd(request); 956 break; 957 } 958 } 959 spin_unlock_irqrestore(&card->lock, flags); 960 961 if (!request) 962 goto out; 963 964 reply = &request->reply; 965 if (!reply->callback) { 966 rc = 0; 967 goto no_callback; 968 } 969 970 spin_lock_irqsave(&request->lock, flags); 971 if (request->rc) 972 /* Bail out when the requestor has already left: */ 973 rc = request->rc; 974 else 975 rc = reply->callback(card, reply, cmd ? 
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
						       (unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
	       QETH_PDU_HEADER_SEQ_NO(iob->data),
	       QETH_SEQ_NO_LENGTH);
	__qeth_issue_next_read(card);
err_idx:
	qeth_put_cmd(iob);
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
				     unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread))
		rc = -EPERM;
	else if (card->thread_start_mask & thread)
		rc = -EBUSY;
	else
		card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);

	return rc;
}

static void qeth_clear_thread_start_bit(struct qeth_card *card,
					unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}

static void qeth_clear_thread_running_bit(struct qeth_card *card,
					  unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}

int qeth_schedule_recovery(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "startrec");

	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		schedule_work(&card->kernel_thread_starter);

	return rc;
}

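/* Evaluate channel status, device status and sense data of an interrupt and
 * decide whether the I/O failed: returns -EIO for fatal conditions, else 0.
 */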
QETH_CARD_TEXT(card, 2, "REVIND"); 1100 return -EIO; 1101 } 1102 if (sense[SENSE_COMMAND_REJECT_BYTE] & 1103 SENSE_COMMAND_REJECT_FLAG) { 1104 QETH_CARD_TEXT(card, 2, "CMDREJi"); 1105 return -EIO; 1106 } 1107 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) { 1108 QETH_CARD_TEXT(card, 2, "AFFE"); 1109 return -EIO; 1110 } 1111 if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) { 1112 QETH_CARD_TEXT(card, 2, "ZEROSEN"); 1113 return 0; 1114 } 1115 QETH_CARD_TEXT(card, 2, "DGENCHK"); 1116 return -EIO; 1117 } 1118 return 0; 1119 } 1120 1121 static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev, 1122 struct irb *irb) 1123 { 1124 if (!IS_ERR(irb)) 1125 return 0; 1126 1127 switch (PTR_ERR(irb)) { 1128 case -EIO: 1129 QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n", 1130 CCW_DEVID(cdev)); 1131 QETH_CARD_TEXT(card, 2, "ckirberr"); 1132 QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); 1133 return -EIO; 1134 case -ETIMEDOUT: 1135 dev_warn(&cdev->dev, "A hardware operation timed out" 1136 " on the device\n"); 1137 QETH_CARD_TEXT(card, 2, "ckirberr"); 1138 QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT); 1139 return -ETIMEDOUT; 1140 default: 1141 QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n", 1142 PTR_ERR(irb), CCW_DEVID(cdev)); 1143 QETH_CARD_TEXT(card, 2, "ckirberr"); 1144 QETH_CARD_TEXT(card, 2, " rc???"); 1145 return PTR_ERR(irb); 1146 } 1147 } 1148 1149 static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, 1150 struct irb *irb) 1151 { 1152 int rc; 1153 int cstat, dstat; 1154 struct qeth_cmd_buffer *iob = NULL; 1155 struct ccwgroup_device *gdev; 1156 struct qeth_channel *channel; 1157 struct qeth_card *card; 1158 1159 /* while we hold the ccwdev lock, this stays valid: */ 1160 gdev = dev_get_drvdata(&cdev->dev); 1161 card = dev_get_drvdata(&gdev->dev); 1162 1163 QETH_CARD_TEXT(card, 5, "irq"); 1164 1165 if (card->read.ccwdev == cdev) { 1166 channel = &card->read; 1167 QETH_CARD_TEXT(card, 5, "read"); 1168 } else if (card->write.ccwdev == cdev) { 1169 channel = &card->write; 1170 QETH_CARD_TEXT(card, 5, "write"); 1171 } else { 1172 channel = &card->data; 1173 QETH_CARD_TEXT(card, 5, "data"); 1174 } 1175 1176 if (intparm == 0) { 1177 QETH_CARD_TEXT(card, 5, "irqunsol"); 1178 } else if ((addr_t)intparm != (addr_t)channel->active_cmd) { 1179 QETH_CARD_TEXT(card, 5, "irqunexp"); 1180 1181 dev_err(&cdev->dev, 1182 "Received IRQ with intparm %lx, expected %px\n", 1183 intparm, channel->active_cmd); 1184 if (channel->active_cmd) 1185 qeth_cancel_cmd(channel->active_cmd, -EIO); 1186 } else { 1187 iob = (struct qeth_cmd_buffer *) (addr_t)intparm; 1188 } 1189 1190 qeth_unlock_channel(card, channel); 1191 1192 rc = qeth_check_irb_error(card, cdev, irb); 1193 if (rc) { 1194 /* IO was terminated, free its resources. 
		/* IO was terminated, free its resources. */
		if (iob)
			qeth_cancel_cmd(iob, rc);
		return;
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		channel->state = CH_STATE_STOPPED;
		wake_up(&card->wait_q);
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		channel->state = CH_STATE_HALTED;
		wake_up(&card->wait_q);
	}

	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
					  SCSW_FCTL_HALT_FUNC))) {
		qeth_cancel_cmd(iob, -ECANCELED);
		iob = NULL;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				 "The qeth device driver failed to recover "
				 "an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				       DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				       DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}

		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			return;
		}
	}

	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
			return;
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}
}

static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
			     struct qeth_qdio_out_buffer *buf,
			     enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		struct sock *sk = skb->sk;

		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (sk && sk->sk_family == PF_IUCV)
			iucv_sk(sk)->sk_txnotify(sk, notification);
	}
}

static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue,
				 struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
{
	struct sk_buff *skb;

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	if (error) {
		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
	} else {
		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
	}

	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
		if (!error) {
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}

		napi_consume_skb(skb, budget);
	}
}

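/* Release all resources held by a TX buffer (completed skbs, header elements
 * allocated from the kmem cache) and reset it to the EMPTY state.
 */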
static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) {
		atomic_dec(&queue->set_pci_flags_count);
		QETH_TXQ_STAT_INC(queue, completion_irq);
	}

	qeth_tx_complete_buf(queue, buf, error, budget);

	for (i = 0; i < queue->max_elements; ++i) {
		void *data = phys_to_virt(buf->buffer->element[i].addr);

		if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
			kmem_cache_free(qeth_core_header_cache, data);
	}

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;
	buf->frames = 0;
	buf->bytes = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
{
	if (buf->aob)
		qdio_release_aob(buf->aob);
	kmem_cache_free(qeth_qdio_outbuf_cache, buf);
}

static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
					  struct qeth_qdio_out_q *queue,
					  bool drain, int budget)
{
	struct qeth_qdio_out_buffer *buf, *tmp;

	list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
		struct qeth_qaob_priv1 *priv;
		struct qaob *aob = buf->aob;
		enum iucv_tx_notify notify;
		unsigned int i;

		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) {
			QETH_CARD_TEXT(card, 5, "fp");
			QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);

			notify = drain ? TX_NOTIFY_GENERALERROR :
					 qeth_compute_cq_notification(aob->aorc, 1);
			qeth_notify_skbs(queue, buf, notify);
			qeth_tx_complete_buf(queue, buf, drain, budget);

			for (i = 0;
			     i < aob->sb_count && i < queue->max_elements;
			     i++) {
				void *data = phys_to_virt(aob->sba[i]);

				if (test_bit(i, buf->from_kmem_cache) && data)
					kmem_cache_free(qeth_core_header_cache,
							data);
			}

			list_del(&buf->list_entry);
			qeth_free_out_buf(buf);
		}
	}
}

static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
	int j;

	qeth_tx_complete_pending_bufs(q->card, q, true, 0);

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;

		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
		if (free) {
			qeth_free_out_buf(q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}

static void qeth_drain_output_queues(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
	}
}

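/* Switch an OSA device between a single TX queue and the netdev's full set
 * of TX queues; any existing QDIO queues are freed first.
 */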
static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int max = single ? 1 : card->dev->num_tx_queues;

	if (card->qdio.no_out_queues == max)
		return;

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.no_out_queues = max;
}

static int qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;

	QETH_CARD_TEXT(card, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		return -ENOMEM;

	card->info.func_level = 0x4100 + chp_dsc->desc;

	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
		qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

	kfree(chp_dsc);
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
	return 0;
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

	/* inbound */
	card->qdio.no_in_queues = 1;
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (IS_IQD(card))
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, " %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					      kernel_thread_starter);
	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
						      QETH_RECOVER_THREAD);
		}
	}
}

static void qeth_buffer_reclaim_work(struct work_struct *);
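/* One-time initialization of a qeth_card: locks, work items, wait queue,
 * default options and the local-address hash tables.
 */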
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "setupcrd");

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
	hash_init(card->rx_mode_addrs);
	hash_init(card->local_addrs4);
	hash_init(card->local_addrs6);
	spin_lock_init(&card->local_addrs4_lock);
	spin_lock_init(&card->local_addrs6_lock);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					      qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			   CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;

	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
					   qeth_debugfs_root);
	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
			    &qeth_debugfs_local_addr_fops);

	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_read_cmd:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}

static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

static int qeth_stop_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	rc = ccw_device_set_offline(cdev);

	spin_lock_irq(get_ccwdev_lock(cdev));
	if (channel->active_cmd)
		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
			channel->active_cmd);

	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	return rc;
}

static int qeth_start_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	channel->state = CH_STATE_DOWN;
	xchg(&channel->active_cmd, NULL);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	rc = ccw_device_set_online(cdev);
	if (rc)
		goto err;

	return 0;

err:
	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
	return rc;
}

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (IS_IQD(card))
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	return rc;
}

static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
	struct diag26c_vnic_resp *response = NULL;
	struct diag26c_vnic_req *request = NULL;
	struct ccw_dev_id id;
	char userid[80];
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vmlayer");

	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
	if (rc)
		goto out;

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	ccw_device_get_id(CARD_RDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION6_VM65918;
	request->req_format = DIAG26C_VNIC_INFO;
	ASCEBC(userid, 8);
	memcpy(&request->sys_name, userid, 8);
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		goto out;
	}

	if (response->protocol == VNIC_INFO_PROT_L2)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (response->protocol == VNIC_INFO_PROT_L3)
		disc = QETH_DISCIPLINE_LAYER3;

out:
	kfree(response);
	kfree(request);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "err%x", rc);
	return disc;
}

/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;

	if (IS_OSM(card))
		disc = QETH_DISCIPLINE_LAYER2;
	else if (IS_VM_NIC(card))
		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
				      qeth_vm_detect_layer(card);

	switch (disc) {
	case QETH_DISCIPLINE_LAYER2:
		QETH_CARD_TEXT(card, 3, "force l2");
		break;
	case QETH_DISCIPLINE_LAYER3:
		QETH_CARD_TEXT(card, 3, "force l3");
		break;
	default:
		QETH_CARD_TEXT(card, 3, "force no");
	}

	return disc;
}

static void qeth_set_blkt_defaults(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "cfgblkt");

	if (card->info.use_v1_blkt) {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	} else {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	}
}

static void qeth_idx_init(struct qeth_card *card)
{
	memset(&card->seqno, 0, sizeof(card->seqno));

	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
		break;
	case QETH_CARD_TYPE_OSD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
	}
}

static void qeth_idx_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
	       QETH_SEQ_NO_LENGTH);
	if (iob->channel == &card->write)
		card->seqno.trans_hdr++;
}

static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

static void qeth_mpc_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_idx_finalize_cmd(card, iob);

	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);

	iob->callback = qeth_release_buffer_cb;
}

static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	/* MPC cmds are issued strictly in sequence. */
	return !IS_IPA(reply->data);
}

static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
						  const void *data,
						  unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;

	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
	if (!iob)
		return NULL;

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
		       iob->data);
	iob->finalize = qeth_mpc_finalize_cmd;
	iob->match = qeth_mpc_match_reply;
	return iob;
}

/**
 * qeth_send_control_data() - send control command to the card
 * @card:	qeth_card structure pointer
 * @iob:	qeth_cmd_buffer pointer
 * @reply_cb:	callback function pointer
 *  @cb_card:	pointer to the qeth_card structure
 *  @cb_reply:	pointer to the qeth_reply structure
 *  @cb_cmd:	pointer to the original iob for non-IPA
 *		commands, or to the qeth_ipa_cmd structure
 *		for the IPA commands.
 * @reply_param: private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
 * function must return
 *   > 0 if more reply blocks are expected,
 *     0 if the last or only reply block is received, and
 *   < 0 on error.
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
 */

static int qeth_send_control_data(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
{
	struct qeth_channel *channel = iob->channel;
	struct qeth_reply *reply = &iob->reply;
	long timeout = iob->timeout;
	int rc;

	QETH_CARD_TEXT(card, 2, "sendctl");

	reply->callback = reply_cb;
	reply->param = reply_param;

	timeout = wait_event_interruptible_timeout(card->wait_q,
						   qeth_trylock_channel(channel, iob),
						   timeout);
	if (timeout <= 0) {
		qeth_put_cmd(iob);
		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
	}

	if (iob->finalize)
		iob->finalize(card, iob);
	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));

	qeth_enqueue_cmd(card, iob);

	/* This pairs with iob->callback, and keeps the iob alive after IO: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
				      (addr_t) iob, 0, 0, timeout);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		qeth_dequeue_cmd(card, iob);
		qeth_put_cmd(iob);
		qeth_unlock_channel(card, channel);
		goto out;
	}

	timeout = wait_for_completion_interruptible_timeout(&iob->done,
							    timeout);
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;

	qeth_dequeue_cmd(card, iob);

	if (reply_cb) {
		/* Wait until the callback for a late reply has completed: */
		spin_lock_irq(&iob->lock);
		if (rc)
			/* Zap any callback that's still pending: */
			iob->rc = rc;
		spin_unlock_irq(&iob->lock);
	}

	if (!rc)
		rc = iob->rc;

out:
	qeth_put_cmd(iob);
	return rc;
}

struct qeth_node_desc {
	struct node_descriptor nd1;
	struct node_descriptor nd2;
	struct node_descriptor nd3;
};

static void qeth_read_conf_data_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
	int rc = 0;
	u8 *tag;

	QETH_CARD_TEXT(card, 2, "cfgunit");

	if (data_length < sizeof(*nd)) {
		rc = -EINVAL;
		goto out;
	}

	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
			       nd->nd1.plant[1] == _ascebc['M'];
	tag = (u8 *)&nd->nd1.tag;
	card->info.chpid = tag[0];
	card->info.unit_addr2 = tag[1];

	tag = (u8 *)&nd->nd2.tag;
	card->info.cula = tag[1];

	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
				 nd->nd3.model[1] == 0xF0 &&
				 nd->nd3.model[2] >= 0xF1 &&
				 nd->nd3.model[2] <= 0xF4;

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static int qeth_read_conf_data(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct qeth_cmd_buffer *iob;
	struct ciw *ciw;

	/* scan for RCD command in extended SenseID data */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	if (ciw->count < sizeof(struct qeth_node_desc))
		return -EINVAL;

	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	iob->callback = qeth_read_conf_data_cb;
	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
		       iob->data);

	return qeth_send_control_data(card, iob, NULL, NULL);
}

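/* Check the response to an IDX ACTIVATE: distinguish a positive reply from
 * the known negative cause codes and map the latter to an errno.
 */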
QETH_CARD_TEXT_(card, 2, "idxneg%c", 2107 QETH_IDX_ACT_CAUSE_CODE(iob->data)); 2108 2109 switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) { 2110 case QETH_IDX_ACT_ERR_EXCL: 2111 dev_err(&channel->ccwdev->dev, 2112 "The adapter is used exclusively by another host\n"); 2113 return -EBUSY; 2114 case QETH_IDX_ACT_ERR_AUTH: 2115 case QETH_IDX_ACT_ERR_AUTH_USER: 2116 dev_err(&channel->ccwdev->dev, 2117 "Setting the device online failed because of insufficient authorization\n"); 2118 return -EPERM; 2119 default: 2120 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n", 2121 CCW_DEVID(channel->ccwdev)); 2122 return -EIO; 2123 } 2124 } 2125 2126 static void qeth_idx_activate_read_channel_cb(struct qeth_card *card, 2127 struct qeth_cmd_buffer *iob, 2128 unsigned int data_length) 2129 { 2130 struct qeth_channel *channel = iob->channel; 2131 u16 peer_level; 2132 int rc; 2133 2134 QETH_CARD_TEXT(card, 2, "idxrdcb"); 2135 2136 rc = qeth_idx_check_activate_response(card, channel, iob); 2137 if (rc) 2138 goto out; 2139 2140 memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); 2141 if (peer_level != qeth_peer_func_level(card->info.func_level)) { 2142 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", 2143 CCW_DEVID(channel->ccwdev), 2144 card->info.func_level, peer_level); 2145 rc = -EINVAL; 2146 goto out; 2147 } 2148 2149 memcpy(&card->token.issuer_rm_r, 2150 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), 2151 QETH_MPC_TOKEN_LENGTH); 2152 memcpy(&card->info.mcl_level[0], 2153 QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH); 2154 2155 out: 2156 qeth_notify_cmd(iob, rc); 2157 qeth_put_cmd(iob); 2158 } 2159 2160 static void qeth_idx_activate_write_channel_cb(struct qeth_card *card, 2161 struct qeth_cmd_buffer *iob, 2162 unsigned int data_length) 2163 { 2164 struct qeth_channel *channel = iob->channel; 2165 u16 peer_level; 2166 int rc; 2167 2168 QETH_CARD_TEXT(card, 2, "idxwrcb"); 2169 2170 rc = qeth_idx_check_activate_response(card, channel, iob); 2171 if (rc) 2172 goto out; 2173 2174 memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); 2175 if ((peer_level & ~0x0100) != 2176 qeth_peer_func_level(card->info.func_level)) { 2177 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", 2178 CCW_DEVID(channel->ccwdev), 2179 card->info.func_level, peer_level); 2180 rc = -EINVAL; 2181 } 2182 2183 out: 2184 qeth_notify_cmd(iob, rc); 2185 qeth_put_cmd(iob); 2186 } 2187 2188 static void qeth_idx_setup_activate_cmd(struct qeth_card *card, 2189 struct qeth_cmd_buffer *iob) 2190 { 2191 u16 addr = (card->info.cula << 8) + card->info.unit_addr2; 2192 u8 port = ((u8)card->dev->dev_port) | 0x80; 2193 struct ccw1 *ccw = __ccw_from_cmd(iob); 2194 2195 qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE, 2196 iob->data); 2197 qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data); 2198 iob->finalize = qeth_idx_finalize_cmd; 2199 2200 port |= QETH_IDX_ACT_INVAL_FRAME; 2201 memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1); 2202 memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), 2203 &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH); 2204 memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2205 &card->info.func_level, 2); 2206 memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2); 2207 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2); 2208 } 2209 2210 static int qeth_idx_activate_read_channel(struct qeth_card *card) 2211 { 2212 struct qeth_channel *channel = &card->read; 
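/* Note: on success, the reply received on the read channel carries the
 * issuer RM token and the microcode level; both are saved by
 * qeth_idx_activate_read_channel_cb(), and the issuer token is later
 * copied into the CM_ENABLE command (see qeth_cm_enable()).
 */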
2213 struct qeth_cmd_buffer *iob; 2214 int rc; 2215 2216 QETH_CARD_TEXT(card, 2, "idxread"); 2217 2218 iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT); 2219 if (!iob) 2220 return -ENOMEM; 2221 2222 memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE); 2223 qeth_idx_setup_activate_cmd(card, iob); 2224 iob->callback = qeth_idx_activate_read_channel_cb; 2225 2226 rc = qeth_send_control_data(card, iob, NULL, NULL); 2227 if (rc) 2228 return rc; 2229 2230 channel->state = CH_STATE_UP; 2231 return 0; 2232 } 2233 2234 static int qeth_idx_activate_write_channel(struct qeth_card *card) 2235 { 2236 struct qeth_channel *channel = &card->write; 2237 struct qeth_cmd_buffer *iob; 2238 int rc; 2239 2240 QETH_CARD_TEXT(card, 2, "idxwrite"); 2241 2242 iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT); 2243 if (!iob) 2244 return -ENOMEM; 2245 2246 memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE); 2247 qeth_idx_setup_activate_cmd(card, iob); 2248 iob->callback = qeth_idx_activate_write_channel_cb; 2249 2250 rc = qeth_send_control_data(card, iob, NULL, NULL); 2251 if (rc) 2252 return rc; 2253 2254 channel->state = CH_STATE_UP; 2255 return 0; 2256 } 2257 2258 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply, 2259 unsigned long data) 2260 { 2261 struct qeth_cmd_buffer *iob; 2262 2263 QETH_CARD_TEXT(card, 2, "cmenblcb"); 2264 2265 iob = (struct qeth_cmd_buffer *) data; 2266 memcpy(&card->token.cm_filter_r, 2267 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data), 2268 QETH_MPC_TOKEN_LENGTH); 2269 return 0; 2270 } 2271 2272 static int qeth_cm_enable(struct qeth_card *card) 2273 { 2274 struct qeth_cmd_buffer *iob; 2275 2276 QETH_CARD_TEXT(card, 2, "cmenable"); 2277 2278 iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE); 2279 if (!iob) 2280 return -ENOMEM; 2281 2282 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data), 2283 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); 2284 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data), 2285 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH); 2286 2287 return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL); 2288 } 2289 2290 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply, 2291 unsigned long data) 2292 { 2293 struct qeth_cmd_buffer *iob; 2294 2295 QETH_CARD_TEXT(card, 2, "cmsetpcb"); 2296 2297 iob = (struct qeth_cmd_buffer *) data; 2298 memcpy(&card->token.cm_connection_r, 2299 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data), 2300 QETH_MPC_TOKEN_LENGTH); 2301 return 0; 2302 } 2303 2304 static int qeth_cm_setup(struct qeth_card *card) 2305 { 2306 struct qeth_cmd_buffer *iob; 2307 2308 QETH_CARD_TEXT(card, 2, "cmsetup"); 2309 2310 iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE); 2311 if (!iob) 2312 return -ENOMEM; 2313 2314 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data), 2315 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); 2316 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data), 2317 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH); 2318 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data), 2319 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH); 2320 return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL); 2321 } 2322 2323 static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type) 2324 { 2325 if (link_type == QETH_LINK_TYPE_LANE_TR || 2326 link_type == QETH_LINK_TYPE_HSTR) { 2327 dev_err(&card->gdev->dev, "Unsupported Token Ring device\n"); 2328 return false; 2329 } 2330 2331 return true; 2332 } 2333 2334 static int qeth_update_max_mtu(struct qeth_card *card, unsigned 
int max_mtu) 2335 { 2336 struct net_device *dev = card->dev; 2337 unsigned int new_mtu; 2338 2339 if (!max_mtu) { 2340 /* IQD needs accurate max MTU to set up its RX buffers: */ 2341 if (IS_IQD(card)) 2342 return -EINVAL; 2343 /* tolerate quirky HW: */ 2344 max_mtu = ETH_MAX_MTU; 2345 } 2346 2347 rtnl_lock(); 2348 if (IS_IQD(card)) { 2349 /* move any device with default MTU to new max MTU: */ 2350 new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu; 2351 2352 /* adjust RX buffer size to new max MTU: */ 2353 card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE; 2354 if (dev->max_mtu && dev->max_mtu != max_mtu) 2355 qeth_free_qdio_queues(card); 2356 } else { 2357 if (dev->mtu) 2358 new_mtu = dev->mtu; 2359 /* default MTUs for first setup: */ 2360 else if (IS_LAYER2(card)) 2361 new_mtu = ETH_DATA_LEN; 2362 else 2363 new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */ 2364 } 2365 2366 dev->max_mtu = max_mtu; 2367 dev->mtu = min(new_mtu, max_mtu); 2368 rtnl_unlock(); 2369 return 0; 2370 } 2371 2372 static int qeth_get_mtu_outof_framesize(int framesize) 2373 { 2374 switch (framesize) { 2375 case 0x4000: 2376 return 8192; 2377 case 0x6000: 2378 return 16384; 2379 case 0xa000: 2380 return 32768; 2381 case 0xffff: 2382 return 57344; 2383 default: 2384 return 0; 2385 } 2386 } 2387 2388 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, 2389 unsigned long data) 2390 { 2391 __u16 mtu, framesize; 2392 __u16 len; 2393 struct qeth_cmd_buffer *iob; 2394 u8 link_type = 0; 2395 2396 QETH_CARD_TEXT(card, 2, "ulpenacb"); 2397 2398 iob = (struct qeth_cmd_buffer *) data; 2399 memcpy(&card->token.ulp_filter_r, 2400 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data), 2401 QETH_MPC_TOKEN_LENGTH); 2402 if (IS_IQD(card)) { 2403 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2); 2404 mtu = qeth_get_mtu_outof_framesize(framesize); 2405 } else { 2406 mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data); 2407 } 2408 *(u16 *)reply->param = mtu; 2409 2410 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2); 2411 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) { 2412 memcpy(&link_type, 2413 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1); 2414 if (!qeth_is_supported_link_type(card, link_type)) 2415 return -EPROTONOSUPPORT; 2416 } 2417 2418 card->info.link_type = link_type; 2419 QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type); 2420 return 0; 2421 } 2422 2423 static u8 qeth_mpc_select_prot_type(struct qeth_card *card) 2424 { 2425 return IS_LAYER2(card) ? 
QETH_MPC_PROT_L2 : QETH_MPC_PROT_L3; 2426 } 2427 2428 static int qeth_ulp_enable(struct qeth_card *card) 2429 { 2430 u8 prot_type = qeth_mpc_select_prot_type(card); 2431 struct qeth_cmd_buffer *iob; 2432 u16 max_mtu; 2433 int rc; 2434 2435 QETH_CARD_TEXT(card, 2, "ulpenabl"); 2436 2437 iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE); 2438 if (!iob) 2439 return -ENOMEM; 2440 2441 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port; 2442 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1); 2443 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data), 2444 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2445 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data), 2446 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH); 2447 rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu); 2448 if (rc) 2449 return rc; 2450 return qeth_update_max_mtu(card, max_mtu); 2451 } 2452 2453 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, 2454 unsigned long data) 2455 { 2456 struct qeth_cmd_buffer *iob; 2457 2458 QETH_CARD_TEXT(card, 2, "ulpstpcb"); 2459 2460 iob = (struct qeth_cmd_buffer *) data; 2461 memcpy(&card->token.ulp_connection_r, 2462 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), 2463 QETH_MPC_TOKEN_LENGTH); 2464 if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), 2465 3)) { 2466 QETH_CARD_TEXT(card, 2, "olmlimit"); 2467 dev_err(&card->gdev->dev, "A connection could not be " 2468 "established because of an OLM limit\n"); 2469 return -EMLINK; 2470 } 2471 return 0; 2472 } 2473 2474 static int qeth_ulp_setup(struct qeth_card *card) 2475 { 2476 __u16 temp; 2477 struct qeth_cmd_buffer *iob; 2478 2479 QETH_CARD_TEXT(card, 2, "ulpsetup"); 2480 2481 iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE); 2482 if (!iob) 2483 return -ENOMEM; 2484 2485 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data), 2486 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2487 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data), 2488 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH); 2489 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data), 2490 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH); 2491 2492 memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2); 2493 temp = (card->info.cula << 8) + card->info.unit_addr2; 2494 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2); 2495 return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL); 2496 } 2497 2498 static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx, 2499 gfp_t gfp) 2500 { 2501 struct qeth_qdio_out_buffer *newbuf; 2502 2503 newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, gfp); 2504 if (!newbuf) 2505 return -ENOMEM; 2506 2507 newbuf->buffer = q->qdio_bufs[bidx]; 2508 skb_queue_head_init(&newbuf->skb_list); 2509 lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key); 2510 atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY); 2511 q->bufs[bidx] = newbuf; 2512 return 0; 2513 } 2514 2515 static void qeth_free_output_queue(struct qeth_qdio_out_q *q) 2516 { 2517 if (!q) 2518 return; 2519 2520 qeth_drain_output_queue(q, true); 2521 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2522 kfree(q); 2523 } 2524 2525 static struct qeth_qdio_out_q *qeth_alloc_output_queue(void) 2526 { 2527 struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL); 2528 unsigned int i; 2529 2530 if (!q) 2531 return NULL; 2532 2533 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) 2534 goto err_qdio_bufs; 2535 2536 for (i = 0; i < 
QDIO_MAX_BUFFERS_PER_Q; i++) { 2537 if (qeth_alloc_out_buf(q, i, GFP_KERNEL)) 2538 goto err_out_bufs; 2539 } 2540 2541 return q; 2542 2543 err_out_bufs: 2544 while (i > 0) 2545 qeth_free_out_buf(q->bufs[--i]); 2546 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2547 err_qdio_bufs: 2548 kfree(q); 2549 return NULL; 2550 } 2551 2552 static void qeth_tx_completion_timer(struct timer_list *timer) 2553 { 2554 struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer); 2555 2556 napi_schedule(&queue->napi); 2557 QETH_TXQ_STAT_INC(queue, completion_timer); 2558 } 2559 2560 static int qeth_alloc_qdio_queues(struct qeth_card *card) 2561 { 2562 unsigned int i; 2563 2564 QETH_CARD_TEXT(card, 2, "allcqdbf"); 2565 2566 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED, 2567 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED) 2568 return 0; 2569 2570 QETH_CARD_TEXT(card, 2, "inq"); 2571 card->qdio.in_q = qeth_alloc_qdio_queue(); 2572 if (!card->qdio.in_q) 2573 goto out_nomem; 2574 2575 /* inbound buffer pool */ 2576 if (qeth_alloc_buffer_pool(card)) 2577 goto out_freeinq; 2578 2579 /* outbound */ 2580 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2581 struct qeth_qdio_out_q *queue; 2582 2583 queue = qeth_alloc_output_queue(); 2584 if (!queue) 2585 goto out_freeoutq; 2586 QETH_CARD_TEXT_(card, 2, "outq %i", i); 2587 QETH_CARD_HEX(card, 2, &queue, sizeof(void *)); 2588 card->qdio.out_qs[i] = queue; 2589 queue->card = card; 2590 queue->queue_no = i; 2591 INIT_LIST_HEAD(&queue->pending_bufs); 2592 spin_lock_init(&queue->lock); 2593 timer_setup(&queue->timer, qeth_tx_completion_timer, 0); 2594 if (IS_IQD(card)) { 2595 queue->coalesce_usecs = QETH_TX_COALESCE_USECS; 2596 queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES; 2597 queue->rescan_usecs = QETH_TX_TIMER_USECS; 2598 } else { 2599 queue->coalesce_usecs = USEC_PER_SEC; 2600 queue->max_coalesced_frames = 0; 2601 queue->rescan_usecs = 10 * USEC_PER_SEC; 2602 } 2603 queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT; 2604 } 2605 2606 /* completion */ 2607 if (qeth_alloc_cq(card)) 2608 goto out_freeoutq; 2609 2610 return 0; 2611 2612 out_freeoutq: 2613 while (i > 0) { 2614 qeth_free_output_queue(card->qdio.out_qs[--i]); 2615 card->qdio.out_qs[i] = NULL; 2616 } 2617 qeth_free_buffer_pool(card); 2618 out_freeinq: 2619 qeth_free_qdio_queue(card->qdio.in_q); 2620 card->qdio.in_q = NULL; 2621 out_nomem: 2622 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); 2623 return -ENOMEM; 2624 } 2625 2626 static void qeth_free_qdio_queues(struct qeth_card *card) 2627 { 2628 int i, j; 2629 2630 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == 2631 QETH_QDIO_UNINITIALIZED) 2632 return; 2633 2634 qeth_free_cq(card); 2635 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { 2636 if (card->qdio.in_q->bufs[j].rx_skb) 2637 dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb); 2638 } 2639 qeth_free_qdio_queue(card->qdio.in_q); 2640 card->qdio.in_q = NULL; 2641 /* inbound buffer pool */ 2642 qeth_free_buffer_pool(card); 2643 /* free outbound qdio_qs */ 2644 for (i = 0; i < card->qdio.no_out_queues; i++) { 2645 qeth_free_output_queue(card->qdio.out_qs[i]); 2646 card->qdio.out_qs[i] = NULL; 2647 } 2648 } 2649 2650 static void qeth_fill_qib_parms(struct qeth_card *card, 2651 struct qeth_qib_parms *parms) 2652 { 2653 struct qeth_qdio_out_q *queue; 2654 unsigned int i; 2655 2656 parms->pcit_magic[0] = 'P'; 2657 parms->pcit_magic[1] = 'C'; 2658 parms->pcit_magic[2] = 'I'; 2659 parms->pcit_magic[3] = 'T'; 2660 ASCEBC(parms->pcit_magic, 
sizeof(parms->pcit_magic)); 2661 parms->pcit_a = QETH_PCI_THRESHOLD_A(card); 2662 parms->pcit_b = QETH_PCI_THRESHOLD_B(card); 2663 parms->pcit_c = QETH_PCI_TIMER_VALUE(card); 2664 2665 parms->blkt_magic[0] = 'B'; 2666 parms->blkt_magic[1] = 'L'; 2667 parms->blkt_magic[2] = 'K'; 2668 parms->blkt_magic[3] = 'T'; 2669 ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic)); 2670 parms->blkt_total = card->info.blkt.time_total; 2671 parms->blkt_inter_packet = card->info.blkt.inter_packet; 2672 parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo; 2673 2674 /* Prio-queueing implicitly uses the default priorities: */ 2675 if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1) 2676 return; 2677 2678 parms->pque_magic[0] = 'P'; 2679 parms->pque_magic[1] = 'Q'; 2680 parms->pque_magic[2] = 'U'; 2681 parms->pque_magic[3] = 'E'; 2682 ASCEBC(parms->pque_magic, sizeof(parms->pque_magic)); 2683 parms->pque_order = QETH_QIB_PQUE_ORDER_RR; 2684 parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL; 2685 2686 qeth_for_each_output_queue(card, queue, i) 2687 parms->pque_priority[i] = queue->priority; 2688 } 2689 2690 static int qeth_qdio_activate(struct qeth_card *card) 2691 { 2692 QETH_CARD_TEXT(card, 3, "qdioact"); 2693 return qdio_activate(CARD_DDEV(card)); 2694 } 2695 2696 static int qeth_dm_act(struct qeth_card *card) 2697 { 2698 struct qeth_cmd_buffer *iob; 2699 2700 QETH_CARD_TEXT(card, 2, "dmact"); 2701 2702 iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE); 2703 if (!iob) 2704 return -ENOMEM; 2705 2706 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data), 2707 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2708 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data), 2709 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); 2710 return qeth_send_control_data(card, iob, NULL, NULL); 2711 } 2712 2713 static int qeth_mpc_initialize(struct qeth_card *card) 2714 { 2715 int rc; 2716 2717 QETH_CARD_TEXT(card, 2, "mpcinit"); 2718 2719 rc = qeth_issue_next_read(card); 2720 if (rc) { 2721 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 2722 return rc; 2723 } 2724 rc = qeth_cm_enable(card); 2725 if (rc) { 2726 QETH_CARD_TEXT_(card, 2, "2err%d", rc); 2727 return rc; 2728 } 2729 rc = qeth_cm_setup(card); 2730 if (rc) { 2731 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 2732 return rc; 2733 } 2734 rc = qeth_ulp_enable(card); 2735 if (rc) { 2736 QETH_CARD_TEXT_(card, 2, "4err%d", rc); 2737 return rc; 2738 } 2739 rc = qeth_ulp_setup(card); 2740 if (rc) { 2741 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 2742 return rc; 2743 } 2744 rc = qeth_alloc_qdio_queues(card); 2745 if (rc) { 2746 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 2747 return rc; 2748 } 2749 rc = qeth_qdio_establish(card); 2750 if (rc) { 2751 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 2752 qeth_free_qdio_queues(card); 2753 return rc; 2754 } 2755 rc = qeth_qdio_activate(card); 2756 if (rc) { 2757 QETH_CARD_TEXT_(card, 2, "7err%d", rc); 2758 return rc; 2759 } 2760 rc = qeth_dm_act(card); 2761 if (rc) { 2762 QETH_CARD_TEXT_(card, 2, "8err%d", rc); 2763 return rc; 2764 } 2765 2766 return 0; 2767 } 2768 2769 static void qeth_print_status_message(struct qeth_card *card) 2770 { 2771 switch (card->info.type) { 2772 case QETH_CARD_TYPE_OSD: 2773 case QETH_CARD_TYPE_OSM: 2774 case QETH_CARD_TYPE_OSX: 2775 /* VM will use a non-zero first character 2776 * to indicate a HiperSockets like reporting 2777 * of the level OSA sets the first character to zero 2778 * */ 2779 if (!card->info.mcl_level[0]) { 2780 sprintf(card->info.mcl_level, "%02x%02x", 2781 card->info.mcl_level[2], 2782 
card->info.mcl_level[3]); 2783 break; 2784 } 2785 fallthrough; 2786 case QETH_CARD_TYPE_IQD: 2787 if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) { 2788 card->info.mcl_level[0] = (char) _ebcasc[(__u8) 2789 card->info.mcl_level[0]]; 2790 card->info.mcl_level[1] = (char) _ebcasc[(__u8) 2791 card->info.mcl_level[1]]; 2792 card->info.mcl_level[2] = (char) _ebcasc[(__u8) 2793 card->info.mcl_level[2]]; 2794 card->info.mcl_level[3] = (char) _ebcasc[(__u8) 2795 card->info.mcl_level[3]]; 2796 card->info.mcl_level[QETH_MCL_LENGTH] = 0; 2797 } 2798 break; 2799 default: 2800 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1); 2801 } 2802 dev_info(&card->gdev->dev, 2803 "Device is a%s card%s%s%s\nwith link type %s.\n", 2804 qeth_get_cardname(card), 2805 (card->info.mcl_level[0]) ? " (level: " : "", 2806 (card->info.mcl_level[0]) ? card->info.mcl_level : "", 2807 (card->info.mcl_level[0]) ? ")" : "", 2808 qeth_get_cardname_short(card)); 2809 } 2810 2811 static void qeth_initialize_working_pool_list(struct qeth_card *card) 2812 { 2813 struct qeth_buffer_pool_entry *entry; 2814 2815 QETH_CARD_TEXT(card, 5, "inwrklst"); 2816 2817 list_for_each_entry(entry, 2818 &card->qdio.init_pool.entry_list, init_list) { 2819 qeth_put_buffer_pool_entry(card, entry); 2820 } 2821 } 2822 2823 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( 2824 struct qeth_card *card) 2825 { 2826 struct qeth_buffer_pool_entry *entry; 2827 int i, free; 2828 2829 if (list_empty(&card->qdio.in_buf_pool.entry_list)) 2830 return NULL; 2831 2832 list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) { 2833 free = 1; 2834 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2835 if (page_count(entry->elements[i]) > 1) { 2836 free = 0; 2837 break; 2838 } 2839 } 2840 if (free) { 2841 list_del_init(&entry->list); 2842 return entry; 2843 } 2844 } 2845 2846 /* no free buffer in pool so take first one and swap pages */ 2847 entry = list_first_entry(&card->qdio.in_buf_pool.entry_list, 2848 struct qeth_buffer_pool_entry, list); 2849 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2850 if (page_count(entry->elements[i]) > 1) { 2851 struct page *page = dev_alloc_page(); 2852 2853 if (!page) 2854 return NULL; 2855 2856 __free_page(entry->elements[i]); 2857 entry->elements[i] = page; 2858 QETH_CARD_STAT_INC(card, rx_sg_alloc_page); 2859 } 2860 } 2861 list_del_init(&entry->list); 2862 return entry; 2863 } 2864 2865 static int qeth_init_input_buffer(struct qeth_card *card, 2866 struct qeth_qdio_buffer *buf) 2867 { 2868 struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry; 2869 int i; 2870 2871 if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) { 2872 buf->rx_skb = netdev_alloc_skb(card->dev, 2873 ETH_HLEN + 2874 sizeof(struct ipv6hdr)); 2875 if (!buf->rx_skb) 2876 return -ENOMEM; 2877 } 2878 2879 if (!pool_entry) { 2880 pool_entry = qeth_find_free_buffer_pool_entry(card); 2881 if (!pool_entry) 2882 return -ENOBUFS; 2883 2884 buf->pool_entry = pool_entry; 2885 } 2886 2887 /* 2888 * since the buffer is accessed only from the input_tasklet 2889 * there shouldn't be a need to synchronize; also, since we use 2890 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off 2891 * buffers 2892 */ 2893 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2894 buf->buffer->element[i].length = PAGE_SIZE; 2895 buf->buffer->element[i].addr = 2896 page_to_phys(pool_entry->elements[i]); 2897 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1) 2898 buf->buffer->element[i].eflags = 
SBAL_EFLAGS_LAST_ENTRY; 2899 else 2900 buf->buffer->element[i].eflags = 0; 2901 buf->buffer->element[i].sflags = 0; 2902 } 2903 return 0; 2904 } 2905 2906 static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card, 2907 struct qeth_qdio_out_q *queue) 2908 { 2909 if (!IS_IQD(card) || 2910 qeth_iqd_is_mcast_queue(card, queue) || 2911 card->options.cq == QETH_CQ_ENABLED || 2912 qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd)) 2913 return 1; 2914 2915 return card->ssqd.mmwc ? card->ssqd.mmwc : 1; 2916 } 2917 2918 static int qeth_init_qdio_queues(struct qeth_card *card) 2919 { 2920 unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count; 2921 unsigned int i; 2922 int rc; 2923 2924 QETH_CARD_TEXT(card, 2, "initqdqs"); 2925 2926 /* inbound queue */ 2927 qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2928 memset(&card->rx, 0, sizeof(struct qeth_rx)); 2929 2930 qeth_initialize_working_pool_list(card); 2931 /*give only as many buffers to hardware as we have buffer pool entries*/ 2932 for (i = 0; i < rx_bufs; i++) { 2933 rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); 2934 if (rc) 2935 return rc; 2936 } 2937 2938 card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs); 2939 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs, 2940 NULL); 2941 if (rc) { 2942 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 2943 return rc; 2944 } 2945 2946 /* completion */ 2947 rc = qeth_cq_init(card); 2948 if (rc) { 2949 return rc; 2950 } 2951 2952 /* outbound queue */ 2953 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2954 struct qeth_qdio_out_q *queue = card->qdio.out_qs[i]; 2955 2956 qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2957 queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card); 2958 queue->next_buf_to_fill = 0; 2959 queue->do_pack = 0; 2960 queue->prev_hdr = NULL; 2961 queue->coalesced_frames = 0; 2962 queue->bulk_start = 0; 2963 queue->bulk_count = 0; 2964 queue->bulk_max = qeth_tx_select_bulk_max(card, queue); 2965 atomic_set(&queue->used_buffers, 0); 2966 atomic_set(&queue->set_pci_flags_count, 0); 2967 netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i)); 2968 } 2969 return 0; 2970 } 2971 2972 static void qeth_ipa_finalize_cmd(struct qeth_card *card, 2973 struct qeth_cmd_buffer *iob) 2974 { 2975 qeth_mpc_finalize_cmd(card, iob); 2976 2977 /* override with IPA-specific values: */ 2978 __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++; 2979 } 2980 2981 static void qeth_prepare_ipa_cmd(struct qeth_card *card, 2982 struct qeth_cmd_buffer *iob, u16 cmd_length) 2983 { 2984 u8 prot_type = qeth_mpc_select_prot_type(card); 2985 u16 total_length = iob->length; 2986 2987 qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length, 2988 iob->data); 2989 iob->finalize = qeth_ipa_finalize_cmd; 2990 2991 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); 2992 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2); 2993 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1); 2994 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2); 2995 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2); 2996 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), 2997 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); 2998 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2); 2999 } 3000 3001 static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob, 3002 struct qeth_cmd_buffer *reply) 3003 { 3004 struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply); 3005 3006 return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno); 3007 
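/* Unlike MPC commands (see qeth_mpc_match_reply()), IPA commands are
 * matched to their replies via the sequence number assigned in
 * qeth_ipa_finalize_cmd().
 */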
} 3008 3009 struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card, 3010 enum qeth_ipa_cmds cmd_code, 3011 enum qeth_prot_versions prot, 3012 unsigned int data_length) 3013 { 3014 struct qeth_cmd_buffer *iob; 3015 struct qeth_ipacmd_hdr *hdr; 3016 3017 data_length += offsetof(struct qeth_ipa_cmd, data); 3018 iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1, 3019 QETH_IPA_TIMEOUT); 3020 if (!iob) 3021 return NULL; 3022 3023 qeth_prepare_ipa_cmd(card, iob, data_length); 3024 iob->match = qeth_ipa_match_reply; 3025 3026 hdr = &__ipa_cmd(iob)->hdr; 3027 hdr->command = cmd_code; 3028 hdr->initiator = IPA_CMD_INITIATOR_HOST; 3029 /* hdr->seqno is set by qeth_send_control_data() */ 3030 hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH; 3031 hdr->rel_adapter_no = (u8) card->dev->dev_port; 3032 hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1; 3033 hdr->param_count = 1; 3034 hdr->prot_version = prot; 3035 return iob; 3036 } 3037 EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd); 3038 3039 static int qeth_send_ipa_cmd_cb(struct qeth_card *card, 3040 struct qeth_reply *reply, unsigned long data) 3041 { 3042 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3043 3044 return (cmd->hdr.return_code) ? -EIO : 0; 3045 } 3046 3047 /** 3048 * qeth_send_ipa_cmd() - send an IPA command 3049 * 3050 * See qeth_send_control_data() for explanation of the arguments. 3051 */ 3052 3053 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, 3054 int (*reply_cb)(struct qeth_card *, struct qeth_reply*, 3055 unsigned long), 3056 void *reply_param) 3057 { 3058 int rc; 3059 3060 QETH_CARD_TEXT(card, 4, "sendipa"); 3061 3062 if (card->read_or_write_problem) { 3063 qeth_put_cmd(iob); 3064 return -EIO; 3065 } 3066 3067 if (reply_cb == NULL) 3068 reply_cb = qeth_send_ipa_cmd_cb; 3069 rc = qeth_send_control_data(card, iob, reply_cb, reply_param); 3070 if (rc == -ETIME) { 3071 qeth_clear_ipacmd_list(card); 3072 qeth_schedule_recovery(card); 3073 } 3074 return rc; 3075 } 3076 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); 3077 3078 static int qeth_send_startlan_cb(struct qeth_card *card, 3079 struct qeth_reply *reply, unsigned long data) 3080 { 3081 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3082 3083 if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE) 3084 return -ENETDOWN; 3085 3086 return (cmd->hdr.return_code) ? 
-EIO : 0; 3087 } 3088 3089 static int qeth_send_startlan(struct qeth_card *card) 3090 { 3091 struct qeth_cmd_buffer *iob; 3092 3093 QETH_CARD_TEXT(card, 2, "strtlan"); 3094 3095 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0); 3096 if (!iob) 3097 return -ENOMEM; 3098 return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL); 3099 } 3100 3101 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd) 3102 { 3103 if (!cmd->hdr.return_code) 3104 cmd->hdr.return_code = 3105 cmd->data.setadapterparms.hdr.return_code; 3106 return cmd->hdr.return_code; 3107 } 3108 3109 static int qeth_query_setadapterparms_cb(struct qeth_card *card, 3110 struct qeth_reply *reply, unsigned long data) 3111 { 3112 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3113 struct qeth_query_cmds_supp *query_cmd; 3114 3115 QETH_CARD_TEXT(card, 3, "quyadpcb"); 3116 if (qeth_setadpparms_inspect_rc(cmd)) 3117 return -EIO; 3118 3119 query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp; 3120 if (query_cmd->lan_type & 0x7f) { 3121 if (!qeth_is_supported_link_type(card, query_cmd->lan_type)) 3122 return -EPROTONOSUPPORT; 3123 3124 card->info.link_type = query_cmd->lan_type; 3125 QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type); 3126 } 3127 3128 card->options.adp.supported = query_cmd->supported_cmds; 3129 return 0; 3130 } 3131 3132 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, 3133 enum qeth_ipa_setadp_cmd adp_cmd, 3134 unsigned int data_length) 3135 { 3136 struct qeth_ipacmd_setadpparms_hdr *hdr; 3137 struct qeth_cmd_buffer *iob; 3138 3139 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4, 3140 data_length + 3141 offsetof(struct qeth_ipacmd_setadpparms, 3142 data)); 3143 if (!iob) 3144 return NULL; 3145 3146 hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr; 3147 hdr->cmdlength = sizeof(*hdr) + data_length; 3148 hdr->command_code = adp_cmd; 3149 hdr->used_total = 1; 3150 hdr->seq_no = 1; 3151 return iob; 3152 } 3153 3154 static int qeth_query_setadapterparms(struct qeth_card *card) 3155 { 3156 int rc; 3157 struct qeth_cmd_buffer *iob; 3158 3159 QETH_CARD_TEXT(card, 3, "queryadp"); 3160 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, 3161 SETADP_DATA_SIZEOF(query_cmds_supp)); 3162 if (!iob) 3163 return -ENOMEM; 3164 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); 3165 return rc; 3166 } 3167 3168 static int qeth_query_ipassists_cb(struct qeth_card *card, 3169 struct qeth_reply *reply, unsigned long data) 3170 { 3171 struct qeth_ipa_cmd *cmd; 3172 3173 QETH_CARD_TEXT(card, 2, "qipasscb"); 3174 3175 cmd = (struct qeth_ipa_cmd *) data; 3176 3177 switch (cmd->hdr.return_code) { 3178 case IPA_RC_SUCCESS: 3179 break; 3180 case IPA_RC_NOTSUPP: 3181 case IPA_RC_L2_UNSUPPORTED_CMD: 3182 QETH_CARD_TEXT(card, 2, "ipaunsup"); 3183 card->options.ipa4.supported |= IPA_SETADAPTERPARMS; 3184 card->options.ipa6.supported |= IPA_SETADAPTERPARMS; 3185 return -EOPNOTSUPP; 3186 default: 3187 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n", 3188 CARD_DEVID(card), cmd->hdr.return_code); 3189 return -EIO; 3190 } 3191 3192 if (cmd->hdr.prot_version == QETH_PROT_IPV4) 3193 card->options.ipa4 = cmd->hdr.assists; 3194 else if (cmd->hdr.prot_version == QETH_PROT_IPV6) 3195 card->options.ipa6 = cmd->hdr.assists; 3196 else 3197 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n", 3198 CARD_DEVID(card)); 3199 return 0; 3200 } 3201 3202 static int 
qeth_query_ipassists(struct qeth_card *card, 3203 enum qeth_prot_versions prot) 3204 { 3205 int rc; 3206 struct qeth_cmd_buffer *iob; 3207 3208 QETH_CARD_TEXT_(card, 2, "qipassi%i", prot); 3209 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0); 3210 if (!iob) 3211 return -ENOMEM; 3212 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); 3213 return rc; 3214 } 3215 3216 static int qeth_query_switch_attributes_cb(struct qeth_card *card, 3217 struct qeth_reply *reply, unsigned long data) 3218 { 3219 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3220 struct qeth_query_switch_attributes *attrs; 3221 struct qeth_switch_info *sw_info; 3222 3223 QETH_CARD_TEXT(card, 2, "qswiatcb"); 3224 if (qeth_setadpparms_inspect_rc(cmd)) 3225 return -EIO; 3226 3227 sw_info = (struct qeth_switch_info *)reply->param; 3228 attrs = &cmd->data.setadapterparms.data.query_switch_attributes; 3229 sw_info->capabilities = attrs->capabilities; 3230 sw_info->settings = attrs->settings; 3231 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities, 3232 sw_info->settings); 3233 return 0; 3234 } 3235 3236 int qeth_query_switch_attributes(struct qeth_card *card, 3237 struct qeth_switch_info *sw_info) 3238 { 3239 struct qeth_cmd_buffer *iob; 3240 3241 QETH_CARD_TEXT(card, 2, "qswiattr"); 3242 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES)) 3243 return -EOPNOTSUPP; 3244 if (!netif_carrier_ok(card->dev)) 3245 return -ENOMEDIUM; 3246 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0); 3247 if (!iob) 3248 return -ENOMEM; 3249 return qeth_send_ipa_cmd(card, iob, 3250 qeth_query_switch_attributes_cb, sw_info); 3251 } 3252 3253 struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card, 3254 enum qeth_diags_cmds sub_cmd, 3255 unsigned int data_length) 3256 { 3257 struct qeth_ipacmd_diagass *cmd; 3258 struct qeth_cmd_buffer *iob; 3259 3260 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE, 3261 DIAG_HDR_LEN + data_length); 3262 if (!iob) 3263 return NULL; 3264 3265 cmd = &__ipa_cmd(iob)->data.diagass; 3266 cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length; 3267 cmd->subcmd = sub_cmd; 3268 return iob; 3269 } 3270 EXPORT_SYMBOL_GPL(qeth_get_diag_cmd); 3271 3272 static int qeth_query_setdiagass_cb(struct qeth_card *card, 3273 struct qeth_reply *reply, unsigned long data) 3274 { 3275 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3276 u16 rc = cmd->hdr.return_code; 3277 3278 if (rc) { 3279 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc); 3280 return -EIO; 3281 } 3282 3283 card->info.diagass_support = cmd->data.diagass.ext; 3284 return 0; 3285 } 3286 3287 static int qeth_query_setdiagass(struct qeth_card *card) 3288 { 3289 struct qeth_cmd_buffer *iob; 3290 3291 QETH_CARD_TEXT(card, 2, "qdiagass"); 3292 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0); 3293 if (!iob) 3294 return -ENOMEM; 3295 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL); 3296 } 3297 3298 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid) 3299 { 3300 unsigned long info = get_zeroed_page(GFP_KERNEL); 3301 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info; 3302 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info; 3303 struct ccw_dev_id ccwid; 3304 int level; 3305 3306 tid->chpid = card->info.chpid; 3307 ccw_device_get_id(CARD_RDEV(card), &ccwid); 3308 tid->ssid = ccwid.ssid; 3309 tid->devno = ccwid.devno; 3310 if (!info) 3311 return; 3312 level = stsi(NULL, 0, 0, 0); 3313 if ((level >= 2) && 
(stsi(info222, 2, 2, 2) == 0)) 3314 tid->lparnr = info222->lpar_number; 3315 if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) { 3316 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name)); 3317 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname)); 3318 } 3319 free_page(info); 3320 } 3321 3322 static int qeth_hw_trap_cb(struct qeth_card *card, 3323 struct qeth_reply *reply, unsigned long data) 3324 { 3325 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3326 u16 rc = cmd->hdr.return_code; 3327 3328 if (rc) { 3329 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc); 3330 return -EIO; 3331 } 3332 return 0; 3333 } 3334 3335 int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action) 3336 { 3337 struct qeth_cmd_buffer *iob; 3338 struct qeth_ipa_cmd *cmd; 3339 3340 QETH_CARD_TEXT(card, 2, "diagtrap"); 3341 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64); 3342 if (!iob) 3343 return -ENOMEM; 3344 cmd = __ipa_cmd(iob); 3345 cmd->data.diagass.type = 1; 3346 cmd->data.diagass.action = action; 3347 switch (action) { 3348 case QETH_DIAGS_TRAP_ARM: 3349 cmd->data.diagass.options = 0x0003; 3350 cmd->data.diagass.ext = 0x00010000 + 3351 sizeof(struct qeth_trap_id); 3352 qeth_get_trap_id(card, 3353 (struct qeth_trap_id *)cmd->data.diagass.cdata); 3354 break; 3355 case QETH_DIAGS_TRAP_DISARM: 3356 cmd->data.diagass.options = 0x0001; 3357 break; 3358 case QETH_DIAGS_TRAP_CAPTURE: 3359 break; 3360 } 3361 return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL); 3362 } 3363 3364 static int qeth_check_qdio_errors(struct qeth_card *card, 3365 struct qdio_buffer *buf, 3366 unsigned int qdio_error, 3367 const char *dbftext) 3368 { 3369 if (qdio_error) { 3370 QETH_CARD_TEXT(card, 2, dbftext); 3371 QETH_CARD_TEXT_(card, 2, " F15=%02X", 3372 buf->element[15].sflags); 3373 QETH_CARD_TEXT_(card, 2, " F14=%02X", 3374 buf->element[14].sflags); 3375 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error); 3376 if ((buf->element[15].sflags) == 0x12) { 3377 QETH_CARD_STAT_INC(card, rx_fifo_errors); 3378 return 0; 3379 } else 3380 return 1; 3381 } 3382 return 0; 3383 } 3384 3385 static unsigned int qeth_rx_refill_queue(struct qeth_card *card, 3386 unsigned int count) 3387 { 3388 struct qeth_qdio_q *queue = card->qdio.in_q; 3389 struct list_head *lh; 3390 int i; 3391 int rc; 3392 int newcount = 0; 3393 3394 /* only requeue at a certain threshold to avoid SIGAs */ 3395 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) { 3396 for (i = queue->next_buf_to_init; 3397 i < queue->next_buf_to_init + count; ++i) { 3398 if (qeth_init_input_buffer(card, 3399 &queue->bufs[QDIO_BUFNR(i)])) { 3400 break; 3401 } else { 3402 newcount++; 3403 } 3404 } 3405 3406 if (newcount < count) { 3407 /* we are in memory shortage so we switch back to 3408 traditional skb allocation and drop packages */ 3409 atomic_set(&card->force_alloc_skb, 3); 3410 count = newcount; 3411 } else { 3412 atomic_add_unless(&card->force_alloc_skb, -1, 0); 3413 } 3414 3415 if (!count) { 3416 i = 0; 3417 list_for_each(lh, &card->qdio.in_buf_pool.entry_list) 3418 i++; 3419 if (i == card->qdio.in_buf_pool.buf_count) { 3420 QETH_CARD_TEXT(card, 2, "qsarbw"); 3421 schedule_delayed_work( 3422 &card->buffer_reclaim_work, 3423 QETH_RECLAIM_WORK_TIME); 3424 } 3425 return 0; 3426 } 3427 3428 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 3429 queue->next_buf_to_init, count, NULL); 3430 if (rc) { 3431 QETH_CARD_TEXT(card, 2, "qinberr"); 3432 } 3433 queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init + 3434 count); 3435 return count; 3436 } 
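/* Below the requeue threshold: keep collecting empty buffers and defer
 * the SIGA until a larger batch can be returned to the hardware.
 */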
3437 3438 return 0; 3439 } 3440 3441 static void qeth_buffer_reclaim_work(struct work_struct *work) 3442 { 3443 struct qeth_card *card = container_of(to_delayed_work(work), 3444 struct qeth_card, 3445 buffer_reclaim_work); 3446 3447 local_bh_disable(); 3448 napi_schedule(&card->napi); 3449 /* kick-start the NAPI softirq: */ 3450 local_bh_enable(); 3451 } 3452 3453 static void qeth_handle_send_error(struct qeth_card *card, 3454 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err) 3455 { 3456 int sbalf15 = buffer->buffer->element[15].sflags; 3457 3458 QETH_CARD_TEXT(card, 6, "hdsnderr"); 3459 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr"); 3460 3461 if (!qdio_err) 3462 return; 3463 3464 if ((sbalf15 >= 15) && (sbalf15 <= 31)) 3465 return; 3466 3467 QETH_CARD_TEXT(card, 1, "lnkfail"); 3468 QETH_CARD_TEXT_(card, 1, "%04x %02x", 3469 (u16)qdio_err, (u8)sbalf15); 3470 } 3471 3472 /** 3473 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer. 3474 * @queue: queue to check for packing buffer 3475 * 3476 * Returns number of buffers that were prepared for flush. 3477 */ 3478 static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue) 3479 { 3480 struct qeth_qdio_out_buffer *buffer; 3481 3482 buffer = queue->bufs[queue->next_buf_to_fill]; 3483 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) && 3484 (buffer->next_element_to_fill > 0)) { 3485 /* it's a packing buffer */ 3486 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 3487 queue->next_buf_to_fill = 3488 QDIO_BUFNR(queue->next_buf_to_fill + 1); 3489 return 1; 3490 } 3491 return 0; 3492 } 3493 3494 /* 3495 * Switched to packing state if the number of used buffers on a queue 3496 * reaches a certain limit. 3497 */ 3498 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) 3499 { 3500 if (!queue->do_pack) { 3501 if (atomic_read(&queue->used_buffers) 3502 >= QETH_HIGH_WATERMARK_PACK){ 3503 /* switch non-PACKING -> PACKING */ 3504 QETH_CARD_TEXT(queue->card, 6, "np->pack"); 3505 QETH_TXQ_STAT_INC(queue, packing_mode_switch); 3506 queue->do_pack = 1; 3507 } 3508 } 3509 } 3510 3511 /* 3512 * Switches from packing to non-packing mode. If there is a packing 3513 * buffer on the queue this buffer will be prepared to be flushed. 3514 * In that case 1 is returned to inform the caller. If no buffer 3515 * has to be flushed, zero is returned. 
3516 */ 3517 static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) 3518 { 3519 if (queue->do_pack) { 3520 if (atomic_read(&queue->used_buffers) 3521 <= QETH_LOW_WATERMARK_PACK) { 3522 /* switch PACKING -> non-PACKING */ 3523 QETH_CARD_TEXT(queue->card, 6, "pack->np"); 3524 QETH_TXQ_STAT_INC(queue, packing_mode_switch); 3525 queue->do_pack = 0; 3526 return qeth_prep_flush_pack_buffer(queue); 3527 } 3528 } 3529 return 0; 3530 } 3531 3532 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, 3533 int count) 3534 { 3535 struct qeth_qdio_out_buffer *buf = queue->bufs[index]; 3536 struct qeth_card *card = queue->card; 3537 unsigned int frames, usecs; 3538 struct qaob *aob = NULL; 3539 int rc; 3540 int i; 3541 3542 for (i = index; i < index + count; ++i) { 3543 unsigned int bidx = QDIO_BUFNR(i); 3544 struct sk_buff *skb; 3545 3546 buf = queue->bufs[bidx]; 3547 buf->buffer->element[buf->next_element_to_fill - 1].eflags |= 3548 SBAL_EFLAGS_LAST_ENTRY; 3549 queue->coalesced_frames += buf->frames; 3550 3551 if (IS_IQD(card)) { 3552 skb_queue_walk(&buf->skb_list, skb) 3553 skb_tx_timestamp(skb); 3554 } 3555 } 3556 3557 if (IS_IQD(card)) { 3558 if (card->options.cq == QETH_CQ_ENABLED && 3559 !qeth_iqd_is_mcast_queue(card, queue) && 3560 count == 1) { 3561 if (!buf->aob) 3562 buf->aob = qdio_allocate_aob(); 3563 if (buf->aob) { 3564 struct qeth_qaob_priv1 *priv; 3565 3566 aob = buf->aob; 3567 priv = (struct qeth_qaob_priv1 *)&aob->user1; 3568 priv->state = QETH_QAOB_ISSUED; 3569 priv->queue_no = queue->queue_no; 3570 } 3571 } 3572 } else { 3573 if (!queue->do_pack) { 3574 if ((atomic_read(&queue->used_buffers) >= 3575 (QETH_HIGH_WATERMARK_PACK - 3576 QETH_WATERMARK_PACK_FUZZ)) && 3577 !atomic_read(&queue->set_pci_flags_count)) { 3578 /* it's likely that we'll go to packing 3579 * mode soon */ 3580 atomic_inc(&queue->set_pci_flags_count); 3581 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ; 3582 } 3583 } else { 3584 if (!atomic_read(&queue->set_pci_flags_count)) { 3585 /* 3586 * there's no outstanding PCI any more, so we 3587 * have to request a PCI to be sure the the PCI 3588 * will wake at some time in the future then we 3589 * can flush packed buffers that might still be 3590 * hanging around, which can happen if no 3591 * further send was requested by the stack 3592 */ 3593 atomic_inc(&queue->set_pci_flags_count); 3594 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ; 3595 } 3596 } 3597 } 3598 3599 QETH_TXQ_STAT_INC(queue, doorbell); 3600 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_OUTPUT, queue->queue_no, 3601 index, count, aob); 3602 3603 switch (rc) { 3604 case 0: 3605 case -ENOBUFS: 3606 /* ignore temporary SIGA errors without busy condition */ 3607 3608 /* Fake the TX completion interrupt: */ 3609 frames = READ_ONCE(queue->max_coalesced_frames); 3610 usecs = READ_ONCE(queue->coalesce_usecs); 3611 3612 if (frames && queue->coalesced_frames >= frames) { 3613 napi_schedule(&queue->napi); 3614 queue->coalesced_frames = 0; 3615 QETH_TXQ_STAT_INC(queue, coal_frames); 3616 } else if (qeth_use_tx_irqs(card) && 3617 atomic_read(&queue->used_buffers) >= 32) { 3618 /* Old behaviour carried over from the qdio layer: */ 3619 napi_schedule(&queue->napi); 3620 QETH_TXQ_STAT_INC(queue, coal_frames); 3621 } else if (usecs) { 3622 qeth_tx_arm_timer(queue, usecs); 3623 } 3624 3625 break; 3626 default: 3627 QETH_CARD_TEXT(queue->card, 2, "flushbuf"); 3628 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no); 3629 QETH_CARD_TEXT_(queue->card, 2, " idx%d", 
index); 3630 QETH_CARD_TEXT_(queue->card, 2, " c%d", count); 3631 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc); 3632 3633 /* this must not happen under normal circumstances. if it 3634 * happens something is really wrong -> recover */ 3635 qeth_schedule_recovery(queue->card); 3636 } 3637 } 3638 3639 static void qeth_flush_queue(struct qeth_qdio_out_q *queue) 3640 { 3641 qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count); 3642 3643 queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count); 3644 queue->prev_hdr = NULL; 3645 queue->bulk_count = 0; 3646 } 3647 3648 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) 3649 { 3650 /* 3651 * check if weed have to switch to non-packing mode or if 3652 * we have to get a pci flag out on the queue 3653 */ 3654 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) || 3655 !atomic_read(&queue->set_pci_flags_count)) { 3656 unsigned int index, flush_cnt; 3657 bool q_was_packing; 3658 3659 spin_lock(&queue->lock); 3660 3661 index = queue->next_buf_to_fill; 3662 q_was_packing = queue->do_pack; 3663 3664 flush_cnt = qeth_switch_to_nonpacking_if_needed(queue); 3665 if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count)) 3666 flush_cnt = qeth_prep_flush_pack_buffer(queue); 3667 3668 if (flush_cnt) { 3669 qeth_flush_buffers(queue, index, flush_cnt); 3670 if (q_was_packing) 3671 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt); 3672 } 3673 3674 spin_unlock(&queue->lock); 3675 } 3676 } 3677 3678 static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr) 3679 { 3680 struct qeth_card *card = (struct qeth_card *)card_ptr; 3681 3682 napi_schedule_irqoff(&card->napi); 3683 } 3684 3685 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) 3686 { 3687 int rc; 3688 3689 if (card->options.cq == QETH_CQ_NOTAVAILABLE) { 3690 rc = -1; 3691 goto out; 3692 } else { 3693 if (card->options.cq == cq) { 3694 rc = 0; 3695 goto out; 3696 } 3697 3698 qeth_free_qdio_queues(card); 3699 card->options.cq = cq; 3700 rc = 0; 3701 } 3702 out: 3703 return rc; 3704 3705 } 3706 EXPORT_SYMBOL_GPL(qeth_configure_cq); 3707 3708 static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob) 3709 { 3710 struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1; 3711 unsigned int queue_no = priv->queue_no; 3712 3713 BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1)); 3714 3715 if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING && 3716 queue_no < card->qdio.no_out_queues) 3717 napi_schedule(&card->qdio.out_qs[queue_no]->napi); 3718 } 3719 3720 static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, 3721 unsigned int queue, int first_element, 3722 int count) 3723 { 3724 struct qeth_qdio_q *cq = card->qdio.c_q; 3725 int i; 3726 int rc; 3727 3728 QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element); 3729 QETH_CARD_TEXT_(card, 5, "qcqhc%d", count); 3730 QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err); 3731 3732 if (qdio_err) { 3733 netif_tx_stop_all_queues(card->dev); 3734 qeth_schedule_recovery(card); 3735 return; 3736 } 3737 3738 for (i = first_element; i < first_element + count; ++i) { 3739 struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)]; 3740 int e = 0; 3741 3742 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) && 3743 buffer->element[e].addr) { 3744 unsigned long phys_aob_addr = buffer->element[e].addr; 3745 3746 qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr)); 3747 ++e; 3748 } 3749 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER); 3750 
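/* All AOB pointers in this buffer have been handled and the buffer is
 * scrubbed; the whole batch is given back to the completion queue via
 * do_QDIO() below.
 */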
} 3751 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue, 3752 cq->next_buf_to_init, count, NULL); 3753 if (rc) { 3754 dev_warn(&card->gdev->dev, 3755 "QDIO reported an error, rc=%i\n", rc); 3756 QETH_CARD_TEXT(card, 2, "qcqherr"); 3757 } 3758 3759 cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count); 3760 } 3761 3762 static void qeth_qdio_input_handler(struct ccw_device *ccwdev, 3763 unsigned int qdio_err, int queue, 3764 int first_elem, int count, 3765 unsigned long card_ptr) 3766 { 3767 struct qeth_card *card = (struct qeth_card *)card_ptr; 3768 3769 QETH_CARD_TEXT_(card, 2, "qihq%d", queue); 3770 QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err); 3771 3772 if (qdio_err) 3773 qeth_schedule_recovery(card); 3774 } 3775 3776 static void qeth_qdio_output_handler(struct ccw_device *ccwdev, 3777 unsigned int qdio_error, int __queue, 3778 int first_element, int count, 3779 unsigned long card_ptr) 3780 { 3781 struct qeth_card *card = (struct qeth_card *) card_ptr; 3782 3783 QETH_CARD_TEXT(card, 2, "achkcond"); 3784 netif_tx_stop_all_queues(card->dev); 3785 qeth_schedule_recovery(card); 3786 } 3787 3788 /** 3789 * Note: Function assumes that we have 4 outbound queues. 3790 */ 3791 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb) 3792 { 3793 struct vlan_ethhdr *veth = vlan_eth_hdr(skb); 3794 u8 tos; 3795 3796 switch (card->qdio.do_prio_queueing) { 3797 case QETH_PRIO_Q_ING_TOS: 3798 case QETH_PRIO_Q_ING_PREC: 3799 switch (vlan_get_protocol(skb)) { 3800 case htons(ETH_P_IP): 3801 tos = ipv4_get_dsfield(ip_hdr(skb)); 3802 break; 3803 case htons(ETH_P_IPV6): 3804 tos = ipv6_get_dsfield(ipv6_hdr(skb)); 3805 break; 3806 default: 3807 return card->qdio.default_out_queue; 3808 } 3809 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC) 3810 return ~tos >> 6 & 3; 3811 if (tos & IPTOS_MINCOST) 3812 return 3; 3813 if (tos & IPTOS_RELIABILITY) 3814 return 2; 3815 if (tos & IPTOS_THROUGHPUT) 3816 return 1; 3817 if (tos & IPTOS_LOWDELAY) 3818 return 0; 3819 break; 3820 case QETH_PRIO_Q_ING_SKB: 3821 if (skb->priority > 5) 3822 return 0; 3823 return ~skb->priority >> 1 & 3; 3824 case QETH_PRIO_Q_ING_VLAN: 3825 if (veth->h_vlan_proto == htons(ETH_P_8021Q)) 3826 return ~ntohs(veth->h_vlan_TCI) >> 3827 (VLAN_PRIO_SHIFT + 1) & 3; 3828 break; 3829 case QETH_PRIO_Q_ING_FIXED: 3830 return card->qdio.default_out_queue; 3831 default: 3832 break; 3833 } 3834 return card->qdio.default_out_queue; 3835 } 3836 EXPORT_SYMBOL_GPL(qeth_get_priority_queue); 3837 3838 /** 3839 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags. 3840 * @skb: SKB address 3841 * 3842 * Returns the number of pages, and thus QDIO buffer elements, needed to cover 3843 * fragmented part of the SKB. Returns zero for linear SKB. 3844 */ 3845 static int qeth_get_elements_for_frags(struct sk_buff *skb) 3846 { 3847 int cnt, elements = 0; 3848 3849 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { 3850 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; 3851 3852 elements += qeth_get_elements_for_range( 3853 (addr_t)skb_frag_address(frag), 3854 (addr_t)skb_frag_address(frag) + skb_frag_size(frag)); 3855 } 3856 return elements; 3857 } 3858 3859 /** 3860 * qeth_count_elements() - Counts the number of QDIO buffer elements needed 3861 * to transmit an skb. 3862 * @skb: the skb to operate on. 
3863 * @data_offset: skip this part of the skb's linear data 3864 * 3865 * Returns the number of pages, and thus QDIO buffer elements, needed to map the 3866 * skb's data (both its linear part and paged fragments). 3867 */ 3868 static unsigned int qeth_count_elements(struct sk_buff *skb, 3869 unsigned int data_offset) 3870 { 3871 unsigned int elements = qeth_get_elements_for_frags(skb); 3872 addr_t end = (addr_t)skb->data + skb_headlen(skb); 3873 addr_t start = (addr_t)skb->data + data_offset; 3874 3875 if (start != end) 3876 elements += qeth_get_elements_for_range(start, end); 3877 return elements; 3878 } 3879 3880 #define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \ 3881 MAX_TCP_HEADER) 3882 3883 /** 3884 * qeth_add_hw_header() - add a HW header to an skb. 3885 * @skb: skb that the HW header should be added to. 3886 * @hdr: double pointer to a qeth_hdr. When returning with >= 0, 3887 * it contains a valid pointer to a qeth_hdr. 3888 * @hdr_len: length of the HW header. 3889 * @proto_len: length of protocol headers that need to be in same page as the 3890 * HW header. 3891 * 3892 * Returns the pushed length. If the header can't be pushed on 3893 * (eg. because it would cross a page boundary), it is allocated from 3894 * the cache instead and 0 is returned. 3895 * The number of needed buffer elements is returned in @elements. 3896 * Error to create the hdr is indicated by returning with < 0. 3897 */ 3898 static int qeth_add_hw_header(struct qeth_qdio_out_q *queue, 3899 struct sk_buff *skb, struct qeth_hdr **hdr, 3900 unsigned int hdr_len, unsigned int proto_len, 3901 unsigned int *elements) 3902 { 3903 gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0); 3904 const unsigned int contiguous = proto_len ? proto_len : 1; 3905 const unsigned int max_elements = queue->max_elements; 3906 unsigned int __elements; 3907 addr_t start, end; 3908 bool push_ok; 3909 int rc; 3910 3911 check_layout: 3912 start = (addr_t)skb->data - hdr_len; 3913 end = (addr_t)skb->data; 3914 3915 if (qeth_get_elements_for_range(start, end + contiguous) == 1) { 3916 /* Push HW header into same page as first protocol header. */ 3917 push_ok = true; 3918 /* ... but TSO always needs a separate element for headers: */ 3919 if (skb_is_gso(skb)) 3920 __elements = 1 + qeth_count_elements(skb, proto_len); 3921 else 3922 __elements = qeth_count_elements(skb, 0); 3923 } else if (!proto_len && PAGE_ALIGNED(skb->data)) { 3924 /* Push HW header into preceding page, flush with skb->data. */ 3925 push_ok = true; 3926 __elements = 1 + qeth_count_elements(skb, 0); 3927 } else { 3928 /* Use header cache, copy protocol headers up. */ 3929 push_ok = false; 3930 __elements = 1 + qeth_count_elements(skb, proto_len); 3931 } 3932 3933 /* Compress skb to fit into one IO buffer: */ 3934 if (__elements > max_elements) { 3935 if (!skb_is_nonlinear(skb)) { 3936 /* Drop it, no easy way of shrinking it further. 
*/ 3937 QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n", 3938 max_elements, __elements, skb->len); 3939 return -E2BIG; 3940 } 3941 3942 rc = skb_linearize(skb); 3943 if (rc) { 3944 QETH_TXQ_STAT_INC(queue, skbs_linearized_fail); 3945 return rc; 3946 } 3947 3948 QETH_TXQ_STAT_INC(queue, skbs_linearized); 3949 /* Linearization changed the layout, re-evaluate: */ 3950 goto check_layout; 3951 } 3952 3953 *elements = __elements; 3954 /* Add the header: */ 3955 if (push_ok) { 3956 *hdr = skb_push(skb, hdr_len); 3957 return hdr_len; 3958 } 3959 3960 /* Fall back to cache element with known-good alignment: */ 3961 if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE) 3962 return -E2BIG; 3963 *hdr = kmem_cache_alloc(qeth_core_header_cache, gfp); 3964 if (!*hdr) 3965 return -ENOMEM; 3966 /* Copy protocol headers behind HW header: */ 3967 skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len); 3968 return 0; 3969 } 3970 3971 static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue, 3972 struct sk_buff *curr_skb, 3973 struct qeth_hdr *curr_hdr) 3974 { 3975 struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start]; 3976 struct qeth_hdr *prev_hdr = queue->prev_hdr; 3977 3978 if (!prev_hdr) 3979 return true; 3980 3981 /* All packets must have the same target: */ 3982 if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { 3983 struct sk_buff *prev_skb = skb_peek(&buffer->skb_list); 3984 3985 return ether_addr_equal(eth_hdr(prev_skb)->h_dest, 3986 eth_hdr(curr_skb)->h_dest) && 3987 qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2); 3988 } 3989 3990 return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) && 3991 qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3); 3992 } 3993 3994 /** 3995 * qeth_fill_buffer() - map skb into an output buffer 3996 * @buf: buffer to transport the skb 3997 * @skb: skb to map into the buffer 3998 * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated 3999 * from qeth_core_header_cache. 
4000 * @offset: when mapping the skb, start at skb->data + offset 4001 * @hd_len: if > 0, build a dedicated header element of this size 4002 */ 4003 static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf, 4004 struct sk_buff *skb, struct qeth_hdr *hdr, 4005 unsigned int offset, unsigned int hd_len) 4006 { 4007 struct qdio_buffer *buffer = buf->buffer; 4008 int element = buf->next_element_to_fill; 4009 int length = skb_headlen(skb) - offset; 4010 char *data = skb->data + offset; 4011 unsigned int elem_length, cnt; 4012 bool is_first_elem = true; 4013 4014 __skb_queue_tail(&buf->skb_list, skb); 4015 4016 /* build dedicated element for HW Header */ 4017 if (hd_len) { 4018 is_first_elem = false; 4019 4020 buffer->element[element].addr = virt_to_phys(hdr); 4021 buffer->element[element].length = hd_len; 4022 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; 4023 4024 /* HW header is allocated from cache: */ 4025 if ((void *)hdr != skb->data) 4026 __set_bit(element, buf->from_kmem_cache); 4027 /* HW header was pushed and is contiguous with linear part: */ 4028 else if (length > 0 && !PAGE_ALIGNED(data) && 4029 (data == (char *)hdr + hd_len)) 4030 buffer->element[element].eflags |= 4031 SBAL_EFLAGS_CONTIGUOUS; 4032 4033 element++; 4034 } 4035 4036 /* map linear part into buffer element(s) */ 4037 while (length > 0) { 4038 elem_length = min_t(unsigned int, length, 4039 PAGE_SIZE - offset_in_page(data)); 4040 4041 buffer->element[element].addr = virt_to_phys(data); 4042 buffer->element[element].length = elem_length; 4043 length -= elem_length; 4044 if (is_first_elem) { 4045 is_first_elem = false; 4046 if (length || skb_is_nonlinear(skb)) 4047 /* skb needs additional elements */ 4048 buffer->element[element].eflags = 4049 SBAL_EFLAGS_FIRST_FRAG; 4050 else 4051 buffer->element[element].eflags = 0; 4052 } else { 4053 buffer->element[element].eflags = 4054 SBAL_EFLAGS_MIDDLE_FRAG; 4055 } 4056 4057 data += elem_length; 4058 element++; 4059 } 4060 4061 /* map page frags into buffer element(s) */ 4062 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { 4063 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; 4064 4065 data = skb_frag_address(frag); 4066 length = skb_frag_size(frag); 4067 while (length > 0) { 4068 elem_length = min_t(unsigned int, length, 4069 PAGE_SIZE - offset_in_page(data)); 4070 4071 buffer->element[element].addr = virt_to_phys(data); 4072 buffer->element[element].length = elem_length; 4073 buffer->element[element].eflags = 4074 SBAL_EFLAGS_MIDDLE_FRAG; 4075 4076 length -= elem_length; 4077 data += elem_length; 4078 element++; 4079 } 4080 } 4081 4082 if (buffer->element[element - 1].eflags) 4083 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG; 4084 buf->next_element_to_fill = element; 4085 return element; 4086 } 4087 4088 static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue, 4089 struct sk_buff *skb, unsigned int elements, 4090 struct qeth_hdr *hdr, unsigned int offset, 4091 unsigned int hd_len) 4092 { 4093 unsigned int bytes = qdisc_pkt_len(skb); 4094 struct qeth_qdio_out_buffer *buffer; 4095 unsigned int next_element; 4096 struct netdev_queue *txq; 4097 bool stopped = false; 4098 bool flush; 4099 4100 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)]; 4101 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb)); 4102 4103 /* Just a sanity check, the wake/stop logic should ensure that we always 4104 * get a free buffer. 
4105 */ 4106 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) 4107 return -EBUSY; 4108 4109 flush = !qeth_iqd_may_bulk(queue, skb, hdr); 4110 4111 if (flush || 4112 (buffer->next_element_to_fill + elements > queue->max_elements)) { 4113 if (buffer->next_element_to_fill > 0) { 4114 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4115 queue->bulk_count++; 4116 } 4117 4118 if (queue->bulk_count >= queue->bulk_max) 4119 flush = true; 4120 4121 if (flush) 4122 qeth_flush_queue(queue); 4123 4124 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + 4125 queue->bulk_count)]; 4126 4127 /* Sanity-check again: */ 4128 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) 4129 return -EBUSY; 4130 } 4131 4132 if (buffer->next_element_to_fill == 0 && 4133 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { 4134 /* If a TX completion happens right _here_ and misses to wake 4135 * the txq, then our re-check below will catch the race. 4136 */ 4137 QETH_TXQ_STAT_INC(queue, stopped); 4138 netif_tx_stop_queue(txq); 4139 stopped = true; 4140 } 4141 4142 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len); 4143 buffer->bytes += bytes; 4144 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; 4145 queue->prev_hdr = hdr; 4146 4147 flush = __netdev_tx_sent_queue(txq, bytes, 4148 !stopped && netdev_xmit_more()); 4149 4150 if (flush || next_element >= queue->max_elements) { 4151 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4152 queue->bulk_count++; 4153 4154 if (queue->bulk_count >= queue->bulk_max) 4155 flush = true; 4156 4157 if (flush) 4158 qeth_flush_queue(queue); 4159 } 4160 4161 if (stopped && !qeth_out_queue_is_full(queue)) 4162 netif_tx_start_queue(txq); 4163 return 0; 4164 } 4165 4166 static int qeth_do_send_packet(struct qeth_card *card, 4167 struct qeth_qdio_out_q *queue, 4168 struct sk_buff *skb, struct qeth_hdr *hdr, 4169 unsigned int offset, unsigned int hd_len, 4170 unsigned int elements_needed) 4171 { 4172 unsigned int start_index = queue->next_buf_to_fill; 4173 struct qeth_qdio_out_buffer *buffer; 4174 unsigned int next_element; 4175 struct netdev_queue *txq; 4176 bool stopped = false; 4177 int flush_count = 0; 4178 int do_pack = 0; 4179 int rc = 0; 4180 4181 buffer = queue->bufs[queue->next_buf_to_fill]; 4182 4183 /* Just a sanity check, the wake/stop logic should ensure that we always 4184 * get a free buffer. 4185 */ 4186 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) 4187 return -EBUSY; 4188 4189 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb)); 4190 4191 /* check if we need to switch packing state of this queue */ 4192 qeth_switch_to_packing_if_needed(queue); 4193 if (queue->do_pack) { 4194 do_pack = 1; 4195 /* does packet fit in current buffer? */ 4196 if (buffer->next_element_to_fill + elements_needed > 4197 queue->max_elements) { 4198 /* ... 
no -> set state PRIMED */ 4199 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4200 flush_count++; 4201 queue->next_buf_to_fill = 4202 QDIO_BUFNR(queue->next_buf_to_fill + 1); 4203 buffer = queue->bufs[queue->next_buf_to_fill]; 4204 4205 /* We stepped forward, so sanity-check again: */ 4206 if (atomic_read(&buffer->state) != 4207 QETH_QDIO_BUF_EMPTY) { 4208 qeth_flush_buffers(queue, start_index, 4209 flush_count); 4210 rc = -EBUSY; 4211 goto out; 4212 } 4213 } 4214 } 4215 4216 if (buffer->next_element_to_fill == 0 && 4217 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { 4218 /* If a TX completion happens right _here_ and misses to wake 4219 * the txq, then our re-check below will catch the race. 4220 */ 4221 QETH_TXQ_STAT_INC(queue, stopped); 4222 netif_tx_stop_queue(txq); 4223 stopped = true; 4224 } 4225 4226 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len); 4227 buffer->bytes += qdisc_pkt_len(skb); 4228 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; 4229 4230 if (queue->do_pack) 4231 QETH_TXQ_STAT_INC(queue, skbs_pack); 4232 if (!queue->do_pack || stopped || next_element >= queue->max_elements) { 4233 flush_count++; 4234 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4235 queue->next_buf_to_fill = 4236 QDIO_BUFNR(queue->next_buf_to_fill + 1); 4237 } 4238 4239 if (flush_count) 4240 qeth_flush_buffers(queue, start_index, flush_count); 4241 4242 out: 4243 if (do_pack) 4244 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count); 4245 4246 if (stopped && !qeth_out_queue_is_full(queue)) 4247 netif_tx_start_queue(txq); 4248 return rc; 4249 } 4250 4251 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, 4252 unsigned int payload_len, struct sk_buff *skb, 4253 unsigned int proto_len) 4254 { 4255 struct qeth_hdr_ext_tso *ext = &hdr->ext; 4256 4257 ext->hdr_tot_len = sizeof(*ext); 4258 ext->imb_hdr_no = 1; 4259 ext->hdr_type = 1; 4260 ext->hdr_version = 1; 4261 ext->hdr_len = 28; 4262 ext->payload_len = payload_len; 4263 ext->mss = skb_shinfo(skb)->gso_size; 4264 ext->dg_hdr_len = proto_len; 4265 } 4266 4267 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, 4268 struct qeth_qdio_out_q *queue, __be16 proto, 4269 void (*fill_header)(struct qeth_qdio_out_q *queue, 4270 struct qeth_hdr *hdr, struct sk_buff *skb, 4271 __be16 proto, unsigned int data_len)) 4272 { 4273 unsigned int proto_len, hw_hdr_len; 4274 unsigned int frame_len = skb->len; 4275 bool is_tso = skb_is_gso(skb); 4276 unsigned int data_offset = 0; 4277 struct qeth_hdr *hdr = NULL; 4278 unsigned int hd_len = 0; 4279 unsigned int elements; 4280 int push_len, rc; 4281 4282 if (is_tso) { 4283 hw_hdr_len = sizeof(struct qeth_hdr_tso); 4284 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 4285 } else { 4286 hw_hdr_len = sizeof(struct qeth_hdr); 4287 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0; 4288 } 4289 4290 rc = skb_cow_head(skb, hw_hdr_len); 4291 if (rc) 4292 return rc; 4293 4294 push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len, 4295 &elements); 4296 if (push_len < 0) 4297 return push_len; 4298 if (is_tso || !push_len) { 4299 /* HW header needs its own buffer element. 
*/ 4300 hd_len = hw_hdr_len + proto_len; 4301 data_offset = push_len + proto_len; 4302 } 4303 memset(hdr, 0, hw_hdr_len); 4304 fill_header(queue, hdr, skb, proto, frame_len); 4305 if (is_tso) 4306 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr, 4307 frame_len - proto_len, skb, proto_len); 4308 4309 if (IS_IQD(card)) { 4310 rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset, 4311 hd_len); 4312 } else { 4313 /* TODO: drop skb_orphan() once TX completion is fast enough */ 4314 skb_orphan(skb); 4315 spin_lock(&queue->lock); 4316 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset, 4317 hd_len, elements); 4318 spin_unlock(&queue->lock); 4319 } 4320 4321 if (rc && !push_len) 4322 kmem_cache_free(qeth_core_header_cache, hdr); 4323 4324 return rc; 4325 } 4326 EXPORT_SYMBOL_GPL(qeth_xmit); 4327 4328 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, 4329 struct qeth_reply *reply, unsigned long data) 4330 { 4331 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4332 struct qeth_ipacmd_setadpparms *setparms; 4333 4334 QETH_CARD_TEXT(card, 4, "prmadpcb"); 4335 4336 setparms = &(cmd->data.setadapterparms); 4337 if (qeth_setadpparms_inspect_rc(cmd)) { 4338 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code); 4339 setparms->data.mode = SET_PROMISC_MODE_OFF; 4340 } 4341 card->info.promisc_mode = setparms->data.mode; 4342 return (cmd->hdr.return_code) ? -EIO : 0; 4343 } 4344 4345 void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable) 4346 { 4347 enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON : 4348 SET_PROMISC_MODE_OFF; 4349 struct qeth_cmd_buffer *iob; 4350 struct qeth_ipa_cmd *cmd; 4351 4352 QETH_CARD_TEXT(card, 4, "setprom"); 4353 QETH_CARD_TEXT_(card, 4, "mode:%x", mode); 4354 4355 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, 4356 SETADP_DATA_SIZEOF(mode)); 4357 if (!iob) 4358 return; 4359 cmd = __ipa_cmd(iob); 4360 cmd->data.setadapterparms.data.mode = mode; 4361 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); 4362 } 4363 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode); 4364 4365 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, 4366 struct qeth_reply *reply, unsigned long data) 4367 { 4368 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4369 struct qeth_ipacmd_setadpparms *adp_cmd; 4370 4371 QETH_CARD_TEXT(card, 4, "chgmaccb"); 4372 if (qeth_setadpparms_inspect_rc(cmd)) 4373 return -EIO; 4374 4375 adp_cmd = &cmd->data.setadapterparms; 4376 if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr)) 4377 return -EADDRNOTAVAIL; 4378 4379 if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) && 4380 !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC)) 4381 return -EADDRNOTAVAIL; 4382 4383 ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr); 4384 return 0; 4385 } 4386 4387 int qeth_setadpparms_change_macaddr(struct qeth_card *card) 4388 { 4389 int rc; 4390 struct qeth_cmd_buffer *iob; 4391 struct qeth_ipa_cmd *cmd; 4392 4393 QETH_CARD_TEXT(card, 4, "chgmac"); 4394 4395 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, 4396 SETADP_DATA_SIZEOF(change_addr)); 4397 if (!iob) 4398 return -ENOMEM; 4399 cmd = __ipa_cmd(iob); 4400 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; 4401 cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN; 4402 ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr, 4403 card->dev->dev_addr); 4404 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb, 4405 NULL); 
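/* On success, the callback has already copied the adapter-reported MAC into card->dev->dev_addr. */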
4406 return rc; 4407 } 4408 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); 4409 4410 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, 4411 struct qeth_reply *reply, unsigned long data) 4412 { 4413 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4414 struct qeth_set_access_ctrl *access_ctrl_req; 4415 4416 QETH_CARD_TEXT(card, 4, "setaccb"); 4417 4418 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; 4419 QETH_CARD_TEXT_(card, 2, "rc=%d", 4420 cmd->data.setadapterparms.hdr.return_code); 4421 if (cmd->data.setadapterparms.hdr.return_code != 4422 SET_ACCESS_CTRL_RC_SUCCESS) 4423 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n", 4424 access_ctrl_req->subcmd_code, CARD_DEVID(card), 4425 cmd->data.setadapterparms.hdr.return_code); 4426 switch (qeth_setadpparms_inspect_rc(cmd)) { 4427 case SET_ACCESS_CTRL_RC_SUCCESS: 4428 if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE) 4429 dev_info(&card->gdev->dev, 4430 "QDIO data connection isolation is deactivated\n"); 4431 else 4432 dev_info(&card->gdev->dev, 4433 "QDIO data connection isolation is activated\n"); 4434 return 0; 4435 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED: 4436 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n", 4437 CARD_DEVID(card)); 4438 return 0; 4439 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED: 4440 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n", 4441 CARD_DEVID(card)); 4442 return 0; 4443 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED: 4444 dev_err(&card->gdev->dev, "Adapter does not " 4445 "support QDIO data connection isolation\n"); 4446 return -EOPNOTSUPP; 4447 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER: 4448 dev_err(&card->gdev->dev, 4449 "Adapter is dedicated. 
" 4450 "QDIO data connection isolation not supported\n"); 4451 return -EOPNOTSUPP; 4452 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF: 4453 dev_err(&card->gdev->dev, 4454 "TSO does not permit QDIO data connection isolation\n"); 4455 return -EPERM; 4456 case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED: 4457 dev_err(&card->gdev->dev, "The adjacent switch port does not " 4458 "support reflective relay mode\n"); 4459 return -EOPNOTSUPP; 4460 case SET_ACCESS_CTRL_RC_REFLREL_FAILED: 4461 dev_err(&card->gdev->dev, "The reflective relay mode cannot be " 4462 "enabled at the adjacent switch port"); 4463 return -EREMOTEIO; 4464 case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED: 4465 dev_warn(&card->gdev->dev, "Turning off reflective relay mode " 4466 "at the adjacent switch failed\n"); 4467 /* benign error while disabling ISOLATION_MODE_FWD */ 4468 return 0; 4469 default: 4470 return -EIO; 4471 } 4472 } 4473 4474 int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, 4475 enum qeth_ipa_isolation_modes mode) 4476 { 4477 int rc; 4478 struct qeth_cmd_buffer *iob; 4479 struct qeth_ipa_cmd *cmd; 4480 struct qeth_set_access_ctrl *access_ctrl_req; 4481 4482 QETH_CARD_TEXT(card, 4, "setacctl"); 4483 4484 if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { 4485 dev_err(&card->gdev->dev, 4486 "Adapter does not support QDIO data connection isolation\n"); 4487 return -EOPNOTSUPP; 4488 } 4489 4490 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, 4491 SETADP_DATA_SIZEOF(set_access_ctrl)); 4492 if (!iob) 4493 return -ENOMEM; 4494 cmd = __ipa_cmd(iob); 4495 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; 4496 access_ctrl_req->subcmd_code = mode; 4497 4498 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb, 4499 NULL); 4500 if (rc) { 4501 QETH_CARD_TEXT_(card, 2, "rc=%d", rc); 4502 QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n", 4503 rc, CARD_DEVID(card)); 4504 } 4505 4506 return rc; 4507 } 4508 4509 void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue) 4510 { 4511 struct qeth_card *card; 4512 4513 card = dev->ml_priv; 4514 QETH_CARD_TEXT(card, 4, "txtimeo"); 4515 qeth_schedule_recovery(card); 4516 } 4517 EXPORT_SYMBOL_GPL(qeth_tx_timeout); 4518 4519 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) 4520 { 4521 struct qeth_card *card = dev->ml_priv; 4522 int rc = 0; 4523 4524 switch (regnum) { 4525 case MII_BMCR: /* Basic mode control register */ 4526 rc = BMCR_FULLDPLX; 4527 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) && 4528 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) && 4529 (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH)) 4530 rc |= BMCR_SPEED100; 4531 break; 4532 case MII_BMSR: /* Basic mode status register */ 4533 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS | 4534 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL | 4535 BMSR_100BASE4; 4536 break; 4537 case MII_PHYSID1: /* PHYS ID 1 */ 4538 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) | 4539 dev->dev_addr[2]; 4540 rc = (rc >> 5) & 0xFFFF; 4541 break; 4542 case MII_PHYSID2: /* PHYS ID 2 */ 4543 rc = (dev->dev_addr[2] << 10) & 0xFFFF; 4544 break; 4545 case MII_ADVERTISE: /* Advertisement control reg */ 4546 rc = ADVERTISE_ALL; 4547 break; 4548 case MII_LPA: /* Link partner ability reg */ 4549 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL | 4550 LPA_100BASE4 | LPA_LPACK; 4551 break; 4552 case MII_EXPANSION: /* Expansion register */ 4553 break; 4554 case MII_DCOUNTER: /* disconnect 
counter */ 4555 break; 4556 case MII_FCSCOUNTER: /* false carrier counter */ 4557 break; 4558 case MII_NWAYTEST: /* N-way auto-neg test register */ 4559 break; 4560 case MII_RERRCOUNTER: /* rx error counter */ 4561 rc = card->stats.rx_length_errors + 4562 card->stats.rx_frame_errors + 4563 card->stats.rx_fifo_errors; 4564 break; 4565 case MII_SREVISION: /* silicon revision */ 4566 break; 4567 case MII_RESV1: /* reserved 1 */ 4568 break; 4569 case MII_LBRERROR: /* loopback, rx, bypass error */ 4570 break; 4571 case MII_PHYADDR: /* physical address */ 4572 break; 4573 case MII_RESV2: /* reserved 2 */ 4574 break; 4575 case MII_TPISTATUS: /* TPI status for 10mbps */ 4576 break; 4577 case MII_NCONFIG: /* network interface config */ 4578 break; 4579 default: 4580 break; 4581 } 4582 return rc; 4583 } 4584 4585 static int qeth_snmp_command_cb(struct qeth_card *card, 4586 struct qeth_reply *reply, unsigned long data) 4587 { 4588 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4589 struct qeth_arp_query_info *qinfo = reply->param; 4590 struct qeth_ipacmd_setadpparms *adp_cmd; 4591 unsigned int data_len; 4592 void *snmp_data; 4593 4594 QETH_CARD_TEXT(card, 3, "snpcmdcb"); 4595 4596 if (cmd->hdr.return_code) { 4597 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code); 4598 return -EIO; 4599 } 4600 if (cmd->data.setadapterparms.hdr.return_code) { 4601 cmd->hdr.return_code = 4602 cmd->data.setadapterparms.hdr.return_code; 4603 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code); 4604 return -EIO; 4605 } 4606 4607 adp_cmd = &cmd->data.setadapterparms; 4608 data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr); 4609 if (adp_cmd->hdr.seq_no == 1) { 4610 snmp_data = &adp_cmd->data.snmp; 4611 } else { 4612 snmp_data = &adp_cmd->data.snmp.request; 4613 data_len -= offsetof(struct qeth_snmp_cmd, request); 4614 } 4615 4616 /* check if there is enough room in userspace */ 4617 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { 4618 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC); 4619 return -ENOSPC; 4620 } 4621 QETH_CARD_TEXT_(card, 4, "snore%i", 4622 cmd->data.setadapterparms.hdr.used_total); 4623 QETH_CARD_TEXT_(card, 4, "sseqn%i", 4624 cmd->data.setadapterparms.hdr.seq_no); 4625 /*copy entries to user buffer*/ 4626 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len); 4627 qinfo->udata_offset += data_len; 4628 4629 if (cmd->data.setadapterparms.hdr.seq_no < 4630 cmd->data.setadapterparms.hdr.used_total) 4631 return 1; 4632 return 0; 4633 } 4634 4635 static int qeth_snmp_command(struct qeth_card *card, char __user *udata) 4636 { 4637 struct qeth_snmp_ureq __user *ureq; 4638 struct qeth_cmd_buffer *iob; 4639 unsigned int req_len; 4640 struct qeth_arp_query_info qinfo = {0, }; 4641 int rc = 0; 4642 4643 QETH_CARD_TEXT(card, 3, "snmpcmd"); 4644 4645 if (IS_VM_NIC(card)) 4646 return -EOPNOTSUPP; 4647 4648 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && 4649 IS_LAYER3(card)) 4650 return -EOPNOTSUPP; 4651 4652 ureq = (struct qeth_snmp_ureq __user *) udata; 4653 if (get_user(qinfo.udata_len, &ureq->hdr.data_len) || 4654 get_user(req_len, &ureq->hdr.req_len)) 4655 return -EFAULT; 4656 4657 /* Sanitize user input, to avoid overflows in iob size calculation: */ 4658 if (req_len > QETH_BUFSIZE) 4659 return -EINVAL; 4660 4661 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len); 4662 if (!iob) 4663 return -ENOMEM; 4664 4665 if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp, 4666 &ureq->cmd, req_len)) { 4667 qeth_put_cmd(iob); 4668 
return -EFAULT; 4669 } 4670 4671 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); 4672 if (!qinfo.udata) { 4673 qeth_put_cmd(iob); 4674 return -ENOMEM; 4675 } 4676 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr); 4677 4678 rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo); 4679 if (rc) 4680 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n", 4681 CARD_DEVID(card), rc); 4682 else { 4683 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) 4684 rc = -EFAULT; 4685 } 4686 4687 kfree(qinfo.udata); 4688 return rc; 4689 } 4690 4691 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, 4692 struct qeth_reply *reply, 4693 unsigned long data) 4694 { 4695 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4696 struct qeth_qoat_priv *priv = reply->param; 4697 int resdatalen; 4698 4699 QETH_CARD_TEXT(card, 3, "qoatcb"); 4700 if (qeth_setadpparms_inspect_rc(cmd)) 4701 return -EIO; 4702 4703 resdatalen = cmd->data.setadapterparms.hdr.cmdlength; 4704 4705 if (resdatalen > (priv->buffer_len - priv->response_len)) 4706 return -ENOSPC; 4707 4708 memcpy(priv->buffer + priv->response_len, 4709 &cmd->data.setadapterparms.hdr, resdatalen); 4710 priv->response_len += resdatalen; 4711 4712 if (cmd->data.setadapterparms.hdr.seq_no < 4713 cmd->data.setadapterparms.hdr.used_total) 4714 return 1; 4715 return 0; 4716 } 4717 4718 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) 4719 { 4720 int rc = 0; 4721 struct qeth_cmd_buffer *iob; 4722 struct qeth_ipa_cmd *cmd; 4723 struct qeth_query_oat *oat_req; 4724 struct qeth_query_oat_data oat_data; 4725 struct qeth_qoat_priv priv; 4726 void __user *tmp; 4727 4728 QETH_CARD_TEXT(card, 3, "qoatcmd"); 4729 4730 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) 4731 return -EOPNOTSUPP; 4732 4733 if (copy_from_user(&oat_data, udata, sizeof(oat_data))) 4734 return -EFAULT; 4735 4736 priv.buffer_len = oat_data.buffer_len; 4737 priv.response_len = 0; 4738 priv.buffer = vzalloc(oat_data.buffer_len); 4739 if (!priv.buffer) 4740 return -ENOMEM; 4741 4742 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, 4743 SETADP_DATA_SIZEOF(query_oat)); 4744 if (!iob) { 4745 rc = -ENOMEM; 4746 goto out_free; 4747 } 4748 cmd = __ipa_cmd(iob); 4749 oat_req = &cmd->data.setadapterparms.data.query_oat; 4750 oat_req->subcmd_code = oat_data.command; 4751 4752 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv); 4753 if (!rc) { 4754 tmp = is_compat_task() ? 
compat_ptr(oat_data.ptr) : 4755 u64_to_user_ptr(oat_data.ptr); 4756 oat_data.response_len = priv.response_len; 4757 4758 if (copy_to_user(tmp, priv.buffer, priv.response_len) || 4759 copy_to_user(udata, &oat_data, sizeof(oat_data))) 4760 rc = -EFAULT; 4761 } 4762 4763 out_free: 4764 vfree(priv.buffer); 4765 return rc; 4766 } 4767 4768 static int qeth_query_card_info_cb(struct qeth_card *card, 4769 struct qeth_reply *reply, unsigned long data) 4770 { 4771 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4772 struct qeth_link_info *link_info = reply->param; 4773 struct qeth_query_card_info *card_info; 4774 4775 QETH_CARD_TEXT(card, 2, "qcrdincb"); 4776 if (qeth_setadpparms_inspect_rc(cmd)) 4777 return -EIO; 4778 4779 card_info = &cmd->data.setadapterparms.data.card_info; 4780 netdev_dbg(card->dev, 4781 "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n", 4782 card_info->card_type, card_info->port_mode, 4783 card_info->port_speed); 4784 4785 switch (card_info->port_mode) { 4786 case CARD_INFO_PORTM_FULLDUPLEX: 4787 link_info->duplex = DUPLEX_FULL; 4788 break; 4789 case CARD_INFO_PORTM_HALFDUPLEX: 4790 link_info->duplex = DUPLEX_HALF; 4791 break; 4792 default: 4793 link_info->duplex = DUPLEX_UNKNOWN; 4794 } 4795 4796 switch (card_info->card_type) { 4797 case CARD_INFO_TYPE_1G_COPPER_A: 4798 case CARD_INFO_TYPE_1G_COPPER_B: 4799 link_info->speed = SPEED_1000; 4800 link_info->port = PORT_TP; 4801 break; 4802 case CARD_INFO_TYPE_1G_FIBRE_A: 4803 case CARD_INFO_TYPE_1G_FIBRE_B: 4804 link_info->speed = SPEED_1000; 4805 link_info->port = PORT_FIBRE; 4806 break; 4807 case CARD_INFO_TYPE_10G_FIBRE_A: 4808 case CARD_INFO_TYPE_10G_FIBRE_B: 4809 link_info->speed = SPEED_10000; 4810 link_info->port = PORT_FIBRE; 4811 break; 4812 default: 4813 switch (card_info->port_speed) { 4814 case CARD_INFO_PORTS_10M: 4815 link_info->speed = SPEED_10; 4816 break; 4817 case CARD_INFO_PORTS_100M: 4818 link_info->speed = SPEED_100; 4819 break; 4820 case CARD_INFO_PORTS_1G: 4821 link_info->speed = SPEED_1000; 4822 break; 4823 case CARD_INFO_PORTS_10G: 4824 link_info->speed = SPEED_10000; 4825 break; 4826 case CARD_INFO_PORTS_25G: 4827 link_info->speed = SPEED_25000; 4828 break; 4829 default: 4830 link_info->speed = SPEED_UNKNOWN; 4831 } 4832 4833 link_info->port = PORT_OTHER; 4834 } 4835 4836 return 0; 4837 } 4838 4839 int qeth_query_card_info(struct qeth_card *card, 4840 struct qeth_link_info *link_info) 4841 { 4842 struct qeth_cmd_buffer *iob; 4843 4844 QETH_CARD_TEXT(card, 2, "qcrdinfo"); 4845 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO)) 4846 return -EOPNOTSUPP; 4847 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0); 4848 if (!iob) 4849 return -ENOMEM; 4850 4851 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info); 4852 } 4853 4854 static int qeth_init_link_info_oat_cb(struct qeth_card *card, 4855 struct qeth_reply *reply_priv, 4856 unsigned long data) 4857 { 4858 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4859 struct qeth_link_info *link_info = reply_priv->param; 4860 struct qeth_query_oat_physical_if *phys_if; 4861 struct qeth_query_oat_reply *reply; 4862 4863 if (qeth_setadpparms_inspect_rc(cmd)) 4864 return -EIO; 4865 4866 /* Multi-part reply is unexpected, don't bother: */ 4867 if (cmd->data.setadapterparms.hdr.used_total > 1) 4868 return -EINVAL; 4869 4870 /* Expect the reply to start with phys_if data: */ 4871 reply = &cmd->data.setadapterparms.data.query_oat.reply[0]; 4872 if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF || 4873 
reply->length < sizeof(*reply)) 4874 return -EINVAL; 4875 4876 phys_if = &reply->phys_if; 4877 4878 switch (phys_if->speed_duplex) { 4879 case QETH_QOAT_PHYS_SPEED_10M_HALF: 4880 link_info->speed = SPEED_10; 4881 link_info->duplex = DUPLEX_HALF; 4882 break; 4883 case QETH_QOAT_PHYS_SPEED_10M_FULL: 4884 link_info->speed = SPEED_10; 4885 link_info->duplex = DUPLEX_FULL; 4886 break; 4887 case QETH_QOAT_PHYS_SPEED_100M_HALF: 4888 link_info->speed = SPEED_100; 4889 link_info->duplex = DUPLEX_HALF; 4890 break; 4891 case QETH_QOAT_PHYS_SPEED_100M_FULL: 4892 link_info->speed = SPEED_100; 4893 link_info->duplex = DUPLEX_FULL; 4894 break; 4895 case QETH_QOAT_PHYS_SPEED_1000M_HALF: 4896 link_info->speed = SPEED_1000; 4897 link_info->duplex = DUPLEX_HALF; 4898 break; 4899 case QETH_QOAT_PHYS_SPEED_1000M_FULL: 4900 link_info->speed = SPEED_1000; 4901 link_info->duplex = DUPLEX_FULL; 4902 break; 4903 case QETH_QOAT_PHYS_SPEED_10G_FULL: 4904 link_info->speed = SPEED_10000; 4905 link_info->duplex = DUPLEX_FULL; 4906 break; 4907 case QETH_QOAT_PHYS_SPEED_25G_FULL: 4908 link_info->speed = SPEED_25000; 4909 link_info->duplex = DUPLEX_FULL; 4910 break; 4911 case QETH_QOAT_PHYS_SPEED_UNKNOWN: 4912 default: 4913 link_info->speed = SPEED_UNKNOWN; 4914 link_info->duplex = DUPLEX_UNKNOWN; 4915 break; 4916 } 4917 4918 switch (phys_if->media_type) { 4919 case QETH_QOAT_PHYS_MEDIA_COPPER: 4920 link_info->port = PORT_TP; 4921 link_info->link_mode = QETH_LINK_MODE_UNKNOWN; 4922 break; 4923 case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT: 4924 link_info->port = PORT_FIBRE; 4925 link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT; 4926 break; 4927 case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG: 4928 link_info->port = PORT_FIBRE; 4929 link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG; 4930 break; 4931 default: 4932 link_info->port = PORT_OTHER; 4933 link_info->link_mode = QETH_LINK_MODE_UNKNOWN; 4934 break; 4935 } 4936 4937 return 0; 4938 } 4939 4940 static void qeth_init_link_info(struct qeth_card *card) 4941 { 4942 card->info.link_info.duplex = DUPLEX_FULL; 4943 4944 if (IS_IQD(card) || IS_VM_NIC(card)) { 4945 card->info.link_info.speed = SPEED_10000; 4946 card->info.link_info.port = PORT_FIBRE; 4947 card->info.link_info.link_mode = QETH_LINK_MODE_FIBRE_SHORT; 4948 } else { 4949 switch (card->info.link_type) { 4950 case QETH_LINK_TYPE_FAST_ETH: 4951 case QETH_LINK_TYPE_LANE_ETH100: 4952 card->info.link_info.speed = SPEED_100; 4953 card->info.link_info.port = PORT_TP; 4954 break; 4955 case QETH_LINK_TYPE_GBIT_ETH: 4956 case QETH_LINK_TYPE_LANE_ETH1000: 4957 card->info.link_info.speed = SPEED_1000; 4958 card->info.link_info.port = PORT_FIBRE; 4959 break; 4960 case QETH_LINK_TYPE_10GBIT_ETH: 4961 card->info.link_info.speed = SPEED_10000; 4962 card->info.link_info.port = PORT_FIBRE; 4963 break; 4964 case QETH_LINK_TYPE_25GBIT_ETH: 4965 card->info.link_info.speed = SPEED_25000; 4966 card->info.link_info.port = PORT_FIBRE; 4967 break; 4968 default: 4969 dev_info(&card->gdev->dev, "Unknown link type %x\n", 4970 card->info.link_type); 4971 card->info.link_info.speed = SPEED_UNKNOWN; 4972 card->info.link_info.port = PORT_OTHER; 4973 } 4974 4975 card->info.link_info.link_mode = QETH_LINK_MODE_UNKNOWN; 4976 } 4977 4978 /* Get more accurate data via QUERY OAT: */ 4979 if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) { 4980 struct qeth_link_info link_info; 4981 struct qeth_cmd_buffer *iob; 4982 4983 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, 4984 SETADP_DATA_SIZEOF(query_oat)); 4985 if (iob) { 4986 struct qeth_ipa_cmd *cmd = __ipa_cmd(iob); 
4987 struct qeth_query_oat *oat_req; 4988 4989 oat_req = &cmd->data.setadapterparms.data.query_oat; 4990 oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE; 4991 4992 if (!qeth_send_ipa_cmd(card, iob, 4993 qeth_init_link_info_oat_cb, 4994 &link_info)) { 4995 if (link_info.speed != SPEED_UNKNOWN) 4996 card->info.link_info.speed = link_info.speed; 4997 if (link_info.duplex != DUPLEX_UNKNOWN) 4998 card->info.link_info.duplex = link_info.duplex; 4999 if (link_info.port != PORT_OTHER) 5000 card->info.link_info.port = link_info.port; 5001 if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN) 5002 card->info.link_info.link_mode = link_info.link_mode; 5003 } 5004 } 5005 } 5006 } 5007 5008 /** 5009 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address 5010 * @card: pointer to a qeth_card 5011 * 5012 * Returns 5013 * 0, if a MAC address has been set for the card's netdevice 5014 * a return code, for various error conditions 5015 */ 5016 int qeth_vm_request_mac(struct qeth_card *card) 5017 { 5018 struct diag26c_mac_resp *response; 5019 struct diag26c_mac_req *request; 5020 int rc; 5021 5022 QETH_CARD_TEXT(card, 2, "vmreqmac"); 5023 5024 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA); 5025 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA); 5026 if (!request || !response) { 5027 rc = -ENOMEM; 5028 goto out; 5029 } 5030 5031 request->resp_buf_len = sizeof(*response); 5032 request->resp_version = DIAG26C_VERSION2; 5033 request->op_code = DIAG26C_GET_MAC; 5034 request->devno = card->info.ddev_devno; 5035 5036 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 5037 rc = diag26c(request, response, DIAG26C_MAC_SERVICES); 5038 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 5039 if (rc) 5040 goto out; 5041 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response)); 5042 5043 if (request->resp_buf_len < sizeof(*response) || 5044 response->version != request->resp_version) { 5045 rc = -EIO; 5046 QETH_CARD_TEXT(card, 2, "badresp"); 5047 QETH_CARD_HEX(card, 2, &request->resp_buf_len, 5048 sizeof(request->resp_buf_len)); 5049 } else if (!is_valid_ether_addr(response->mac)) { 5050 rc = -EINVAL; 5051 QETH_CARD_TEXT(card, 2, "badmac"); 5052 QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN); 5053 } else { 5054 ether_addr_copy(card->dev->dev_addr, response->mac); 5055 } 5056 5057 out: 5058 kfree(response); 5059 kfree(request); 5060 return rc; 5061 } 5062 EXPORT_SYMBOL_GPL(qeth_vm_request_mac); 5063 5064 static void qeth_determine_capabilities(struct qeth_card *card) 5065 { 5066 struct qeth_channel *channel = &card->data; 5067 struct ccw_device *ddev = channel->ccwdev; 5068 int rc; 5069 int ddev_offline = 0; 5070 5071 QETH_CARD_TEXT(card, 2, "detcapab"); 5072 if (!ddev->online) { 5073 ddev_offline = 1; 5074 rc = qeth_start_channel(channel); 5075 if (rc) { 5076 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 5077 goto out; 5078 } 5079 } 5080 5081 rc = qeth_read_conf_data(card); 5082 if (rc) { 5083 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n", 5084 CARD_DEVID(card), rc); 5085 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 5086 goto out_offline; 5087 } 5088 5089 rc = qdio_get_ssqd_desc(ddev, &card->ssqd); 5090 if (rc) 5091 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 5092 5093 QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt); 5094 QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1); 5095 QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2); 5096 QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3); 5097 QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt); 5098 if (!((card->ssqd.qfmt 
!= QDIO_IQDIO_QFMT) || 5099 ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) || 5100 ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) { 5101 dev_info(&card->gdev->dev, 5102 "Completion Queueing supported\n"); 5103 } else { 5104 card->options.cq = QETH_CQ_NOTAVAILABLE; 5105 } 5106 5107 out_offline: 5108 if (ddev_offline == 1) 5109 qeth_stop_channel(channel); 5110 out: 5111 return; 5112 } 5113 5114 static void qeth_read_ccw_conf_data(struct qeth_card *card) 5115 { 5116 struct qeth_card_info *info = &card->info; 5117 struct ccw_device *cdev = CARD_DDEV(card); 5118 struct ccw_dev_id dev_id; 5119 5120 QETH_CARD_TEXT(card, 2, "ccwconfd"); 5121 ccw_device_get_id(cdev, &dev_id); 5122 5123 info->ddev_devno = dev_id.devno; 5124 info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) && 5125 !ccw_device_get_iid(cdev, &info->iid) && 5126 !ccw_device_get_chid(cdev, 0, &info->chid); 5127 info->ssid = dev_id.ssid; 5128 5129 dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n", 5130 info->chid, info->chpid); 5131 5132 QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno); 5133 QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid); 5134 QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid); 5135 QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid); 5136 QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid); 5137 QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid); 5138 QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid); 5139 } 5140 5141 static int qeth_qdio_establish(struct qeth_card *card) 5142 { 5143 struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES]; 5144 struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES]; 5145 struct qeth_qib_parms *qib_parms = NULL; 5146 struct qdio_initialize init_data; 5147 unsigned int i; 5148 int rc = 0; 5149 5150 QETH_CARD_TEXT(card, 2, "qdioest"); 5151 5152 if (!IS_IQD(card) && !IS_VM_NIC(card)) { 5153 qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL); 5154 if (!qib_parms) 5155 return -ENOMEM; 5156 5157 qeth_fill_qib_parms(card, qib_parms); 5158 } 5159 5160 in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs; 5161 if (card->options.cq == QETH_CQ_ENABLED) 5162 in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs; 5163 5164 for (i = 0; i < card->qdio.no_out_queues; i++) 5165 out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs; 5166 5167 memset(&init_data, 0, sizeof(struct qdio_initialize)); 5168 init_data.q_format = IS_IQD(card) ? 
QDIO_IQDIO_QFMT : 5169 QDIO_QETH_QFMT; 5170 init_data.qib_param_field_format = 0; 5171 init_data.qib_param_field = (void *)qib_parms; 5172 init_data.no_input_qs = card->qdio.no_in_queues; 5173 init_data.no_output_qs = card->qdio.no_out_queues; 5174 init_data.input_handler = qeth_qdio_input_handler; 5175 init_data.output_handler = qeth_qdio_output_handler; 5176 init_data.irq_poll = qeth_qdio_poll; 5177 init_data.int_parm = (unsigned long) card; 5178 init_data.input_sbal_addr_array = in_sbal_ptrs; 5179 init_data.output_sbal_addr_array = out_sbal_ptrs; 5180 5181 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, 5182 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) { 5183 rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs, 5184 init_data.no_output_qs); 5185 if (rc) { 5186 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 5187 goto out; 5188 } 5189 rc = qdio_establish(CARD_DDEV(card), &init_data); 5190 if (rc) { 5191 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 5192 qdio_free(CARD_DDEV(card)); 5193 } 5194 } 5195 5196 switch (card->options.cq) { 5197 case QETH_CQ_ENABLED: 5198 dev_info(&card->gdev->dev, "Completion Queue support enabled"); 5199 break; 5200 case QETH_CQ_DISABLED: 5201 dev_info(&card->gdev->dev, "Completion Queue support disabled"); 5202 break; 5203 default: 5204 break; 5205 } 5206 5207 out: 5208 kfree(qib_parms); 5209 return rc; 5210 } 5211 5212 static void qeth_core_free_card(struct qeth_card *card) 5213 { 5214 QETH_CARD_TEXT(card, 2, "freecrd"); 5215 5216 unregister_service_level(&card->qeth_service_level); 5217 debugfs_remove_recursive(card->debugfs); 5218 qeth_put_cmd(card->read_cmd); 5219 destroy_workqueue(card->event_wq); 5220 dev_set_drvdata(&card->gdev->dev, NULL); 5221 kfree(card); 5222 } 5223 5224 static void qeth_trace_features(struct qeth_card *card) 5225 { 5226 QETH_CARD_TEXT(card, 2, "features"); 5227 QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4)); 5228 QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6)); 5229 QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp)); 5230 QETH_CARD_HEX(card, 2, &card->info.diagass_support, 5231 sizeof(card->info.diagass_support)); 5232 } 5233 5234 static struct ccw_device_id qeth_ids[] = { 5235 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01), 5236 .driver_info = QETH_CARD_TYPE_OSD}, 5237 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05), 5238 .driver_info = QETH_CARD_TYPE_IQD}, 5239 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03), 5240 .driver_info = QETH_CARD_TYPE_OSM}, 5241 #ifdef CONFIG_QETH_OSX 5242 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02), 5243 .driver_info = QETH_CARD_TYPE_OSX}, 5244 #endif 5245 {}, 5246 }; 5247 MODULE_DEVICE_TABLE(ccw, qeth_ids); 5248 5249 static struct ccw_driver qeth_ccw_driver = { 5250 .driver = { 5251 .owner = THIS_MODULE, 5252 .name = "qeth", 5253 }, 5254 .ids = qeth_ids, 5255 .probe = ccwgroup_probe_ccwdev, 5256 .remove = ccwgroup_remove_ccwdev, 5257 }; 5258 5259 static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok) 5260 { 5261 int retries = 3; 5262 int rc; 5263 5264 QETH_CARD_TEXT(card, 2, "hrdsetup"); 5265 atomic_set(&card->force_alloc_skb, 0); 5266 rc = qeth_update_from_chp_desc(card); 5267 if (rc) 5268 return rc; 5269 retry: 5270 if (retries < 3) 5271 QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n", 5272 CARD_DEVID(card)); 5273 rc = qeth_qdio_clear_card(card, !IS_IQD(card)); 5274 qeth_stop_channel(&card->data); 5275 qeth_stop_channel(&card->write); 5276 
qeth_stop_channel(&card->read); 5277 qdio_free(CARD_DDEV(card)); 5278 5279 rc = qeth_start_channel(&card->read); 5280 if (rc) 5281 goto retriable; 5282 rc = qeth_start_channel(&card->write); 5283 if (rc) 5284 goto retriable; 5285 rc = qeth_start_channel(&card->data); 5286 if (rc) 5287 goto retriable; 5288 retriable: 5289 if (rc == -ERESTARTSYS) { 5290 QETH_CARD_TEXT(card, 2, "break1"); 5291 return rc; 5292 } else if (rc) { 5293 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 5294 if (--retries < 0) 5295 goto out; 5296 else 5297 goto retry; 5298 } 5299 5300 qeth_determine_capabilities(card); 5301 qeth_read_ccw_conf_data(card); 5302 qeth_idx_init(card); 5303 5304 rc = qeth_idx_activate_read_channel(card); 5305 if (rc == -EINTR) { 5306 QETH_CARD_TEXT(card, 2, "break2"); 5307 return rc; 5308 } else if (rc) { 5309 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 5310 if (--retries < 0) 5311 goto out; 5312 else 5313 goto retry; 5314 } 5315 5316 rc = qeth_idx_activate_write_channel(card); 5317 if (rc == -EINTR) { 5318 QETH_CARD_TEXT(card, 2, "break3"); 5319 return rc; 5320 } else if (rc) { 5321 QETH_CARD_TEXT_(card, 2, "4err%d", rc); 5322 if (--retries < 0) 5323 goto out; 5324 else 5325 goto retry; 5326 } 5327 card->read_or_write_problem = 0; 5328 rc = qeth_mpc_initialize(card); 5329 if (rc) { 5330 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 5331 goto out; 5332 } 5333 5334 rc = qeth_send_startlan(card); 5335 if (rc) { 5336 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 5337 if (rc == -ENETDOWN) { 5338 dev_warn(&card->gdev->dev, "The LAN is offline\n"); 5339 *carrier_ok = false; 5340 } else { 5341 goto out; 5342 } 5343 } else { 5344 *carrier_ok = true; 5345 } 5346 5347 card->options.ipa4.supported = 0; 5348 card->options.ipa6.supported = 0; 5349 card->options.adp.supported = 0; 5350 card->options.sbp.supported_funcs = 0; 5351 card->info.diagass_support = 0; 5352 rc = qeth_query_ipassists(card, QETH_PROT_IPV4); 5353 if (rc == -ENOMEM) 5354 goto out; 5355 if (qeth_is_supported(card, IPA_IPV6)) { 5356 rc = qeth_query_ipassists(card, QETH_PROT_IPV6); 5357 if (rc == -ENOMEM) 5358 goto out; 5359 } 5360 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { 5361 rc = qeth_query_setadapterparms(card); 5362 if (rc < 0) { 5363 QETH_CARD_TEXT_(card, 2, "7err%d", rc); 5364 goto out; 5365 } 5366 } 5367 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { 5368 rc = qeth_query_setdiagass(card); 5369 if (rc) 5370 QETH_CARD_TEXT_(card, 2, "8err%d", rc); 5371 } 5372 5373 qeth_trace_features(card); 5374 5375 if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) || 5376 (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))) 5377 card->info.hwtrap = 0; 5378 5379 if (card->options.isolation != ISOLATION_MODE_NONE) { 5380 rc = qeth_setadpparms_set_access_ctrl(card, 5381 card->options.isolation); 5382 if (rc) 5383 goto out; 5384 } 5385 5386 qeth_init_link_info(card); 5387 5388 rc = qeth_init_qdio_queues(card); 5389 if (rc) { 5390 QETH_CARD_TEXT_(card, 2, "9err%d", rc); 5391 goto out; 5392 } 5393 5394 return 0; 5395 out: 5396 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " 5397 "an error on the device\n"); 5398 QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! 
rc=%d\n", 5399 CARD_DEVID(card), rc); 5400 return rc; 5401 } 5402 5403 static int qeth_set_online(struct qeth_card *card, 5404 const struct qeth_discipline *disc) 5405 { 5406 bool carrier_ok; 5407 int rc; 5408 5409 mutex_lock(&card->conf_mutex); 5410 QETH_CARD_TEXT(card, 2, "setonlin"); 5411 5412 rc = qeth_hardsetup_card(card, &carrier_ok); 5413 if (rc) { 5414 QETH_CARD_TEXT_(card, 2, "2err%04x", rc); 5415 rc = -ENODEV; 5416 goto err_hardsetup; 5417 } 5418 5419 qeth_print_status_message(card); 5420 5421 if (card->dev->reg_state != NETREG_REGISTERED) 5422 /* no need for locking / error handling at this early stage: */ 5423 qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card)); 5424 5425 rc = disc->set_online(card, carrier_ok); 5426 if (rc) 5427 goto err_online; 5428 5429 /* let user_space know that device is online */ 5430 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE); 5431 5432 mutex_unlock(&card->conf_mutex); 5433 return 0; 5434 5435 err_online: 5436 err_hardsetup: 5437 qeth_qdio_clear_card(card, 0); 5438 qeth_clear_working_pool_list(card); 5439 qeth_flush_local_addrs(card); 5440 5441 qeth_stop_channel(&card->data); 5442 qeth_stop_channel(&card->write); 5443 qeth_stop_channel(&card->read); 5444 qdio_free(CARD_DDEV(card)); 5445 5446 mutex_unlock(&card->conf_mutex); 5447 return rc; 5448 } 5449 5450 int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc, 5451 bool resetting) 5452 { 5453 int rc, rc2, rc3; 5454 5455 mutex_lock(&card->conf_mutex); 5456 QETH_CARD_TEXT(card, 3, "setoffl"); 5457 5458 if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) { 5459 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); 5460 card->info.hwtrap = 1; 5461 } 5462 5463 /* cancel any stalled cmd that might block the rtnl: */ 5464 qeth_clear_ipacmd_list(card); 5465 5466 rtnl_lock(); 5467 card->info.open_when_online = card->dev->flags & IFF_UP; 5468 dev_close(card->dev); 5469 netif_device_detach(card->dev); 5470 netif_carrier_off(card->dev); 5471 rtnl_unlock(); 5472 5473 cancel_work_sync(&card->rx_mode_work); 5474 5475 disc->set_offline(card); 5476 5477 qeth_qdio_clear_card(card, 0); 5478 qeth_drain_output_queues(card); 5479 qeth_clear_working_pool_list(card); 5480 qeth_flush_local_addrs(card); 5481 card->info.promisc_mode = 0; 5482 5483 rc = qeth_stop_channel(&card->data); 5484 rc2 = qeth_stop_channel(&card->write); 5485 rc3 = qeth_stop_channel(&card->read); 5486 if (!rc) 5487 rc = (rc2) ? rc2 : rc3; 5488 if (rc) 5489 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 5490 qdio_free(CARD_DDEV(card)); 5491 5492 /* let user_space know that device is offline */ 5493 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE); 5494 5495 mutex_unlock(&card->conf_mutex); 5496 return 0; 5497 } 5498 EXPORT_SYMBOL_GPL(qeth_set_offline); 5499 5500 static int qeth_do_reset(void *data) 5501 { 5502 const struct qeth_discipline *disc; 5503 struct qeth_card *card = data; 5504 int rc; 5505 5506 /* Lock-free, other users will block until we are done. 
*/ 5507 disc = card->discipline; 5508 5509 QETH_CARD_TEXT(card, 2, "recover1"); 5510 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) 5511 return 0; 5512 QETH_CARD_TEXT(card, 2, "recover2"); 5513 dev_warn(&card->gdev->dev, 5514 "A recovery process has been started for the device\n"); 5515 5516 qeth_set_offline(card, disc, true); 5517 rc = qeth_set_online(card, disc); 5518 if (!rc) { 5519 dev_info(&card->gdev->dev, 5520 "Device successfully recovered!\n"); 5521 } else { 5522 ccwgroup_set_offline(card->gdev); 5523 dev_warn(&card->gdev->dev, 5524 "The qeth device driver failed to recover an error on the device\n"); 5525 } 5526 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); 5527 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); 5528 return 0; 5529 } 5530 5531 #if IS_ENABLED(CONFIG_QETH_L3) 5532 static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, 5533 struct qeth_hdr *hdr) 5534 { 5535 struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data; 5536 struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3; 5537 struct net_device *dev = skb->dev; 5538 5539 if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) { 5540 dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr, 5541 "FAKELL", skb->len); 5542 return; 5543 } 5544 5545 if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) { 5546 u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 : 5547 ETH_P_IP; 5548 unsigned char tg_addr[ETH_ALEN]; 5549 5550 skb_reset_network_header(skb); 5551 switch (l3_hdr->flags & QETH_HDR_CAST_MASK) { 5552 case QETH_CAST_MULTICAST: 5553 if (prot == ETH_P_IP) 5554 ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr); 5555 else 5556 ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr); 5557 QETH_CARD_STAT_INC(card, rx_multicast); 5558 break; 5559 case QETH_CAST_BROADCAST: 5560 ether_addr_copy(tg_addr, dev->broadcast); 5561 QETH_CARD_STAT_INC(card, rx_multicast); 5562 break; 5563 default: 5564 if (card->options.sniffer) 5565 skb->pkt_type = PACKET_OTHERHOST; 5566 ether_addr_copy(tg_addr, dev->dev_addr); 5567 } 5568 5569 if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR) 5570 dev_hard_header(skb, dev, prot, tg_addr, 5571 &l3_hdr->next_hop.rx.src_mac, skb->len); 5572 else 5573 dev_hard_header(skb, dev, prot, tg_addr, "FAKELL", 5574 skb->len); 5575 } 5576 5577 /* copy VLAN tag from hdr into skb */ 5578 if (!card->options.sniffer && 5579 (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME | 5580 QETH_HDR_EXT_INCLUDE_VLAN_TAG))) { 5581 u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ? 
5582 l3_hdr->vlan_id : 5583 l3_hdr->next_hop.rx.vlan_id; 5584 5585 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); 5586 } 5587 } 5588 #endif 5589 5590 static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb, 5591 struct qeth_hdr *hdr, bool uses_frags) 5592 { 5593 struct napi_struct *napi = &card->napi; 5594 bool is_cso; 5595 5596 switch (hdr->hdr.l2.id) { 5597 #if IS_ENABLED(CONFIG_QETH_L3) 5598 case QETH_HEADER_TYPE_LAYER3: 5599 qeth_l3_rebuild_skb(card, skb, hdr); 5600 is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ; 5601 break; 5602 #endif 5603 case QETH_HEADER_TYPE_LAYER2: 5604 is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ; 5605 break; 5606 default: 5607 /* never happens */ 5608 if (uses_frags) 5609 napi_free_frags(napi); 5610 else 5611 dev_kfree_skb_any(skb); 5612 return; 5613 } 5614 5615 if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) { 5616 skb->ip_summed = CHECKSUM_UNNECESSARY; 5617 QETH_CARD_STAT_INC(card, rx_skb_csum); 5618 } else { 5619 skb->ip_summed = CHECKSUM_NONE; 5620 } 5621 5622 QETH_CARD_STAT_ADD(card, rx_bytes, skb->len); 5623 QETH_CARD_STAT_INC(card, rx_packets); 5624 if (skb_is_nonlinear(skb)) { 5625 QETH_CARD_STAT_INC(card, rx_sg_skbs); 5626 QETH_CARD_STAT_ADD(card, rx_sg_frags, 5627 skb_shinfo(skb)->nr_frags); 5628 } 5629 5630 if (uses_frags) { 5631 napi_gro_frags(napi); 5632 } else { 5633 skb->protocol = eth_type_trans(skb, skb->dev); 5634 napi_gro_receive(napi, skb); 5635 } 5636 } 5637 5638 static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len) 5639 { 5640 struct page *page = virt_to_page(data); 5641 unsigned int next_frag; 5642 5643 next_frag = skb_shinfo(skb)->nr_frags; 5644 get_page(page); 5645 skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len, 5646 data_len); 5647 } 5648 5649 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) 5650 { 5651 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY); 5652 } 5653 5654 static int qeth_extract_skb(struct qeth_card *card, 5655 struct qeth_qdio_buffer *qethbuffer, u8 *element_no, 5656 int *__offset) 5657 { 5658 struct qeth_priv *priv = netdev_priv(card->dev); 5659 struct qdio_buffer *buffer = qethbuffer->buffer; 5660 struct napi_struct *napi = &card->napi; 5661 struct qdio_buffer_element *element; 5662 unsigned int linear_len = 0; 5663 bool uses_frags = false; 5664 int offset = *__offset; 5665 bool use_rx_sg = false; 5666 unsigned int headroom; 5667 struct qeth_hdr *hdr; 5668 struct sk_buff *skb; 5669 int skb_len = 0; 5670 5671 element = &buffer->element[*element_no]; 5672 5673 next_packet: 5674 /* qeth_hdr must not cross element boundaries */ 5675 while (element->length < offset + sizeof(struct qeth_hdr)) { 5676 if (qeth_is_last_sbale(element)) 5677 return -ENODATA; 5678 element++; 5679 offset = 0; 5680 } 5681 5682 hdr = phys_to_virt(element->addr) + offset; 5683 offset += sizeof(*hdr); 5684 skb = NULL; 5685 5686 switch (hdr->hdr.l2.id) { 5687 case QETH_HEADER_TYPE_LAYER2: 5688 skb_len = hdr->hdr.l2.pkt_length; 5689 linear_len = ETH_HLEN; 5690 headroom = 0; 5691 break; 5692 case QETH_HEADER_TYPE_LAYER3: 5693 skb_len = hdr->hdr.l3.length; 5694 if (!IS_LAYER3(card)) { 5695 QETH_CARD_STAT_INC(card, rx_dropped_notsupp); 5696 goto walk_packet; 5697 } 5698 5699 if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) { 5700 linear_len = ETH_HLEN; 5701 headroom = 0; 5702 break; 5703 } 5704 5705 if (hdr->hdr.l3.flags & QETH_HDR_IPV6) 5706 linear_len = sizeof(struct ipv6hdr); 5707 else 5708 linear_len = sizeof(struct iphdr); 5709 
headroom = ETH_HLEN; 5710 break; 5711 default: 5712 if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL) 5713 QETH_CARD_STAT_INC(card, rx_frame_errors); 5714 else 5715 QETH_CARD_STAT_INC(card, rx_dropped_notsupp); 5716 5717 /* Can't determine packet length, drop the whole buffer. */ 5718 return -EPROTONOSUPPORT; 5719 } 5720 5721 if (skb_len < linear_len) { 5722 QETH_CARD_STAT_INC(card, rx_dropped_runt); 5723 goto walk_packet; 5724 } 5725 5726 use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) || 5727 (skb_len > READ_ONCE(priv->rx_copybreak) && 5728 !atomic_read(&card->force_alloc_skb)); 5729 5730 if (use_rx_sg) { 5731 /* QETH_CQ_ENABLED only: */ 5732 if (qethbuffer->rx_skb && 5733 skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) { 5734 skb = qethbuffer->rx_skb; 5735 qethbuffer->rx_skb = NULL; 5736 goto use_skb; 5737 } 5738 5739 skb = napi_get_frags(napi); 5740 if (!skb) { 5741 /* -ENOMEM, no point in falling back further. */ 5742 QETH_CARD_STAT_INC(card, rx_dropped_nomem); 5743 goto walk_packet; 5744 } 5745 5746 if (skb_tailroom(skb) >= linear_len + headroom) { 5747 uses_frags = true; 5748 goto use_skb; 5749 } 5750 5751 netdev_info_once(card->dev, 5752 "Insufficient linear space in NAPI frags skb, need %u but have %u\n", 5753 linear_len + headroom, skb_tailroom(skb)); 5754 /* Shouldn't happen. Don't optimize, fall back to linear skb. */ 5755 } 5756 5757 linear_len = skb_len; 5758 skb = napi_alloc_skb(napi, linear_len + headroom); 5759 if (!skb) { 5760 QETH_CARD_STAT_INC(card, rx_dropped_nomem); 5761 goto walk_packet; 5762 } 5763 5764 use_skb: 5765 if (headroom) 5766 skb_reserve(skb, headroom); 5767 walk_packet: 5768 while (skb_len) { 5769 int data_len = min(skb_len, (int)(element->length - offset)); 5770 char *data = phys_to_virt(element->addr) + offset; 5771 5772 skb_len -= data_len; 5773 offset += data_len; 5774 5775 /* Extract data from current element: */ 5776 if (skb && data_len) { 5777 if (linear_len) { 5778 unsigned int copy_len; 5779 5780 copy_len = min_t(unsigned int, linear_len, 5781 data_len); 5782 5783 skb_put_data(skb, data, copy_len); 5784 linear_len -= copy_len; 5785 data_len -= copy_len; 5786 data += copy_len; 5787 } 5788 5789 if (data_len) 5790 qeth_create_skb_frag(skb, data, data_len); 5791 } 5792 5793 /* Step forward to next element: */ 5794 if (skb_len) { 5795 if (qeth_is_last_sbale(element)) { 5796 QETH_CARD_TEXT(card, 4, "unexeob"); 5797 QETH_CARD_HEX(card, 2, buffer, sizeof(void *)); 5798 if (skb) { 5799 if (uses_frags) 5800 napi_free_frags(napi); 5801 else 5802 dev_kfree_skb_any(skb); 5803 QETH_CARD_STAT_INC(card, 5804 rx_length_errors); 5805 } 5806 return -EMSGSIZE; 5807 } 5808 element++; 5809 offset = 0; 5810 } 5811 } 5812 5813 /* This packet was skipped, go get another one: */ 5814 if (!skb) 5815 goto next_packet; 5816 5817 *element_no = element - &buffer->element[0]; 5818 *__offset = offset; 5819 5820 qeth_receive_skb(card, skb, hdr, uses_frags); 5821 return 0; 5822 } 5823 5824 static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget, 5825 struct qeth_qdio_buffer *buf, bool *done) 5826 { 5827 unsigned int work_done = 0; 5828 5829 while (budget) { 5830 if (qeth_extract_skb(card, buf, &card->rx.buf_element, 5831 &card->rx.e_offset)) { 5832 *done = true; 5833 break; 5834 } 5835 5836 work_done++; 5837 budget--; 5838 } 5839 5840 return work_done; 5841 } 5842 5843 static unsigned int qeth_rx_poll(struct qeth_card *card, int budget) 5844 { 5845 struct qeth_rx *ctx = &card->rx; 5846 unsigned int work_done = 0; 5847 5848 while (budget > 0) { 5849 struct 
static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
{
	struct qeth_rx *ctx = &card->rx;
	unsigned int work_done = 0;

	while (budget > 0) {
		struct qeth_qdio_buffer *buffer;
		unsigned int skbs_done = 0;
		bool done = false;

		/* Fetch completed RX buffers: */
		if (!card->rx.b_count) {
			card->rx.qdio_err = 0;
			card->rx.b_count = qdio_inspect_queue(CARD_DDEV(card),
							      0, true,
							      &card->rx.b_index,
							      &card->rx.qdio_err);
			if (card->rx.b_count <= 0) {
				card->rx.b_count = 0;
				break;
			}
		}

		/* Process one completed RX buffer: */
		buffer = &card->qdio.in_q->bufs[card->rx.b_index];
		if (!(card->rx.qdio_err &&
		      qeth_check_qdio_errors(card, buffer->buffer,
					     card->rx.qdio_err, "qinerr")))
			skbs_done = qeth_extract_skbs(card, budget, buffer,
						      &done);
		else
			done = true;

		work_done += skbs_done;
		budget -= skbs_done;

		if (done) {
			QETH_CARD_STAT_INC(card, rx_bufs);
			qeth_put_buffer_pool_entry(card, buffer->pool_entry);
			buffer->pool_entry = NULL;
			card->rx.b_count--;
			ctx->bufs_refill++;
			ctx->bufs_refill -= qeth_rx_refill_queue(card,
								 ctx->bufs_refill);

			/* Step forward to next buffer: */
			card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
			card->rx.buf_element = 0;
			card->rx.e_offset = 0;
		}
	}

	return work_done;
}

static void qeth_cq_poll(struct qeth_card *card)
{
	unsigned int work_done = 0;

	while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
		unsigned int start, error;
		int completed;

		completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
					       &error);
		if (completed <= 0)
			return;

		qeth_qdio_cq_handler(card, error, 1, start, completed);
		work_done += completed;
	}
}

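/* Main NAPI poll routine: processes RX completions, kicks the per-queue TX
 * NAPI instances when TX IRQs are in use, drains the completion queue if
 * enabled, and only re-arms the device interrupt once all work fits within
 * the budget.
 */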
int qeth_poll(struct napi_struct *napi, int budget)
{
	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
	unsigned int work_done;

	work_done = qeth_rx_poll(card, budget);

	if (qeth_use_tx_irqs(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		qeth_for_each_output_queue(card, queue, i) {
			if (!qeth_out_queue_is_empty(queue))
				napi_schedule(&queue->napi);
		}
	}

	if (card->options.cq == QETH_CQ_ENABLED)
		qeth_cq_poll(card);

	if (budget) {
		struct qeth_rx *ctx = &card->rx;

		/* Process any substantial refill backlog: */
		ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);

		/* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
		if (work_done >= budget)
			return work_done;
	}

	if (napi_complete_done(napi, work_done) &&
	    qdio_start_irq(CARD_DDEV(card)))
		napi_schedule(napi);

	return work_done;
}
EXPORT_SYMBOL_GPL(qeth_poll);

static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
				 unsigned int bidx, unsigned int qdio_error,
				 int budget)
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
	u8 sflags = buffer->buffer->element[15].sflags;
	struct qeth_card *card = queue->card;
	bool error = !!qdio_error;

	if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
		struct qaob *aob = buffer->aob;
		struct qeth_qaob_priv1 *priv;
		enum iucv_tx_notify notify;

		if (!aob) {
			netdev_WARN_ONCE(card->dev,
					 "Pending TX buffer %#x without QAOB on TX queue %u\n",
					 bidx, queue->queue_no);
			qeth_schedule_recovery(card);
			return;
		}

		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);

		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		/* QAOB hasn't completed yet: */
		if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) {
			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);

			/* Prepare the queue slot for immediate re-use: */
			qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
			if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) {
				QETH_CARD_TEXT(card, 2, "outofbuf");
				qeth_schedule_recovery(card);
			}

			list_add(&buffer->list_entry, &queue->pending_bufs);
			/* Skip clearing the buffer: */
			return;
		}

		/* QAOB already completed: */
		notify = qeth_compute_cq_notification(aob->aorc, 0);
		qeth_notify_skbs(queue, buffer, notify);
		error = !!aob->aorc;
		memset(aob, 0, sizeof(*aob));
	} else if (card->options.cq == QETH_CQ_ENABLED) {
		qeth_notify_skbs(queue, buffer,
				 qeth_compute_cq_notification(sflags, 0));
	}

	qeth_clear_output_buffer(queue, buffer, error, budget);
}

static int qeth_tx_poll(struct napi_struct *napi, int budget)
{
	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
	unsigned int queue_no = queue->queue_no;
	struct qeth_card *card = queue->card;
	struct net_device *dev = card->dev;
	unsigned int work_done = 0;
	struct netdev_queue *txq;

	if (IS_IQD(card))
		txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
	else
		txq = netdev_get_tx_queue(dev, queue_no);

	while (1) {
		unsigned int start, error, i;
		unsigned int packets = 0;
		unsigned int bytes = 0;
		int completed;

		qeth_tx_complete_pending_bufs(card, queue, false, budget);

		if (qeth_out_queue_is_empty(queue)) {
			napi_complete(napi);
			return 0;
		}

		/* Give the CPU a breather: */
		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
			QETH_TXQ_STAT_INC(queue, completion_yield);
			if (napi_complete_done(napi, 0))
				napi_schedule(napi);
			return 0;
		}

		completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
					       &start, &error);
		if (completed <= 0) {
			/* Ensure we see TX completion for pending work: */
			if (napi_complete_done(napi, 0) &&
			    !atomic_read(&queue->set_pci_flags_count))
				qeth_tx_arm_timer(queue, queue->rescan_usecs);
			return 0;
		}

		for (i = start; i < start + completed; i++) {
			struct qeth_qdio_out_buffer *buffer;
			unsigned int bidx = QDIO_BUFNR(i);

			buffer = queue->bufs[bidx];
			packets += buffer->frames;
			bytes += buffer->bytes;

			qeth_handle_send_error(card, buffer, error);
			if (IS_IQD(card))
				qeth_iqd_tx_complete(queue, bidx, error, budget);
			else
				qeth_clear_output_buffer(queue, buffer, error,
							 budget);
		}

		atomic_sub(completed, &queue->used_buffers);
		work_done += completed;
		if (IS_IQD(card))
			netdev_tx_completed_queue(txq, packets, bytes);
		else
			qeth_check_outbound_queue(queue);

		/* xmit may have observed the full-condition, but not yet
		 * stopped the txq. In which case the code below won't trigger.
		 * So before returning, xmit will re-check the txq's fill level
		 * and wake it up if needed.
		 */
		if (netif_tx_queue_stopped(txq) &&
		    !qeth_out_queue_is_full(queue))
			netif_tx_wake_queue(txq);
	}
}

static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	return cmd->hdr.return_code;
}

static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
					struct qeth_reply *reply,
					unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipa_caps *caps = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	caps->supported = cmd->data.setassparms.data.caps.supported;
	caps->enabled = cmd->data.setassparms.data.caps.enabled;
	return 0;
}

int qeth_setassparms_cb(struct qeth_card *card,
			struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	QETH_CARD_TEXT(card, 4, "defadpcb");

	if (cmd->hdr.return_code)
		return -EIO;

	cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
		card->options.ipa4.enabled = cmd->hdr.assists.enabled;
	if (cmd->hdr.prot_version == QETH_PROT_IPV6)
		card->options.ipa6.enabled = cmd->hdr.assists.enabled;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);

struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
						 enum qeth_ipa_funcs ipa_func,
						 u16 cmd_code,
						 unsigned int data_length,
						 enum qeth_prot_versions prot)
{
	struct qeth_ipacmd_setassparms *setassparms;
	struct qeth_ipacmd_setassparms_hdr *hdr;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 4, "getasscm");
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
				 data_length +
				 offsetof(struct qeth_ipacmd_setassparms,
					  data));
	if (!iob)
		return NULL;

	setassparms = &__ipa_cmd(iob)->data.setassparms;
	setassparms->assist_no = ipa_func;

	hdr = &setassparms->hdr;
	hdr->length = sizeof(*hdr) + data_length;
	hdr->command_code = cmd_code;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);

int qeth_send_simple_setassparms_prot(struct qeth_card *card,
				      enum qeth_ipa_funcs ipa_func,
				      u16 cmd_code, u32 *data,
				      enum qeth_prot_versions prot)
{
	unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
	if (!iob)
		return -ENOMEM;

	if (data)
		__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);

static void qeth_unregister_dbf_views(void)
{
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		debug_unregister(qeth_dbf[x].id);
		qeth_dbf[x].id = NULL;
	}
}

void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
{
	char dbf_txt_buf[32];
	va_list args;

	if (!debug_level_enabled(id, level))
		return;
	va_start(args, fmt);
	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
	va_end(args);
	debug_text_event(id, level, dbf_txt_buf);
}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);

static int qeth_register_dbf_views(void)
{
	int ret;
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		/* register the areas */
		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
						qeth_dbf[x].pages,
						qeth_dbf[x].areas,
						qeth_dbf[x].len);
		if (qeth_dbf[x].id == NULL) {
			qeth_unregister_dbf_views();
			return -ENOMEM;
		}

		/* register a view */
		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
		if (ret) {
			qeth_unregister_dbf_views();
			return ret;
		}

		/* set a passing level */
		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
	}

	return 0;
}

static DEFINE_MUTEX(qeth_mod_mutex);	/* for synchronized module loading */

int qeth_setup_discipline(struct qeth_card *card,
			  enum qeth_discipline_id discipline)
{
	int rc;

	mutex_lock(&qeth_mod_mutex);
	switch (discipline) {
	case QETH_DISCIPLINE_LAYER3:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l3_discipline), "qeth_l3");
		break;
	case QETH_DISCIPLINE_LAYER2:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l2_discipline), "qeth_l2");
		break;
	default:
		break;
	}
	mutex_unlock(&qeth_mod_mutex);

	if (!card->discipline) {
		dev_err(&card->gdev->dev, "There is no kernel module to "
			"support discipline %d\n", discipline);
		return -EINVAL;
	}

	rc = card->discipline->setup(card->gdev);
	if (rc) {
		if (discipline == QETH_DISCIPLINE_LAYER2)
			symbol_put(qeth_l2_discipline);
		else
			symbol_put(qeth_l3_discipline);
		card->discipline = NULL;

		return rc;
	}

	card->options.layer = discipline;
	return 0;
}

void qeth_remove_discipline(struct qeth_card *card)
{
	card->discipline->remove(card->gdev);

	if (IS_LAYER2(card))
		symbol_put(qeth_l2_discipline);
	else
		symbol_put(qeth_l3_discipline);
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
	card->discipline = NULL;
}

static const struct device_type qeth_generic_devtype = {
	.name = "qeth_generic",
};

#define DBF_NAME_LEN	20

struct qeth_dbf_entry {
	char dbf_name[DBF_NAME_LEN];
	debug_info_t *dbf_info;
	struct list_head dbf_list;
};

static LIST_HEAD(qeth_dbf_list);
static DEFINE_MUTEX(qeth_dbf_list_mutex);

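/* Each card registers its own "qeth_card_<bus-ID>" debug area. The entries
 * are kept on this global list so that a card probed again under the same
 * bus-ID re-uses the already registered debug area instead of creating a
 * duplicate.
 */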
static debug_info_t *qeth_get_dbf_entry(char *name)
{
	struct qeth_dbf_entry *entry;
	debug_info_t *rc = NULL;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
		if (strcmp(entry->dbf_name, name) == 0) {
			rc = entry->dbf_info;
			break;
		}
	}
	mutex_unlock(&qeth_dbf_list_mutex);
	return rc;
}

static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
{
	struct qeth_dbf_entry *new_entry;

	card->debug = debug_register(name, 2, 1, 8);
	if (!card->debug) {
		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
		goto err;
	}
	if (debug_register_view(card->debug, &debug_hex_ascii_view))
		goto err_dbg;
	new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
	if (!new_entry)
		goto err_dbg;
	strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
	new_entry->dbf_info = card->debug;
	mutex_lock(&qeth_dbf_list_mutex);
	list_add(&new_entry->dbf_list, &qeth_dbf_list);
	mutex_unlock(&qeth_dbf_list_mutex);

	return 0;

err_dbg:
	debug_unregister(card->debug);
err:
	return -ENOMEM;
}

static void qeth_clear_dbf_list(void)
{
	struct qeth_dbf_entry *entry, *tmp;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
		list_del(&entry->dbf_list);
		debug_unregister(entry->dbf_info);
		kfree(entry);
	}
	mutex_unlock(&qeth_dbf_list_mutex);
}

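/* Allocate the net_device for a card. HiperSockets (IQD) devices get a
 * "hsi%d" netdev with the maximum number of TX queues but a single RX queue,
 * OSM devices are limited to one TX queue, and all other types get a regular
 * multi-queue Ethernet device.
 */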
static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
{
	struct net_device *dev;
	struct qeth_priv *priv;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
				       ether_setup, QETH_MAX_OUT_QUEUES, 1);
		break;
	case QETH_CARD_TYPE_OSM:
		dev = alloc_etherdev(sizeof(*priv));
		break;
	default:
		dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
	}

	if (!dev)
		return NULL;

	priv = netdev_priv(dev);
	priv->rx_copybreak = QETH_RX_COPYBREAK;
	priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;

	dev->ml_priv = card;
	dev->watchdog_timeo = QETH_TX_TIMEOUT;
	dev->min_mtu = 576;
	/* initialized when device first goes online: */
	dev->max_mtu = 0;
	dev->mtu = 0;
	SET_NETDEV_DEV(dev, &card->gdev->dev);
	netif_carrier_off(dev);

	dev->ethtool_ops = &qeth_ethtool_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->hw_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_SG;
	if (IS_IQD(card))
		dev->features |= NETIF_F_SG;

	return dev;
}

struct net_device *qeth_clone_netdev(struct net_device *orig)
{
	struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);

	if (!clone)
		return NULL;

	clone->dev_port = orig->dev_port;
	return clone;
}

static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;
	struct device *dev;
	int rc;
	enum qeth_discipline_id enforced_disc;
	char dbf_name[DBF_NAME_LEN];

	QETH_DBF_TEXT(SETUP, 2, "probedev");

	dev = &gdev->dev;
	if (!get_device(dev))
		return -ENODEV;

	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));

	card = qeth_alloc_card(gdev);
	if (!card) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
		rc = -ENOMEM;
		goto err_dev;
	}

	snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
		 dev_name(&gdev->dev));
	card->debug = qeth_get_dbf_entry(dbf_name);
	if (!card->debug) {
		rc = qeth_add_dbf_entry(card, dbf_name);
		if (rc)
			goto err_card;
	}

	qeth_setup_card(card);
	card->dev = qeth_alloc_netdev(card);
	if (!card->dev) {
		rc = -ENOMEM;
		goto err_card;
	}

	qeth_determine_capabilities(card);
	qeth_set_blkt_defaults(card);

	card->qdio.no_out_queues = card->dev->num_tx_queues;
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		goto err_chp_desc;

	gdev->dev.groups = qeth_dev_groups;

	enforced_disc = qeth_enforce_discipline(card);
	switch (enforced_disc) {
	case QETH_DISCIPLINE_UNDETERMINED:
		gdev->dev.type = &qeth_generic_devtype;
		break;
	default:
		card->info.layer_enforced = true;
		/* It's so early that we don't need the discipline_mutex yet. */
		rc = qeth_setup_discipline(card, enforced_disc);
		if (rc)
			goto err_setup_disc;

		break;
	}

	return 0;

err_setup_disc:
err_chp_desc:
	free_netdev(card->dev);
err_card:
	qeth_core_free_card(card);
err_dev:
	put_device(dev);
	return rc;
}

static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 2, "removedv");

	mutex_lock(&card->discipline_mutex);
	if (card->discipline)
		qeth_remove_discipline(card);
	mutex_unlock(&card->discipline_mutex);

	qeth_free_qdio_queues(card);

	free_netdev(card->dev);
	qeth_core_free_card(card);
	put_device(&gdev->dev);
}

static int qeth_core_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
	enum qeth_discipline_id def_discipline;

	mutex_lock(&card->discipline_mutex);
	if (!card->discipline) {
		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
						QETH_DISCIPLINE_LAYER2;
		rc = qeth_setup_discipline(card, def_discipline);
		if (rc)
			goto err;
	}

	rc = qeth_set_online(card, card->discipline);

err:
	mutex_unlock(&card->discipline_mutex);
	return rc;
}

static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc;

	mutex_lock(&card->discipline_mutex);
	rc = qeth_set_offline(card, card->discipline, false);
	mutex_unlock(&card->discipline_mutex);

	return rc;
}

static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	qeth_set_allowed_threads(card, 0, 1);
	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qdio_free(CARD_DDEV(card));
}

static ssize_t group_store(struct device_driver *ddrv, const char *buf,
			   size_t count)
{
	int err;

	err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
				  buf);

	return err ? err : count;
}
static DRIVER_ATTR_WO(group);

static struct attribute *qeth_drv_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};
static struct attribute_group qeth_drv_attr_group = {
	.attrs = qeth_drv_attrs,
};
static const struct attribute_group *qeth_drv_attr_groups[] = {
	&qeth_drv_attr_group,
	NULL,
};

static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
	.driver = {
		.groups = qeth_drv_attr_groups,
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ccw_driver = &qeth_ccw_driver,
	.setup = qeth_core_probe_device,
	.remove = qeth_core_remove_device,
	.set_online = qeth_core_set_online,
	.set_offline = qeth_core_set_offline,
	.shutdown = qeth_core_shutdown,
};

int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
		    !IS_VM_NIC(card))
			return 1;
		return 0;
	case SIOC_QETH_QUERY_OAT:
		rc = qeth_query_oat_command(card, data);
		break;
	default:
		if (card->discipline->do_ioctl)
			rc = card->discipline->do_ioctl(dev, rq, data, cmd);
		else
			rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_siocdevprivate);

int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_ioctl);

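/* HW checksum offload is negotiated with a two-step IPA sequence:
 * ASS_START reports which checksum types the assist can handle, ASS_ENABLE
 * then switches on the required types and returns the resulting
 * capabilities. Any failure along the way rolls back via ASS_STOP.
 */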
static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
			      unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u32 *features = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	*features = cmd->data.setassparms.data.flags_32bit;
	return 0;
}

static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			     enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
						 NULL, prot);
}

static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			    enum qeth_prot_versions prot, u8 *lp2lp)
{
	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	u32 features;
	int rc;

	/* some L3 HW requires combined L3+L4 csum offload: */
	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
		required_features |= QETH_IPA_CHECKSUM_IP_HDR;

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
				       prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
	if (rc)
		return rc;

	if ((required_features & features) != required_features) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(flags_32bit),
				       prot);
	if (!iob) {
		qeth_set_csum_off(card, cstype, prot);
		return -ENOMEM;
	}

	if (features & QETH_IPA_CHECKSUM_LP2LP)
		required_features |= QETH_IPA_CHECKSUM_LP2LP;
	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_csum_off(card, cstype, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, required_features) ||
	    !qeth_ipa_caps_enabled(&caps, required_features)) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);

	if (lp2lp)
		*lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);

	return 0;
}

static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
			     enum qeth_prot_versions prot, u8 *lp2lp)
{
	return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
		    qeth_set_csum_off(card, cstype, prot);
}

static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_tso_start_data *tso_data = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	tso_data->mss = cmd->data.setassparms.data.tso.mss;
	tso_data->supported = cmd->data.setassparms.data.tso.supported;
	return 0;
}

static int qeth_set_tso_off(struct qeth_card *card,
			    enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
						 IPA_CMD_ASS_STOP, NULL, prot);
}

static int qeth_set_tso_on(struct qeth_card *card,
			   enum qeth_prot_versions prot)
{
	struct qeth_tso_start_data tso_data;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	int rc;

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_START, 0, prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
	if (rc)
		return rc;

	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(caps), prot);
	if (!iob) {
		qeth_set_tso_off(card, prot);
		return -ENOMEM;
	}

	/* enable TSO capability */
	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
		QETH_IPA_LARGE_SEND_TCP;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_tso_off(card, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
		 tso_data.mss);
	return 0;
}

static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
			    enum qeth_prot_versions prot)
{
	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
}

static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
	int rc_ipv6;

	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
					    QETH_PROT_IPV4, NULL);
	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		/* no/one Offload Assist available, so the rc is trivial */
		return rc_ipv4;

	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
				    QETH_PROT_IPV6, NULL);

	if (on)
		/* enable: success if any Assist is active */
		return (rc_ipv6) ? rc_ipv4 : 0;

	/* disable: failure if any Assist is still active */
	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}

/**
 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
 * @dev:	a net_device
 */
void qeth_enable_hw_features(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t features;

	features = dev->features;
	/* force-off any feature that might need an IPA sequence.
	 * netdev_update_features() will restart them.
	 */
	dev->features &= ~dev->hw_features;
	/* toggle VLAN filter, so that VIDs are re-programmed: */
	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}
	netdev_update_features(dev);
	if (features != dev->features)
		dev_warn(&card->gdev->dev,
			 "Device recovery failed to restore all offload features\n");
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);

static void qeth_check_restricted_features(struct qeth_card *card,
					   netdev_features_t changed,
					   netdev_features_t actual)
{
	netdev_features_t ipv6_features = NETIF_F_TSO6;
	netdev_features_t ipv4_features = NETIF_F_TSO;

	if (!card->info.has_lp2lp_cso_v6)
		ipv6_features |= NETIF_F_IPV6_CSUM;
	if (!card->info.has_lp2lp_cso_v4)
		ipv4_features |= NETIF_F_IP_CSUM;

	if ((changed & ipv6_features) && !(actual & ipv6_features))
		qeth_flush_local_addrs6(card);
	if ((changed & ipv4_features) && !(actual & ipv4_features))
		qeth_flush_local_addrs4(card);
}

int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t changed = dev->features ^ features;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "setfeat");
	QETH_CARD_HEX(card, 2, &features, sizeof(features));

	if ((changed & NETIF_F_IP_CSUM)) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
				       &card->info.has_lp2lp_cso_v4);
		if (rc)
			changed ^= NETIF_F_IP_CSUM;
	}
	if (changed & NETIF_F_IPV6_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
				       &card->info.has_lp2lp_cso_v6);
		if (rc)
			changed ^= NETIF_F_IPV6_CSUM;
	}
	if (changed & NETIF_F_RXCSUM) {
		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
		if (rc)
			changed ^= NETIF_F_RXCSUM;
	}
	if (changed & NETIF_F_TSO) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
				      QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_TSO;
	}
	if (changed & NETIF_F_TSO6) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
				      QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_TSO6;
	}

	qeth_check_restricted_features(card, dev->features ^ features,
				       dev->features ^ changed);

	/* everything changed successfully? */
	if ((dev->features ^ features) == changed)
		return 0;
	/* something went wrong. save changed features and return error */
	dev->features ^= changed;
	return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_set_features);

netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 2, "fixfeat");
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_RXCSUM;
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;

	QETH_CARD_HEX(card, 2, &features, sizeof(features));
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);

netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	/* Traffic with local next-hop is not eligible for some offloads: */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
		netdev_features_t restricted = 0;

		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
			restricted |= NETIF_F_ALL_TSO;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			if (!card->info.has_lp2lp_cso_v4)
				restricted |= NETIF_F_IP_CSUM;

			if (restricted && qeth_next_hop_is_local_v4(card, skb))
				features &= ~restricted;
			break;
		case htons(ETH_P_IPV6):
			if (!card->info.has_lp2lp_cso_v6)
				restricted |= NETIF_F_IPV6_CSUM;

			if (restricted && qeth_next_hop_is_local_v6(card, skb))
				features &= ~restricted;
			break;
		default:
			break;
		}
	}

	/* GSO segmentation builds skbs with
	 * a (small) linear part for the headers, and
	 * page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* match skb_segment(): */
		unsigned int doffset = skb->data - skb_mac_header(skb);
		unsigned int hsize = skb_shinfo(skb)->gso_size;
		unsigned int hroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);

void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "getstat");

	stats->rx_packets = card->stats.rx_packets;
	stats->rx_bytes = card->stats.rx_bytes;
	stats->rx_errors = card->stats.rx_length_errors +
			   card->stats.rx_frame_errors +
			   card->stats.rx_fifo_errors;
	stats->rx_dropped = card->stats.rx_dropped_nomem +
			    card->stats.rx_dropped_notsupp +
			    card->stats.rx_dropped_runt;
	stats->multicast = card->stats.rx_multicast;
	stats->rx_length_errors = card->stats.rx_length_errors;
	stats->rx_frame_errors = card->stats.rx_frame_errors;
	stats->rx_fifo_errors = card->stats.rx_fifo_errors;

	for (i = 0; i < card->qdio.no_out_queues; i++) {
		queue = card->qdio.out_qs[i];

		stats->tx_packets += queue->stats.tx_packets;
		stats->tx_bytes += queue->stats.tx_bytes;
		stats->tx_errors += queue->stats.tx_errors;
		stats->tx_dropped += queue->stats.tx_dropped;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);

#define TC_IQD_UCAST	0
static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
				     unsigned int ucast_txqs)
{
	unsigned int prio;

	/* IQD requires mcast traffic to be placed on a dedicated queue, and
	 * qeth_iqd_select_queue() deals with this.
	 * For unicast traffic, we defer the queue selection to the stack.
	 * By installing a trivial prio map that spans over only the unicast
	 * queues, we can encourage the stack to spread the ucast traffic evenly
	 * without selecting the mcast queue.
	 */

	/* One traffic class, spanning over all active ucast queues: */
	netdev_set_num_tc(dev, 1);
	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
			    QETH_IQD_MIN_UCAST_TXQ);

	/* Map all priorities to this traffic class: */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
}

int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
{
	struct net_device *dev = card->dev;
	int rc;

	/* Per netif_setup_tc(), adjust the mapping first: */
	if (IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, count - 1);

	rc = netif_set_real_num_tx_queues(dev, count);

	if (rc && IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);

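/* TX queue selection for IQD devices: multicast always goes to the dedicated
 * mcast queue, unicast is spread over the remaining queues by the stack, and
 * an accidental pick of the mcast queue is remapped to the first ucast queue.
 */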
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
	u16 txq;

	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;
	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
		return QETH_IQD_MIN_UCAST_TXQ;

	txq = netdev_pick_tx(dev, skb, sb_dev);
	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);

int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethopen");

	card->data.state = CH_STATE_UP;
	netif_tx_start_all_queues(dev);

	local_bh_disable();
	qeth_for_each_output_queue(card, queue, i) {
		netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
				  QETH_NAPI_WEIGHT);
		napi_enable(&queue->napi);
		napi_schedule(&queue->napi);
	}

	napi_enable(&card->napi);
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);

int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethstop");

	napi_disable(&card->napi);
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	qdio_stop_irq(CARD_DDEV(card));

	/* Quiesce the NAPI instances: */
	qeth_for_each_output_queue(card, queue, i)
		napi_disable(&queue->napi);

	/* Stop .ndo_start_xmit, might still access queue->napi. */
	netif_tx_disable(dev);

	qeth_for_each_output_queue(card, queue, i) {
		del_timer_sync(&queue->timer);
		/* Queues may get re-allocated, so remove the NAPIs. */
		netif_napi_del(&queue->napi);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);

static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}
	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}

static void __exit qeth_core_exit(void)
{
	qeth_clear_dbf_list();
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_info("core functions removed\n");
}

module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");