// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2009
 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *		 Frank Pavlic <fpavlic@de.ibm.com>,
 *		 Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
#include <net/sock.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	/*                   N  P  A    M  L  V                      H  */
	[QETH_DBF_SETUP] = {"qeth_setup",
				8, 1,   8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]  = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct kmem_cache *qeth_qdio_outbuf_cache;

static struct device *qeth_core_root_dev;
static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);

static void qeth_close_dev_handler(struct work_struct *work)
{
	struct qeth_card *card;

	card = container_of(work, struct qeth_card, close_dev_work);
	QETH_CARD_TEXT(card, 2, "cldevhdl");
	ccwgroup_set_offline(card->gdev);
}

static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return " OSN QDIO";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
	return " n/a";
}
"Virt.NIC Hiper"; 126 case QETH_CARD_TYPE_OSM: 127 return "Virt.NIC OSM"; 128 case QETH_CARD_TYPE_OSX: 129 return "Virt.NIC OSX"; 130 default: 131 return "unknown"; 132 } 133 } else { 134 switch (card->info.type) { 135 case QETH_CARD_TYPE_OSD: 136 switch (card->info.link_type) { 137 case QETH_LINK_TYPE_FAST_ETH: 138 return "OSD_100"; 139 case QETH_LINK_TYPE_HSTR: 140 return "HSTR"; 141 case QETH_LINK_TYPE_GBIT_ETH: 142 return "OSD_1000"; 143 case QETH_LINK_TYPE_10GBIT_ETH: 144 return "OSD_10GIG"; 145 case QETH_LINK_TYPE_25GBIT_ETH: 146 return "OSD_25GIG"; 147 case QETH_LINK_TYPE_LANE_ETH100: 148 return "OSD_FE_LANE"; 149 case QETH_LINK_TYPE_LANE_TR: 150 return "OSD_TR_LANE"; 151 case QETH_LINK_TYPE_LANE_ETH1000: 152 return "OSD_GbE_LANE"; 153 case QETH_LINK_TYPE_LANE: 154 return "OSD_ATM_LANE"; 155 default: 156 return "OSD_Express"; 157 } 158 case QETH_CARD_TYPE_IQD: 159 return "HiperSockets"; 160 case QETH_CARD_TYPE_OSN: 161 return "OSN"; 162 case QETH_CARD_TYPE_OSM: 163 return "OSM_1000"; 164 case QETH_CARD_TYPE_OSX: 165 return "OSX_10GIG"; 166 default: 167 return "unknown"; 168 } 169 } 170 return "n/a"; 171 } 172 173 void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, 174 int clear_start_mask) 175 { 176 unsigned long flags; 177 178 spin_lock_irqsave(&card->thread_mask_lock, flags); 179 card->thread_allowed_mask = threads; 180 if (clear_start_mask) 181 card->thread_start_mask &= threads; 182 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 183 wake_up(&card->wait_q); 184 } 185 EXPORT_SYMBOL_GPL(qeth_set_allowed_threads); 186 187 int qeth_threads_running(struct qeth_card *card, unsigned long threads) 188 { 189 unsigned long flags; 190 int rc = 0; 191 192 spin_lock_irqsave(&card->thread_mask_lock, flags); 193 rc = (card->thread_running_mask & threads); 194 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 195 return rc; 196 } 197 EXPORT_SYMBOL_GPL(qeth_threads_running); 198 199 static void qeth_clear_working_pool_list(struct qeth_card *card) 200 { 201 struct qeth_buffer_pool_entry *pool_entry, *tmp; 202 struct qeth_qdio_q *queue = card->qdio.in_q; 203 unsigned int i; 204 205 QETH_CARD_TEXT(card, 5, "clwrklst"); 206 list_for_each_entry_safe(pool_entry, tmp, 207 &card->qdio.in_buf_pool.entry_list, list) 208 list_del(&pool_entry->list); 209 210 for (i = 0; i < ARRAY_SIZE(queue->bufs); i++) 211 queue->bufs[i].pool_entry = NULL; 212 } 213 214 static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry) 215 { 216 unsigned int i; 217 218 for (i = 0; i < ARRAY_SIZE(entry->elements); i++) { 219 if (entry->elements[i]) 220 __free_page(entry->elements[i]); 221 } 222 223 kfree(entry); 224 } 225 226 static void qeth_free_buffer_pool(struct qeth_card *card) 227 { 228 struct qeth_buffer_pool_entry *entry, *tmp; 229 230 list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list, 231 init_list) { 232 list_del(&entry->init_list); 233 qeth_free_pool_entry(entry); 234 } 235 } 236 237 static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages) 238 { 239 struct qeth_buffer_pool_entry *entry; 240 unsigned int i; 241 242 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 243 if (!entry) 244 return NULL; 245 246 for (i = 0; i < pages; i++) { 247 entry->elements[i] = __dev_alloc_page(GFP_KERNEL); 248 249 if (!entry->elements[i]) { 250 qeth_free_pool_entry(entry); 251 return NULL; 252 } 253 } 254 255 return entry; 256 } 257 258 static int qeth_alloc_buffer_pool(struct qeth_card *card) 259 { 260 unsigned int buf_elements = 

static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		struct qeth_buffer_pool_entry *entry;

		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}

		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
	}
	return 0;
}

int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
	struct qeth_buffer_pool_entry *entry, *tmp;
	int delta = count - pool->buf_count;
	LIST_HEAD(entries);

	QETH_CARD_TEXT(card, 2, "realcbp");

	/* Defer until queue is allocated: */
	if (!card->qdio.in_q)
		goto out;

	/* Remove entries from the pool: */
	while (delta < 0) {
		entry = list_first_entry(&pool->entry_list,
					 struct qeth_buffer_pool_entry,
					 init_list);
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);

		delta++;
	}

	/* Allocate additional entries: */
	while (delta > 0) {
		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			list_for_each_entry_safe(entry, tmp, &entries,
						 init_list) {
				list_del(&entry->init_list);
				qeth_free_pool_entry(entry);
			}

			return -ENOMEM;
		}

		list_add(&entry->init_list, &entries);

		delta--;
	}

	list_splice(&entries, &pool->entry_list);

out:
	card->qdio.in_buf_pool.buf_count = count;
	pool->buf_count = count;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
			     card->qdio.no_in_queues - 1, 0, 127, NULL);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
			goto out;
		}
	}
	rc = 0;
out:
	return rc;
}

static int qeth_alloc_cq(struct qeth_card *card)
{
	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqon");
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
			dev_err(&card->gdev->dev, "Failed to create completion queue\n");
			return -ENOMEM;
		}

		card->qdio.no_in_queues = 2;
	} else {
		QETH_CARD_TEXT(card, 2, "nocq");
		card->qdio.c_q = NULL;
		card->qdio.no_in_queues = 1;
	}
	QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
	return 0;
}
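
/*
 * When the completion queue is enabled it is carried as the last input
 * queue (index no_in_queues - 1), and qeth_cq_init() primes all 128 QDIO
 * buffers (0..127) on that queue with a single do_QDIO() call before use.
 */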

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		--card->qdio.no_in_queues;
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
}

static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}

static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = (__u32) __pa(data);
}

static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (!rc) {
		channel->active_cmd = iob;
	} else {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		qeth_unlock_channel(card, channel);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
	}
	return rc;
}

static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list_entry, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list_entry);
	spin_unlock_irq(&card->lock);
}

void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}
EXPORT_SYMBOL_GPL(qeth_notify_cmd);

static void qeth_flush_local_addrs4(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs4_lock);
	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs4_lock);
}

static void qeth_flush_local_addrs6(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs6_lock);
	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs6_lock);
}

static void qeth_flush_local_addrs(struct qeth_card *card)
{
	qeth_flush_local_addrs4(card);
	qeth_flush_local_addrs6(card);
}
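
/*
 * The local-address tables follow the usual RCU hash pattern: updaters
 * serialize on local_addrs4_lock / local_addrs6_lock and publish with
 * hash_add_rcu() / hash_del_rcu() plus kfree_rcu(), while lockless readers
 * (the next-hop lookups further down) walk the buckets under
 * rcu_read_lock().
 */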

static void qeth_add_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_add_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		addr->addr = cmd->addrs[i].addr;
		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs6_lock);
}

static void qeth_del_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
		unsigned int key = ipv4_addr_hash(addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
			if (tmp->addr.s6_addr32[3] == addr->addr) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_del_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
		u32 key = ipv6_addr_hash(&addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs6_lock);
}

static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	bool is_local = false;
	unsigned int key;
	__be32 next_hop;

	if (hash_empty(card->local_addrs4))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v4_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IP)));
	key = ipv4_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
		if (tmp->addr.s6_addr32[3] == next_hop) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	struct in6_addr *next_hop;
	bool is_local = false;
	u32 key;

	if (hash_empty(card->local_addrs6))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v6_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IPV6)));
	key = ipv6_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}
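
/*
 * A minimal caller sketch for the next-hop helpers above (hypothetical,
 * not code from this file): a hot-path user would test the next hop and
 * branch on the result, e.g.
 *
 *	if (qeth_next_hop_is_local_v4(card, skb))
 *		... take the adapter-internal path for this skb ...
 */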

static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
{
	struct qeth_card *card = m->private;
	struct qeth_local_addr *tmp;
	unsigned int i;

	rcu_read_lock();
	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
		seq_printf(m, "%pI6c\n", &tmp->addr);
	rcu_read_unlock();

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
			       struct qeth_card *card)
{
	const char *ipa_name;
	int com = cmd->hdr.command;

	ipa_name = qeth_get_ipa_cmd_name(com);

	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
}

static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SETCCID &&
		    cmd->hdr.command != IPA_CMD_DELCCID &&
		    cmd->hdr.command != IPA_CMD_MODCCID &&
		    cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
				netdev_name(card->dev));
			schedule_work(&card->close_dev_work);
		} else {
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 netdev_name(card->dev), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 netdev_name(card->dev), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_MODCCID:
		return cmd;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}

static void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list_entry)
		qeth_notify_cmd(iob, -ECANCELED);
	spin_unlock_irqrestore(&card->lock, flags);
}
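
/*
 * For an IDX TERMINATE reply, byte 4 of the buffer carries the cause code;
 * qeth_check_idx_response() below maps the two "bad transport" causes to
 * -EPROTONOSUPPORT (so callers can report a layer-2/layer-3 transport mode
 * mismatch) and everything else to plain -EIO.
 */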

static int qeth_check_idx_response(struct qeth_card *card,
				   unsigned char *buffer)
{
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
			dev_err(&card->gdev->dev,
				"The device does not support the configured transport mode\n");
			return -EPROTONOSUPPORT;
		}
		return -EIO;
	}
	return 0;
}

void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}
EXPORT_SYMBOL_GPL(qeth_put_cmd);

static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	qeth_put_cmd(iob);
}

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
				       unsigned int length, unsigned int ccws,
				       long timeout)
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	refcount_set(&iob->ref_count, 1);
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_alloc_cmd);
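
/*
 * Command buffers are refcounted. A minimal lifecycle sketch (hypothetical
 * caller, error handling elided):
 *
 *	iob = qeth_alloc_cmd(&card->write, QETH_BUFSIZE, 1, QETH_TIMEOUT);
 *	if (!iob)
 *		return -ENOMEM;
 *	// ... fill iob->data and issue it ...
 *	qeth_put_cmd(iob);	// drop the allocation reference
 */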

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
{
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_schedule_recovery(card);
		fallthrough;
	default:
		qeth_clear_ipacmd_list(card);
		goto err_idx;
	}

	cmd = __ipa_reply(iob);
	if (cmd) {
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
		if (IS_OSN(card) && card->osn_info.assist_cb &&
		    cmd->hdr.command != IPA_CMD_STARTLAN) {
			card->osn_info.assist_cb(card->dev, cmd);
			goto out;
		}
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list_entry) {
		if (tmp->match && tmp->match(tmp, iob)) {
			request = tmp;
			/* take the object outside the lock */
			qeth_get_cmd(request);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!request)
		goto out;

	reply = &request->reply;
	if (!reply->callback) {
		rc = 0;
		goto no_callback;
	}

	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
		/* Bail out when the requestor has already left: */
		rc = request->rc;
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
							(unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
	       QETH_PDU_HEADER_SEQ_NO(iob->data),
	       QETH_SEQ_NO_LENGTH);
	__qeth_issue_next_read(card);
err_idx:
	qeth_put_cmd(iob);
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
				     unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread))
		rc = -EPERM;
	else if (card->thread_start_mask & thread)
		rc = -EBUSY;
	else
		card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);

	return rc;
}

static void qeth_clear_thread_start_bit(struct qeth_card *card,
					unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}

static void qeth_clear_thread_running_bit(struct qeth_card *card,
					  unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}

int qeth_schedule_recovery(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "startrec");

	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		schedule_work(&card->kernel_thread_starter);

	return rc;
}
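
/*
 * Callers of qeth_schedule_recovery() inherit the start-bit semantics from
 * qeth_set_thread_start_bit(); as a sketch (hypothetical caller):
 *
 *	rc = qeth_schedule_recovery(card);
 *	// rc == -EPERM: recovery is currently not an allowed thread,
 *	// rc == -EBUSY: a recovery request is already pending.
 */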

static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			 "failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
			       16, 1, irb, 64, 1);
		return -EIO;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return -EIO;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return -EIO;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return -EIO;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return -EIO;
	}
	return 0;
}

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
				struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
		return -EIO;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			 " on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
		return -ETIMEDOUT;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, " rc???");
		return PTR_ERR(irb);
	}
}
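
/*
 * qeth_irq() is the CCW interrupt handler shared by all three channels.
 * The common I/O layer invokes it with the ccwdev lock held (see the
 * comment in the body), which is what makes dereferencing the group
 * device's drvdata safe; intparm carries the address of the cmd buffer
 * that was started on the channel.
 */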

static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		     struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	if (intparm == 0) {
		QETH_CARD_TEXT(card, 5, "irqunsol");
	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
		QETH_CARD_TEXT(card, 5, "irqunexp");

		dev_err(&cdev->dev,
			"Received IRQ with intparm %lx, expected %px\n",
			intparm, channel->active_cmd);
		if (channel->active_cmd)
			qeth_cancel_cmd(channel->active_cmd, -EIO);
	} else {
		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
	}

	qeth_unlock_channel(card, channel);

	rc = qeth_check_irb_error(card, cdev, irb);
	if (rc) {
		/* IO was terminated, free its resources. */
		if (iob)
			qeth_cancel_cmd(iob, rc);
		return;
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		channel->state = CH_STATE_STOPPED;
		wake_up(&card->wait_q);
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		channel->state = CH_STATE_HALTED;
		wake_up(&card->wait_q);
	}

	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
					  SCSW_FCTL_HALT_FUNC))) {
		qeth_cancel_cmd(iob, -ECANCELED);
		iob = NULL;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				 "The qeth device driver failed to recover "
				 "an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				       DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				       DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}

		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			return;
		}
	}

	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
			return;
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}
}

static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
			     struct qeth_qdio_out_buffer *buf,
			     enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		struct sock *sk = skb->sk;

		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (sk && sk->sk_family == PF_IUCV)
			iucv_sk(sk)->sk_txnotify(sk, notification);
	}
}

static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue,
				 struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
{
	struct sk_buff *skb;

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	if (error) {
		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
	} else {
		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
	}

	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
		if (!error) {
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}

		napi_consume_skb(skb, budget);
	}
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) {
		atomic_dec(&queue->set_pci_flags_count);
		QETH_TXQ_STAT_INC(queue, completion_irq);
	}

	qeth_tx_complete_buf(queue, buf, error, budget);

	for (i = 0; i < queue->max_elements; ++i) {
		void *data = phys_to_virt(buf->buffer->element[i].addr);

		if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
			kmem_cache_free(qeth_core_header_cache, data);
	}

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;
	buf->frames = 0;
	buf->bytes = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
{
	if (buf->aob)
		qdio_release_aob(buf->aob);
	kmem_cache_free(qeth_qdio_outbuf_cache, buf);
}

static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
					  struct qeth_qdio_out_q *queue,
					  bool drain, int budget)
{
	struct qeth_qdio_out_buffer *buf, *tmp;

	list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
		struct qeth_qaob_priv1 *priv;
		struct qaob *aob = buf->aob;
		enum iucv_tx_notify notify;
		unsigned int i;

		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) {
			QETH_CARD_TEXT(card, 5, "fp");
			QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);

			notify = drain ? TX_NOTIFY_GENERALERROR :
				 qeth_compute_cq_notification(aob->aorc, 1);
			qeth_notify_skbs(queue, buf, notify);
			qeth_tx_complete_buf(queue, buf, drain, budget);

			for (i = 0;
			     i < aob->sb_count && i < queue->max_elements;
			     i++) {
				void *data = phys_to_virt(aob->sba[i]);

				if (test_bit(i, buf->from_kmem_cache) && data)
					kmem_cache_free(qeth_core_header_cache,
							data);
			}

			list_del(&buf->list_entry);
			qeth_free_out_buf(buf);
		}
	}
}

static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
	int j;

	qeth_tx_complete_pending_bufs(q->card, q, true, 0);

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;

		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
		if (free) {
			qeth_free_out_buf(q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}

static void qeth_drain_output_queues(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
	}
}
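
/*
 * OSA devices can run with either a single TX queue or the netdev's full
 * num_tx_queues. Changing the count tears down any established QDIO setup
 * first, since the queue arrays must be re-allocated to match.
 */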

static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int max = single ? 1 : card->dev->num_tx_queues;

	if (card->qdio.no_out_queues == max)
		return;

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.no_out_queues = max;
}

static int qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;

	QETH_CARD_TEXT(card, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		return -ENOMEM;

	card->info.func_level = 0x4100 + chp_dsc->desc;

	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
		qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

	kfree(chp_dsc);
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
	return 0;
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

	/* inbound */
	card->qdio.no_in_queues = 1;
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (IS_IQD(card))
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, " %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					      kernel_thread_starter);
	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
						      QETH_RECOVER_THREAD);
		}
	}
}

static void qeth_buffer_reclaim_work(struct work_struct *);
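
/*
 * The three thread masks gate the recovery kthread: "allowed" is policy,
 * "start" is a pending request, "running" is the live state. A teardown
 * sketch using the helpers defined above (hypothetical caller):
 *
 *	qeth_set_allowed_threads(card, 0, 1);	// forbid + clear pending
 *	wait_event(card->wait_q,
 *		   qeth_threads_running(card, 0xffffffff) == 0);
 */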

static void qeth_setup_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "setupcrd");

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
	hash_init(card->rx_mode_addrs);
	hash_init(card->local_addrs4);
	hash_init(card->local_addrs6);
	spin_lock_init(&card->local_addrs4_lock);
	spin_lock_init(&card->local_addrs6_lock);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					      qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			   CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;

	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
					   qeth_debugfs_root);
	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
			    &qeth_debugfs_local_addr_fops);

	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_read_cmd:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}

static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

static int qeth_stop_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	rc = ccw_device_set_offline(cdev);

	spin_lock_irq(get_ccwdev_lock(cdev));
	if (channel->active_cmd)
		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
			channel->active_cmd);

	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	return rc;
}

static int qeth_start_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	channel->state = CH_STATE_DOWN;
	xchg(&channel->active_cmd, NULL);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	rc = ccw_device_set_online(cdev);
	if (rc)
		goto err;

	return 0;

err:
	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
	return rc;
}

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
			       QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (IS_IQD(card))
			rc = qdio_shutdown(CARD_DDEV(card),
					   QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
					   QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	return rc;
}
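
/*
 * For z/VM NICs the layer discipline can be queried from the hypervisor:
 * qeth_vm_detect_layer() below fetches the guest's user ID via CP's
 * "QUERY USERID", converts it to EBCDIC, and issues DIAG 0x26C (VNIC info)
 * to learn whether the NIC is coupled in layer-2 or layer-3 mode.
 */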
sizeof(userid), &rc); 1789 if (rc) 1790 goto out; 1791 1792 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA); 1793 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA); 1794 if (!request || !response) { 1795 rc = -ENOMEM; 1796 goto out; 1797 } 1798 1799 ccw_device_get_id(CARD_RDEV(card), &id); 1800 request->resp_buf_len = sizeof(*response); 1801 request->resp_version = DIAG26C_VERSION6_VM65918; 1802 request->req_format = DIAG26C_VNIC_INFO; 1803 ASCEBC(userid, 8); 1804 memcpy(&request->sys_name, userid, 8); 1805 request->devno = id.devno; 1806 1807 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 1808 rc = diag26c(request, response, DIAG26C_PORT_VNIC); 1809 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 1810 if (rc) 1811 goto out; 1812 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response)); 1813 1814 if (request->resp_buf_len < sizeof(*response) || 1815 response->version != request->resp_version) { 1816 rc = -EIO; 1817 goto out; 1818 } 1819 1820 if (response->protocol == VNIC_INFO_PROT_L2) 1821 disc = QETH_DISCIPLINE_LAYER2; 1822 else if (response->protocol == VNIC_INFO_PROT_L3) 1823 disc = QETH_DISCIPLINE_LAYER3; 1824 1825 out: 1826 kfree(response); 1827 kfree(request); 1828 if (rc) 1829 QETH_CARD_TEXT_(card, 2, "err%x", rc); 1830 return disc; 1831 } 1832 1833 /* Determine whether the device requires a specific layer discipline */ 1834 static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card) 1835 { 1836 enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED; 1837 1838 if (IS_OSM(card) || IS_OSN(card)) 1839 disc = QETH_DISCIPLINE_LAYER2; 1840 else if (IS_VM_NIC(card)) 1841 disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 : 1842 qeth_vm_detect_layer(card); 1843 1844 switch (disc) { 1845 case QETH_DISCIPLINE_LAYER2: 1846 QETH_CARD_TEXT(card, 3, "force l2"); 1847 break; 1848 case QETH_DISCIPLINE_LAYER3: 1849 QETH_CARD_TEXT(card, 3, "force l3"); 1850 break; 1851 default: 1852 QETH_CARD_TEXT(card, 3, "force no"); 1853 } 1854 1855 return disc; 1856 } 1857 1858 static void qeth_set_blkt_defaults(struct qeth_card *card) 1859 { 1860 QETH_CARD_TEXT(card, 2, "cfgblkt"); 1861 1862 if (card->info.use_v1_blkt) { 1863 card->info.blkt.time_total = 0; 1864 card->info.blkt.inter_packet = 0; 1865 card->info.blkt.inter_packet_jumbo = 0; 1866 } else { 1867 card->info.blkt.time_total = 250; 1868 card->info.blkt.inter_packet = 5; 1869 card->info.blkt.inter_packet_jumbo = 15; 1870 } 1871 } 1872 1873 static void qeth_idx_init(struct qeth_card *card) 1874 { 1875 memset(&card->seqno, 0, sizeof(card->seqno)); 1876 1877 card->token.issuer_rm_w = 0x00010103UL; 1878 card->token.cm_filter_w = 0x00010108UL; 1879 card->token.cm_connection_w = 0x0001010aUL; 1880 card->token.ulp_filter_w = 0x0001010bUL; 1881 card->token.ulp_connection_w = 0x0001010dUL; 1882 1883 switch (card->info.type) { 1884 case QETH_CARD_TYPE_IQD: 1885 card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD; 1886 break; 1887 case QETH_CARD_TYPE_OSD: 1888 case QETH_CARD_TYPE_OSN: 1889 card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD; 1890 break; 1891 default: 1892 break; 1893 } 1894 } 1895 1896 static void qeth_idx_finalize_cmd(struct qeth_card *card, 1897 struct qeth_cmd_buffer *iob) 1898 { 1899 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr, 1900 QETH_SEQ_NO_LENGTH); 1901 if (iob->channel == &card->write) 1902 card->seqno.trans_hdr++; 1903 } 1904 1905 static int qeth_peer_func_level(int level) 1906 { 1907 if ((level & 0xff) == 8) 1908 return (level & 0xff) + 0x400; 1909 if (((level >> 8) & 3) == 1) 

static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

static void qeth_mpc_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_idx_finalize_cmd(card, iob);

	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);

	iob->callback = qeth_release_buffer_cb;
}

static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
				 struct qeth_cmd_buffer *reply)
{
	/* MPC cmds are issued strictly in sequence. */
	return !IS_IPA(reply->data);
}

static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
						  const void *data,
						  unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;

	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
	if (!iob)
		return NULL;

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
		       iob->data);
	iob->finalize = qeth_mpc_finalize_cmd;
	iob->match = qeth_mpc_match_reply;
	return iob;
}

/**
 * qeth_send_control_data() - send control command to the card
 * @card:	qeth_card structure pointer
 * @iob:	qeth_cmd_buffer pointer
 * @reply_cb:	callback function pointer
 *  @cb_card:	pointer to the qeth_card structure
 *  @cb_reply:	pointer to the qeth_reply structure
 *  @cb_cmd:	pointer to the original iob for non-IPA
 *		commands, or to the qeth_ipa_cmd structure
 *		for the IPA commands.
 * @reply_param: private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
 * function must return
 *   > 0 if more reply blocks are expected,
 *     0 if the last or only reply block is received, and
 *   < 0 on error.
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
 */

static int qeth_send_control_data(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
{
	struct qeth_channel *channel = iob->channel;
	struct qeth_reply *reply = &iob->reply;
	long timeout = iob->timeout;
	int rc;

	QETH_CARD_TEXT(card, 2, "sendctl");

	reply->callback = reply_cb;
	reply->param = reply_param;

	timeout = wait_event_interruptible_timeout(card->wait_q,
						   qeth_trylock_channel(channel, iob),
						   timeout);
	if (timeout <= 0) {
		qeth_put_cmd(iob);
		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
	}

	if (iob->finalize)
		iob->finalize(card, iob);
	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));

	qeth_enqueue_cmd(card, iob);

	/* This pairs with iob->callback, and keeps the iob alive after IO: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
				      (addr_t) iob, 0, 0, timeout);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		qeth_dequeue_cmd(card, iob);
		qeth_put_cmd(iob);
		qeth_unlock_channel(card, channel);
		goto out;
	}

	timeout = wait_for_completion_interruptible_timeout(&iob->done,
							    timeout);
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;

	qeth_dequeue_cmd(card, iob);

	if (reply_cb) {
		/* Wait until the callback for a late reply has completed: */
		spin_lock_irq(&iob->lock);
		if (rc)
			/* Zap any callback that's still pending: */
			iob->rc = rc;
		spin_unlock_irq(&iob->lock);
	}

	if (!rc)
		rc = iob->rc;

out:
	qeth_put_cmd(iob);
	return rc;
}
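
/*
 * A minimal reply_cb sketch for qeth_send_control_data() (hypothetical
 * callback, following the contract documented above):
 *
 *	static int my_reply_cb(struct qeth_card *card,
 *			       struct qeth_reply *reply, unsigned long data)
 *	{
 *		struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
 *
 *		if (cmd->hdr.return_code)
 *			return -EIO;	// error, reported to the sender
 *		return 0;		// last (or only) reply block
 *	}
 */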

struct qeth_node_desc {
	struct node_descriptor nd1;
	struct node_descriptor nd2;
	struct node_descriptor nd3;
};

static void qeth_read_conf_data_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
	int rc = 0;
	u8 *tag;

	QETH_CARD_TEXT(card, 2, "cfgunit");

	if (data_length < sizeof(*nd)) {
		rc = -EINVAL;
		goto out;
	}

	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
			       nd->nd1.plant[1] == _ascebc['M'];
	tag = (u8 *)&nd->nd1.tag;
	card->info.chpid = tag[0];
	card->info.unit_addr2 = tag[1];

	tag = (u8 *)&nd->nd2.tag;
	card->info.cula = tag[1];

	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
				 nd->nd3.model[1] == 0xF0 &&
				 nd->nd3.model[2] >= 0xF1 &&
				 nd->nd3.model[2] <= 0xF4;

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static int qeth_read_conf_data(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct qeth_cmd_buffer *iob;
	struct ciw *ciw;

	/* scan for RCD command in extended SenseID data */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	if (ciw->count < sizeof(struct qeth_node_desc))
		return -EINVAL;

	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	iob->callback = qeth_read_conf_data_cb;
	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
		       iob->data);

	return qeth_send_control_data(card, iob, NULL, NULL);
}
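
/*
 * Read Configuration Data returns EBCDIC node descriptors; the callback
 * above derives the CHPID, unit address and CULA from the descriptor tags,
 * and recognizes a z/VM NIC by the EBCDIC plant code "VM".
 */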
QETH_CARD_TEXT_(card, 2, "idxneg%c", 2127 QETH_IDX_ACT_CAUSE_CODE(iob->data)); 2128 2129 switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) { 2130 case QETH_IDX_ACT_ERR_EXCL: 2131 dev_err(&channel->ccwdev->dev, 2132 "The adapter is used exclusively by another host\n"); 2133 return -EBUSY; 2134 case QETH_IDX_ACT_ERR_AUTH: 2135 case QETH_IDX_ACT_ERR_AUTH_USER: 2136 dev_err(&channel->ccwdev->dev, 2137 "Setting the device online failed because of insufficient authorization\n"); 2138 return -EPERM; 2139 default: 2140 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n", 2141 CCW_DEVID(channel->ccwdev)); 2142 return -EIO; 2143 } 2144 } 2145 2146 static void qeth_idx_activate_read_channel_cb(struct qeth_card *card, 2147 struct qeth_cmd_buffer *iob, 2148 unsigned int data_length) 2149 { 2150 struct qeth_channel *channel = iob->channel; 2151 u16 peer_level; 2152 int rc; 2153 2154 QETH_CARD_TEXT(card, 2, "idxrdcb"); 2155 2156 rc = qeth_idx_check_activate_response(card, channel, iob); 2157 if (rc) 2158 goto out; 2159 2160 memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); 2161 if (peer_level != qeth_peer_func_level(card->info.func_level)) { 2162 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", 2163 CCW_DEVID(channel->ccwdev), 2164 card->info.func_level, peer_level); 2165 rc = -EINVAL; 2166 goto out; 2167 } 2168 2169 memcpy(&card->token.issuer_rm_r, 2170 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), 2171 QETH_MPC_TOKEN_LENGTH); 2172 memcpy(&card->info.mcl_level[0], 2173 QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH); 2174 2175 out: 2176 qeth_notify_cmd(iob, rc); 2177 qeth_put_cmd(iob); 2178 } 2179 2180 static void qeth_idx_activate_write_channel_cb(struct qeth_card *card, 2181 struct qeth_cmd_buffer *iob, 2182 unsigned int data_length) 2183 { 2184 struct qeth_channel *channel = iob->channel; 2185 u16 peer_level; 2186 int rc; 2187 2188 QETH_CARD_TEXT(card, 2, "idxwrcb"); 2189 2190 rc = qeth_idx_check_activate_response(card, channel, iob); 2191 if (rc) 2192 goto out; 2193 2194 memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); 2195 if ((peer_level & ~0x0100) != 2196 qeth_peer_func_level(card->info.func_level)) { 2197 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", 2198 CCW_DEVID(channel->ccwdev), 2199 card->info.func_level, peer_level); 2200 rc = -EINVAL; 2201 } 2202 2203 out: 2204 qeth_notify_cmd(iob, rc); 2205 qeth_put_cmd(iob); 2206 } 2207 2208 static void qeth_idx_setup_activate_cmd(struct qeth_card *card, 2209 struct qeth_cmd_buffer *iob) 2210 { 2211 u16 addr = (card->info.cula << 8) + card->info.unit_addr2; 2212 u8 port = ((u8)card->dev->dev_port) | 0x80; 2213 struct ccw1 *ccw = __ccw_from_cmd(iob); 2214 2215 qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE, 2216 iob->data); 2217 qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data); 2218 iob->finalize = qeth_idx_finalize_cmd; 2219 2220 port |= QETH_IDX_ACT_INVAL_FRAME; 2221 memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1); 2222 memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), 2223 &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH); 2224 memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2225 &card->info.func_level, 2); 2226 memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2); 2227 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2); 2228 } 2229 2230 static int qeth_idx_activate_read_channel(struct qeth_card *card) 2231 { 2232 struct qeth_channel *channel = &card->read; 
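	/* IDX ACTIVATE is the initial handshake on a control channel; the
	 * read and write channels are activated separately, and the card's
	 * answer is checked in the respective channel callback.
	 */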
2233 struct qeth_cmd_buffer *iob; 2234 int rc; 2235 2236 QETH_CARD_TEXT(card, 2, "idxread"); 2237 2238 iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT); 2239 if (!iob) 2240 return -ENOMEM; 2241 2242 memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE); 2243 qeth_idx_setup_activate_cmd(card, iob); 2244 iob->callback = qeth_idx_activate_read_channel_cb; 2245 2246 rc = qeth_send_control_data(card, iob, NULL, NULL); 2247 if (rc) 2248 return rc; 2249 2250 channel->state = CH_STATE_UP; 2251 return 0; 2252 } 2253 2254 static int qeth_idx_activate_write_channel(struct qeth_card *card) 2255 { 2256 struct qeth_channel *channel = &card->write; 2257 struct qeth_cmd_buffer *iob; 2258 int rc; 2259 2260 QETH_CARD_TEXT(card, 2, "idxwrite"); 2261 2262 iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT); 2263 if (!iob) 2264 return -ENOMEM; 2265 2266 memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE); 2267 qeth_idx_setup_activate_cmd(card, iob); 2268 iob->callback = qeth_idx_activate_write_channel_cb; 2269 2270 rc = qeth_send_control_data(card, iob, NULL, NULL); 2271 if (rc) 2272 return rc; 2273 2274 channel->state = CH_STATE_UP; 2275 return 0; 2276 } 2277 2278 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply, 2279 unsigned long data) 2280 { 2281 struct qeth_cmd_buffer *iob; 2282 2283 QETH_CARD_TEXT(card, 2, "cmenblcb"); 2284 2285 iob = (struct qeth_cmd_buffer *) data; 2286 memcpy(&card->token.cm_filter_r, 2287 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data), 2288 QETH_MPC_TOKEN_LENGTH); 2289 return 0; 2290 } 2291 2292 static int qeth_cm_enable(struct qeth_card *card) 2293 { 2294 struct qeth_cmd_buffer *iob; 2295 2296 QETH_CARD_TEXT(card, 2, "cmenable"); 2297 2298 iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE); 2299 if (!iob) 2300 return -ENOMEM; 2301 2302 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data), 2303 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); 2304 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data), 2305 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH); 2306 2307 return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL); 2308 } 2309 2310 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply, 2311 unsigned long data) 2312 { 2313 struct qeth_cmd_buffer *iob; 2314 2315 QETH_CARD_TEXT(card, 2, "cmsetpcb"); 2316 2317 iob = (struct qeth_cmd_buffer *) data; 2318 memcpy(&card->token.cm_connection_r, 2319 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data), 2320 QETH_MPC_TOKEN_LENGTH); 2321 return 0; 2322 } 2323 2324 static int qeth_cm_setup(struct qeth_card *card) 2325 { 2326 struct qeth_cmd_buffer *iob; 2327 2328 QETH_CARD_TEXT(card, 2, "cmsetup"); 2329 2330 iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE); 2331 if (!iob) 2332 return -ENOMEM; 2333 2334 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data), 2335 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); 2336 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data), 2337 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH); 2338 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data), 2339 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH); 2340 return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL); 2341 } 2342 2343 static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type) 2344 { 2345 if (link_type == QETH_LINK_TYPE_LANE_TR || 2346 link_type == QETH_LINK_TYPE_HSTR) { 2347 dev_err(&card->gdev->dev, "Unsupported Token Ring device\n"); 2348 return false; 2349 } 2350 2351 return true; 2352 } 2353 2354 static int qeth_update_max_mtu(struct qeth_card *card, unsigned 
int max_mtu) 2355 { 2356 struct net_device *dev = card->dev; 2357 unsigned int new_mtu; 2358 2359 if (!max_mtu) { 2360 /* IQD needs accurate max MTU to set up its RX buffers: */ 2361 if (IS_IQD(card)) 2362 return -EINVAL; 2363 /* tolerate quirky HW: */ 2364 max_mtu = ETH_MAX_MTU; 2365 } 2366 2367 rtnl_lock(); 2368 if (IS_IQD(card)) { 2369 /* move any device with default MTU to new max MTU: */ 2370 new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu; 2371 2372 /* adjust RX buffer size to new max MTU: */ 2373 card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE; 2374 if (dev->max_mtu && dev->max_mtu != max_mtu) 2375 qeth_free_qdio_queues(card); 2376 } else { 2377 if (dev->mtu) 2378 new_mtu = dev->mtu; 2379 /* default MTUs for first setup: */ 2380 else if (IS_LAYER2(card)) 2381 new_mtu = ETH_DATA_LEN; 2382 else 2383 new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */ 2384 } 2385 2386 dev->max_mtu = max_mtu; 2387 dev->mtu = min(new_mtu, max_mtu); 2388 rtnl_unlock(); 2389 return 0; 2390 } 2391 2392 static int qeth_get_mtu_outof_framesize(int framesize) 2393 { 2394 switch (framesize) { 2395 case 0x4000: 2396 return 8192; 2397 case 0x6000: 2398 return 16384; 2399 case 0xa000: 2400 return 32768; 2401 case 0xffff: 2402 return 57344; 2403 default: 2404 return 0; 2405 } 2406 } 2407 2408 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, 2409 unsigned long data) 2410 { 2411 __u16 mtu, framesize; 2412 __u16 len; 2413 struct qeth_cmd_buffer *iob; 2414 u8 link_type = 0; 2415 2416 QETH_CARD_TEXT(card, 2, "ulpenacb"); 2417 2418 iob = (struct qeth_cmd_buffer *) data; 2419 memcpy(&card->token.ulp_filter_r, 2420 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data), 2421 QETH_MPC_TOKEN_LENGTH); 2422 if (IS_IQD(card)) { 2423 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2); 2424 mtu = qeth_get_mtu_outof_framesize(framesize); 2425 } else { 2426 mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data); 2427 } 2428 *(u16 *)reply->param = mtu; 2429 2430 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2); 2431 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) { 2432 memcpy(&link_type, 2433 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1); 2434 if (!qeth_is_supported_link_type(card, link_type)) 2435 return -EPROTONOSUPPORT; 2436 } 2437 2438 card->info.link_type = link_type; 2439 QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type); 2440 return 0; 2441 } 2442 2443 static u8 qeth_mpc_select_prot_type(struct qeth_card *card) 2444 { 2445 if (IS_OSN(card)) 2446 return QETH_PROT_OSN2; 2447 return IS_LAYER2(card) ? 
QETH_PROT_LAYER2 : QETH_PROT_TCPIP; 2448 } 2449 2450 static int qeth_ulp_enable(struct qeth_card *card) 2451 { 2452 u8 prot_type = qeth_mpc_select_prot_type(card); 2453 struct qeth_cmd_buffer *iob; 2454 u16 max_mtu; 2455 int rc; 2456 2457 QETH_CARD_TEXT(card, 2, "ulpenabl"); 2458 2459 iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE); 2460 if (!iob) 2461 return -ENOMEM; 2462 2463 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port; 2464 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1); 2465 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data), 2466 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2467 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data), 2468 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH); 2469 rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu); 2470 if (rc) 2471 return rc; 2472 return qeth_update_max_mtu(card, max_mtu); 2473 } 2474 2475 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, 2476 unsigned long data) 2477 { 2478 struct qeth_cmd_buffer *iob; 2479 2480 QETH_CARD_TEXT(card, 2, "ulpstpcb"); 2481 2482 iob = (struct qeth_cmd_buffer *) data; 2483 memcpy(&card->token.ulp_connection_r, 2484 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), 2485 QETH_MPC_TOKEN_LENGTH); 2486 if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), 2487 3)) { 2488 QETH_CARD_TEXT(card, 2, "olmlimit"); 2489 dev_err(&card->gdev->dev, "A connection could not be " 2490 "established because of an OLM limit\n"); 2491 return -EMLINK; 2492 } 2493 return 0; 2494 } 2495 2496 static int qeth_ulp_setup(struct qeth_card *card) 2497 { 2498 __u16 temp; 2499 struct qeth_cmd_buffer *iob; 2500 2501 QETH_CARD_TEXT(card, 2, "ulpsetup"); 2502 2503 iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE); 2504 if (!iob) 2505 return -ENOMEM; 2506 2507 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data), 2508 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2509 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data), 2510 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH); 2511 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data), 2512 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH); 2513 2514 memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2); 2515 temp = (card->info.cula << 8) + card->info.unit_addr2; 2516 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2); 2517 return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL); 2518 } 2519 2520 static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx, 2521 gfp_t gfp) 2522 { 2523 struct qeth_qdio_out_buffer *newbuf; 2524 2525 newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, gfp); 2526 if (!newbuf) 2527 return -ENOMEM; 2528 2529 newbuf->buffer = q->qdio_bufs[bidx]; 2530 skb_queue_head_init(&newbuf->skb_list); 2531 lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key); 2532 atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY); 2533 q->bufs[bidx] = newbuf; 2534 return 0; 2535 } 2536 2537 static void qeth_free_output_queue(struct qeth_qdio_out_q *q) 2538 { 2539 if (!q) 2540 return; 2541 2542 qeth_drain_output_queue(q, true); 2543 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2544 kfree(q); 2545 } 2546 2547 static struct qeth_qdio_out_q *qeth_alloc_output_queue(void) 2548 { 2549 struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL); 2550 unsigned int i; 2551 2552 if (!q) 2553 return NULL; 2554 2555 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) 2556 goto err_qdio_bufs; 2557 2558 for (i = 0; i < 
QDIO_MAX_BUFFERS_PER_Q; i++) { 2559 if (qeth_alloc_out_buf(q, i, GFP_KERNEL)) 2560 goto err_out_bufs; 2561 } 2562 2563 return q; 2564 2565 err_out_bufs: 2566 while (i > 0) 2567 qeth_free_out_buf(q->bufs[--i]); 2568 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2569 err_qdio_bufs: 2570 kfree(q); 2571 return NULL; 2572 } 2573 2574 static void qeth_tx_completion_timer(struct timer_list *timer) 2575 { 2576 struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer); 2577 2578 napi_schedule(&queue->napi); 2579 QETH_TXQ_STAT_INC(queue, completion_timer); 2580 } 2581 2582 static int qeth_alloc_qdio_queues(struct qeth_card *card) 2583 { 2584 unsigned int i; 2585 2586 QETH_CARD_TEXT(card, 2, "allcqdbf"); 2587 2588 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED, 2589 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED) 2590 return 0; 2591 2592 QETH_CARD_TEXT(card, 2, "inq"); 2593 card->qdio.in_q = qeth_alloc_qdio_queue(); 2594 if (!card->qdio.in_q) 2595 goto out_nomem; 2596 2597 /* inbound buffer pool */ 2598 if (qeth_alloc_buffer_pool(card)) 2599 goto out_freeinq; 2600 2601 /* outbound */ 2602 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2603 struct qeth_qdio_out_q *queue; 2604 2605 queue = qeth_alloc_output_queue(); 2606 if (!queue) 2607 goto out_freeoutq; 2608 QETH_CARD_TEXT_(card, 2, "outq %i", i); 2609 QETH_CARD_HEX(card, 2, &queue, sizeof(void *)); 2610 card->qdio.out_qs[i] = queue; 2611 queue->card = card; 2612 queue->queue_no = i; 2613 INIT_LIST_HEAD(&queue->pending_bufs); 2614 spin_lock_init(&queue->lock); 2615 timer_setup(&queue->timer, qeth_tx_completion_timer, 0); 2616 if (IS_IQD(card)) { 2617 queue->coalesce_usecs = QETH_TX_COALESCE_USECS; 2618 queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES; 2619 queue->rescan_usecs = QETH_TX_TIMER_USECS; 2620 } else { 2621 queue->coalesce_usecs = USEC_PER_SEC; 2622 queue->max_coalesced_frames = 0; 2623 queue->rescan_usecs = 10 * USEC_PER_SEC; 2624 } 2625 queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT; 2626 } 2627 2628 /* completion */ 2629 if (qeth_alloc_cq(card)) 2630 goto out_freeoutq; 2631 2632 return 0; 2633 2634 out_freeoutq: 2635 while (i > 0) { 2636 qeth_free_output_queue(card->qdio.out_qs[--i]); 2637 card->qdio.out_qs[i] = NULL; 2638 } 2639 qeth_free_buffer_pool(card); 2640 out_freeinq: 2641 qeth_free_qdio_queue(card->qdio.in_q); 2642 card->qdio.in_q = NULL; 2643 out_nomem: 2644 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); 2645 return -ENOMEM; 2646 } 2647 2648 static void qeth_free_qdio_queues(struct qeth_card *card) 2649 { 2650 int i, j; 2651 2652 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == 2653 QETH_QDIO_UNINITIALIZED) 2654 return; 2655 2656 qeth_free_cq(card); 2657 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { 2658 if (card->qdio.in_q->bufs[j].rx_skb) 2659 dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb); 2660 } 2661 qeth_free_qdio_queue(card->qdio.in_q); 2662 card->qdio.in_q = NULL; 2663 /* inbound buffer pool */ 2664 qeth_free_buffer_pool(card); 2665 /* free outbound qdio_qs */ 2666 for (i = 0; i < card->qdio.no_out_queues; i++) { 2667 qeth_free_output_queue(card->qdio.out_qs[i]); 2668 card->qdio.out_qs[i] = NULL; 2669 } 2670 } 2671 2672 static void qeth_fill_qib_parms(struct qeth_card *card, 2673 struct qeth_qib_parms *parms) 2674 { 2675 struct qeth_qdio_out_q *queue; 2676 unsigned int i; 2677 2678 parms->pcit_magic[0] = 'P'; 2679 parms->pcit_magic[1] = 'C'; 2680 parms->pcit_magic[2] = 'I'; 2681 parms->pcit_magic[3] = 'T'; 2682 ASCEBC(parms->pcit_magic, 
	       sizeof(parms->pcit_magic));
	parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
	parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
	parms->pcit_c = QETH_PCI_TIMER_VALUE(card);

	parms->blkt_magic[0] = 'B';
	parms->blkt_magic[1] = 'L';
	parms->blkt_magic[2] = 'K';
	parms->blkt_magic[3] = 'T';
	ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
	parms->blkt_total = card->info.blkt.time_total;
	parms->blkt_inter_packet = card->info.blkt.inter_packet;
	parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;

	/* Prio-queueing implicitly uses the default priorities: */
	if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
		return;

	parms->pque_magic[0] = 'P';
	parms->pque_magic[1] = 'Q';
	parms->pque_magic[2] = 'U';
	parms->pque_magic[3] = 'E';
	ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
	parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
	parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;

	qeth_for_each_output_queue(card, queue, i)
		parms->pque_priority[i] = queue->priority;
}

static int qeth_qdio_activate(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 3, "qdioact");
	return qdio_activate(CARD_DDEV(card));
}

static int qeth_dm_act(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "dmact");

	iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, NULL, NULL);
}

static int qeth_mpc_initialize(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "mpcinit");

	rc = qeth_issue_next_read(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		return rc;
	}
	rc = qeth_cm_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%d", rc);
		return rc;
	}
	rc = qeth_cm_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
		return rc;
	}
	rc = qeth_ulp_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
		return rc;
	}
	rc = qeth_ulp_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		return rc;
	}
	rc = qeth_alloc_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		return rc;
	}
	rc = qeth_qdio_establish(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		qeth_free_qdio_queues(card);
		return rc;
	}
	rc = qeth_qdio_activate(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "7err%d", rc);
		return rc;
	}
	rc = qeth_dm_act(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "8err%d", rc);
		return rc;
	}

	return 0;
}

static void qeth_print_status_message(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSM:
	case QETH_CARD_TYPE_OSX:
		/* VM will use a non-zero first character to indicate a
		 * HiperSockets-like reporting of the level; OSA sets the
		 * first character to zero.
		 */
		if (!card->info.mcl_level[0]) {
			sprintf(card->info.mcl_level, "%02x%02x",
				card->info.mcl_level[2],
				card->info.mcl_level[3]);
			break;
		}
		fallthrough;
	case QETH_CARD_TYPE_IQD:
		if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
				card->info.mcl_level[0]];
			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
				card->info.mcl_level[1]];
			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
				card->info.mcl_level[2]];
			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
				card->info.mcl_level[3]];
			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
		}
		break;
	default:
		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
	}
	dev_info(&card->gdev->dev,
		 "Device is a%s card%s%s%s\nwith link type %s.\n",
		 qeth_get_cardname(card),
		 (card->info.mcl_level[0]) ? " (level: " : "",
		 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
		 (card->info.mcl_level[0]) ? ")" : "",
		 qeth_get_cardname_short(card));
}

static void qeth_initialize_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;

	QETH_CARD_TEXT(card, 5, "inwrklst");

	list_for_each_entry(entry,
			    &card->qdio.init_pool.entry_list, init_list) {
		qeth_put_buffer_pool_entry(card, entry);
	}
}

static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
					struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;
	int i, free;

	if (list_empty(&card->qdio.in_buf_pool.entry_list))
		return NULL;

	list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
		free = 1;
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
			if (page_count(entry->elements[i]) > 1) {
				free = 0;
				break;
			}
		}
		if (free) {
			list_del_init(&entry->list);
			return entry;
		}
	}

	/* no free buffer in pool, so take the first one and swap pages */
	entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
				 struct qeth_buffer_pool_entry, list);
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		if (page_count(entry->elements[i]) > 1) {
			struct page *page = dev_alloc_page();

			if (!page)
				return NULL;

			__free_page(entry->elements[i]);
			entry->elements[i] = page;
			QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
		}
	}
	list_del_init(&entry->list);
	return entry;
}

static int qeth_init_input_buffer(struct qeth_card *card,
				  struct qeth_qdio_buffer *buf)
{
	struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
	int i;

	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
		buf->rx_skb = netdev_alloc_skb(card->dev,
					       ETH_HLEN +
					       sizeof(struct ipv6hdr));
		if (!buf->rx_skb)
			return -ENOMEM;
	}

	if (!pool_entry) {
		pool_entry = qeth_find_free_buffer_pool_entry(card);
		if (!pool_entry)
			return -ENOBUFS;

		buf->pool_entry = pool_entry;
	}

	/*
	 * since the buffer is accessed only from the input_tasklet
	 * there shouldn't be a need to synchronize; also, since we use
	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
	 * buffers
	 */
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		buf->buffer->element[i].length = PAGE_SIZE;
		buf->buffer->element[i].addr =
			page_to_phys(pool_entry->elements[i]);
		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
			buf->buffer->element[i].eflags =
SBAL_EFLAGS_LAST_ENTRY; 2921 else 2922 buf->buffer->element[i].eflags = 0; 2923 buf->buffer->element[i].sflags = 0; 2924 } 2925 return 0; 2926 } 2927 2928 static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card, 2929 struct qeth_qdio_out_q *queue) 2930 { 2931 if (!IS_IQD(card) || 2932 qeth_iqd_is_mcast_queue(card, queue) || 2933 card->options.cq == QETH_CQ_ENABLED || 2934 qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd)) 2935 return 1; 2936 2937 return card->ssqd.mmwc ? card->ssqd.mmwc : 1; 2938 } 2939 2940 static int qeth_init_qdio_queues(struct qeth_card *card) 2941 { 2942 unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count; 2943 unsigned int i; 2944 int rc; 2945 2946 QETH_CARD_TEXT(card, 2, "initqdqs"); 2947 2948 /* inbound queue */ 2949 qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2950 memset(&card->rx, 0, sizeof(struct qeth_rx)); 2951 2952 qeth_initialize_working_pool_list(card); 2953 /*give only as many buffers to hardware as we have buffer pool entries*/ 2954 for (i = 0; i < rx_bufs; i++) { 2955 rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); 2956 if (rc) 2957 return rc; 2958 } 2959 2960 card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs); 2961 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs, 2962 NULL); 2963 if (rc) { 2964 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 2965 return rc; 2966 } 2967 2968 /* completion */ 2969 rc = qeth_cq_init(card); 2970 if (rc) { 2971 return rc; 2972 } 2973 2974 /* outbound queue */ 2975 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2976 struct qeth_qdio_out_q *queue = card->qdio.out_qs[i]; 2977 2978 qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2979 queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card); 2980 queue->next_buf_to_fill = 0; 2981 queue->do_pack = 0; 2982 queue->prev_hdr = NULL; 2983 queue->coalesced_frames = 0; 2984 queue->bulk_start = 0; 2985 queue->bulk_count = 0; 2986 queue->bulk_max = qeth_tx_select_bulk_max(card, queue); 2987 atomic_set(&queue->used_buffers, 0); 2988 atomic_set(&queue->set_pci_flags_count, 0); 2989 netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i)); 2990 } 2991 return 0; 2992 } 2993 2994 static void qeth_ipa_finalize_cmd(struct qeth_card *card, 2995 struct qeth_cmd_buffer *iob) 2996 { 2997 qeth_mpc_finalize_cmd(card, iob); 2998 2999 /* override with IPA-specific values: */ 3000 __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++; 3001 } 3002 3003 void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, 3004 u16 cmd_length, 3005 bool (*match)(struct qeth_cmd_buffer *iob, 3006 struct qeth_cmd_buffer *reply)) 3007 { 3008 u8 prot_type = qeth_mpc_select_prot_type(card); 3009 u16 total_length = iob->length; 3010 3011 qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length, 3012 iob->data); 3013 iob->finalize = qeth_ipa_finalize_cmd; 3014 iob->match = match; 3015 3016 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); 3017 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2); 3018 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1); 3019 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2); 3020 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2); 3021 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), 3022 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); 3023 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2); 3024 } 3025 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); 3026 3027 static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob, 3028 struct qeth_cmd_buffer *reply) 3029 
{ 3030 struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply); 3031 3032 return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno); 3033 } 3034 3035 struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card, 3036 enum qeth_ipa_cmds cmd_code, 3037 enum qeth_prot_versions prot, 3038 unsigned int data_length) 3039 { 3040 struct qeth_cmd_buffer *iob; 3041 struct qeth_ipacmd_hdr *hdr; 3042 3043 data_length += offsetof(struct qeth_ipa_cmd, data); 3044 iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1, 3045 QETH_IPA_TIMEOUT); 3046 if (!iob) 3047 return NULL; 3048 3049 qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply); 3050 3051 hdr = &__ipa_cmd(iob)->hdr; 3052 hdr->command = cmd_code; 3053 hdr->initiator = IPA_CMD_INITIATOR_HOST; 3054 /* hdr->seqno is set by qeth_send_control_data() */ 3055 hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH; 3056 hdr->rel_adapter_no = (u8) card->dev->dev_port; 3057 hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1; 3058 hdr->param_count = 1; 3059 hdr->prot_version = prot; 3060 return iob; 3061 } 3062 EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd); 3063 3064 static int qeth_send_ipa_cmd_cb(struct qeth_card *card, 3065 struct qeth_reply *reply, unsigned long data) 3066 { 3067 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3068 3069 return (cmd->hdr.return_code) ? -EIO : 0; 3070 } 3071 3072 /** 3073 * qeth_send_ipa_cmd() - send an IPA command 3074 * 3075 * See qeth_send_control_data() for explanation of the arguments. 3076 */ 3077 3078 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, 3079 int (*reply_cb)(struct qeth_card *, struct qeth_reply*, 3080 unsigned long), 3081 void *reply_param) 3082 { 3083 int rc; 3084 3085 QETH_CARD_TEXT(card, 4, "sendipa"); 3086 3087 if (card->read_or_write_problem) { 3088 qeth_put_cmd(iob); 3089 return -EIO; 3090 } 3091 3092 if (reply_cb == NULL) 3093 reply_cb = qeth_send_ipa_cmd_cb; 3094 rc = qeth_send_control_data(card, iob, reply_cb, reply_param); 3095 if (rc == -ETIME) { 3096 qeth_clear_ipacmd_list(card); 3097 qeth_schedule_recovery(card); 3098 } 3099 return rc; 3100 } 3101 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); 3102 3103 static int qeth_send_startlan_cb(struct qeth_card *card, 3104 struct qeth_reply *reply, unsigned long data) 3105 { 3106 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3107 3108 if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE) 3109 return -ENETDOWN; 3110 3111 return (cmd->hdr.return_code) ? 
-EIO : 0; 3112 } 3113 3114 static int qeth_send_startlan(struct qeth_card *card) 3115 { 3116 struct qeth_cmd_buffer *iob; 3117 3118 QETH_CARD_TEXT(card, 2, "strtlan"); 3119 3120 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0); 3121 if (!iob) 3122 return -ENOMEM; 3123 return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL); 3124 } 3125 3126 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd) 3127 { 3128 if (!cmd->hdr.return_code) 3129 cmd->hdr.return_code = 3130 cmd->data.setadapterparms.hdr.return_code; 3131 return cmd->hdr.return_code; 3132 } 3133 3134 static int qeth_query_setadapterparms_cb(struct qeth_card *card, 3135 struct qeth_reply *reply, unsigned long data) 3136 { 3137 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3138 struct qeth_query_cmds_supp *query_cmd; 3139 3140 QETH_CARD_TEXT(card, 3, "quyadpcb"); 3141 if (qeth_setadpparms_inspect_rc(cmd)) 3142 return -EIO; 3143 3144 query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp; 3145 if (query_cmd->lan_type & 0x7f) { 3146 if (!qeth_is_supported_link_type(card, query_cmd->lan_type)) 3147 return -EPROTONOSUPPORT; 3148 3149 card->info.link_type = query_cmd->lan_type; 3150 QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type); 3151 } 3152 3153 card->options.adp.supported = query_cmd->supported_cmds; 3154 return 0; 3155 } 3156 3157 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, 3158 enum qeth_ipa_setadp_cmd adp_cmd, 3159 unsigned int data_length) 3160 { 3161 struct qeth_ipacmd_setadpparms_hdr *hdr; 3162 struct qeth_cmd_buffer *iob; 3163 3164 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4, 3165 data_length + 3166 offsetof(struct qeth_ipacmd_setadpparms, 3167 data)); 3168 if (!iob) 3169 return NULL; 3170 3171 hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr; 3172 hdr->cmdlength = sizeof(*hdr) + data_length; 3173 hdr->command_code = adp_cmd; 3174 hdr->used_total = 1; 3175 hdr->seq_no = 1; 3176 return iob; 3177 } 3178 3179 static int qeth_query_setadapterparms(struct qeth_card *card) 3180 { 3181 int rc; 3182 struct qeth_cmd_buffer *iob; 3183 3184 QETH_CARD_TEXT(card, 3, "queryadp"); 3185 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, 3186 SETADP_DATA_SIZEOF(query_cmds_supp)); 3187 if (!iob) 3188 return -ENOMEM; 3189 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); 3190 return rc; 3191 } 3192 3193 static int qeth_query_ipassists_cb(struct qeth_card *card, 3194 struct qeth_reply *reply, unsigned long data) 3195 { 3196 struct qeth_ipa_cmd *cmd; 3197 3198 QETH_CARD_TEXT(card, 2, "qipasscb"); 3199 3200 cmd = (struct qeth_ipa_cmd *) data; 3201 3202 switch (cmd->hdr.return_code) { 3203 case IPA_RC_SUCCESS: 3204 break; 3205 case IPA_RC_NOTSUPP: 3206 case IPA_RC_L2_UNSUPPORTED_CMD: 3207 QETH_CARD_TEXT(card, 2, "ipaunsup"); 3208 card->options.ipa4.supported |= IPA_SETADAPTERPARMS; 3209 card->options.ipa6.supported |= IPA_SETADAPTERPARMS; 3210 return -EOPNOTSUPP; 3211 default: 3212 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n", 3213 CARD_DEVID(card), cmd->hdr.return_code); 3214 return -EIO; 3215 } 3216 3217 if (cmd->hdr.prot_version == QETH_PROT_IPV4) 3218 card->options.ipa4 = cmd->hdr.assists; 3219 else if (cmd->hdr.prot_version == QETH_PROT_IPV6) 3220 card->options.ipa6 = cmd->hdr.assists; 3221 else 3222 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n", 3223 CARD_DEVID(card)); 3224 return 0; 3225 } 3226 3227 static int 
qeth_query_ipassists(struct qeth_card *card, 3228 enum qeth_prot_versions prot) 3229 { 3230 int rc; 3231 struct qeth_cmd_buffer *iob; 3232 3233 QETH_CARD_TEXT_(card, 2, "qipassi%i", prot); 3234 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0); 3235 if (!iob) 3236 return -ENOMEM; 3237 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); 3238 return rc; 3239 } 3240 3241 static int qeth_query_switch_attributes_cb(struct qeth_card *card, 3242 struct qeth_reply *reply, unsigned long data) 3243 { 3244 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3245 struct qeth_query_switch_attributes *attrs; 3246 struct qeth_switch_info *sw_info; 3247 3248 QETH_CARD_TEXT(card, 2, "qswiatcb"); 3249 if (qeth_setadpparms_inspect_rc(cmd)) 3250 return -EIO; 3251 3252 sw_info = (struct qeth_switch_info *)reply->param; 3253 attrs = &cmd->data.setadapterparms.data.query_switch_attributes; 3254 sw_info->capabilities = attrs->capabilities; 3255 sw_info->settings = attrs->settings; 3256 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities, 3257 sw_info->settings); 3258 return 0; 3259 } 3260 3261 int qeth_query_switch_attributes(struct qeth_card *card, 3262 struct qeth_switch_info *sw_info) 3263 { 3264 struct qeth_cmd_buffer *iob; 3265 3266 QETH_CARD_TEXT(card, 2, "qswiattr"); 3267 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES)) 3268 return -EOPNOTSUPP; 3269 if (!netif_carrier_ok(card->dev)) 3270 return -ENOMEDIUM; 3271 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0); 3272 if (!iob) 3273 return -ENOMEM; 3274 return qeth_send_ipa_cmd(card, iob, 3275 qeth_query_switch_attributes_cb, sw_info); 3276 } 3277 3278 struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card, 3279 enum qeth_diags_cmds sub_cmd, 3280 unsigned int data_length) 3281 { 3282 struct qeth_ipacmd_diagass *cmd; 3283 struct qeth_cmd_buffer *iob; 3284 3285 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE, 3286 DIAG_HDR_LEN + data_length); 3287 if (!iob) 3288 return NULL; 3289 3290 cmd = &__ipa_cmd(iob)->data.diagass; 3291 cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length; 3292 cmd->subcmd = sub_cmd; 3293 return iob; 3294 } 3295 EXPORT_SYMBOL_GPL(qeth_get_diag_cmd); 3296 3297 static int qeth_query_setdiagass_cb(struct qeth_card *card, 3298 struct qeth_reply *reply, unsigned long data) 3299 { 3300 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3301 u16 rc = cmd->hdr.return_code; 3302 3303 if (rc) { 3304 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc); 3305 return -EIO; 3306 } 3307 3308 card->info.diagass_support = cmd->data.diagass.ext; 3309 return 0; 3310 } 3311 3312 static int qeth_query_setdiagass(struct qeth_card *card) 3313 { 3314 struct qeth_cmd_buffer *iob; 3315 3316 QETH_CARD_TEXT(card, 2, "qdiagass"); 3317 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0); 3318 if (!iob) 3319 return -ENOMEM; 3320 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL); 3321 } 3322 3323 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid) 3324 { 3325 unsigned long info = get_zeroed_page(GFP_KERNEL); 3326 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info; 3327 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info; 3328 struct ccw_dev_id ccwid; 3329 int level; 3330 3331 tid->chpid = card->info.chpid; 3332 ccw_device_get_id(CARD_RDEV(card), &ccwid); 3333 tid->ssid = ccwid.ssid; 3334 tid->devno = ccwid.devno; 3335 if (!info) 3336 return; 3337 level = stsi(NULL, 0, 0, 0); 3338 if ((level >= 2) && 
	    (stsi(info222, 2, 2, 2) == 0))
		tid->lparnr = info222->lpar_number;
	if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
	}
	free_page(info);
}

static int qeth_hw_trap_cb(struct qeth_card *card,
			   struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u16 rc = cmd->hdr.return_code;

	if (rc) {
		QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
		return -EIO;
	}
	return 0;
}

int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "diagtrap");
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	cmd->data.diagass.type = 1;
	cmd->data.diagass.action = action;
	switch (action) {
	case QETH_DIAGS_TRAP_ARM:
		cmd->data.diagass.options = 0x0003;
		cmd->data.diagass.ext = 0x00010000 +
					sizeof(struct qeth_trap_id);
		qeth_get_trap_id(card,
				 (struct qeth_trap_id *)cmd->data.diagass.cdata);
		break;
	case QETH_DIAGS_TRAP_DISARM:
		cmd->data.diagass.options = 0x0001;
		break;
	case QETH_DIAGS_TRAP_CAPTURE:
		break;
	}
	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
}

static int qeth_check_qdio_errors(struct qeth_card *card,
				  struct qdio_buffer *buf,
				  unsigned int qdio_error,
				  const char *dbftext)
{
	if (qdio_error) {
		QETH_CARD_TEXT(card, 2, dbftext);
		QETH_CARD_TEXT_(card, 2, " F15=%02X",
				buf->element[15].sflags);
		QETH_CARD_TEXT_(card, 2, " F14=%02X",
				buf->element[14].sflags);
		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
		if ((buf->element[15].sflags) == 0x12) {
			QETH_CARD_STAT_INC(card, rx_fifo_errors);
			return 0;
		} else
			return 1;
	}
	return 0;
}

static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
					 unsigned int count)
{
	struct qeth_qdio_q *queue = card->qdio.in_q;
	struct list_head *lh;
	int i;
	int rc;
	int newcount = 0;

	/* only requeue at a certain threshold to avoid SIGAs */
	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
		for (i = queue->next_buf_to_init;
		     i < queue->next_buf_to_init + count; ++i) {
			if (qeth_init_input_buffer(card,
				&queue->bufs[QDIO_BUFNR(i)])) {
				break;
			} else {
				newcount++;
			}
		}

		if (newcount < count) {
			/* we are in a memory shortage, so we switch back to
			 * traditional skb allocation and drop packets
			 */
			atomic_set(&card->force_alloc_skb, 3);
			count = newcount;
		} else {
			atomic_add_unless(&card->force_alloc_skb, -1, 0);
		}

		if (!count) {
			i = 0;
			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
				i++;
			if (i == card->qdio.in_buf_pool.buf_count) {
				QETH_CARD_TEXT(card, 2, "qsarbw");
				schedule_delayed_work(
					&card->buffer_reclaim_work,
					QETH_RECLAIM_WORK_TIME);
			}
			return 0;
		}

		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
			     queue->next_buf_to_init, count, NULL);
		if (rc) {
			QETH_CARD_TEXT(card, 2, "qinberr");
		}
		queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
						     count);
		return count;
	}
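	/* Below the requeue threshold: hold the completed buffers back for
	 * now, so that a later pass can hand them to the device in one
	 * larger batch (and thus with fewer SIGAs).
	 */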
	return 0;
}

static void qeth_buffer_reclaim_work(struct work_struct *work)
{
	struct qeth_card *card = container_of(to_delayed_work(work),
					      struct qeth_card,
					      buffer_reclaim_work);

	local_bh_disable();
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();
}

static void qeth_handle_send_error(struct qeth_card *card,
		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
{
	int sbalf15 = buffer->buffer->element[15].sflags;

	QETH_CARD_TEXT(card, 6, "hdsnderr");
	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");

	if (!qdio_err)
		return;

	if ((sbalf15 >= 15) && (sbalf15 <= 31))
		return;

	QETH_CARD_TEXT(card, 1, "lnkfail");
	QETH_CARD_TEXT_(card, 1, "%04x %02x",
			(u16)qdio_err, (u8)sbalf15);
}

/**
 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
 * @queue: queue to check for packing buffer
 *
 * Returns number of buffers that were prepared for flush.
 */
static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;

	buffer = queue->bufs[queue->next_buf_to_fill];
	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
	    (buffer->next_element_to_fill > 0)) {
		/* it's a packing buffer */
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
			QDIO_BUFNR(queue->next_buf_to_fill + 1);
		return 1;
	}
	return 0;
}

/*
 * Switches to packing state if the number of used buffers on a queue
 * reaches a certain limit.
 */
static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
{
	if (!queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    >= QETH_HIGH_WATERMARK_PACK) {
			/* switch non-PACKING -> PACKING */
			QETH_CARD_TEXT(queue->card, 6, "np->pack");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 1;
		}
	}
}

/*
 * Switches from packing to non-packing mode. If there is a packing
 * buffer on the queue this buffer will be prepared to be flushed.
 * In that case 1 is returned to inform the caller. If no buffer
 * has to be flushed, zero is returned.
 */
static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
{
	if (queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    <= QETH_LOW_WATERMARK_PACK) {
			/* switch PACKING -> non-PACKING */
			QETH_CARD_TEXT(queue->card, 6, "pack->np");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 0;
			return qeth_prep_flush_pack_buffer(queue);
		}
	}
	return 0;
}

static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
			       int count)
{
	struct qeth_qdio_out_buffer *buf = queue->bufs[index];
	struct qeth_card *card = queue->card;
	unsigned int frames, usecs;
	struct qaob *aob = NULL;
	int rc;
	int i;

	for (i = index; i < index + count; ++i) {
		unsigned int bidx = QDIO_BUFNR(i);
		struct sk_buff *skb;

		buf = queue->bufs[bidx];
		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
				SBAL_EFLAGS_LAST_ENTRY;
		queue->coalesced_frames += buf->frames;

		if (IS_IQD(card)) {
			skb_queue_walk(&buf->skb_list, skb)
				skb_tx_timestamp(skb);
		}
	}

	if (IS_IQD(card)) {
		if (card->options.cq == QETH_CQ_ENABLED &&
		    !qeth_iqd_is_mcast_queue(card, queue) &&
		    count == 1) {
			if (!buf->aob)
				buf->aob = qdio_allocate_aob();
			if (buf->aob) {
				struct qeth_qaob_priv1 *priv;

				aob = buf->aob;
				priv = (struct qeth_qaob_priv1 *)&aob->user1;
				priv->state = QETH_QAOB_ISSUED;
				priv->queue_no = queue->queue_no;
			}
		}
	} else {
		if (!queue->do_pack) {
			if ((atomic_read(&queue->used_buffers) >=
				(QETH_HIGH_WATERMARK_PACK -
				 QETH_WATERMARK_PACK_FUZZ)) &&
			    !atomic_read(&queue->set_pci_flags_count)) {
				/* it's likely that we'll go to packing
				 * mode soon */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		} else {
			if (!atomic_read(&queue->set_pci_flags_count)) {
				/*
				 * There is no outstanding PCI request any
				 * more, so request one to make sure the PCI
				 * interrupt fires at some time in the future.
				 * Then we can flush packed buffers that might
				 * still be hanging around, which can happen
				 * if no further send was requested by the
				 * stack.
				 */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		}
	}

	QETH_TXQ_STAT_INC(queue, doorbell);
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_OUTPUT, queue->queue_no,
		     index, count, aob);

	switch (rc) {
	case 0:
	case -ENOBUFS:
		/* ignore temporary SIGA errors without busy condition */

		/* Fake the TX completion interrupt: */
		frames = READ_ONCE(queue->max_coalesced_frames);
		usecs = READ_ONCE(queue->coalesce_usecs);

		if (frames && queue->coalesced_frames >= frames) {
			napi_schedule(&queue->napi);
			queue->coalesced_frames = 0;
			QETH_TXQ_STAT_INC(queue, coal_frames);
		} else if (qeth_use_tx_irqs(card) &&
			   atomic_read(&queue->used_buffers) >= 32) {
			/* Old behaviour carried over from the qdio layer: */
			napi_schedule(&queue->napi);
			QETH_TXQ_STAT_INC(queue, coal_frames);
		} else if (usecs) {
			qeth_tx_arm_timer(queue, usecs);
		}

		break;
	default:
		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
		QETH_CARD_TEXT_(queue->card, 2, " idx%d",
				index);
		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);

		/* This must not happen under normal circumstances. If it
		 * happens anyway, something is really wrong -> recover.
		 */
		qeth_schedule_recovery(queue->card);
	}
}

static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
{
	qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);

	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
	queue->prev_hdr = NULL;
	queue->bulk_count = 0;
}

static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
	/*
	 * check if we have to switch to non-packing mode or if
	 * we have to get a PCI flag out on the queue
	 */
	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
	    !atomic_read(&queue->set_pci_flags_count)) {
		unsigned int index, flush_cnt;
		bool q_was_packing;

		spin_lock(&queue->lock);

		index = queue->next_buf_to_fill;
		q_was_packing = queue->do_pack;

		flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
		if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
			flush_cnt = qeth_prep_flush_pack_buffer(queue);

		if (flush_cnt) {
			qeth_flush_buffers(queue, index, flush_cnt);
			if (q_was_packing)
				QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
		}

		spin_unlock(&queue->lock);
	}
}

static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	napi_schedule_irqoff(&card->napi);
}

int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
	int rc;

	if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
		rc = -1;
		goto out;
	} else {
		if (card->options.cq == cq) {
			rc = 0;
			goto out;
		}

		qeth_free_qdio_queues(card);
		card->options.cq = cq;
		rc = 0;
	}
out:
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_configure_cq);

static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob)
{
	struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1;
	unsigned int queue_no = priv->queue_no;

	BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1));

	if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING &&
	    queue_no < card->qdio.no_out_queues)
		napi_schedule(&card->qdio.out_qs[queue_no]->napi);
}

static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
				 unsigned int queue, int first_element,
				 int count)
{
	struct qeth_qdio_q *cq = card->qdio.c_q;
	int i;
	int rc;

	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);

	if (qdio_err) {
		netif_tx_stop_all_queues(card->dev);
		qeth_schedule_recovery(card);
		return;
	}

	for (i = first_element; i < first_element + count; ++i) {
		struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
		int e = 0;

		while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
		       buffer->element[e].addr) {
			unsigned long phys_aob_addr = buffer->element[e].addr;

			qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr));
			++e;
		}
		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
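		/* This CQ buffer has been fully processed; together with the
		 * other completed buffers it is returned to the CQ by the
		 * do_QDIO() call below.
		 */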
	}
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
		     cq->next_buf_to_init, count, NULL);
	if (rc) {
		dev_warn(&card->gdev->dev,
			 "QDIO reported an error, rc=%i\n", rc);
		QETH_CARD_TEXT(card, 2, "qcqherr");
	}

	cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
}

static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
				    unsigned int qdio_err, int queue,
				    int first_elem, int count,
				    unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);

	if (qdio_err)
		qeth_schedule_recovery(card);
}

static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
				     unsigned int qdio_error, int __queue,
				     int first_element, int count,
				     unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *) card_ptr;
	struct net_device *dev = card->dev;

	QETH_CARD_TEXT(card, 6, "qdouhdl");
	if (qdio_error & QDIO_ERROR_FATAL) {
		QETH_CARD_TEXT(card, 2, "achkcond");
		netif_tx_stop_all_queues(dev);
		qeth_schedule_recovery(card);
	}
}

/*
 * Note: Function assumes that we have 4 outbound queues.
 */
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
	u8 tos;

	switch (card->qdio.do_prio_queueing) {
	case QETH_PRIO_Q_ING_TOS:
	case QETH_PRIO_Q_ING_PREC:
		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			tos = ipv4_get_dsfield(ip_hdr(skb));
			break;
		case htons(ETH_P_IPV6):
			tos = ipv6_get_dsfield(ipv6_hdr(skb));
			break;
		default:
			return card->qdio.default_out_queue;
		}
		if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
			return ~tos >> 6 & 3;
		if (tos & IPTOS_MINCOST)
			return 3;
		if (tos & IPTOS_RELIABILITY)
			return 2;
		if (tos & IPTOS_THROUGHPUT)
			return 1;
		if (tos & IPTOS_LOWDELAY)
			return 0;
		break;
	case QETH_PRIO_Q_ING_SKB:
		if (skb->priority > 5)
			return 0;
		return ~skb->priority >> 1 & 3;
	case QETH_PRIO_Q_ING_VLAN:
		if (veth->h_vlan_proto == htons(ETH_P_8021Q))
			return ~ntohs(veth->h_vlan_TCI) >>
			       (VLAN_PRIO_SHIFT + 1) & 3;
		break;
	case QETH_PRIO_Q_ING_FIXED:
		return card->qdio.default_out_queue;
	default:
		break;
	}
	return card->qdio.default_out_queue;
}
EXPORT_SYMBOL_GPL(qeth_get_priority_queue);

/**
 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
 * @skb: SKB address
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
 * the fragmented part of the SKB. Returns zero for a linear SKB.
 */
static int qeth_get_elements_for_frags(struct sk_buff *skb)
{
	int cnt, elements = 0;

	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		elements += qeth_get_elements_for_range(
			(addr_t)skb_frag_address(frag),
			(addr_t)skb_frag_address(frag) + skb_frag_size(frag));
	}
	return elements;
}

/**
 * qeth_count_elements() - Counts the number of QDIO buffer elements needed
 *			   to transmit an skb.
 * @skb: the skb to operate on.
 * @data_offset: skip this part of the skb's linear data
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to map
 * the skb's data (both its linear part and paged fragments).
 */
unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
{
	unsigned int elements = qeth_get_elements_for_frags(skb);
	addr_t end = (addr_t)skb->data + skb_headlen(skb);
	addr_t start = (addr_t)skb->data + data_offset;

	if (start != end)
		elements += qeth_get_elements_for_range(start, end);
	return elements;
}
EXPORT_SYMBOL_GPL(qeth_count_elements);

#define QETH_HDR_CACHE_OBJ_SIZE	(sizeof(struct qeth_hdr_tso) + \
					 MAX_TCP_HEADER)

/**
 * qeth_add_hw_header() - add a HW header to an skb.
 * @queue: TX queue that the skb will be placed on.
 * @skb: skb that the HW header should be added to.
 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
 *	 it contains a valid pointer to a qeth_hdr.
 * @hdr_len: length of the HW header.
 * @proto_len: length of protocol headers that need to be in same page as the
 *	       HW header.
 * @elements: returns the required number of buffer elements for this skb.
 *
 * Returns the pushed length. If the header can't be pushed on
 * (e.g. because it would cross a page boundary), it is allocated from
 * the cache instead and 0 is returned.
 * The number of needed buffer elements is returned in @elements.
 * A failure to create the header is indicated by a return value < 0.
 */
static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
			      struct sk_buff *skb, struct qeth_hdr **hdr,
			      unsigned int hdr_len, unsigned int proto_len,
			      unsigned int *elements)
{
	gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
	const unsigned int contiguous = proto_len ? proto_len : 1;
	const unsigned int max_elements = queue->max_elements;
	unsigned int __elements;
	addr_t start, end;
	bool push_ok;
	int rc;

check_layout:
	start = (addr_t)skb->data - hdr_len;
	end = (addr_t)skb->data;

	if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
		/* Push HW header into same page as first protocol header. */
		push_ok = true;
		/* ... but TSO always needs a separate element for headers: */
		if (skb_is_gso(skb))
			__elements = 1 + qeth_count_elements(skb, proto_len);
		else
			__elements = qeth_count_elements(skb, 0);
	} else if (!proto_len && PAGE_ALIGNED(skb->data)) {
		/* Push HW header into preceding page, flush with skb->data. */
		push_ok = true;
		__elements = 1 + qeth_count_elements(skb, 0);
	} else {
		/* Use header cache, copy protocol headers up. */
		push_ok = false;
		__elements = 1 + qeth_count_elements(skb, proto_len);
	}

	/* Compress skb to fit into one IO buffer: */
	if (__elements > max_elements) {
		if (!skb_is_nonlinear(skb)) {
			/* Drop it, no easy way of shrinking it further.
*/ 3966 QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n", 3967 max_elements, __elements, skb->len); 3968 return -E2BIG; 3969 } 3970 3971 rc = skb_linearize(skb); 3972 if (rc) { 3973 QETH_TXQ_STAT_INC(queue, skbs_linearized_fail); 3974 return rc; 3975 } 3976 3977 QETH_TXQ_STAT_INC(queue, skbs_linearized); 3978 /* Linearization changed the layout, re-evaluate: */ 3979 goto check_layout; 3980 } 3981 3982 *elements = __elements; 3983 /* Add the header: */ 3984 if (push_ok) { 3985 *hdr = skb_push(skb, hdr_len); 3986 return hdr_len; 3987 } 3988 3989 /* Fall back to cache element with known-good alignment: */ 3990 if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE) 3991 return -E2BIG; 3992 *hdr = kmem_cache_alloc(qeth_core_header_cache, gfp); 3993 if (!*hdr) 3994 return -ENOMEM; 3995 /* Copy protocol headers behind HW header: */ 3996 skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len); 3997 return 0; 3998 } 3999 4000 static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue, 4001 struct sk_buff *curr_skb, 4002 struct qeth_hdr *curr_hdr) 4003 { 4004 struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start]; 4005 struct qeth_hdr *prev_hdr = queue->prev_hdr; 4006 4007 if (!prev_hdr) 4008 return true; 4009 4010 /* All packets must have the same target: */ 4011 if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { 4012 struct sk_buff *prev_skb = skb_peek(&buffer->skb_list); 4013 4014 return ether_addr_equal(eth_hdr(prev_skb)->h_dest, 4015 eth_hdr(curr_skb)->h_dest) && 4016 qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2); 4017 } 4018 4019 return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) && 4020 qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3); 4021 } 4022 4023 /** 4024 * qeth_fill_buffer() - map skb into an output buffer 4025 * @buf: buffer to transport the skb 4026 * @skb: skb to map into the buffer 4027 * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated 4028 * from qeth_core_header_cache. 
4029 * @offset: when mapping the skb, start at skb->data + offset 4030 * @hd_len: if > 0, build a dedicated header element of this size 4031 */ 4032 static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf, 4033 struct sk_buff *skb, struct qeth_hdr *hdr, 4034 unsigned int offset, unsigned int hd_len) 4035 { 4036 struct qdio_buffer *buffer = buf->buffer; 4037 int element = buf->next_element_to_fill; 4038 int length = skb_headlen(skb) - offset; 4039 char *data = skb->data + offset; 4040 unsigned int elem_length, cnt; 4041 bool is_first_elem = true; 4042 4043 __skb_queue_tail(&buf->skb_list, skb); 4044 4045 /* build dedicated element for HW Header */ 4046 if (hd_len) { 4047 is_first_elem = false; 4048 4049 buffer->element[element].addr = virt_to_phys(hdr); 4050 buffer->element[element].length = hd_len; 4051 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; 4052 4053 /* HW header is allocated from cache: */ 4054 if ((void *)hdr != skb->data) 4055 __set_bit(element, buf->from_kmem_cache); 4056 /* HW header was pushed and is contiguous with linear part: */ 4057 else if (length > 0 && !PAGE_ALIGNED(data) && 4058 (data == (char *)hdr + hd_len)) 4059 buffer->element[element].eflags |= 4060 SBAL_EFLAGS_CONTIGUOUS; 4061 4062 element++; 4063 } 4064 4065 /* map linear part into buffer element(s) */ 4066 while (length > 0) { 4067 elem_length = min_t(unsigned int, length, 4068 PAGE_SIZE - offset_in_page(data)); 4069 4070 buffer->element[element].addr = virt_to_phys(data); 4071 buffer->element[element].length = elem_length; 4072 length -= elem_length; 4073 if (is_first_elem) { 4074 is_first_elem = false; 4075 if (length || skb_is_nonlinear(skb)) 4076 /* skb needs additional elements */ 4077 buffer->element[element].eflags = 4078 SBAL_EFLAGS_FIRST_FRAG; 4079 else 4080 buffer->element[element].eflags = 0; 4081 } else { 4082 buffer->element[element].eflags = 4083 SBAL_EFLAGS_MIDDLE_FRAG; 4084 } 4085 4086 data += elem_length; 4087 element++; 4088 } 4089 4090 /* map page frags into buffer element(s) */ 4091 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { 4092 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; 4093 4094 data = skb_frag_address(frag); 4095 length = skb_frag_size(frag); 4096 while (length > 0) { 4097 elem_length = min_t(unsigned int, length, 4098 PAGE_SIZE - offset_in_page(data)); 4099 4100 buffer->element[element].addr = virt_to_phys(data); 4101 buffer->element[element].length = elem_length; 4102 buffer->element[element].eflags = 4103 SBAL_EFLAGS_MIDDLE_FRAG; 4104 4105 length -= elem_length; 4106 data += elem_length; 4107 element++; 4108 } 4109 } 4110 4111 if (buffer->element[element - 1].eflags) 4112 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG; 4113 buf->next_element_to_fill = element; 4114 return element; 4115 } 4116 4117 static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue, 4118 struct sk_buff *skb, unsigned int elements, 4119 struct qeth_hdr *hdr, unsigned int offset, 4120 unsigned int hd_len) 4121 { 4122 unsigned int bytes = qdisc_pkt_len(skb); 4123 struct qeth_qdio_out_buffer *buffer; 4124 unsigned int next_element; 4125 struct netdev_queue *txq; 4126 bool stopped = false; 4127 bool flush; 4128 4129 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)]; 4130 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb)); 4131 4132 /* Just a sanity check, the wake/stop logic should ensure that we always 4133 * get a free buffer. 
4134 */ 4135 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) 4136 return -EBUSY; 4137 4138 flush = !qeth_iqd_may_bulk(queue, skb, hdr); 4139 4140 if (flush || 4141 (buffer->next_element_to_fill + elements > queue->max_elements)) { 4142 if (buffer->next_element_to_fill > 0) { 4143 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4144 queue->bulk_count++; 4145 } 4146 4147 if (queue->bulk_count >= queue->bulk_max) 4148 flush = true; 4149 4150 if (flush) 4151 qeth_flush_queue(queue); 4152 4153 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + 4154 queue->bulk_count)]; 4155 4156 /* Sanity-check again: */ 4157 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) 4158 return -EBUSY; 4159 } 4160 4161 if (buffer->next_element_to_fill == 0 && 4162 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { 4163 /* If a TX completion happens right _here_ and misses to wake 4164 * the txq, then our re-check below will catch the race. 4165 */ 4166 QETH_TXQ_STAT_INC(queue, stopped); 4167 netif_tx_stop_queue(txq); 4168 stopped = true; 4169 } 4170 4171 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len); 4172 buffer->bytes += bytes; 4173 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; 4174 queue->prev_hdr = hdr; 4175 4176 flush = __netdev_tx_sent_queue(txq, bytes, 4177 !stopped && netdev_xmit_more()); 4178 4179 if (flush || next_element >= queue->max_elements) { 4180 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4181 queue->bulk_count++; 4182 4183 if (queue->bulk_count >= queue->bulk_max) 4184 flush = true; 4185 4186 if (flush) 4187 qeth_flush_queue(queue); 4188 } 4189 4190 if (stopped && !qeth_out_queue_is_full(queue)) 4191 netif_tx_start_queue(txq); 4192 return 0; 4193 } 4194 4195 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, 4196 struct sk_buff *skb, struct qeth_hdr *hdr, 4197 unsigned int offset, unsigned int hd_len, 4198 int elements_needed) 4199 { 4200 unsigned int start_index = queue->next_buf_to_fill; 4201 struct qeth_qdio_out_buffer *buffer; 4202 unsigned int next_element; 4203 struct netdev_queue *txq; 4204 bool stopped = false; 4205 int flush_count = 0; 4206 int do_pack = 0; 4207 int rc = 0; 4208 4209 buffer = queue->bufs[queue->next_buf_to_fill]; 4210 4211 /* Just a sanity check, the wake/stop logic should ensure that we always 4212 * get a free buffer. 4213 */ 4214 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) 4215 return -EBUSY; 4216 4217 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb)); 4218 4219 /* check if we need to switch packing state of this queue */ 4220 qeth_switch_to_packing_if_needed(queue); 4221 if (queue->do_pack) { 4222 do_pack = 1; 4223 /* does packet fit in current buffer? */ 4224 if (buffer->next_element_to_fill + elements_needed > 4225 queue->max_elements) { 4226 /* ... 
no -> set state PRIMED */ 4227 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4228 flush_count++; 4229 queue->next_buf_to_fill = 4230 QDIO_BUFNR(queue->next_buf_to_fill + 1); 4231 buffer = queue->bufs[queue->next_buf_to_fill]; 4232 4233 /* We stepped forward, so sanity-check again: */ 4234 if (atomic_read(&buffer->state) != 4235 QETH_QDIO_BUF_EMPTY) { 4236 qeth_flush_buffers(queue, start_index, 4237 flush_count); 4238 rc = -EBUSY; 4239 goto out; 4240 } 4241 } 4242 } 4243 4244 if (buffer->next_element_to_fill == 0 && 4245 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { 4246 /* If a TX completion happens right _here_ and misses to wake 4247 * the txq, then our re-check below will catch the race. 4248 */ 4249 QETH_TXQ_STAT_INC(queue, stopped); 4250 netif_tx_stop_queue(txq); 4251 stopped = true; 4252 } 4253 4254 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len); 4255 buffer->bytes += qdisc_pkt_len(skb); 4256 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; 4257 4258 if (queue->do_pack) 4259 QETH_TXQ_STAT_INC(queue, skbs_pack); 4260 if (!queue->do_pack || stopped || next_element >= queue->max_elements) { 4261 flush_count++; 4262 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4263 queue->next_buf_to_fill = 4264 QDIO_BUFNR(queue->next_buf_to_fill + 1); 4265 } 4266 4267 if (flush_count) 4268 qeth_flush_buffers(queue, start_index, flush_count); 4269 4270 out: 4271 if (do_pack) 4272 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count); 4273 4274 if (stopped && !qeth_out_queue_is_full(queue)) 4275 netif_tx_start_queue(txq); 4276 return rc; 4277 } 4278 EXPORT_SYMBOL_GPL(qeth_do_send_packet); 4279 4280 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, 4281 unsigned int payload_len, struct sk_buff *skb, 4282 unsigned int proto_len) 4283 { 4284 struct qeth_hdr_ext_tso *ext = &hdr->ext; 4285 4286 ext->hdr_tot_len = sizeof(*ext); 4287 ext->imb_hdr_no = 1; 4288 ext->hdr_type = 1; 4289 ext->hdr_version = 1; 4290 ext->hdr_len = 28; 4291 ext->payload_len = payload_len; 4292 ext->mss = skb_shinfo(skb)->gso_size; 4293 ext->dg_hdr_len = proto_len; 4294 } 4295 4296 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, 4297 struct qeth_qdio_out_q *queue, __be16 proto, 4298 void (*fill_header)(struct qeth_qdio_out_q *queue, 4299 struct qeth_hdr *hdr, struct sk_buff *skb, 4300 __be16 proto, unsigned int data_len)) 4301 { 4302 unsigned int proto_len, hw_hdr_len; 4303 unsigned int frame_len = skb->len; 4304 bool is_tso = skb_is_gso(skb); 4305 unsigned int data_offset = 0; 4306 struct qeth_hdr *hdr = NULL; 4307 unsigned int hd_len = 0; 4308 unsigned int elements; 4309 int push_len, rc; 4310 4311 if (is_tso) { 4312 hw_hdr_len = sizeof(struct qeth_hdr_tso); 4313 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 4314 } else { 4315 hw_hdr_len = sizeof(struct qeth_hdr); 4316 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0; 4317 } 4318 4319 rc = skb_cow_head(skb, hw_hdr_len); 4320 if (rc) 4321 return rc; 4322 4323 push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len, 4324 &elements); 4325 if (push_len < 0) 4326 return push_len; 4327 if (is_tso || !push_len) { 4328 /* HW header needs its own buffer element. 
*/ 4329 hd_len = hw_hdr_len + proto_len; 4330 data_offset = push_len + proto_len; 4331 } 4332 memset(hdr, 0, hw_hdr_len); 4333 fill_header(queue, hdr, skb, proto, frame_len); 4334 if (is_tso) 4335 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr, 4336 frame_len - proto_len, skb, proto_len); 4337 4338 if (IS_IQD(card)) { 4339 rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset, 4340 hd_len); 4341 } else { 4342 /* TODO: drop skb_orphan() once TX completion is fast enough */ 4343 skb_orphan(skb); 4344 spin_lock(&queue->lock); 4345 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset, 4346 hd_len, elements); 4347 spin_unlock(&queue->lock); 4348 } 4349 4350 if (rc && !push_len) 4351 kmem_cache_free(qeth_core_header_cache, hdr); 4352 4353 return rc; 4354 } 4355 EXPORT_SYMBOL_GPL(qeth_xmit); 4356 4357 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, 4358 struct qeth_reply *reply, unsigned long data) 4359 { 4360 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4361 struct qeth_ipacmd_setadpparms *setparms; 4362 4363 QETH_CARD_TEXT(card, 4, "prmadpcb"); 4364 4365 setparms = &(cmd->data.setadapterparms); 4366 if (qeth_setadpparms_inspect_rc(cmd)) { 4367 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code); 4368 setparms->data.mode = SET_PROMISC_MODE_OFF; 4369 } 4370 card->info.promisc_mode = setparms->data.mode; 4371 return (cmd->hdr.return_code) ? -EIO : 0; 4372 } 4373 4374 void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable) 4375 { 4376 enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON : 4377 SET_PROMISC_MODE_OFF; 4378 struct qeth_cmd_buffer *iob; 4379 struct qeth_ipa_cmd *cmd; 4380 4381 QETH_CARD_TEXT(card, 4, "setprom"); 4382 QETH_CARD_TEXT_(card, 4, "mode:%x", mode); 4383 4384 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, 4385 SETADP_DATA_SIZEOF(mode)); 4386 if (!iob) 4387 return; 4388 cmd = __ipa_cmd(iob); 4389 cmd->data.setadapterparms.data.mode = mode; 4390 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); 4391 } 4392 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode); 4393 4394 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, 4395 struct qeth_reply *reply, unsigned long data) 4396 { 4397 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4398 struct qeth_ipacmd_setadpparms *adp_cmd; 4399 4400 QETH_CARD_TEXT(card, 4, "chgmaccb"); 4401 if (qeth_setadpparms_inspect_rc(cmd)) 4402 return -EIO; 4403 4404 adp_cmd = &cmd->data.setadapterparms; 4405 if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr)) 4406 return -EADDRNOTAVAIL; 4407 4408 if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) && 4409 !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC)) 4410 return -EADDRNOTAVAIL; 4411 4412 ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr); 4413 return 0; 4414 } 4415 4416 int qeth_setadpparms_change_macaddr(struct qeth_card *card) 4417 { 4418 int rc; 4419 struct qeth_cmd_buffer *iob; 4420 struct qeth_ipa_cmd *cmd; 4421 4422 QETH_CARD_TEXT(card, 4, "chgmac"); 4423 4424 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, 4425 SETADP_DATA_SIZEOF(change_addr)); 4426 if (!iob) 4427 return -ENOMEM; 4428 cmd = __ipa_cmd(iob); 4429 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; 4430 cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN; 4431 ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr, 4432 card->dev->dev_addr); 4433 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb, 4434 NULL); 
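/* On success, qeth_setadpparms_change_macaddr_cb() has copied the adapter-reported MAC address into card->dev->dev_addr. */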
4435 return rc; 4436 } 4437 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); 4438 4439 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, 4440 struct qeth_reply *reply, unsigned long data) 4441 { 4442 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4443 struct qeth_set_access_ctrl *access_ctrl_req; 4444 4445 QETH_CARD_TEXT(card, 4, "setaccb"); 4446 4447 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; 4448 QETH_CARD_TEXT_(card, 2, "rc=%d", 4449 cmd->data.setadapterparms.hdr.return_code); 4450 if (cmd->data.setadapterparms.hdr.return_code != 4451 SET_ACCESS_CTRL_RC_SUCCESS) 4452 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n", 4453 access_ctrl_req->subcmd_code, CARD_DEVID(card), 4454 cmd->data.setadapterparms.hdr.return_code); 4455 switch (qeth_setadpparms_inspect_rc(cmd)) { 4456 case SET_ACCESS_CTRL_RC_SUCCESS: 4457 if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE) 4458 dev_info(&card->gdev->dev, 4459 "QDIO data connection isolation is deactivated\n"); 4460 else 4461 dev_info(&card->gdev->dev, 4462 "QDIO data connection isolation is activated\n"); 4463 return 0; 4464 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED: 4465 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n", 4466 CARD_DEVID(card)); 4467 return 0; 4468 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED: 4469 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n", 4470 CARD_DEVID(card)); 4471 return 0; 4472 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED: 4473 dev_err(&card->gdev->dev, "Adapter does not " 4474 "support QDIO data connection isolation\n"); 4475 return -EOPNOTSUPP; 4476 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER: 4477 dev_err(&card->gdev->dev, 4478 "Adapter is dedicated. 
" 4479 "QDIO data connection isolation not supported\n"); 4480 return -EOPNOTSUPP; 4481 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF: 4482 dev_err(&card->gdev->dev, 4483 "TSO does not permit QDIO data connection isolation\n"); 4484 return -EPERM; 4485 case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED: 4486 dev_err(&card->gdev->dev, "The adjacent switch port does not " 4487 "support reflective relay mode\n"); 4488 return -EOPNOTSUPP; 4489 case SET_ACCESS_CTRL_RC_REFLREL_FAILED: 4490 dev_err(&card->gdev->dev, "The reflective relay mode cannot be " 4491 "enabled at the adjacent switch port"); 4492 return -EREMOTEIO; 4493 case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED: 4494 dev_warn(&card->gdev->dev, "Turning off reflective relay mode " 4495 "at the adjacent switch failed\n"); 4496 /* benign error while disabling ISOLATION_MODE_FWD */ 4497 return 0; 4498 default: 4499 return -EIO; 4500 } 4501 } 4502 4503 int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, 4504 enum qeth_ipa_isolation_modes mode) 4505 { 4506 int rc; 4507 struct qeth_cmd_buffer *iob; 4508 struct qeth_ipa_cmd *cmd; 4509 struct qeth_set_access_ctrl *access_ctrl_req; 4510 4511 QETH_CARD_TEXT(card, 4, "setacctl"); 4512 4513 if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { 4514 dev_err(&card->gdev->dev, 4515 "Adapter does not support QDIO data connection isolation\n"); 4516 return -EOPNOTSUPP; 4517 } 4518 4519 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, 4520 SETADP_DATA_SIZEOF(set_access_ctrl)); 4521 if (!iob) 4522 return -ENOMEM; 4523 cmd = __ipa_cmd(iob); 4524 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; 4525 access_ctrl_req->subcmd_code = mode; 4526 4527 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb, 4528 NULL); 4529 if (rc) { 4530 QETH_CARD_TEXT_(card, 2, "rc=%d", rc); 4531 QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n", 4532 rc, CARD_DEVID(card)); 4533 } 4534 4535 return rc; 4536 } 4537 4538 void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue) 4539 { 4540 struct qeth_card *card; 4541 4542 card = dev->ml_priv; 4543 QETH_CARD_TEXT(card, 4, "txtimeo"); 4544 qeth_schedule_recovery(card); 4545 } 4546 EXPORT_SYMBOL_GPL(qeth_tx_timeout); 4547 4548 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) 4549 { 4550 struct qeth_card *card = dev->ml_priv; 4551 int rc = 0; 4552 4553 switch (regnum) { 4554 case MII_BMCR: /* Basic mode control register */ 4555 rc = BMCR_FULLDPLX; 4556 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) && 4557 (card->info.link_type != QETH_LINK_TYPE_OSN) && 4558 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) && 4559 (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH)) 4560 rc |= BMCR_SPEED100; 4561 break; 4562 case MII_BMSR: /* Basic mode status register */ 4563 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS | 4564 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL | 4565 BMSR_100BASE4; 4566 break; 4567 case MII_PHYSID1: /* PHYS ID 1 */ 4568 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) | 4569 dev->dev_addr[2]; 4570 rc = (rc >> 5) & 0xFFFF; 4571 break; 4572 case MII_PHYSID2: /* PHYS ID 2 */ 4573 rc = (dev->dev_addr[2] << 10) & 0xFFFF; 4574 break; 4575 case MII_ADVERTISE: /* Advertisement control reg */ 4576 rc = ADVERTISE_ALL; 4577 break; 4578 case MII_LPA: /* Link partner ability reg */ 4579 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL | 4580 LPA_100BASE4 | LPA_LPACK; 4581 break; 4582 case MII_EXPANSION: /* Expansion register */ 
4583 break; 4584 case MII_DCOUNTER: /* disconnect counter */ 4585 break; 4586 case MII_FCSCOUNTER: /* false carrier counter */ 4587 break; 4588 case MII_NWAYTEST: /* N-way auto-neg test register */ 4589 break; 4590 case MII_RERRCOUNTER: /* rx error counter */ 4591 rc = card->stats.rx_length_errors + 4592 card->stats.rx_frame_errors + 4593 card->stats.rx_fifo_errors; 4594 break; 4595 case MII_SREVISION: /* silicon revision */ 4596 break; 4597 case MII_RESV1: /* reserved 1 */ 4598 break; 4599 case MII_LBRERROR: /* loopback, rx, bypass error */ 4600 break; 4601 case MII_PHYADDR: /* physical address */ 4602 break; 4603 case MII_RESV2: /* reserved 2 */ 4604 break; 4605 case MII_TPISTATUS: /* TPI status for 10mbps */ 4606 break; 4607 case MII_NCONFIG: /* network interface config */ 4608 break; 4609 default: 4610 break; 4611 } 4612 return rc; 4613 } 4614 4615 static int qeth_snmp_command_cb(struct qeth_card *card, 4616 struct qeth_reply *reply, unsigned long data) 4617 { 4618 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4619 struct qeth_arp_query_info *qinfo = reply->param; 4620 struct qeth_ipacmd_setadpparms *adp_cmd; 4621 unsigned int data_len; 4622 void *snmp_data; 4623 4624 QETH_CARD_TEXT(card, 3, "snpcmdcb"); 4625 4626 if (cmd->hdr.return_code) { 4627 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code); 4628 return -EIO; 4629 } 4630 if (cmd->data.setadapterparms.hdr.return_code) { 4631 cmd->hdr.return_code = 4632 cmd->data.setadapterparms.hdr.return_code; 4633 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code); 4634 return -EIO; 4635 } 4636 4637 adp_cmd = &cmd->data.setadapterparms; 4638 data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr); 4639 if (adp_cmd->hdr.seq_no == 1) { 4640 snmp_data = &adp_cmd->data.snmp; 4641 } else { 4642 snmp_data = &adp_cmd->data.snmp.request; 4643 data_len -= offsetof(struct qeth_snmp_cmd, request); 4644 } 4645 4646 /* check if there is enough room in userspace */ 4647 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { 4648 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC); 4649 return -ENOSPC; 4650 } 4651 QETH_CARD_TEXT_(card, 4, "snore%i", 4652 cmd->data.setadapterparms.hdr.used_total); 4653 QETH_CARD_TEXT_(card, 4, "sseqn%i", 4654 cmd->data.setadapterparms.hdr.seq_no); 4655 /*copy entries to user buffer*/ 4656 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len); 4657 qinfo->udata_offset += data_len; 4658 4659 if (cmd->data.setadapterparms.hdr.seq_no < 4660 cmd->data.setadapterparms.hdr.used_total) 4661 return 1; 4662 return 0; 4663 } 4664 4665 static int qeth_snmp_command(struct qeth_card *card, char __user *udata) 4666 { 4667 struct qeth_snmp_ureq __user *ureq; 4668 struct qeth_cmd_buffer *iob; 4669 unsigned int req_len; 4670 struct qeth_arp_query_info qinfo = {0, }; 4671 int rc = 0; 4672 4673 QETH_CARD_TEXT(card, 3, "snmpcmd"); 4674 4675 if (IS_VM_NIC(card)) 4676 return -EOPNOTSUPP; 4677 4678 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && 4679 IS_LAYER3(card)) 4680 return -EOPNOTSUPP; 4681 4682 ureq = (struct qeth_snmp_ureq __user *) udata; 4683 if (get_user(qinfo.udata_len, &ureq->hdr.data_len) || 4684 get_user(req_len, &ureq->hdr.req_len)) 4685 return -EFAULT; 4686 4687 /* Sanitize user input, to avoid overflows in iob size calculation: */ 4688 if (req_len > QETH_BUFSIZE) 4689 return -EINVAL; 4690 4691 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len); 4692 if (!iob) 4693 return -ENOMEM; 4694 4695 if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp, 4696 
&ureq->cmd, req_len)) { 4697 qeth_put_cmd(iob); 4698 return -EFAULT; 4699 } 4700 4701 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); 4702 if (!qinfo.udata) { 4703 qeth_put_cmd(iob); 4704 return -ENOMEM; 4705 } 4706 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr); 4707 4708 rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo); 4709 if (rc) 4710 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n", 4711 CARD_DEVID(card), rc); 4712 else { 4713 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) 4714 rc = -EFAULT; 4715 } 4716 4717 kfree(qinfo.udata); 4718 return rc; 4719 } 4720 4721 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, 4722 struct qeth_reply *reply, 4723 unsigned long data) 4724 { 4725 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4726 struct qeth_qoat_priv *priv = reply->param; 4727 int resdatalen; 4728 4729 QETH_CARD_TEXT(card, 3, "qoatcb"); 4730 if (qeth_setadpparms_inspect_rc(cmd)) 4731 return -EIO; 4732 4733 resdatalen = cmd->data.setadapterparms.hdr.cmdlength; 4734 4735 if (resdatalen > (priv->buffer_len - priv->response_len)) 4736 return -ENOSPC; 4737 4738 memcpy(priv->buffer + priv->response_len, 4739 &cmd->data.setadapterparms.hdr, resdatalen); 4740 priv->response_len += resdatalen; 4741 4742 if (cmd->data.setadapterparms.hdr.seq_no < 4743 cmd->data.setadapterparms.hdr.used_total) 4744 return 1; 4745 return 0; 4746 } 4747 4748 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) 4749 { 4750 int rc = 0; 4751 struct qeth_cmd_buffer *iob; 4752 struct qeth_ipa_cmd *cmd; 4753 struct qeth_query_oat *oat_req; 4754 struct qeth_query_oat_data oat_data; 4755 struct qeth_qoat_priv priv; 4756 void __user *tmp; 4757 4758 QETH_CARD_TEXT(card, 3, "qoatcmd"); 4759 4760 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) 4761 return -EOPNOTSUPP; 4762 4763 if (copy_from_user(&oat_data, udata, sizeof(oat_data))) 4764 return -EFAULT; 4765 4766 priv.buffer_len = oat_data.buffer_len; 4767 priv.response_len = 0; 4768 priv.buffer = vzalloc(oat_data.buffer_len); 4769 if (!priv.buffer) 4770 return -ENOMEM; 4771 4772 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, 4773 SETADP_DATA_SIZEOF(query_oat)); 4774 if (!iob) { 4775 rc = -ENOMEM; 4776 goto out_free; 4777 } 4778 cmd = __ipa_cmd(iob); 4779 oat_req = &cmd->data.setadapterparms.data.query_oat; 4780 oat_req->subcmd_code = oat_data.command; 4781 4782 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv); 4783 if (!rc) { 4784 tmp = is_compat_task() ? 
compat_ptr(oat_data.ptr) : 4785 u64_to_user_ptr(oat_data.ptr); 4786 oat_data.response_len = priv.response_len; 4787 4788 if (copy_to_user(tmp, priv.buffer, priv.response_len) || 4789 copy_to_user(udata, &oat_data, sizeof(oat_data))) 4790 rc = -EFAULT; 4791 } 4792 4793 out_free: 4794 vfree(priv.buffer); 4795 return rc; 4796 } 4797 4798 static int qeth_query_card_info_cb(struct qeth_card *card, 4799 struct qeth_reply *reply, unsigned long data) 4800 { 4801 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4802 struct qeth_link_info *link_info = reply->param; 4803 struct qeth_query_card_info *card_info; 4804 4805 QETH_CARD_TEXT(card, 2, "qcrdincb"); 4806 if (qeth_setadpparms_inspect_rc(cmd)) 4807 return -EIO; 4808 4809 card_info = &cmd->data.setadapterparms.data.card_info; 4810 netdev_dbg(card->dev, 4811 "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n", 4812 card_info->card_type, card_info->port_mode, 4813 card_info->port_speed); 4814 4815 switch (card_info->port_mode) { 4816 case CARD_INFO_PORTM_FULLDUPLEX: 4817 link_info->duplex = DUPLEX_FULL; 4818 break; 4819 case CARD_INFO_PORTM_HALFDUPLEX: 4820 link_info->duplex = DUPLEX_HALF; 4821 break; 4822 default: 4823 link_info->duplex = DUPLEX_UNKNOWN; 4824 } 4825 4826 switch (card_info->card_type) { 4827 case CARD_INFO_TYPE_1G_COPPER_A: 4828 case CARD_INFO_TYPE_1G_COPPER_B: 4829 link_info->speed = SPEED_1000; 4830 link_info->port = PORT_TP; 4831 break; 4832 case CARD_INFO_TYPE_1G_FIBRE_A: 4833 case CARD_INFO_TYPE_1G_FIBRE_B: 4834 link_info->speed = SPEED_1000; 4835 link_info->port = PORT_FIBRE; 4836 break; 4837 case CARD_INFO_TYPE_10G_FIBRE_A: 4838 case CARD_INFO_TYPE_10G_FIBRE_B: 4839 link_info->speed = SPEED_10000; 4840 link_info->port = PORT_FIBRE; 4841 break; 4842 default: 4843 switch (card_info->port_speed) { 4844 case CARD_INFO_PORTS_10M: 4845 link_info->speed = SPEED_10; 4846 break; 4847 case CARD_INFO_PORTS_100M: 4848 link_info->speed = SPEED_100; 4849 break; 4850 case CARD_INFO_PORTS_1G: 4851 link_info->speed = SPEED_1000; 4852 break; 4853 case CARD_INFO_PORTS_10G: 4854 link_info->speed = SPEED_10000; 4855 break; 4856 case CARD_INFO_PORTS_25G: 4857 link_info->speed = SPEED_25000; 4858 break; 4859 default: 4860 link_info->speed = SPEED_UNKNOWN; 4861 } 4862 4863 link_info->port = PORT_OTHER; 4864 } 4865 4866 return 0; 4867 } 4868 4869 int qeth_query_card_info(struct qeth_card *card, 4870 struct qeth_link_info *link_info) 4871 { 4872 struct qeth_cmd_buffer *iob; 4873 4874 QETH_CARD_TEXT(card, 2, "qcrdinfo"); 4875 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO)) 4876 return -EOPNOTSUPP; 4877 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0); 4878 if (!iob) 4879 return -ENOMEM; 4880 4881 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info); 4882 } 4883 4884 static int qeth_init_link_info_oat_cb(struct qeth_card *card, 4885 struct qeth_reply *reply_priv, 4886 unsigned long data) 4887 { 4888 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4889 struct qeth_link_info *link_info = reply_priv->param; 4890 struct qeth_query_oat_physical_if *phys_if; 4891 struct qeth_query_oat_reply *reply; 4892 4893 if (qeth_setadpparms_inspect_rc(cmd)) 4894 return -EIO; 4895 4896 /* Multi-part reply is unexpected, don't bother: */ 4897 if (cmd->data.setadapterparms.hdr.used_total > 1) 4898 return -EINVAL; 4899 4900 /* Expect the reply to start with phys_if data: */ 4901 reply = &cmd->data.setadapterparms.data.query_oat.reply[0]; 4902 if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF || 4903 
reply->length < sizeof(*reply)) 4904 return -EINVAL; 4905 4906 phys_if = &reply->phys_if; 4907 4908 switch (phys_if->speed_duplex) { 4909 case QETH_QOAT_PHYS_SPEED_10M_HALF: 4910 link_info->speed = SPEED_10; 4911 link_info->duplex = DUPLEX_HALF; 4912 break; 4913 case QETH_QOAT_PHYS_SPEED_10M_FULL: 4914 link_info->speed = SPEED_10; 4915 link_info->duplex = DUPLEX_FULL; 4916 break; 4917 case QETH_QOAT_PHYS_SPEED_100M_HALF: 4918 link_info->speed = SPEED_100; 4919 link_info->duplex = DUPLEX_HALF; 4920 break; 4921 case QETH_QOAT_PHYS_SPEED_100M_FULL: 4922 link_info->speed = SPEED_100; 4923 link_info->duplex = DUPLEX_FULL; 4924 break; 4925 case QETH_QOAT_PHYS_SPEED_1000M_HALF: 4926 link_info->speed = SPEED_1000; 4927 link_info->duplex = DUPLEX_HALF; 4928 break; 4929 case QETH_QOAT_PHYS_SPEED_1000M_FULL: 4930 link_info->speed = SPEED_1000; 4931 link_info->duplex = DUPLEX_FULL; 4932 break; 4933 case QETH_QOAT_PHYS_SPEED_10G_FULL: 4934 link_info->speed = SPEED_10000; 4935 link_info->duplex = DUPLEX_FULL; 4936 break; 4937 case QETH_QOAT_PHYS_SPEED_25G_FULL: 4938 link_info->speed = SPEED_25000; 4939 link_info->duplex = DUPLEX_FULL; 4940 break; 4941 case QETH_QOAT_PHYS_SPEED_UNKNOWN: 4942 default: 4943 link_info->speed = SPEED_UNKNOWN; 4944 link_info->duplex = DUPLEX_UNKNOWN; 4945 break; 4946 } 4947 4948 switch (phys_if->media_type) { 4949 case QETH_QOAT_PHYS_MEDIA_COPPER: 4950 link_info->port = PORT_TP; 4951 link_info->link_mode = QETH_LINK_MODE_UNKNOWN; 4952 break; 4953 case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT: 4954 link_info->port = PORT_FIBRE; 4955 link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT; 4956 break; 4957 case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG: 4958 link_info->port = PORT_FIBRE; 4959 link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG; 4960 break; 4961 default: 4962 link_info->port = PORT_OTHER; 4963 link_info->link_mode = QETH_LINK_MODE_UNKNOWN; 4964 break; 4965 } 4966 4967 return 0; 4968 } 4969 4970 static void qeth_init_link_info(struct qeth_card *card) 4971 { 4972 card->info.link_info.duplex = DUPLEX_FULL; 4973 4974 if (IS_IQD(card) || IS_VM_NIC(card)) { 4975 card->info.link_info.speed = SPEED_10000; 4976 card->info.link_info.port = PORT_FIBRE; 4977 card->info.link_info.link_mode = QETH_LINK_MODE_FIBRE_SHORT; 4978 } else { 4979 switch (card->info.link_type) { 4980 case QETH_LINK_TYPE_FAST_ETH: 4981 case QETH_LINK_TYPE_LANE_ETH100: 4982 card->info.link_info.speed = SPEED_100; 4983 card->info.link_info.port = PORT_TP; 4984 break; 4985 case QETH_LINK_TYPE_GBIT_ETH: 4986 case QETH_LINK_TYPE_LANE_ETH1000: 4987 card->info.link_info.speed = SPEED_1000; 4988 card->info.link_info.port = PORT_FIBRE; 4989 break; 4990 case QETH_LINK_TYPE_10GBIT_ETH: 4991 card->info.link_info.speed = SPEED_10000; 4992 card->info.link_info.port = PORT_FIBRE; 4993 break; 4994 case QETH_LINK_TYPE_25GBIT_ETH: 4995 card->info.link_info.speed = SPEED_25000; 4996 card->info.link_info.port = PORT_FIBRE; 4997 break; 4998 default: 4999 dev_info(&card->gdev->dev, "Unknown link type %x\n", 5000 card->info.link_type); 5001 card->info.link_info.speed = SPEED_UNKNOWN; 5002 card->info.link_info.port = PORT_OTHER; 5003 } 5004 5005 card->info.link_info.link_mode = QETH_LINK_MODE_UNKNOWN; 5006 } 5007 5008 /* Get more accurate data via QUERY OAT: */ 5009 if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) { 5010 struct qeth_link_info link_info; 5011 struct qeth_cmd_buffer *iob; 5012 5013 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, 5014 SETADP_DATA_SIZEOF(query_oat)); 5015 if (iob) { 5016 struct qeth_ipa_cmd *cmd = __ipa_cmd(iob); 
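/* Request OAT data with interface scope; on success, qeth_init_link_info_oat_cb() distills speed, duplex and media type into link_info. */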
5017 struct qeth_query_oat *oat_req; 5018 5019 oat_req = &cmd->data.setadapterparms.data.query_oat; 5020 oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE; 5021 5022 if (!qeth_send_ipa_cmd(card, iob, 5023 qeth_init_link_info_oat_cb, 5024 &link_info)) { 5025 if (link_info.speed != SPEED_UNKNOWN) 5026 card->info.link_info.speed = link_info.speed; 5027 if (link_info.duplex != DUPLEX_UNKNOWN) 5028 card->info.link_info.duplex = link_info.duplex; 5029 if (link_info.port != PORT_OTHER) 5030 card->info.link_info.port = link_info.port; 5031 if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN) 5032 card->info.link_info.link_mode = link_info.link_mode; 5033 } 5034 } 5035 } 5036 } 5037 5038 /** 5039 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address 5040 * @card: pointer to a qeth_card 5041 * 5042 * Returns 5043 * 0, if a MAC address has been set for the card's netdevice 5044 * a return code, for various error conditions 5045 */ 5046 int qeth_vm_request_mac(struct qeth_card *card) 5047 { 5048 struct diag26c_mac_resp *response; 5049 struct diag26c_mac_req *request; 5050 int rc; 5051 5052 QETH_CARD_TEXT(card, 2, "vmreqmac"); 5053 5054 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA); 5055 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA); 5056 if (!request || !response) { 5057 rc = -ENOMEM; 5058 goto out; 5059 } 5060 5061 request->resp_buf_len = sizeof(*response); 5062 request->resp_version = DIAG26C_VERSION2; 5063 request->op_code = DIAG26C_GET_MAC; 5064 request->devno = card->info.ddev_devno; 5065 5066 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 5067 rc = diag26c(request, response, DIAG26C_MAC_SERVICES); 5068 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 5069 if (rc) 5070 goto out; 5071 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response)); 5072 5073 if (request->resp_buf_len < sizeof(*response) || 5074 response->version != request->resp_version) { 5075 rc = -EIO; 5076 QETH_CARD_TEXT(card, 2, "badresp"); 5077 QETH_CARD_HEX(card, 2, &request->resp_buf_len, 5078 sizeof(request->resp_buf_len)); 5079 } else if (!is_valid_ether_addr(response->mac)) { 5080 rc = -EINVAL; 5081 QETH_CARD_TEXT(card, 2, "badmac"); 5082 QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN); 5083 } else { 5084 ether_addr_copy(card->dev->dev_addr, response->mac); 5085 } 5086 5087 out: 5088 kfree(response); 5089 kfree(request); 5090 return rc; 5091 } 5092 EXPORT_SYMBOL_GPL(qeth_vm_request_mac); 5093 5094 static void qeth_determine_capabilities(struct qeth_card *card) 5095 { 5096 struct qeth_channel *channel = &card->data; 5097 struct ccw_device *ddev = channel->ccwdev; 5098 int rc; 5099 int ddev_offline = 0; 5100 5101 QETH_CARD_TEXT(card, 2, "detcapab"); 5102 if (!ddev->online) { 5103 ddev_offline = 1; 5104 rc = qeth_start_channel(channel); 5105 if (rc) { 5106 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 5107 goto out; 5108 } 5109 } 5110 5111 rc = qeth_read_conf_data(card); 5112 if (rc) { 5113 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n", 5114 CARD_DEVID(card), rc); 5115 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 5116 goto out_offline; 5117 } 5118 5119 rc = qdio_get_ssqd_desc(ddev, &card->ssqd); 5120 if (rc) 5121 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 5122 5123 QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt); 5124 QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1); 5125 QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2); 5126 QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3); 5127 QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt); 5128 if (!((card->ssqd.qfmt 
!= QDIO_IQDIO_QFMT) || 5129 ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) || 5130 ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) { 5131 dev_info(&card->gdev->dev, 5132 "Completion Queueing supported\n"); 5133 } else { 5134 card->options.cq = QETH_CQ_NOTAVAILABLE; 5135 } 5136 5137 out_offline: 5138 if (ddev_offline == 1) 5139 qeth_stop_channel(channel); 5140 out: 5141 return; 5142 } 5143 5144 static void qeth_read_ccw_conf_data(struct qeth_card *card) 5145 { 5146 struct qeth_card_info *info = &card->info; 5147 struct ccw_device *cdev = CARD_DDEV(card); 5148 struct ccw_dev_id dev_id; 5149 5150 QETH_CARD_TEXT(card, 2, "ccwconfd"); 5151 ccw_device_get_id(cdev, &dev_id); 5152 5153 info->ddev_devno = dev_id.devno; 5154 info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) && 5155 !ccw_device_get_iid(cdev, &info->iid) && 5156 !ccw_device_get_chid(cdev, 0, &info->chid); 5157 info->ssid = dev_id.ssid; 5158 5159 dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n", 5160 info->chid, info->chpid); 5161 5162 QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno); 5163 QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid); 5164 QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid); 5165 QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid); 5166 QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid); 5167 QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid); 5168 QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid); 5169 } 5170 5171 static int qeth_qdio_establish(struct qeth_card *card) 5172 { 5173 struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES]; 5174 struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES]; 5175 struct qeth_qib_parms *qib_parms = NULL; 5176 struct qdio_initialize init_data; 5177 unsigned int i; 5178 int rc = 0; 5179 5180 QETH_CARD_TEXT(card, 2, "qdioest"); 5181 5182 if (!IS_IQD(card) && !IS_VM_NIC(card)) { 5183 qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL); 5184 if (!qib_parms) 5185 return -ENOMEM; 5186 5187 qeth_fill_qib_parms(card, qib_parms); 5188 } 5189 5190 in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs; 5191 if (card->options.cq == QETH_CQ_ENABLED) 5192 in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs; 5193 5194 for (i = 0; i < card->qdio.no_out_queues; i++) 5195 out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs; 5196 5197 memset(&init_data, 0, sizeof(struct qdio_initialize)); 5198 init_data.q_format = IS_IQD(card) ? 
QDIO_IQDIO_QFMT : 5199 QDIO_QETH_QFMT; 5200 init_data.qib_param_field_format = 0; 5201 init_data.qib_param_field = (void *)qib_parms; 5202 init_data.no_input_qs = card->qdio.no_in_queues; 5203 init_data.no_output_qs = card->qdio.no_out_queues; 5204 init_data.input_handler = qeth_qdio_input_handler; 5205 init_data.output_handler = qeth_qdio_output_handler; 5206 init_data.irq_poll = qeth_qdio_poll; 5207 init_data.int_parm = (unsigned long) card; 5208 init_data.input_sbal_addr_array = in_sbal_ptrs; 5209 init_data.output_sbal_addr_array = out_sbal_ptrs; 5210 5211 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, 5212 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) { 5213 rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs, 5214 init_data.no_output_qs); 5215 if (rc) { 5216 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 5217 goto out; 5218 } 5219 rc = qdio_establish(CARD_DDEV(card), &init_data); 5220 if (rc) { 5221 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 5222 qdio_free(CARD_DDEV(card)); 5223 } 5224 } 5225 5226 switch (card->options.cq) { 5227 case QETH_CQ_ENABLED: 5228 dev_info(&card->gdev->dev, "Completion Queue support enabled\n"); 5229 break; 5230 case QETH_CQ_DISABLED: 5231 dev_info(&card->gdev->dev, "Completion Queue support disabled\n"); 5232 break; 5233 default: 5234 break; 5235 } 5236 5237 out: 5238 kfree(qib_parms); 5239 return rc; 5240 } 5241 5242 static void qeth_core_free_card(struct qeth_card *card) 5243 { 5244 QETH_CARD_TEXT(card, 2, "freecrd"); 5245 5246 unregister_service_level(&card->qeth_service_level); 5247 debugfs_remove_recursive(card->debugfs); 5248 qeth_put_cmd(card->read_cmd); 5249 destroy_workqueue(card->event_wq); 5250 dev_set_drvdata(&card->gdev->dev, NULL); 5251 kfree(card); 5252 } 5253 5254 static void qeth_trace_features(struct qeth_card *card) 5255 { 5256 QETH_CARD_TEXT(card, 2, "features"); 5257 QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4)); 5258 QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6)); 5259 QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp)); 5260 QETH_CARD_HEX(card, 2, &card->info.diagass_support, 5261 sizeof(card->info.diagass_support)); 5262 } 5263 5264 static struct ccw_device_id qeth_ids[] = { 5265 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01), 5266 .driver_info = QETH_CARD_TYPE_OSD}, 5267 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05), 5268 .driver_info = QETH_CARD_TYPE_IQD}, 5269 #ifdef CONFIG_QETH_OSN 5270 {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06), 5271 .driver_info = QETH_CARD_TYPE_OSN}, 5272 #endif 5273 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03), 5274 .driver_info = QETH_CARD_TYPE_OSM}, 5275 #ifdef CONFIG_QETH_OSX 5276 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02), 5277 .driver_info = QETH_CARD_TYPE_OSX}, 5278 #endif 5279 {}, 5280 }; 5281 MODULE_DEVICE_TABLE(ccw, qeth_ids); 5282 5283 static struct ccw_driver qeth_ccw_driver = { 5284 .driver = { 5285 .owner = THIS_MODULE, 5286 .name = "qeth", 5287 }, 5288 .ids = qeth_ids, 5289 .probe = ccwgroup_probe_ccwdev, 5290 .remove = ccwgroup_remove_ccwdev, 5291 }; 5292 5293 static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok) 5294 { 5295 int retries = 3; 5296 int rc; 5297 5298 QETH_CARD_TEXT(card, 2, "hrdsetup"); 5299 atomic_set(&card->force_alloc_skb, 0); 5300 rc = qeth_update_from_chp_desc(card); 5301 if (rc) 5302 return rc; 5303 retry: 5304 if (retries < 3) 5305 QETH_DBF_MESSAGE(2, "Retrying IDX activation on device %x.\n", 5306 CARD_DEVID(card)); 5307 rc = 
qeth_qdio_clear_card(card, !IS_IQD(card)); 5308 qeth_stop_channel(&card->data); 5309 qeth_stop_channel(&card->write); 5310 qeth_stop_channel(&card->read); 5311 qdio_free(CARD_DDEV(card)); 5312 5313 rc = qeth_start_channel(&card->read); 5314 if (rc) 5315 goto retriable; 5316 rc = qeth_start_channel(&card->write); 5317 if (rc) 5318 goto retriable; 5319 rc = qeth_start_channel(&card->data); 5320 if (rc) 5321 goto retriable; 5322 retriable: 5323 if (rc == -ERESTARTSYS) { 5324 QETH_CARD_TEXT(card, 2, "break1"); 5325 return rc; 5326 } else if (rc) { 5327 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 5328 if (--retries < 0) 5329 goto out; 5330 else 5331 goto retry; 5332 } 5333 5334 qeth_determine_capabilities(card); 5335 qeth_read_ccw_conf_data(card); 5336 qeth_idx_init(card); 5337 5338 rc = qeth_idx_activate_read_channel(card); 5339 if (rc == -EINTR) { 5340 QETH_CARD_TEXT(card, 2, "break2"); 5341 return rc; 5342 } else if (rc) { 5343 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 5344 if (--retries < 0) 5345 goto out; 5346 else 5347 goto retry; 5348 } 5349 5350 rc = qeth_idx_activate_write_channel(card); 5351 if (rc == -EINTR) { 5352 QETH_CARD_TEXT(card, 2, "break3"); 5353 return rc; 5354 } else if (rc) { 5355 QETH_CARD_TEXT_(card, 2, "4err%d", rc); 5356 if (--retries < 0) 5357 goto out; 5358 else 5359 goto retry; 5360 } 5361 card->read_or_write_problem = 0; 5362 rc = qeth_mpc_initialize(card); 5363 if (rc) { 5364 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 5365 goto out; 5366 } 5367 5368 rc = qeth_send_startlan(card); 5369 if (rc) { 5370 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 5371 if (rc == -ENETDOWN) { 5372 dev_warn(&card->gdev->dev, "The LAN is offline\n"); 5373 *carrier_ok = false; 5374 } else { 5375 goto out; 5376 } 5377 } else { 5378 *carrier_ok = true; 5379 } 5380 5381 card->options.ipa4.supported = 0; 5382 card->options.ipa6.supported = 0; 5383 card->options.adp.supported = 0; 5384 card->options.sbp.supported_funcs = 0; 5385 card->info.diagass_support = 0; 5386 rc = qeth_query_ipassists(card, QETH_PROT_IPV4); 5387 if (rc == -ENOMEM) 5388 goto out; 5389 if (qeth_is_supported(card, IPA_IPV6)) { 5390 rc = qeth_query_ipassists(card, QETH_PROT_IPV6); 5391 if (rc == -ENOMEM) 5392 goto out; 5393 } 5394 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { 5395 rc = qeth_query_setadapterparms(card); 5396 if (rc < 0) { 5397 QETH_CARD_TEXT_(card, 2, "7err%d", rc); 5398 goto out; 5399 } 5400 } 5401 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { 5402 rc = qeth_query_setdiagass(card); 5403 if (rc) 5404 QETH_CARD_TEXT_(card, 2, "8err%d", rc); 5405 } 5406 5407 qeth_trace_features(card); 5408 5409 if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) || 5410 (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))) 5411 card->info.hwtrap = 0; 5412 5413 if (card->options.isolation != ISOLATION_MODE_NONE) { 5414 rc = qeth_setadpparms_set_access_ctrl(card, 5415 card->options.isolation); 5416 if (rc) 5417 goto out; 5418 } 5419 5420 qeth_init_link_info(card); 5421 5422 rc = qeth_init_qdio_queues(card); 5423 if (rc) { 5424 QETH_CARD_TEXT_(card, 2, "9err%d", rc); 5425 goto out; 5426 } 5427 5428 return 0; 5429 out: 5430 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " 5431 "an error on the device\n"); 5432 QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! 
rc=%d\n", 5433 CARD_DEVID(card), rc); 5434 return rc; 5435 } 5436 5437 static int qeth_set_online(struct qeth_card *card, 5438 const struct qeth_discipline *disc) 5439 { 5440 bool carrier_ok; 5441 int rc; 5442 5443 mutex_lock(&card->conf_mutex); 5444 QETH_CARD_TEXT(card, 2, "setonlin"); 5445 5446 rc = qeth_hardsetup_card(card, &carrier_ok); 5447 if (rc) { 5448 QETH_CARD_TEXT_(card, 2, "2err%04x", rc); 5449 rc = -ENODEV; 5450 goto err_hardsetup; 5451 } 5452 5453 qeth_print_status_message(card); 5454 5455 if (card->dev->reg_state != NETREG_REGISTERED) 5456 /* no need for locking / error handling at this early stage: */ 5457 qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card)); 5458 5459 rc = disc->set_online(card, carrier_ok); 5460 if (rc) 5461 goto err_online; 5462 5463 /* let user_space know that device is online */ 5464 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE); 5465 5466 mutex_unlock(&card->conf_mutex); 5467 return 0; 5468 5469 err_online: 5470 err_hardsetup: 5471 qeth_qdio_clear_card(card, 0); 5472 qeth_clear_working_pool_list(card); 5473 qeth_flush_local_addrs(card); 5474 5475 qeth_stop_channel(&card->data); 5476 qeth_stop_channel(&card->write); 5477 qeth_stop_channel(&card->read); 5478 qdio_free(CARD_DDEV(card)); 5479 5480 mutex_unlock(&card->conf_mutex); 5481 return rc; 5482 } 5483 5484 int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc, 5485 bool resetting) 5486 { 5487 int rc, rc2, rc3; 5488 5489 mutex_lock(&card->conf_mutex); 5490 QETH_CARD_TEXT(card, 3, "setoffl"); 5491 5492 if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) { 5493 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); 5494 card->info.hwtrap = 1; 5495 } 5496 5497 /* cancel any stalled cmd that might block the rtnl: */ 5498 qeth_clear_ipacmd_list(card); 5499 5500 rtnl_lock(); 5501 card->info.open_when_online = card->dev->flags & IFF_UP; 5502 dev_close(card->dev); 5503 netif_device_detach(card->dev); 5504 netif_carrier_off(card->dev); 5505 rtnl_unlock(); 5506 5507 cancel_work_sync(&card->rx_mode_work); 5508 5509 disc->set_offline(card); 5510 5511 qeth_qdio_clear_card(card, 0); 5512 qeth_drain_output_queues(card); 5513 qeth_clear_working_pool_list(card); 5514 qeth_flush_local_addrs(card); 5515 card->info.promisc_mode = 0; 5516 5517 rc = qeth_stop_channel(&card->data); 5518 rc2 = qeth_stop_channel(&card->write); 5519 rc3 = qeth_stop_channel(&card->read); 5520 if (!rc) 5521 rc = (rc2) ? rc2 : rc3; 5522 if (rc) 5523 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 5524 qdio_free(CARD_DDEV(card)); 5525 5526 /* let user_space know that device is offline */ 5527 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE); 5528 5529 mutex_unlock(&card->conf_mutex); 5530 return 0; 5531 } 5532 EXPORT_SYMBOL_GPL(qeth_set_offline); 5533 5534 static int qeth_do_reset(void *data) 5535 { 5536 const struct qeth_discipline *disc; 5537 struct qeth_card *card = data; 5538 int rc; 5539 5540 /* Lock-free, other users will block until we are done. 
*/ 5541 disc = card->discipline; 5542 5543 QETH_CARD_TEXT(card, 2, "recover1"); 5544 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) 5545 return 0; 5546 QETH_CARD_TEXT(card, 2, "recover2"); 5547 dev_warn(&card->gdev->dev, 5548 "A recovery process has been started for the device\n"); 5549 5550 qeth_set_offline(card, disc, true); 5551 rc = qeth_set_online(card, disc); 5552 if (!rc) { 5553 dev_info(&card->gdev->dev, 5554 "Device successfully recovered!\n"); 5555 } else { 5556 ccwgroup_set_offline(card->gdev); 5557 dev_warn(&card->gdev->dev, 5558 "The qeth device driver failed to recover an error on the device\n"); 5559 } 5560 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); 5561 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); 5562 return 0; 5563 } 5564 5565 #if IS_ENABLED(CONFIG_QETH_L3) 5566 static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, 5567 struct qeth_hdr *hdr) 5568 { 5569 struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data; 5570 struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3; 5571 struct net_device *dev = skb->dev; 5572 5573 if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) { 5574 dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr, 5575 "FAKELL", skb->len); 5576 return; 5577 } 5578 5579 if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) { 5580 u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 : 5581 ETH_P_IP; 5582 unsigned char tg_addr[ETH_ALEN]; 5583 5584 skb_reset_network_header(skb); 5585 switch (l3_hdr->flags & QETH_HDR_CAST_MASK) { 5586 case QETH_CAST_MULTICAST: 5587 if (prot == ETH_P_IP) 5588 ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr); 5589 else 5590 ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr); 5591 QETH_CARD_STAT_INC(card, rx_multicast); 5592 break; 5593 case QETH_CAST_BROADCAST: 5594 ether_addr_copy(tg_addr, dev->broadcast); 5595 QETH_CARD_STAT_INC(card, rx_multicast); 5596 break; 5597 default: 5598 if (card->options.sniffer) 5599 skb->pkt_type = PACKET_OTHERHOST; 5600 ether_addr_copy(tg_addr, dev->dev_addr); 5601 } 5602 5603 if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR) 5604 dev_hard_header(skb, dev, prot, tg_addr, 5605 &l3_hdr->next_hop.rx.src_mac, skb->len); 5606 else 5607 dev_hard_header(skb, dev, prot, tg_addr, "FAKELL", 5608 skb->len); 5609 } 5610 5611 /* copy VLAN tag from hdr into skb */ 5612 if (!card->options.sniffer && 5613 (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME | 5614 QETH_HDR_EXT_INCLUDE_VLAN_TAG))) { 5615 u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ? 
5616 l3_hdr->vlan_id : 5617 l3_hdr->next_hop.rx.vlan_id; 5618 5619 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); 5620 } 5621 } 5622 #endif 5623 5624 static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb, 5625 struct qeth_hdr *hdr, bool uses_frags) 5626 { 5627 struct napi_struct *napi = &card->napi; 5628 bool is_cso; 5629 5630 switch (hdr->hdr.l2.id) { 5631 case QETH_HEADER_TYPE_OSN: 5632 skb_push(skb, sizeof(*hdr)); 5633 skb_copy_to_linear_data(skb, hdr, sizeof(*hdr)); 5634 QETH_CARD_STAT_ADD(card, rx_bytes, skb->len); 5635 QETH_CARD_STAT_INC(card, rx_packets); 5636 5637 card->osn_info.data_cb(skb); 5638 return; 5639 #if IS_ENABLED(CONFIG_QETH_L3) 5640 case QETH_HEADER_TYPE_LAYER3: 5641 qeth_l3_rebuild_skb(card, skb, hdr); 5642 is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ; 5643 break; 5644 #endif 5645 case QETH_HEADER_TYPE_LAYER2: 5646 is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ; 5647 break; 5648 default: 5649 /* never happens */ 5650 if (uses_frags) 5651 napi_free_frags(napi); 5652 else 5653 dev_kfree_skb_any(skb); 5654 return; 5655 } 5656 5657 if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) { 5658 skb->ip_summed = CHECKSUM_UNNECESSARY; 5659 QETH_CARD_STAT_INC(card, rx_skb_csum); 5660 } else { 5661 skb->ip_summed = CHECKSUM_NONE; 5662 } 5663 5664 QETH_CARD_STAT_ADD(card, rx_bytes, skb->len); 5665 QETH_CARD_STAT_INC(card, rx_packets); 5666 if (skb_is_nonlinear(skb)) { 5667 QETH_CARD_STAT_INC(card, rx_sg_skbs); 5668 QETH_CARD_STAT_ADD(card, rx_sg_frags, 5669 skb_shinfo(skb)->nr_frags); 5670 } 5671 5672 if (uses_frags) { 5673 napi_gro_frags(napi); 5674 } else { 5675 skb->protocol = eth_type_trans(skb, skb->dev); 5676 napi_gro_receive(napi, skb); 5677 } 5678 } 5679 5680 static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len) 5681 { 5682 struct page *page = virt_to_page(data); 5683 unsigned int next_frag; 5684 5685 next_frag = skb_shinfo(skb)->nr_frags; 5686 get_page(page); 5687 skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len, 5688 data_len); 5689 } 5690 5691 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) 5692 { 5693 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY); 5694 } 5695 5696 static int qeth_extract_skb(struct qeth_card *card, 5697 struct qeth_qdio_buffer *qethbuffer, u8 *element_no, 5698 int *__offset) 5699 { 5700 struct qeth_priv *priv = netdev_priv(card->dev); 5701 struct qdio_buffer *buffer = qethbuffer->buffer; 5702 struct napi_struct *napi = &card->napi; 5703 struct qdio_buffer_element *element; 5704 unsigned int linear_len = 0; 5705 bool uses_frags = false; 5706 int offset = *__offset; 5707 bool use_rx_sg = false; 5708 unsigned int headroom; 5709 struct qeth_hdr *hdr; 5710 struct sk_buff *skb; 5711 int skb_len = 0; 5712 5713 element = &buffer->element[*element_no]; 5714 5715 next_packet: 5716 /* qeth_hdr must not cross element boundaries */ 5717 while (element->length < offset + sizeof(struct qeth_hdr)) { 5718 if (qeth_is_last_sbale(element)) 5719 return -ENODATA; 5720 element++; 5721 offset = 0; 5722 } 5723 5724 hdr = phys_to_virt(element->addr) + offset; 5725 offset += sizeof(*hdr); 5726 skb = NULL; 5727 5728 switch (hdr->hdr.l2.id) { 5729 case QETH_HEADER_TYPE_LAYER2: 5730 skb_len = hdr->hdr.l2.pkt_length; 5731 linear_len = ETH_HLEN; 5732 headroom = 0; 5733 break; 5734 case QETH_HEADER_TYPE_LAYER3: 5735 skb_len = hdr->hdr.l3.length; 5736 if (!IS_LAYER3(card)) { 5737 QETH_CARD_STAT_INC(card, rx_dropped_notsupp); 5738 goto walk_packet; 5739 } 
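/* A passthru frame carries its own complete Ethernet header, so it only needs ETH_HLEN bytes of linear data: */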
5740 5741 if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) { 5742 linear_len = ETH_HLEN; 5743 headroom = 0; 5744 break; 5745 } 5746 5747 if (hdr->hdr.l3.flags & QETH_HDR_IPV6) 5748 linear_len = sizeof(struct ipv6hdr); 5749 else 5750 linear_len = sizeof(struct iphdr); 5751 headroom = ETH_HLEN; 5752 break; 5753 case QETH_HEADER_TYPE_OSN: 5754 skb_len = hdr->hdr.osn.pdu_length; 5755 if (!IS_OSN(card)) { 5756 QETH_CARD_STAT_INC(card, rx_dropped_notsupp); 5757 goto walk_packet; 5758 } 5759 5760 linear_len = skb_len; 5761 headroom = sizeof(struct qeth_hdr); 5762 break; 5763 default: 5764 if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL) 5765 QETH_CARD_STAT_INC(card, rx_frame_errors); 5766 else 5767 QETH_CARD_STAT_INC(card, rx_dropped_notsupp); 5768 5769 /* Can't determine packet length, drop the whole buffer. */ 5770 return -EPROTONOSUPPORT; 5771 } 5772 5773 if (skb_len < linear_len) { 5774 QETH_CARD_STAT_INC(card, rx_dropped_runt); 5775 goto walk_packet; 5776 } 5777 5778 use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) || 5779 (skb_len > READ_ONCE(priv->rx_copybreak) && 5780 !atomic_read(&card->force_alloc_skb) && 5781 !IS_OSN(card)); 5782 5783 if (use_rx_sg) { 5784 /* QETH_CQ_ENABLED only: */ 5785 if (qethbuffer->rx_skb && 5786 skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) { 5787 skb = qethbuffer->rx_skb; 5788 qethbuffer->rx_skb = NULL; 5789 goto use_skb; 5790 } 5791 5792 skb = napi_get_frags(napi); 5793 if (!skb) { 5794 /* -ENOMEM, no point in falling back further. */ 5795 QETH_CARD_STAT_INC(card, rx_dropped_nomem); 5796 goto walk_packet; 5797 } 5798 5799 if (skb_tailroom(skb) >= linear_len + headroom) { 5800 uses_frags = true; 5801 goto use_skb; 5802 } 5803 5804 netdev_info_once(card->dev, 5805 "Insufficient linear space in NAPI frags skb, need %u but have %u\n", 5806 linear_len + headroom, skb_tailroom(skb)); 5807 /* Shouldn't happen. Don't optimize, fall back to linear skb. 
*/ 5808 } 5809 5810 linear_len = skb_len; 5811 skb = napi_alloc_skb(napi, linear_len + headroom); 5812 if (!skb) { 5813 QETH_CARD_STAT_INC(card, rx_dropped_nomem); 5814 goto walk_packet; 5815 } 5816 5817 use_skb: 5818 if (headroom) 5819 skb_reserve(skb, headroom); 5820 walk_packet: 5821 while (skb_len) { 5822 int data_len = min(skb_len, (int)(element->length - offset)); 5823 char *data = phys_to_virt(element->addr) + offset; 5824 5825 skb_len -= data_len; 5826 offset += data_len; 5827 5828 /* Extract data from current element: */ 5829 if (skb && data_len) { 5830 if (linear_len) { 5831 unsigned int copy_len; 5832 5833 copy_len = min_t(unsigned int, linear_len, 5834 data_len); 5835 5836 skb_put_data(skb, data, copy_len); 5837 linear_len -= copy_len; 5838 data_len -= copy_len; 5839 data += copy_len; 5840 } 5841 5842 if (data_len) 5843 qeth_create_skb_frag(skb, data, data_len); 5844 } 5845 5846 /* Step forward to next element: */ 5847 if (skb_len) { 5848 if (qeth_is_last_sbale(element)) { 5849 QETH_CARD_TEXT(card, 4, "unexeob"); 5850 QETH_CARD_HEX(card, 2, buffer, sizeof(void *)); 5851 if (skb) { 5852 if (uses_frags) 5853 napi_free_frags(napi); 5854 else 5855 dev_kfree_skb_any(skb); 5856 QETH_CARD_STAT_INC(card, 5857 rx_length_errors); 5858 } 5859 return -EMSGSIZE; 5860 } 5861 element++; 5862 offset = 0; 5863 } 5864 } 5865 5866 /* This packet was skipped, go get another one: */ 5867 if (!skb) 5868 goto next_packet; 5869 5870 *element_no = element - &buffer->element[0]; 5871 *__offset = offset; 5872 5873 qeth_receive_skb(card, skb, hdr, uses_frags); 5874 return 0; 5875 } 5876 5877 static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget, 5878 struct qeth_qdio_buffer *buf, bool *done) 5879 { 5880 unsigned int work_done = 0; 5881 5882 while (budget) { 5883 if (qeth_extract_skb(card, buf, &card->rx.buf_element, 5884 &card->rx.e_offset)) { 5885 *done = true; 5886 break; 5887 } 5888 5889 work_done++; 5890 budget--; 5891 } 5892 5893 return work_done; 5894 } 5895 5896 static unsigned int qeth_rx_poll(struct qeth_card *card, int budget) 5897 { 5898 struct qeth_rx *ctx = &card->rx; 5899 unsigned int work_done = 0; 5900 5901 while (budget > 0) { 5902 struct qeth_qdio_buffer *buffer; 5903 unsigned int skbs_done = 0; 5904 bool done = false; 5905 5906 /* Fetch completed RX buffers: */ 5907 if (!card->rx.b_count) { 5908 card->rx.qdio_err = 0; 5909 card->rx.b_count = qdio_inspect_queue(CARD_DDEV(card), 5910 0, true, 5911 &card->rx.b_index, 5912 &card->rx.qdio_err); 5913 if (card->rx.b_count <= 0) { 5914 card->rx.b_count = 0; 5915 break; 5916 } 5917 } 5918 5919 /* Process one completed RX buffer: */ 5920 buffer = &card->qdio.in_q->bufs[card->rx.b_index]; 5921 if (!(card->rx.qdio_err && 5922 qeth_check_qdio_errors(card, buffer->buffer, 5923 card->rx.qdio_err, "qinerr"))) 5924 skbs_done = qeth_extract_skbs(card, budget, buffer, 5925 &done); 5926 else 5927 done = true; 5928 5929 work_done += skbs_done; 5930 budget -= skbs_done; 5931 5932 if (done) { 5933 QETH_CARD_STAT_INC(card, rx_bufs); 5934 qeth_put_buffer_pool_entry(card, buffer->pool_entry); 5935 buffer->pool_entry = NULL; 5936 card->rx.b_count--; 5937 ctx->bufs_refill++; 5938 ctx->bufs_refill -= qeth_rx_refill_queue(card, 5939 ctx->bufs_refill); 5940 5941 /* Step forward to next buffer: */ 5942 card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1); 5943 card->rx.buf_element = 0; 5944 card->rx.e_offset = 0; 5945 } 5946 } 5947 5948 return work_done; 5949 } 5950 5951 static void qeth_cq_poll(struct qeth_card *card) 5952 { 5953 unsigned int 
work_done = 0; 5954 5955 while (work_done < QDIO_MAX_BUFFERS_PER_Q) { 5956 unsigned int start, error; 5957 int completed; 5958 5959 completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start, 5960 &error); 5961 if (completed <= 0) 5962 return; 5963 5964 qeth_qdio_cq_handler(card, error, 1, start, completed); 5965 work_done += completed; 5966 } 5967 } 5968 5969 int qeth_poll(struct napi_struct *napi, int budget) 5970 { 5971 struct qeth_card *card = container_of(napi, struct qeth_card, napi); 5972 unsigned int work_done; 5973 5974 work_done = qeth_rx_poll(card, budget); 5975 5976 if (qeth_use_tx_irqs(card)) { 5977 struct qeth_qdio_out_q *queue; 5978 unsigned int i; 5979 5980 qeth_for_each_output_queue(card, queue, i) { 5981 if (!qeth_out_queue_is_empty(queue)) 5982 napi_schedule(&queue->napi); 5983 } 5984 } 5985 5986 if (card->options.cq == QETH_CQ_ENABLED) 5987 qeth_cq_poll(card); 5988 5989 if (budget) { 5990 struct qeth_rx *ctx = &card->rx; 5991 5992 /* Process any substantial refill backlog: */ 5993 ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill); 5994 5995 /* Exhausted the RX budget. Keep IRQ disabled, we get called again. */ 5996 if (work_done >= budget) 5997 return work_done; 5998 } 5999 6000 if (napi_complete_done(napi, work_done) && 6001 qdio_start_irq(CARD_DDEV(card))) 6002 napi_schedule(napi); 6003 6004 return work_done; 6005 } 6006 EXPORT_SYMBOL_GPL(qeth_poll); 6007 6008 static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue, 6009 unsigned int bidx, unsigned int qdio_error, 6010 int budget) 6011 { 6012 struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx]; 6013 u8 sflags = buffer->buffer->element[15].sflags; 6014 struct qeth_card *card = queue->card; 6015 bool error = !!qdio_error; 6016 6017 if (qdio_error == QDIO_ERROR_SLSB_PENDING) { 6018 struct qaob *aob = buffer->aob; 6019 struct qeth_qaob_priv1 *priv; 6020 enum iucv_tx_notify notify; 6021 6022 if (!aob) { 6023 netdev_WARN_ONCE(card->dev, 6024 "Pending TX buffer %#x without QAOB on TX queue %u\n", 6025 bidx, queue->queue_no); 6026 qeth_schedule_recovery(card); 6027 return; 6028 } 6029 6030 QETH_CARD_TEXT_(card, 5, "pel%u", bidx); 6031 6032 priv = (struct qeth_qaob_priv1 *)&aob->user1; 6033 /* QAOB hasn't completed yet: */ 6034 if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) { 6035 qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING); 6036 6037 /* Prepare the queue slot for immediate re-use: */ 6038 qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements); 6039 if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) { 6040 QETH_CARD_TEXT(card, 2, "outofbuf"); 6041 qeth_schedule_recovery(card); 6042 } 6043 6044 list_add(&buffer->list_entry, &queue->pending_bufs); 6045 /* Skip clearing the buffer: */ 6046 return; 6047 } 6048 6049 /* QAOB already completed: */ 6050 notify = qeth_compute_cq_notification(aob->aorc, 0); 6051 qeth_notify_skbs(queue, buffer, notify); 6052 error = !!aob->aorc; 6053 memset(aob, 0, sizeof(*aob)); 6054 } else if (card->options.cq == QETH_CQ_ENABLED) { 6055 qeth_notify_skbs(queue, buffer, 6056 qeth_compute_cq_notification(sflags, 0)); 6057 } 6058 6059 qeth_clear_output_buffer(queue, buffer, error, budget); 6060 } 6061 6062 static int qeth_tx_poll(struct napi_struct *napi, int budget) 6063 { 6064 struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi); 6065 unsigned int queue_no = queue->queue_no; 6066 struct qeth_card *card = queue->card; 6067 struct net_device *dev = card->dev; 6068 unsigned int work_done = 0; 6069 struct netdev_queue *txq; 6070 6071 if 
(IS_IQD(card)) 6072 txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no)); 6073 else 6074 txq = netdev_get_tx_queue(dev, queue_no); 6075 6076 while (1) { 6077 unsigned int start, error, i; 6078 unsigned int packets = 0; 6079 unsigned int bytes = 0; 6080 int completed; 6081 6082 qeth_tx_complete_pending_bufs(card, queue, false, budget); 6083 6084 if (qeth_out_queue_is_empty(queue)) { 6085 napi_complete(napi); 6086 return 0; 6087 } 6088 6089 /* Give the CPU a breather: */ 6090 if (work_done >= QDIO_MAX_BUFFERS_PER_Q) { 6091 QETH_TXQ_STAT_INC(queue, completion_yield); 6092 if (napi_complete_done(napi, 0)) 6093 napi_schedule(napi); 6094 return 0; 6095 } 6096 6097 completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false, 6098 &start, &error); 6099 if (completed <= 0) { 6100 /* Ensure we see TX completion for pending work: */ 6101 if (napi_complete_done(napi, 0) && 6102 !atomic_read(&queue->set_pci_flags_count)) 6103 qeth_tx_arm_timer(queue, queue->rescan_usecs); 6104 return 0; 6105 } 6106 6107 for (i = start; i < start + completed; i++) { 6108 struct qeth_qdio_out_buffer *buffer; 6109 unsigned int bidx = QDIO_BUFNR(i); 6110 6111 buffer = queue->bufs[bidx]; 6112 packets += buffer->frames; 6113 bytes += buffer->bytes; 6114 6115 qeth_handle_send_error(card, buffer, error); 6116 if (IS_IQD(card)) 6117 qeth_iqd_tx_complete(queue, bidx, error, budget); 6118 else 6119 qeth_clear_output_buffer(queue, buffer, error, 6120 budget); 6121 } 6122 6123 atomic_sub(completed, &queue->used_buffers); 6124 work_done += completed; 6125 if (IS_IQD(card)) 6126 netdev_tx_completed_queue(txq, packets, bytes); 6127 else 6128 qeth_check_outbound_queue(queue); 6129 6130 /* xmit may have observed the full condition, but not yet 6131 * stopped the txq, in which case the code below won't trigger. 6132 * So before it returns, xmit re-checks the txq's fill level 6133 * and wakes it up if needed.
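 * (The wake-up below is thus only a fast path; the re-check on
 * the xmit side guarantees the queue cannot stay stopped.)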
6134 */ 6135 if (netif_tx_queue_stopped(txq) && 6136 !qeth_out_queue_is_full(queue)) 6137 netif_tx_wake_queue(txq); 6138 } 6139 } 6140 6141 static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd) 6142 { 6143 if (!cmd->hdr.return_code) 6144 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 6145 return cmd->hdr.return_code; 6146 } 6147 6148 static int qeth_setassparms_get_caps_cb(struct qeth_card *card, 6149 struct qeth_reply *reply, 6150 unsigned long data) 6151 { 6152 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 6153 struct qeth_ipa_caps *caps = reply->param; 6154 6155 if (qeth_setassparms_inspect_rc(cmd)) 6156 return -EIO; 6157 6158 caps->supported = cmd->data.setassparms.data.caps.supported; 6159 caps->enabled = cmd->data.setassparms.data.caps.enabled; 6160 return 0; 6161 } 6162 6163 int qeth_setassparms_cb(struct qeth_card *card, 6164 struct qeth_reply *reply, unsigned long data) 6165 { 6166 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 6167 6168 QETH_CARD_TEXT(card, 4, "defadpcb"); 6169 6170 if (cmd->hdr.return_code) 6171 return -EIO; 6172 6173 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 6174 if (cmd->hdr.prot_version == QETH_PROT_IPV4) 6175 card->options.ipa4.enabled = cmd->hdr.assists.enabled; 6176 if (cmd->hdr.prot_version == QETH_PROT_IPV6) 6177 card->options.ipa6.enabled = cmd->hdr.assists.enabled; 6178 return 0; 6179 } 6180 EXPORT_SYMBOL_GPL(qeth_setassparms_cb); 6181 6182 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card, 6183 enum qeth_ipa_funcs ipa_func, 6184 u16 cmd_code, 6185 unsigned int data_length, 6186 enum qeth_prot_versions prot) 6187 { 6188 struct qeth_ipacmd_setassparms *setassparms; 6189 struct qeth_ipacmd_setassparms_hdr *hdr; 6190 struct qeth_cmd_buffer *iob; 6191 6192 QETH_CARD_TEXT(card, 4, "getasscm"); 6193 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot, 6194 data_length + 6195 offsetof(struct qeth_ipacmd_setassparms, 6196 data)); 6197 if (!iob) 6198 return NULL; 6199 6200 setassparms = &__ipa_cmd(iob)->data.setassparms; 6201 setassparms->assist_no = ipa_func; 6202 6203 hdr = &setassparms->hdr; 6204 hdr->length = sizeof(*hdr) + data_length; 6205 hdr->command_code = cmd_code; 6206 return iob; 6207 } 6208 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd); 6209 6210 int qeth_send_simple_setassparms_prot(struct qeth_card *card, 6211 enum qeth_ipa_funcs ipa_func, 6212 u16 cmd_code, u32 *data, 6213 enum qeth_prot_versions prot) 6214 { 6215 unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0; 6216 struct qeth_cmd_buffer *iob; 6217 6218 QETH_CARD_TEXT_(card, 4, "simassp%i", prot); 6219 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot); 6220 if (!iob) 6221 return -ENOMEM; 6222 6223 if (data) 6224 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data; 6225 return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL); 6226 } 6227 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot); 6228 6229 static void qeth_unregister_dbf_views(void) 6230 { 6231 int x; 6232 6233 for (x = 0; x < QETH_DBF_INFOS; x++) { 6234 debug_unregister(qeth_dbf[x].id); 6235 qeth_dbf[x].id = NULL; 6236 } 6237 } 6238 6239 void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...) 
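/* Formats the message into a fixed 32-byte buffer (vsnprintf() truncates
 * anything longer) and logs it as a single dbf text event.
 */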
6240 { 6241 char dbf_txt_buf[32]; 6242 va_list args; 6243 6244 if (!debug_level_enabled(id, level)) 6245 return; 6246 va_start(args, fmt); 6247 vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args); 6248 va_end(args); 6249 debug_text_event(id, level, dbf_txt_buf); 6250 } 6251 EXPORT_SYMBOL_GPL(qeth_dbf_longtext); 6252 6253 static int qeth_register_dbf_views(void) 6254 { 6255 int ret; 6256 int x; 6257 6258 for (x = 0; x < QETH_DBF_INFOS; x++) { 6259 /* register the areas */ 6260 qeth_dbf[x].id = debug_register(qeth_dbf[x].name, 6261 qeth_dbf[x].pages, 6262 qeth_dbf[x].areas, 6263 qeth_dbf[x].len); 6264 if (qeth_dbf[x].id == NULL) { 6265 qeth_unregister_dbf_views(); 6266 return -ENOMEM; 6267 } 6268 6269 /* register a view */ 6270 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view); 6271 if (ret) { 6272 qeth_unregister_dbf_views(); 6273 return ret; 6274 } 6275 6276 /* set a passing level */ 6277 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level); 6278 } 6279 6280 return 0; 6281 } 6282 6283 static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */ 6284 6285 int qeth_setup_discipline(struct qeth_card *card, 6286 enum qeth_discipline_id discipline) 6287 { 6288 int rc; 6289 6290 mutex_lock(&qeth_mod_mutex); 6291 switch (discipline) { 6292 case QETH_DISCIPLINE_LAYER3: 6293 card->discipline = try_then_request_module( 6294 symbol_get(qeth_l3_discipline), "qeth_l3"); 6295 break; 6296 case QETH_DISCIPLINE_LAYER2: 6297 card->discipline = try_then_request_module( 6298 symbol_get(qeth_l2_discipline), "qeth_l2"); 6299 break; 6300 default: 6301 break; 6302 } 6303 mutex_unlock(&qeth_mod_mutex); 6304 6305 if (!card->discipline) { 6306 dev_err(&card->gdev->dev, "There is no kernel module to " 6307 "support discipline %d\n", discipline); 6308 return -EINVAL; 6309 } 6310 6311 rc = card->discipline->setup(card->gdev); 6312 if (rc) { 6313 if (discipline == QETH_DISCIPLINE_LAYER2) 6314 symbol_put(qeth_l2_discipline); 6315 else 6316 symbol_put(qeth_l3_discipline); 6317 card->discipline = NULL; 6318 6319 return rc; 6320 } 6321 6322 card->options.layer = discipline; 6323 return 0; 6324 } 6325 6326 void qeth_remove_discipline(struct qeth_card *card) 6327 { 6328 card->discipline->remove(card->gdev); 6329 6330 if (IS_LAYER2(card)) 6331 symbol_put(qeth_l2_discipline); 6332 else 6333 symbol_put(qeth_l3_discipline); 6334 card->options.layer = QETH_DISCIPLINE_UNDETERMINED; 6335 card->discipline = NULL; 6336 } 6337 6338 const struct device_type qeth_generic_devtype = { 6339 .name = "qeth_generic", 6340 }; 6341 EXPORT_SYMBOL_GPL(qeth_generic_devtype); 6342 6343 static const struct device_type qeth_osn_devtype = { 6344 .name = "qeth_osn", 6345 }; 6346 6347 #define DBF_NAME_LEN 20 6348 6349 struct qeth_dbf_entry { 6350 char dbf_name[DBF_NAME_LEN]; 6351 debug_info_t *dbf_info; 6352 struct list_head dbf_list; 6353 }; 6354 6355 static LIST_HEAD(qeth_dbf_list); 6356 static DEFINE_MUTEX(qeth_dbf_list_mutex); 6357 6358 static debug_info_t *qeth_get_dbf_entry(char *name) 6359 { 6360 struct qeth_dbf_entry *entry; 6361 debug_info_t *rc = NULL; 6362 6363 mutex_lock(&qeth_dbf_list_mutex); 6364 list_for_each_entry(entry, &qeth_dbf_list, dbf_list) { 6365 if (strcmp(entry->dbf_name, name) == 0) { 6366 rc = entry->dbf_info; 6367 break; 6368 } 6369 } 6370 mutex_unlock(&qeth_dbf_list_mutex); 6371 return rc; 6372 } 6373 6374 static int qeth_add_dbf_entry(struct qeth_card *card, char *name) 6375 { 6376 struct qeth_dbf_entry *new_entry; 6377 6378 card->debug = debug_register(name, 2, 1, 8); 6379 if (!card->debug) { 6380 
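		/* debug_register() failed; leave a note in the global
		 * SETUP area before bailing out.
		 */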
QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf"); 6381 goto err; 6382 } 6383 if (debug_register_view(card->debug, &debug_hex_ascii_view)) 6384 goto err_dbg; 6385 new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL); 6386 if (!new_entry) 6387 goto err_dbg; 6388 strncpy(new_entry->dbf_name, name, DBF_NAME_LEN); 6389 new_entry->dbf_info = card->debug; 6390 mutex_lock(&qeth_dbf_list_mutex); 6391 list_add(&new_entry->dbf_list, &qeth_dbf_list); 6392 mutex_unlock(&qeth_dbf_list_mutex); 6393 6394 return 0; 6395 6396 err_dbg: 6397 debug_unregister(card->debug); 6398 err: 6399 return -ENOMEM; 6400 } 6401 6402 static void qeth_clear_dbf_list(void) 6403 { 6404 struct qeth_dbf_entry *entry, *tmp; 6405 6406 mutex_lock(&qeth_dbf_list_mutex); 6407 list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) { 6408 list_del(&entry->dbf_list); 6409 debug_unregister(entry->dbf_info); 6410 kfree(entry); 6411 } 6412 mutex_unlock(&qeth_dbf_list_mutex); 6413 } 6414 6415 static struct net_device *qeth_alloc_netdev(struct qeth_card *card) 6416 { 6417 struct net_device *dev; 6418 struct qeth_priv *priv; 6419 6420 switch (card->info.type) { 6421 case QETH_CARD_TYPE_IQD: 6422 dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN, 6423 ether_setup, QETH_MAX_OUT_QUEUES, 1); 6424 break; 6425 case QETH_CARD_TYPE_OSM: 6426 dev = alloc_etherdev(sizeof(*priv)); 6427 break; 6428 case QETH_CARD_TYPE_OSN: 6429 dev = alloc_netdev(sizeof(*priv), "osn%d", NET_NAME_UNKNOWN, 6430 ether_setup); 6431 break; 6432 default: 6433 dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1); 6434 } 6435 6436 if (!dev) 6437 return NULL; 6438 6439 priv = netdev_priv(dev); 6440 priv->rx_copybreak = QETH_RX_COPYBREAK; 6441 priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1; 6442 6443 dev->ml_priv = card; 6444 dev->watchdog_timeo = QETH_TX_TIMEOUT; 6445 dev->min_mtu = IS_OSN(card) ? 
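			       /* presumably: OSN carries raw frames, all
				* other cards use the traditional IPv4
				* minimum of 576
				*/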
64 : 576; 6446 /* initialized when device first goes online: */ 6447 dev->max_mtu = 0; 6448 dev->mtu = 0; 6449 SET_NETDEV_DEV(dev, &card->gdev->dev); 6450 netif_carrier_off(dev); 6451 6452 if (IS_OSN(card)) { 6453 dev->ethtool_ops = &qeth_osn_ethtool_ops; 6454 } else { 6455 dev->ethtool_ops = &qeth_ethtool_ops; 6456 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 6457 dev->hw_features |= NETIF_F_SG; 6458 dev->vlan_features |= NETIF_F_SG; 6459 if (IS_IQD(card)) 6460 dev->features |= NETIF_F_SG; 6461 } 6462 6463 return dev; 6464 } 6465 6466 struct net_device *qeth_clone_netdev(struct net_device *orig) 6467 { 6468 struct net_device *clone = qeth_alloc_netdev(orig->ml_priv); 6469 6470 if (!clone) 6471 return NULL; 6472 6473 clone->dev_port = orig->dev_port; 6474 return clone; 6475 } 6476 6477 static int qeth_core_probe_device(struct ccwgroup_device *gdev) 6478 { 6479 struct qeth_card *card; 6480 struct device *dev; 6481 int rc; 6482 enum qeth_discipline_id enforced_disc; 6483 char dbf_name[DBF_NAME_LEN]; 6484 6485 QETH_DBF_TEXT(SETUP, 2, "probedev"); 6486 6487 dev = &gdev->dev; 6488 if (!get_device(dev)) 6489 return -ENODEV; 6490 6491 QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev)); 6492 6493 card = qeth_alloc_card(gdev); 6494 if (!card) { 6495 QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM); 6496 rc = -ENOMEM; 6497 goto err_dev; 6498 } 6499 6500 snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s", 6501 dev_name(&gdev->dev)); 6502 card->debug = qeth_get_dbf_entry(dbf_name); 6503 if (!card->debug) { 6504 rc = qeth_add_dbf_entry(card, dbf_name); 6505 if (rc) 6506 goto err_card; 6507 } 6508 6509 qeth_setup_card(card); 6510 card->dev = qeth_alloc_netdev(card); 6511 if (!card->dev) { 6512 rc = -ENOMEM; 6513 goto err_card; 6514 } 6515 6516 qeth_determine_capabilities(card); 6517 qeth_set_blkt_defaults(card); 6518 6519 card->qdio.no_out_queues = card->dev->num_tx_queues; 6520 rc = qeth_update_from_chp_desc(card); 6521 if (rc) 6522 goto err_chp_desc; 6523 6524 if (IS_OSN(card)) 6525 gdev->dev.groups = qeth_osn_dev_groups; 6526 else 6527 gdev->dev.groups = qeth_dev_groups; 6528 6529 enforced_disc = qeth_enforce_discipline(card); 6530 switch (enforced_disc) { 6531 case QETH_DISCIPLINE_UNDETERMINED: 6532 gdev->dev.type = &qeth_generic_devtype; 6533 break; 6534 default: 6535 card->info.layer_enforced = true; 6536 /* It's so early that we don't need the discipline_mutex yet. */ 6537 rc = qeth_setup_discipline(card, enforced_disc); 6538 if (rc) 6539 goto err_setup_disc; 6540 6541 gdev->dev.type = IS_OSN(card) ? 
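				/* OSN keeps its restricted devtype, all
				 * other cards use the discipline's devtype
				 */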
&qeth_osn_devtype : 6542 card->discipline->devtype; 6543 break; 6544 } 6545 6546 return 0; 6547 6548 err_setup_disc: 6549 err_chp_desc: 6550 free_netdev(card->dev); 6551 err_card: 6552 qeth_core_free_card(card); 6553 err_dev: 6554 put_device(dev); 6555 return rc; 6556 } 6557 6558 static void qeth_core_remove_device(struct ccwgroup_device *gdev) 6559 { 6560 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 6561 6562 QETH_CARD_TEXT(card, 2, "removedv"); 6563 6564 mutex_lock(&card->discipline_mutex); 6565 if (card->discipline) 6566 qeth_remove_discipline(card); 6567 mutex_unlock(&card->discipline_mutex); 6568 6569 qeth_free_qdio_queues(card); 6570 6571 free_netdev(card->dev); 6572 qeth_core_free_card(card); 6573 put_device(&gdev->dev); 6574 } 6575 6576 static int qeth_core_set_online(struct ccwgroup_device *gdev) 6577 { 6578 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 6579 int rc = 0; 6580 enum qeth_discipline_id def_discipline; 6581 6582 mutex_lock(&card->discipline_mutex); 6583 if (!card->discipline) { 6584 def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 : 6585 QETH_DISCIPLINE_LAYER2; 6586 rc = qeth_setup_discipline(card, def_discipline); 6587 if (rc) 6588 goto err; 6589 } 6590 6591 rc = qeth_set_online(card, card->discipline); 6592 6593 err: 6594 mutex_unlock(&card->discipline_mutex); 6595 return rc; 6596 } 6597 6598 static int qeth_core_set_offline(struct ccwgroup_device *gdev) 6599 { 6600 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 6601 int rc; 6602 6603 mutex_lock(&card->discipline_mutex); 6604 rc = qeth_set_offline(card, card->discipline, false); 6605 mutex_unlock(&card->discipline_mutex); 6606 6607 return rc; 6608 } 6609 6610 static void qeth_core_shutdown(struct ccwgroup_device *gdev) 6611 { 6612 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 6613 6614 qeth_set_allowed_threads(card, 0, 1); 6615 if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) 6616 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); 6617 qeth_qdio_clear_card(card, 0); 6618 qeth_drain_output_queues(card); 6619 qdio_free(CARD_DDEV(card)); 6620 } 6621 6622 static ssize_t group_store(struct device_driver *ddrv, const char *buf, 6623 size_t count) 6624 { 6625 int err; 6626 6627 err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3, 6628 buf); 6629 6630 return err ? 
err : count; 6631 } 6632 static DRIVER_ATTR_WO(group); 6633 6634 static struct attribute *qeth_drv_attrs[] = { 6635 &driver_attr_group.attr, 6636 NULL, 6637 }; 6638 static struct attribute_group qeth_drv_attr_group = { 6639 .attrs = qeth_drv_attrs, 6640 }; 6641 static const struct attribute_group *qeth_drv_attr_groups[] = { 6642 &qeth_drv_attr_group, 6643 NULL, 6644 }; 6645 6646 static struct ccwgroup_driver qeth_core_ccwgroup_driver = { 6647 .driver = { 6648 .groups = qeth_drv_attr_groups, 6649 .owner = THIS_MODULE, 6650 .name = "qeth", 6651 }, 6652 .ccw_driver = &qeth_ccw_driver, 6653 .setup = qeth_core_probe_device, 6654 .remove = qeth_core_remove_device, 6655 .set_online = qeth_core_set_online, 6656 .set_offline = qeth_core_set_offline, 6657 .shutdown = qeth_core_shutdown, 6658 }; 6659 6660 struct qeth_card *qeth_get_card_by_busid(char *bus_id) 6661 { 6662 struct ccwgroup_device *gdev; 6663 struct qeth_card *card; 6664 6665 gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id); 6666 if (!gdev) 6667 return NULL; 6668 6669 card = dev_get_drvdata(&gdev->dev); 6670 put_device(&gdev->dev); 6671 return card; 6672 } 6673 EXPORT_SYMBOL_GPL(qeth_get_card_by_busid); 6674 6675 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 6676 { 6677 struct qeth_card *card = dev->ml_priv; 6678 struct mii_ioctl_data *mii_data; 6679 int rc = 0; 6680 6681 switch (cmd) { 6682 case SIOC_QETH_ADP_SET_SNMP_CONTROL: 6683 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data); 6684 break; 6685 case SIOC_QETH_GET_CARD_TYPE: 6686 if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) && 6687 !IS_VM_NIC(card)) 6688 return 1; 6689 return 0; 6690 case SIOCGMIIPHY: 6691 mii_data = if_mii(rq); 6692 mii_data->phy_id = 0; 6693 break; 6694 case SIOCGMIIREG: 6695 mii_data = if_mii(rq); 6696 if (mii_data->phy_id != 0) 6697 rc = -EINVAL; 6698 else 6699 mii_data->val_out = qeth_mdio_read(dev, 6700 mii_data->phy_id, mii_data->reg_num); 6701 break; 6702 case SIOC_QETH_QUERY_OAT: 6703 rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data); 6704 break; 6705 default: 6706 if (card->discipline->do_ioctl) 6707 rc = card->discipline->do_ioctl(dev, rq, cmd); 6708 else 6709 rc = -EOPNOTSUPP; 6710 } 6711 if (rc) 6712 QETH_CARD_TEXT_(card, 2, "ioce%x", rc); 6713 return rc; 6714 } 6715 EXPORT_SYMBOL_GPL(qeth_do_ioctl); 6716 6717 static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply, 6718 unsigned long data) 6719 { 6720 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 6721 u32 *features = reply->param; 6722 6723 if (qeth_setassparms_inspect_rc(cmd)) 6724 return -EIO; 6725 6726 *features = cmd->data.setassparms.data.flags_32bit; 6727 return 0; 6728 } 6729 6730 static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype, 6731 enum qeth_prot_versions prot) 6732 { 6733 return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP, 6734 NULL, prot); 6735 } 6736 6737 static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype, 6738 enum qeth_prot_versions prot, u8 *lp2lp) 6739 { 6740 u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP; 6741 struct qeth_cmd_buffer *iob; 6742 struct qeth_ipa_caps caps; 6743 u32 features; 6744 int rc; 6745 6746 /* some L3 HW requires combined L3+L4 csum offload: */ 6747 if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 && 6748 cstype == IPA_OUTBOUND_CHECKSUM) 6749 required_features |= QETH_IPA_CHECKSUM_IP_HDR; 6750 6751 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0, 6752 prot); 
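	/* Enabling an assist is a two-step exchange:
	 *   ASS_START  - the card replies with the features it supports,
	 *   ASS_ENABLE - we request the required bits, the card replies
	 *                with the resulting caps.
	 * Unless both the supported and the enabled mask cover all
	 * required bits, the assist is switched off again.
	 */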
6753 if (!iob) 6754 return -ENOMEM; 6755 6756 rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features); 6757 if (rc) 6758 return rc; 6759 6760 if ((required_features & features) != required_features) { 6761 qeth_set_csum_off(card, cstype, prot); 6762 return -EOPNOTSUPP; 6763 } 6764 6765 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE, 6766 SETASS_DATA_SIZEOF(flags_32bit), 6767 prot); 6768 if (!iob) { 6769 qeth_set_csum_off(card, cstype, prot); 6770 return -ENOMEM; 6771 } 6772 6773 if (features & QETH_IPA_CHECKSUM_LP2LP) 6774 required_features |= QETH_IPA_CHECKSUM_LP2LP; 6775 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features; 6776 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps); 6777 if (rc) { 6778 qeth_set_csum_off(card, cstype, prot); 6779 return rc; 6780 } 6781 6782 if (!qeth_ipa_caps_supported(&caps, required_features) || 6783 !qeth_ipa_caps_enabled(&caps, required_features)) { 6784 qeth_set_csum_off(card, cstype, prot); 6785 return -EOPNOTSUPP; 6786 } 6787 6788 dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n", 6789 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot); 6790 6791 if (lp2lp) 6792 *lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP); 6793 6794 return 0; 6795 } 6796 6797 static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype, 6798 enum qeth_prot_versions prot, u8 *lp2lp) 6799 { 6800 return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) : 6801 qeth_set_csum_off(card, cstype, prot); 6802 } 6803 6804 static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply, 6805 unsigned long data) 6806 { 6807 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 6808 struct qeth_tso_start_data *tso_data = reply->param; 6809 6810 if (qeth_setassparms_inspect_rc(cmd)) 6811 return -EIO; 6812 6813 tso_data->mss = cmd->data.setassparms.data.tso.mss; 6814 tso_data->supported = cmd->data.setassparms.data.tso.supported; 6815 return 0; 6816 } 6817 6818 static int qeth_set_tso_off(struct qeth_card *card, 6819 enum qeth_prot_versions prot) 6820 { 6821 return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO, 6822 IPA_CMD_ASS_STOP, NULL, prot); 6823 } 6824 6825 static int qeth_set_tso_on(struct qeth_card *card, 6826 enum qeth_prot_versions prot) 6827 { 6828 struct qeth_tso_start_data tso_data; 6829 struct qeth_cmd_buffer *iob; 6830 struct qeth_ipa_caps caps; 6831 int rc; 6832 6833 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO, 6834 IPA_CMD_ASS_START, 0, prot); 6835 if (!iob) 6836 return -ENOMEM; 6837 6838 rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data); 6839 if (rc) 6840 return rc; 6841 6842 if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) { 6843 qeth_set_tso_off(card, prot); 6844 return -EOPNOTSUPP; 6845 } 6846 6847 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO, 6848 IPA_CMD_ASS_ENABLE, 6849 SETASS_DATA_SIZEOF(caps), prot); 6850 if (!iob) { 6851 qeth_set_tso_off(card, prot); 6852 return -ENOMEM; 6853 } 6854 6855 /* enable TSO capability */ 6856 __ipa_cmd(iob)->data.setassparms.data.caps.enabled = 6857 QETH_IPA_LARGE_SEND_TCP; 6858 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps); 6859 if (rc) { 6860 qeth_set_tso_off(card, prot); 6861 return rc; 6862 } 6863 6864 if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) || 6865 !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) { 6866 qeth_set_tso_off(card, prot); 6867 return -EOPNOTSUPP; 6868 } 6869 6870 
dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot, 6871 tso_data.mss); 6872 return 0; 6873 } 6874 6875 static int qeth_set_ipa_tso(struct qeth_card *card, bool on, 6876 enum qeth_prot_versions prot) 6877 { 6878 return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot); 6879 } 6880 6881 static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on) 6882 { 6883 int rc_ipv4 = (on) ? -EOPNOTSUPP : 0; 6884 int rc_ipv6; 6885 6886 if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) 6887 rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM, 6888 QETH_PROT_IPV4, NULL); 6889 if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) 6890 /* no/one Offload Assist available, so the rc is trivial */ 6891 return rc_ipv4; 6892 6893 rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM, 6894 QETH_PROT_IPV6, NULL); 6895 6896 if (on) 6897 /* enable: success if any Assist is active */ 6898 return (rc_ipv6) ? rc_ipv4 : 0; 6899 6900 /* disable: failure if any Assist is still active */ 6901 return (rc_ipv6) ? rc_ipv6 : rc_ipv4; 6902 } 6903 6904 /** 6905 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features 6906 * @dev: a net_device 6907 */ 6908 void qeth_enable_hw_features(struct net_device *dev) 6909 { 6910 struct qeth_card *card = dev->ml_priv; 6911 netdev_features_t features; 6912 6913 features = dev->features; 6914 /* force-off any feature that might need an IPA sequence. 6915 * netdev_update_features() will restart them. 6916 */ 6917 dev->features &= ~dev->hw_features; 6918 /* toggle VLAN filter, so that VIDs are re-programmed: */ 6919 if (IS_LAYER2(card) && IS_VM_NIC(card)) { 6920 dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 6921 dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 6922 } 6923 netdev_update_features(dev); 6924 if (features != dev->features) 6925 dev_warn(&card->gdev->dev, 6926 "Device recovery failed to restore all offload features\n"); 6927 } 6928 EXPORT_SYMBOL_GPL(qeth_enable_hw_features); 6929 6930 static void qeth_check_restricted_features(struct qeth_card *card, 6931 netdev_features_t changed, 6932 netdev_features_t actual) 6933 { 6934 netdev_features_t ipv6_features = NETIF_F_TSO6; 6935 netdev_features_t ipv4_features = NETIF_F_TSO; 6936 6937 if (!card->info.has_lp2lp_cso_v6) 6938 ipv6_features |= NETIF_F_IPV6_CSUM; 6939 if (!card->info.has_lp2lp_cso_v4) 6940 ipv4_features |= NETIF_F_IP_CSUM; 6941 6942 if ((changed & ipv6_features) && !(actual & ipv6_features)) 6943 qeth_flush_local_addrs6(card); 6944 if ((changed & ipv4_features) && !(actual & ipv4_features)) 6945 qeth_flush_local_addrs4(card); 6946 } 6947 6948 int qeth_set_features(struct net_device *dev, netdev_features_t features) 6949 { 6950 struct qeth_card *card = dev->ml_priv; 6951 netdev_features_t changed = dev->features ^ features; 6952 int rc = 0; 6953 6954 QETH_CARD_TEXT(card, 2, "setfeat"); 6955 QETH_CARD_HEX(card, 2, &features, sizeof(features)); 6956 6957 if ((changed & NETIF_F_IP_CSUM)) { 6958 rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM, 6959 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4, 6960 &card->info.has_lp2lp_cso_v4); 6961 if (rc) 6962 changed ^= NETIF_F_IP_CSUM; 6963 } 6964 if (changed & NETIF_F_IPV6_CSUM) { 6965 rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM, 6966 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6, 6967 &card->info.has_lp2lp_cso_v6); 6968 if (rc) 6969 changed ^= NETIF_F_IPV6_CSUM; 6970 } 6971 if (changed & NETIF_F_RXCSUM) { 6972 rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM); 6973 if (rc) 6974 changed ^= NETIF_F_RXCSUM; 
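		/* A failed toggle is dropped from 'changed'; on partial
		 * success, dev->features is then fixed up below to reflect
		 * only the bits that actually toggled.
		 */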
6975 } 6976 if (changed & NETIF_F_TSO) { 6977 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO, 6978 QETH_PROT_IPV4); 6979 if (rc) 6980 changed ^= NETIF_F_TSO; 6981 } 6982 if (changed & NETIF_F_TSO6) { 6983 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6, 6984 QETH_PROT_IPV6); 6985 if (rc) 6986 changed ^= NETIF_F_TSO6; 6987 } 6988 6989 qeth_check_restricted_features(card, dev->features ^ features, 6990 dev->features ^ changed); 6991 6992 /* everything changed successfully? */ 6993 if ((dev->features ^ features) == changed) 6994 return 0; 6995 /* something went wrong. save changed features and return error */ 6996 dev->features ^= changed; 6997 return -EIO; 6998 } 6999 EXPORT_SYMBOL_GPL(qeth_set_features); 7000 7001 netdev_features_t qeth_fix_features(struct net_device *dev, 7002 netdev_features_t features) 7003 { 7004 struct qeth_card *card = dev->ml_priv; 7005 7006 QETH_CARD_TEXT(card, 2, "fixfeat"); 7007 if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) 7008 features &= ~NETIF_F_IP_CSUM; 7009 if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) 7010 features &= ~NETIF_F_IPV6_CSUM; 7011 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) && 7012 !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) 7013 features &= ~NETIF_F_RXCSUM; 7014 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) 7015 features &= ~NETIF_F_TSO; 7016 if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO)) 7017 features &= ~NETIF_F_TSO6; 7018 7019 QETH_CARD_HEX(card, 2, &features, sizeof(features)); 7020 return features; 7021 } 7022 EXPORT_SYMBOL_GPL(qeth_fix_features); 7023 7024 netdev_features_t qeth_features_check(struct sk_buff *skb, 7025 struct net_device *dev, 7026 netdev_features_t features) 7027 { 7028 struct qeth_card *card = dev->ml_priv; 7029 7030 /* Traffic with local next-hop is not eligible for some offloads: */ 7031 if (skb->ip_summed == CHECKSUM_PARTIAL && 7032 READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) { 7033 netdev_features_t restricted = 0; 7034 7035 if (skb_is_gso(skb) && !netif_needs_gso(skb, features)) 7036 restricted |= NETIF_F_ALL_TSO; 7037 7038 switch (vlan_get_protocol(skb)) { 7039 case htons(ETH_P_IP): 7040 if (!card->info.has_lp2lp_cso_v4) 7041 restricted |= NETIF_F_IP_CSUM; 7042 7043 if (restricted && qeth_next_hop_is_local_v4(card, skb)) 7044 features &= ~restricted; 7045 break; 7046 case htons(ETH_P_IPV6): 7047 if (!card->info.has_lp2lp_cso_v6) 7048 restricted |= NETIF_F_IPV6_CSUM; 7049 7050 if (restricted && qeth_next_hop_is_local_v6(card, skb)) 7051 features &= ~restricted; 7052 break; 7053 default: 7054 break; 7055 } 7056 } 7057 7058 /* GSO segmentation builds skbs with 7059 * a (small) linear part for the headers, and 7060 * page frags for the data. 7061 * Compared to a linear skb, the header-only part consumes an 7062 * additional buffer element. This reduces buffer utilization, and 7063 * hurts throughput. So compress small segments into one element. 
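	 * (For example, with a 1500-byte MTU the protocol headers plus one
	 * MSS fit comfortably into an order-0 allocation, so such segments
	 * end up linear.)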
7064 */ 7065 if (netif_needs_gso(skb, features)) { 7066 /* match skb_segment(): */ 7067 unsigned int doffset = skb->data - skb_mac_header(skb); 7068 unsigned int hsize = skb_shinfo(skb)->gso_size; 7069 unsigned int hroom = skb_headroom(skb); 7070 7071 /* linearize only if resulting skb allocations are order-0: */ 7072 if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0)) 7073 features &= ~NETIF_F_SG; 7074 } 7075 7076 return vlan_features_check(skb, features); 7077 } 7078 EXPORT_SYMBOL_GPL(qeth_features_check); 7079 7080 void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 7081 { 7082 struct qeth_card *card = dev->ml_priv; 7083 struct qeth_qdio_out_q *queue; 7084 unsigned int i; 7085 7086 QETH_CARD_TEXT(card, 5, "getstat"); 7087 7088 stats->rx_packets = card->stats.rx_packets; 7089 stats->rx_bytes = card->stats.rx_bytes; 7090 stats->rx_errors = card->stats.rx_length_errors + 7091 card->stats.rx_frame_errors + 7092 card->stats.rx_fifo_errors; 7093 stats->rx_dropped = card->stats.rx_dropped_nomem + 7094 card->stats.rx_dropped_notsupp + 7095 card->stats.rx_dropped_runt; 7096 stats->multicast = card->stats.rx_multicast; 7097 stats->rx_length_errors = card->stats.rx_length_errors; 7098 stats->rx_frame_errors = card->stats.rx_frame_errors; 7099 stats->rx_fifo_errors = card->stats.rx_fifo_errors; 7100 7101 for (i = 0; i < card->qdio.no_out_queues; i++) { 7102 queue = card->qdio.out_qs[i]; 7103 7104 stats->tx_packets += queue->stats.tx_packets; 7105 stats->tx_bytes += queue->stats.tx_bytes; 7106 stats->tx_errors += queue->stats.tx_errors; 7107 stats->tx_dropped += queue->stats.tx_dropped; 7108 } 7109 } 7110 EXPORT_SYMBOL_GPL(qeth_get_stats64); 7111 7112 #define TC_IQD_UCAST 0 7113 static void qeth_iqd_set_prio_tc_map(struct net_device *dev, 7114 unsigned int ucast_txqs) 7115 { 7116 unsigned int prio; 7117 7118 /* IQD requires mcast traffic to be placed on a dedicated queue, and 7119 * qeth_iqd_select_queue() deals with this. 7120 * For unicast traffic, we defer the queue selection to the stack. 7121 * By installing a trivial prio map that spans over only the unicast 7122 * queues, we can encourage the stack to spread the ucast traffic evenly 7123 * without selecting the mcast queue. 7124 */ 7125 7126 /* One traffic class, spanning over all active ucast queues: */ 7127 netdev_set_num_tc(dev, 1); 7128 netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs, 7129 QETH_IQD_MIN_UCAST_TXQ); 7130 7131 /* Map all priorities to this traffic class: */ 7132 for (prio = 0; prio <= TC_BITMASK; prio++) 7133 netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST); 7134 } 7135 7136 int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count) 7137 { 7138 struct net_device *dev = card->dev; 7139 int rc; 7140 7141 /* Per netif_setup_tc(), adjust the mapping first: */ 7142 if (IS_IQD(card)) 7143 qeth_iqd_set_prio_tc_map(dev, count - 1); 7144 7145 rc = netif_set_real_num_tx_queues(dev, count); 7146 7147 if (rc && IS_IQD(card)) 7148 qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1); 7149 7150 return rc; 7151 } 7152 EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues); 7153 7154 u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb, 7155 u8 cast_type, struct net_device *sb_dev) 7156 { 7157 u16 txq; 7158 7159 if (cast_type != RTN_UNICAST) 7160 return QETH_IQD_MCAST_TXQ; 7161 if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ) 7162 return QETH_IQD_MIN_UCAST_TXQ; 7163 7164 txq = netdev_pick_tx(dev, skb, sb_dev); 7165 return (txq == QETH_IQD_MCAST_TXQ) ? 
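	       /* never place unicast traffic on the reserved
		* mcast queue
		*/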
QETH_IQD_MIN_UCAST_TXQ : txq; 7166 } 7167 EXPORT_SYMBOL_GPL(qeth_iqd_select_queue); 7168 7169 int qeth_open(struct net_device *dev) 7170 { 7171 struct qeth_card *card = dev->ml_priv; 7172 struct qeth_qdio_out_q *queue; 7173 unsigned int i; 7174 7175 QETH_CARD_TEXT(card, 4, "qethopen"); 7176 7177 card->data.state = CH_STATE_UP; 7178 netif_tx_start_all_queues(dev); 7179 7180 local_bh_disable(); 7181 qeth_for_each_output_queue(card, queue, i) { 7182 netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll, 7183 QETH_NAPI_WEIGHT); 7184 napi_enable(&queue->napi); 7185 napi_schedule(&queue->napi); 7186 } 7187 7188 napi_enable(&card->napi); 7189 napi_schedule(&card->napi); 7190 /* kick-start the NAPI softirq: */ 7191 local_bh_enable(); 7192 7193 return 0; 7194 } 7195 EXPORT_SYMBOL_GPL(qeth_open); 7196 7197 int qeth_stop(struct net_device *dev) 7198 { 7199 struct qeth_card *card = dev->ml_priv; 7200 struct qeth_qdio_out_q *queue; 7201 unsigned int i; 7202 7203 QETH_CARD_TEXT(card, 4, "qethstop"); 7204 7205 napi_disable(&card->napi); 7206 cancel_delayed_work_sync(&card->buffer_reclaim_work); 7207 qdio_stop_irq(CARD_DDEV(card)); 7208 7209 /* Quiesce the NAPI instances: */ 7210 qeth_for_each_output_queue(card, queue, i) 7211 napi_disable(&queue->napi); 7212 7213 /* Stop .ndo_start_xmit, might still access queue->napi. */ 7214 netif_tx_disable(dev); 7215 7216 qeth_for_each_output_queue(card, queue, i) { 7217 del_timer_sync(&queue->timer); 7218 /* Queues may get re-allocated, so remove the NAPIs. */ 7219 netif_napi_del(&queue->napi); 7220 } 7221 7222 return 0; 7223 } 7224 EXPORT_SYMBOL_GPL(qeth_stop); 7225 7226 static int __init qeth_core_init(void) 7227 { 7228 int rc; 7229 7230 pr_info("loading core functions\n"); 7231 7232 qeth_debugfs_root = debugfs_create_dir("qeth", NULL); 7233 7234 rc = qeth_register_dbf_views(); 7235 if (rc) 7236 goto dbf_err; 7237 qeth_core_root_dev = root_device_register("qeth"); 7238 rc = PTR_ERR_OR_ZERO(qeth_core_root_dev); 7239 if (rc) 7240 goto register_err; 7241 qeth_core_header_cache = 7242 kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE, 7243 roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE), 7244 0, NULL); 7245 if (!qeth_core_header_cache) { 7246 rc = -ENOMEM; 7247 goto slab_err; 7248 } 7249 qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf", 7250 sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL); 7251 if (!qeth_qdio_outbuf_cache) { 7252 rc = -ENOMEM; 7253 goto cqslab_err; 7254 } 7255 rc = ccw_driver_register(&qeth_ccw_driver); 7256 if (rc) 7257 goto ccw_err; 7258 rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver); 7259 if (rc) 7260 goto ccwgroup_err; 7261 7262 return 0; 7263 7264 ccwgroup_err: 7265 ccw_driver_unregister(&qeth_ccw_driver); 7266 ccw_err: 7267 kmem_cache_destroy(qeth_qdio_outbuf_cache); 7268 cqslab_err: 7269 kmem_cache_destroy(qeth_core_header_cache); 7270 slab_err: 7271 root_device_unregister(qeth_core_root_dev); 7272 register_err: 7273 qeth_unregister_dbf_views(); 7274 dbf_err: 7275 debugfs_remove_recursive(qeth_debugfs_root); 7276 pr_err("Initializing the qeth device driver failed\n"); 7277 return rc; 7278 } 7279 7280 static void __exit qeth_core_exit(void) 7281 { 7282 qeth_clear_dbf_list(); 7283 ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver); 7284 ccw_driver_unregister(&qeth_ccw_driver); 7285 kmem_cache_destroy(qeth_qdio_outbuf_cache); 7286 kmem_cache_destroy(qeth_core_header_cache); 7287 root_device_unregister(qeth_core_root_dev); 7288 qeth_unregister_dbf_views(); 7289 debugfs_remove_recursive(qeth_debugfs_root); 7290 pr_info("core 
functions removed\n"); 7291 } 7292 7293 module_init(qeth_core_init); 7294 module_exit(qeth_core_exit); 7295 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>"); 7296 MODULE_DESCRIPTION("qeth core functions"); 7297 MODULE_LICENSE("GPL"); 7298